Merge "Revert "libkeystore-wifi-hidl: Updated to vendor_available:true""
diff --git a/OWNERS b/OWNERS
index bb51005..4e214e8 100644
--- a/OWNERS
+++ b/OWNERS
@@ -4,3 +4,4 @@
hasinitg@google.com
kroot@google.com
zeuthen@google.com
+jbires@google.com
diff --git a/fsverity_init/fsverity_init.cpp b/fsverity_init/fsverity_init.cpp
index 7ab4097..7bc6022 100644
--- a/fsverity_init/fsverity_init.cpp
+++ b/fsverity_init/fsverity_init.cpp
@@ -37,15 +37,17 @@
return true;
}
-void LoadKeyFromStdin(key_serial_t keyring_id, const char* keyname) {
+bool LoadKeyFromStdin(key_serial_t keyring_id, const char* keyname) {
std::string content;
if (!android::base::ReadFdToString(STDIN_FILENO, &content)) {
LOG(ERROR) << "Failed to read key from stdin";
- return;
+ return false;
}
if (!LoadKeyToKeyring(keyring_id, keyname, content.c_str(), content.size())) {
LOG(ERROR) << "Failed to load key from stdin";
+ return false;
}
+ return true;
}
void LoadKeyFromFile(key_serial_t keyring_id, const char* keyname, const std::string& path) {
@@ -101,7 +103,9 @@
LOG(ERROR) << "--load-extra-key requires <key_name> argument.";
return -1;
}
- LoadKeyFromStdin(keyring_id, argv[2]);
+ if (!LoadKeyFromStdin(keyring_id, argv[2])) {
+ return -1;
+ }
} else if (command == "--lock") {
// Requires files backed by fs-verity to be verified with a key in .fs-verity
// keyring.
diff --git a/identity/Android.bp b/identity/Android.bp
index 8267a6b..ecdf9a4 100644
--- a/identity/Android.bp
+++ b/identity/Android.bp
@@ -48,8 +48,8 @@
"android.hardware.identity-support-lib",
"libkeymaster4support",
"libkeystore-attestation-application-id",
- "android.hardware.security.keymint-V1-ndk_platform",
- "android.security.authorization-ndk_platform",
+ "android.hardware.security.keymint-V1-ndk",
+ "android.security.authorization-ndk",
],
static_libs: [
"android.hardware.identity-V3-cpp",
diff --git a/keystore-engine/Android.bp b/keystore-engine/Android.bp
index 0cecfd8..cb75cde 100644
--- a/keystore-engine/Android.bp
+++ b/keystore-engine/Android.bp
@@ -36,7 +36,7 @@
],
shared_libs: [
- "android.system.keystore2-V1-ndk_platform",
+ "android.system.keystore2-V1-ndk",
"libbinder_ndk",
"libcrypto",
"libcutils",
@@ -66,7 +66,7 @@
],
shared_libs: [
- "android.system.keystore2-V1-ndk_platform",
+ "android.system.keystore2-V1-ndk",
"libbase",
"libbinder_ndk",
"libcrypto",
diff --git a/keystore/Android.bp b/keystore/Android.bp
index 0ab5198..ad4b4b1 100644
--- a/keystore/Android.bp
+++ b/keystore/Android.bp
@@ -47,8 +47,8 @@
"keystore_client.proto",
],
shared_libs: [
- "android.security.apc-ndk_platform",
- "android.system.keystore2-V1-ndk_platform",
+ "android.security.apc-ndk",
+ "android.system.keystore2-V1-ndk",
"libbinder",
"libbinder_ndk",
"libchrome",
diff --git a/keystore/tests/Android.bp b/keystore/tests/Android.bp
index 39601eb..f51cc2f 100644
--- a/keystore/tests/Android.bp
+++ b/keystore/tests/Android.bp
@@ -62,7 +62,7 @@
"libgtest_main",
"libutils",
"liblog",
- "android.security.apc-ndk_platform",
+ "android.security.apc-ndk",
],
shared_libs: [
"libbinder_ndk",
diff --git a/keystore2/Android.bp b/keystore2/Android.bp
index 32493c0..18d082b 100644
--- a/keystore2/Android.bp
+++ b/keystore2/Android.bp
@@ -35,17 +35,16 @@
"android.security.authorization-rust",
"android.security.compat-rust",
"android.security.maintenance-rust",
+ "android.security.metrics-rust",
"android.security.remoteprovisioning-rust",
"android.system.keystore2-V1-rust",
"libanyhow",
"libbinder_rs",
- "libcutils_bindgen",
"libkeystore2_aaid-rust",
"libkeystore2_apc_compat-rust",
"libkeystore2_crypto_rust",
"libkeystore2_km_compat",
"libkeystore2_selinux",
- "libkeystore2_system_property-rust",
"libkeystore2_vintf_rust",
"liblazy_static",
"liblibc",
@@ -54,9 +53,7 @@
"liblog_rust",
"librand",
"librusqlite",
- "libstatslog_rust",
- "libstatslog_rust_header",
- "libstatspull_rust",
+ "librustutils",
"libthiserror",
],
shared_libs: [
@@ -108,7 +105,7 @@
"libbinder_rs",
"libkeystore2",
"liblog_rust",
- "libvpnprofilestore-rust",
+ "liblegacykeystore-rust",
],
init_rc: ["keystore2.rc"],
@@ -126,8 +123,8 @@
// be removed once that works correctly.
shared_libs: [
"android.hardware.confirmationui@1.0",
- "android.hardware.security.sharedsecret-V1-ndk_platform",
- "android.security.compat-ndk_platform",
+ "android.hardware.security.sharedsecret-V1-ndk",
+ "android.security.compat-ndk",
"libc",
"libdl_android",
"libdl",
diff --git a/keystore2/aaid/Android.bp b/keystore2/aaid/Android.bp
index c04ce51..3417960 100644
--- a/keystore2/aaid/Android.bp
+++ b/keystore2/aaid/Android.bp
@@ -57,3 +57,13 @@
"libkeystore2_aaid",
],
}
+
+rust_test {
+ name: "libkeystore2_aaid_bindgen_test",
+ srcs: [":libkeystore2_aaid_bindgen"],
+ crate_name: "keystore2_aaid_bindgen_test",
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ clippy_lints: "none",
+ lints: "none",
+}
diff --git a/keystore2/aidl/Android.bp b/keystore2/aidl/Android.bp
index 06fdb48..3bcec07 100644
--- a/keystore2/aidl/Android.bp
+++ b/keystore2/aidl/Android.bp
@@ -24,7 +24,7 @@
aidl_interface {
name: "android.security.attestationmanager",
srcs: [ "android/security/attestationmanager/*.aidl", ],
- imports: [ "android.hardware.security.keymint" ],
+ imports: [ "android.hardware.security.keymint-V1" ],
unstable: true,
backend: {
java: {
@@ -45,8 +45,8 @@
name: "android.security.authorization",
srcs: [ "android/security/authorization/*.aidl" ],
imports: [
- "android.hardware.security.keymint",
- "android.hardware.security.secureclock",
+ "android.hardware.security.keymint-V1",
+ "android.hardware.security.secureclock-V1",
],
unstable: true,
backend: {
@@ -86,9 +86,9 @@
name: "android.security.compat",
srcs: [ "android/security/compat/*.aidl" ],
imports: [
- "android.hardware.security.keymint",
- "android.hardware.security.secureclock",
- "android.hardware.security.sharedsecret",
+ "android.hardware.security.keymint-V1",
+ "android.hardware.security.secureclock-V1",
+ "android.hardware.security.sharedsecret-V1",
],
unstable: true,
backend: {
@@ -110,7 +110,7 @@
name: "android.security.remoteprovisioning",
srcs: [ "android/security/remoteprovisioning/*.aidl" ],
imports: [
- "android.hardware.security.keymint",
+ "android.hardware.security.keymint-V1",
],
unstable: true,
backend: {
@@ -151,8 +151,8 @@
}
aidl_interface {
- name: "android.security.vpnprofilestore",
- srcs: [ "android/security/vpnprofilestore/*.aidl" ],
+ name: "android.security.legacykeystore",
+ srcs: [ "android/security/legacykeystore/*.aidl" ],
unstable: true,
backend: {
java: {
@@ -162,6 +162,32 @@
rust: {
enabled: true,
},
+ ndk: {
+ enabled: true,
+ apps_enabled: false,
+ }
+ },
+}
+
+aidl_interface {
+ name: "android.security.metrics",
+ srcs: [ "android/security/metrics/*.aidl" ],
+ imports: [
+ "android.system.keystore2-V1",
+ ],
+ unstable: true,
+ backend: {
+ java: {
+ platform_apis: true,
+ srcs_available: true,
+ },
+ rust: {
+ enabled: true,
+ },
+ ndk: {
+ enabled: true,
+ apps_enabled: false,
+ }
},
}
diff --git a/keystore2/aidl/android/security/legacykeystore/ILegacyKeystore.aidl b/keystore2/aidl/android/security/legacykeystore/ILegacyKeystore.aidl
new file mode 100644
index 0000000..fe93673
--- /dev/null
+++ b/keystore2/aidl/android/security/legacykeystore/ILegacyKeystore.aidl
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.legacykeystore;
+
+/**
+ * Internal interface for accessing and storing legacy keystore blobs.
+ * Before Android S, Keystore offered a key-value store that was intended for storing
+ * data associated with certain types of keys. E.g., public certificates for asymmetric keys.
+ * This key value store no longer exists as part of the Keystore 2.0 protocol.
+ * However, there are some clients that used Keystore in an unintended way.
+ * This interface exists to give these clients a grace period to migrate their keys
+ * out of legacy keystore. In Android S, this legacy keystore may be used as keystore was
+ * used in earlier versions, and provides access to entries that were put into keystore
+ * before Android S.
+ *
+ * DEPRECATION NOTICE: In Android T, the `put` function is slated to be removed.
+ * The `get`, `list`, and `remove` API will remain available so that clients can migrate
+ * their blobs out of legacy keystore.
+ * @hide
+ */
+interface ILegacyKeystore {
+
+ /**
+ * Special value indicating the caller's uid.
+ */
+ const int UID_SELF = -1;
+
+ /**
+ * Service specific error code indicating that an unexpected system error occurred.
+ */
+ const int ERROR_SYSTEM_ERROR = 4;
+
+ /**
+ * Service specific error code indicating that the caller does not have the
+ * right to access the requested uid.
+ */
+ const int ERROR_PERMISSION_DENIED = 6;
+
+ /**
+ * Service specific error code indicating that the entry was not found.
+ */
+ const int ERROR_ENTRY_NOT_FOUND = 7;
+
+ /**
+ * Returns the blob stored under the given name.
+ *
+ * @param alias name of the blob entry.
+ * @param uid designates the legacy namespace. Specify UID_SELF for the caller's namespace.
+ * @return The unstructured blob that was passed as blob parameter into put()
+ */
+ byte[] get(in String alias, int uid);
+
+ /**
+ * Stores one entry as unstructured blob under the given alias.
+ * Overwrites existing entries with the same alias.
+ *
+ * @param alias name of the new entry.
+ * @param uid designates the legacy namespace. Specify UID_SELF for the caller's namespace.
+ * @param blob the payload of the new entry.
+ *
+ * IMPORTANT DEPRECATION NOTICE: This function is slated to be removed in Android T.
+ * Do not add new callers. The remaining functionality will remain for the purpose
+ * of migrating legacy configuration out.
+ */
+ void put(in String alias, int uid, in byte[] blob);
+
+ /**
+ * Deletes the entry under the given alias.
+ *
+ * @param alias name of the entry to be removed.
+ * @param uid designates the legacy namespace of the entry. Specify UID_SELF for the caller's
+ * namespace.
+ */
+ void remove(in String alias, int uid);
+
+ /**
+ * Returns a list of aliases of entries stored. The list is filtered by prefix.
+ * The resulting strings are the full aliases including the prefix.
+ *
+ * @param prefix used to filter results.
+ * @param uid legacy namespace to list. Specify UID_SELF for caller's namespace.
+ */
+ String[] list(in String prefix, int uid);
+}
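For orientation, a minimal client sketch against the Rust bindings generated from this interface might look as follows. The service name "android.security.legacykeystore", the use of binder::get_interface, and the migrate_out helper itself are assumptions for illustration, not part of this change.

    use android_security_legacykeystore::aidl::android::security::legacykeystore::ILegacyKeystore::{
        ILegacyKeystore, UID_SELF,
    };
    use android_security_legacykeystore::binder::{get_interface, Status, Strong};

    // Hypothetical helper: copy every legacy entry matching `prefix` out of legacy keystore,
    // then delete it, operating on the caller's own namespace (UID_SELF).
    fn migrate_out(prefix: &str) -> Result<(), Status> {
        let store: Strong<dyn ILegacyKeystore> =
            get_interface("android.security.legacykeystore") // assumed service name
                .expect("unable to connect to legacy keystore");
        for alias in store.list(prefix, UID_SELF)? {
            let blob = store.get(&alias, UID_SELF)?;
            // ... persist `blob` in the client's own storage here ...
            let _ = blob;
            store.remove(&alias, UID_SELF)?;
        }
        Ok(())
    }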
diff --git a/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl b/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
index 5f91e79..6a37c78 100644
--- a/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
+++ b/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
@@ -123,4 +123,12 @@
* `ResponseCode::SYSTEM_ERROR` - An unexpected system error occurred.
*/
void migrateKeyNamespace(in KeyDescriptor source, in KeyDescriptor destination);
+
+ /**
+ * Deletes all keys in all hardware keystores. Used when keystore is reset completely. After
+ * this function is called all keys with Tag::ROLLBACK_RESISTANCE in their hardware-enforced
+ * authorization lists must be rendered permanently unusable. Keys without
+ * Tag::ROLLBACK_RESISTANCE may or may not be rendered unusable.
+ */
+ void deleteAllKeys();
}
diff --git a/keystore2/aidl/android/security/metrics/Algorithm.aidl b/keystore2/aidl/android/security/metrics/Algorithm.aidl
new file mode 100644
index 0000000..8e8d107
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/Algorithm.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * Algorithm enum as defined in stats/enums/system/security/keystore2/enums.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum Algorithm {
+ /** ALGORITHM is prepended because UNSPECIFIED exists in other enums as well. */
+ ALGORITHM_UNSPECIFIED = 0,
+
+ /** Asymmetric algorithms. */
+ RSA = 1,
+
+ /** 2 removed, do not reuse. */
+ EC = 3,
+
+ /** Block cipher algorithms. */
+ AES = 32,
+ TRIPLE_DES = 33,
+
+ /** MAC algorithms. */
+ HMAC = 128,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/AtomID.aidl b/keystore2/aidl/android/security/metrics/AtomID.aidl
new file mode 100644
index 0000000..166e753
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/AtomID.aidl
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * Atom IDs as defined in frameworks/proto_logging/stats/atoms.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum AtomID {
+ STORAGE_STATS = 10103,
+ RKP_POOL_STATS = 10104,
+ KEY_CREATION_WITH_GENERAL_INFO = 10118,
+ KEY_CREATION_WITH_AUTH_INFO = 10119,
+ KEY_CREATION_WITH_PURPOSE_AND_MODES_INFO = 10120,
+ KEYSTORE2_ATOM_WITH_OVERFLOW = 10121,
+ KEY_OPERATION_WITH_PURPOSE_AND_MODES_INFO = 10122,
+ KEY_OPERATION_WITH_GENERAL_INFO = 10123,
+ RKP_ERROR_STATS = 10124,
+ CRASH_STATS = 10125,
+}
\ No newline at end of file
diff --git a/keystore2/system_property/system_property_bindgen.hpp b/keystore2/aidl/android/security/metrics/CrashStats.aidl
similarity index 63%
rename from keystore2/system_property/system_property_bindgen.hpp
rename to keystore2/aidl/android/security/metrics/CrashStats.aidl
index e3c1ade..8ca043b 100644
--- a/keystore2/system_property/system_property_bindgen.hpp
+++ b/keystore2/aidl/android/security/metrics/CrashStats.aidl
@@ -1,11 +1,11 @@
/*
- * Copyright (C) 2021 The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,6 +13,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#pragma once
-#include "sys/system_properties.h"
+package android.security.metrics;
+
+/** @hide */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable CrashStats {
+ int count_of_crash_events;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/EcCurve.aidl b/keystore2/aidl/android/security/metrics/EcCurve.aidl
new file mode 100644
index 0000000..b190d83
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/EcCurve.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * EcCurve enum as defined in Keystore2KeyCreationWithGeneralInfo of
+ * frameworks/proto_logging/stats/atoms.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum EcCurve {
+ /** Unspecified takes 0. Other values are incremented by 1 compared to the keymint spec. */
+ EC_CURVE_UNSPECIFIED = 0,
+ P_224 = 1,
+ P_256 = 2,
+ P_384 = 3,
+ P_521 = 4,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/HardwareAuthenticatorType.aidl b/keystore2/aidl/android/security/metrics/HardwareAuthenticatorType.aidl
new file mode 100644
index 0000000..b13f6ea
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/HardwareAuthenticatorType.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * HardwareAuthenticatorType enum as defined in Keystore2KeyCreationWithAuthInfo of
+ * frameworks/proto_logging/stats/atoms.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum HardwareAuthenticatorType {
+ /** Unspecified takes 0. Other values are incremented by 1 compared to the keymint spec. */
+ AUTH_TYPE_UNSPECIFIED = 0,
+ NONE = 1,
+ PASSWORD = 2,
+ FINGERPRINT = 3,
+ ANY = 5,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/IKeystoreMetrics.aidl b/keystore2/aidl/android/security/metrics/IKeystoreMetrics.aidl
new file mode 100644
index 0000000..342cf01
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/IKeystoreMetrics.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.KeystoreAtom;
+import android.security.metrics.AtomID;
+
+/**
+ * IKeystoreMetrics interface exposes the method for system server to pull metrics from keystore.
+ * @hide
+ */
+interface IKeystoreMetrics {
+ /**
+ * Allows the metrics routing proxy to pull the metrics from keystore.
+ *
+ * @return an array of KeystoreAtom objects with the atomID. There can be multiple atom objects
+ * for the same atomID, encapsulating different combinations of values for the atom fields.
+ * If there is no atom object found for the atomID in the metrics store, an empty array is
+ * returned.
+ *
+ * Callers require 'PullMetrics' permission.
+ *
+ * @param atomID - ID of the atom to be pulled.
+ *
+ * Errors are reported as service specific errors.
+ */
+ KeystoreAtom[] pullMetrics(in AtomID atomID);
+}
\ No newline at end of file
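As a rough sketch of how the metrics routing proxy could consume this interface, the snippet below pulls the CRASH_STATS atom and sums the reported crash counts. The service name "android.security.metrics", the use of get_interface, and the exact shape of the generated Rust bindings (for example, UpperCamelCase variants on the KeystoreAtomPayload union defined later in this change) are assumptions for illustration only.

    use android_security_metrics::aidl::android::security::metrics::{
        AtomID::AtomID, IKeystoreMetrics::IKeystoreMetrics,
        KeystoreAtomPayload::KeystoreAtomPayload,
    };
    use android_security_metrics::binder::{get_interface, Status, Strong};

    // Hypothetical pull of the crash-count atom. Callers need the PullMetrics permission.
    fn pull_crash_count() -> Result<i32, Status> {
        let metrics: Strong<dyn IKeystoreMetrics> =
            get_interface("android.security.metrics") // assumed service name
                .expect("unable to connect to the keystore metrics service");
        let mut count = 0;
        for atom in metrics.pullMetrics(AtomID::CRASH_STATS)? {
            if let KeystoreAtomPayload::CrashStats(stats) = atom.payload {
                count += stats.count_of_crash_events;
            }
        }
        Ok(count)
    }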
diff --git a/keystore2/aidl/android/security/metrics/KeyCreationWithAuthInfo.aidl b/keystore2/aidl/android/security/metrics/KeyCreationWithAuthInfo.aidl
new file mode 100644
index 0000000..ff200bc
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/KeyCreationWithAuthInfo.aidl
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.HardwareAuthenticatorType;
+import android.security.metrics.SecurityLevel;
+
+/**
+ * Atom that encapsulates authentication related information in key creation events.
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable KeyCreationWithAuthInfo {
+ HardwareAuthenticatorType user_auth_type;
+ /**
+ * Base 10 logarithm of the timeout in seconds.
+ * The logarithm is taken in order to reduce the cardinality.
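+ * For example, a timeout of 1000 seconds is recorded as 3.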
+ */
+ int log10_auth_key_timeout_seconds;
+ SecurityLevel security_level;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/KeyCreationWithGeneralInfo.aidl b/keystore2/aidl/android/security/metrics/KeyCreationWithGeneralInfo.aidl
new file mode 100644
index 0000000..74cd9ef
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/KeyCreationWithGeneralInfo.aidl
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.Algorithm;
+import android.security.metrics.EcCurve;
+import android.security.metrics.KeyOrigin;
+
+/**
+ * Atom that encapsulates a set of general information in key creation events.
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable KeyCreationWithGeneralInfo {
+ Algorithm algorithm;
+ int key_size;
+ EcCurve ec_curve;
+ KeyOrigin key_origin;
+ int error_code;
+ boolean attestation_requested = false;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/KeyCreationWithPurposeAndModesInfo.aidl b/keystore2/aidl/android/security/metrics/KeyCreationWithPurposeAndModesInfo.aidl
new file mode 100644
index 0000000..dda61c4
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/KeyCreationWithPurposeAndModesInfo.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.Algorithm;
+
+/**
+ * Atom that encapsulates the purpose, padding mode, digest and block mode fields in key
+ * creation events.
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable KeyCreationWithPurposeAndModesInfo {
+ Algorithm algorithm;
+ int purpose_bitmap;
+ int padding_mode_bitmap;
+ int digest_bitmap;
+ int block_mode_bitmap;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/KeyOperationWithGeneralInfo.aidl b/keystore2/aidl/android/security/metrics/KeyOperationWithGeneralInfo.aidl
new file mode 100644
index 0000000..d70aaf3
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/KeyOperationWithGeneralInfo.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.Outcome;
+import android.security.metrics.SecurityLevel;
+
+/**
+ * Atom that encapsulates a set of general information in key operation events.
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable KeyOperationWithGeneralInfo {
+ Outcome outcome;
+ int error_code;
+ boolean key_upgraded;
+ SecurityLevel security_level;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/KeyOperationWithPurposeAndModesInfo.aidl b/keystore2/aidl/android/security/metrics/KeyOperationWithPurposeAndModesInfo.aidl
new file mode 100644
index 0000000..e3769e1
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/KeyOperationWithPurposeAndModesInfo.aidl
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.Purpose;
+
+/**
+ * Atom that encapsulates the purpose, padding mode, digest and block mode fields in key operations.
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable KeyOperationWithPurposeAndModesInfo {
+ Purpose purpose;
+ int padding_mode_bitmap;
+ int digest_bitmap;
+ int block_mode_bitmap;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/KeyOrigin.aidl b/keystore2/aidl/android/security/metrics/KeyOrigin.aidl
new file mode 100644
index 0000000..b472bc3
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/KeyOrigin.aidl
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * KeyOrigin enum as defined in Keystore2KeyCreationWithGeneralInfo of
+ * frameworks/proto_logging/stats/atoms.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum KeyOrigin {
+ /** Unspecified takes 0. Other values are incremented by 1 compared to the keymint spec. */
+ ORIGIN_UNSPECIFIED = 0,
+
+ /** Generated in KeyMint. Should not exist outside the TEE. */
+ GENERATED = 1,
+
+ /** Derived inside KeyMint. Likely exists off-device. */
+ DERIVED = 2,
+
+ /** Imported into KeyMint. Existed as cleartext in Android. */
+ IMPORTED = 3,
+
+ /** Previously used for another purpose that is now obsolete. */
+ RESERVED = 4,
+
+ /** Securely imported into KeyMint. */
+ SECURELY_IMPORTED = 5,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/Keystore2AtomWithOverflow.aidl b/keystore2/aidl/android/security/metrics/Keystore2AtomWithOverflow.aidl
new file mode 100644
index 0000000..f2ac399
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/Keystore2AtomWithOverflow.aidl
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.AtomID;
+
+/**
+ * Logs the atom id of an atom associated with key creation/operation events that has reached
+ * the maximum storage limit allocated for distinct atom objects of that atom type in the
+ * keystore in-memory store.
+ *
+ * The size of the storage bucket for each atom type is limited based on its expected
+ * cardinality. That limit may be exceeded if the dimensions of the atom take a large number
+ * of unexpected combinations. This atom is used to track such cases.
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable Keystore2AtomWithOverflow {
+ AtomID atom_id;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/KeystoreAtom.aidl b/keystore2/aidl/android/security/metrics/KeystoreAtom.aidl
new file mode 100644
index 0000000..266267a
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/KeystoreAtom.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.KeystoreAtomPayload;
+
+/**
+ * Encapsulates a particular atom object of type KeystoreAtomPayload and its count. Note that
+ * the count field is only relevant for the atom types that are stored in the
+ * in-memory metrics store. E.g., the count field is not relevant for atom types such as
+ * StorageStats and RkpPoolStats that are not stored in the metrics store.
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable KeystoreAtom {
+ KeystoreAtomPayload payload;
+ int count;
+}
diff --git a/keystore2/aidl/android/security/metrics/KeystoreAtomPayload.aidl b/keystore2/aidl/android/security/metrics/KeystoreAtomPayload.aidl
new file mode 100644
index 0000000..a3e4dd6
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/KeystoreAtomPayload.aidl
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.KeyCreationWithGeneralInfo;
+import android.security.metrics.KeyCreationWithPurposeAndModesInfo;
+import android.security.metrics.KeyCreationWithAuthInfo;
+import android.security.metrics.KeyOperationWithGeneralInfo;
+import android.security.metrics.KeyOperationWithPurposeAndModesInfo;
+import android.security.metrics.StorageStats;
+import android.security.metrics.Keystore2AtomWithOverflow;
+import android.security.metrics.RkpErrorStats;
+import android.security.metrics.RkpPoolStats;
+import android.security.metrics.CrashStats;
+
+/** @hide */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+union KeystoreAtomPayload {
+ StorageStats storageStats;
+ RkpPoolStats rkpPoolStats;
+ KeyCreationWithGeneralInfo keyCreationWithGeneralInfo;
+ KeyCreationWithAuthInfo keyCreationWithAuthInfo;
+ KeyCreationWithPurposeAndModesInfo keyCreationWithPurposeAndModesInfo;
+ Keystore2AtomWithOverflow keystore2AtomWithOverflow;
+ KeyOperationWithPurposeAndModesInfo keyOperationWithPurposeAndModesInfo;
+ KeyOperationWithGeneralInfo keyOperationWithGeneralInfo;
+ RkpErrorStats rkpErrorStats;
+ CrashStats crashStats;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/Outcome.aidl b/keystore2/aidl/android/security/metrics/Outcome.aidl
new file mode 100644
index 0000000..006548b
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/Outcome.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * Outcome enum as defined in Keystore2KeyOperationWithGeneralInfo of
+ * frameworks/proto_logging/stats/atoms.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum Outcome {
+ OUTCOME_UNSPECIFIED = 0,
+ DROPPED = 1,
+ SUCCESS = 2,
+ ABORT = 3,
+ PRUNED = 4,
+ ERROR = 5,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/PoolStatus.aidl b/keystore2/aidl/android/security/metrics/PoolStatus.aidl
new file mode 100644
index 0000000..3530163
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/PoolStatus.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * Status of the remotely provisioned keys, as defined in RkpPoolStats of
+ * frameworks/proto_logging/stats/atoms.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum PoolStatus {
+ EXPIRING = 1,
+ UNASSIGNED = 2,
+ ATTESTED = 3,
+ TOTAL = 4,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/Purpose.aidl b/keystore2/aidl/android/security/metrics/Purpose.aidl
new file mode 100644
index 0000000..f003cea
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/Purpose.aidl
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * Purpose enum as defined in Keystore2KeyOperationWithPurposeAndModesInfo of
+ * frameworks/proto_logging/stats/atoms.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum Purpose {
+ /** Unspecified takes 0. Other values are incremented by 1 compared to the keymint spec. */
+ KEY_PURPOSE_UNSPECIFIED = 0,
+
+ /** Usable with RSA, 3DES and AES keys. */
+ ENCRYPT = 1,
+
+ /** Usable with RSA, 3DES and AES keys. */
+ DECRYPT = 2,
+
+ /** Usable with RSA, EC and HMAC keys. */
+ SIGN = 3,
+
+ /** Usable with RSA, EC and HMAC keys. */
+ VERIFY = 4,
+
+ /** 5 is reserved */
+
+ /** Usable with RSA keys. */
+ WRAP_KEY = 6,
+
+ /** Key Agreement, usable with EC keys. */
+ AGREE_KEY = 7,
+
+ /**
+ * Usable as an attestation signing key. Keys with this purpose must not have any other
+ * purpose.
+ */
+ ATTEST_KEY = 8,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/RkpError.aidl b/keystore2/aidl/android/security/metrics/RkpError.aidl
new file mode 100644
index 0000000..c33703d
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/RkpError.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * RkpError enum as defined in RkpErrorStats of frameworks/proto_logging/stats/atoms.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum RkpError {
+ RKP_ERROR_UNSPECIFIED = 0,
+
+ /** The key pool is out of keys. */
+ OUT_OF_KEYS = 1,
+
+ /** Falling back to factory provisioned keys during hybrid mode. */
+ FALL_BACK_DURING_HYBRID = 2,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/RkpErrorStats.aidl b/keystore2/aidl/android/security/metrics/RkpErrorStats.aidl
new file mode 100644
index 0000000..616d129
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/RkpErrorStats.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.RkpError;
+/**
+ * Atom that encapsulates error information in remote key provisioning events.
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable RkpErrorStats {
+ RkpError rkpError;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/RkpPoolStats.aidl b/keystore2/aidl/android/security/metrics/RkpPoolStats.aidl
new file mode 100644
index 0000000..016b6ff
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/RkpPoolStats.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.SecurityLevel;
+
+/**
+ * Count of keys in the attestation key pool related to Remote Key Provisioning (RKP).
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable RkpPoolStats {
+ SecurityLevel security_level;
+ int expiring;
+ int unassigned;
+ int attested;
+ int total;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/SecurityLevel.aidl b/keystore2/aidl/android/security/metrics/SecurityLevel.aidl
new file mode 100644
index 0000000..f627be2
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/SecurityLevel.aidl
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * SecurityLevel enum as defined in stats/enums/system/security/keystore2/enums.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum SecurityLevel {
+ /** Unspecified takes 0. Other values are incremented by 1 compared to the keymint spec. */
+ SECURITY_LEVEL_UNSPECIFIED = 0,
+ SECURITY_LEVEL_SOFTWARE = 1,
+ SECURITY_LEVEL_TRUSTED_ENVIRONMENT = 2,
+ SECURITY_LEVEL_STRONGBOX = 3,
+ SECURITY_LEVEL_KEYSTORE = 4,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/Storage.aidl b/keystore2/aidl/android/security/metrics/Storage.aidl
new file mode 100644
index 0000000..1ba6e1f
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/Storage.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+/**
+ * Storage enum as defined in Keystore2StorageStats of frameworks/proto_logging/stats/atoms.proto.
+ * @hide
+ */
+@Backing(type="int")
+enum Storage {
+ STORAGE_UNSPECIFIED = 0,
+ KEY_ENTRY = 1,
+ KEY_ENTRY_ID_INDEX = 2,
+ KEY_ENTRY_DOMAIN_NAMESPACE_INDEX = 3,
+ BLOB_ENTRY = 4,
+ BLOB_ENTRY_KEY_ENTRY_ID_INDEX = 5,
+ KEY_PARAMETER = 6,
+ KEY_PARAMETER_KEY_ENTRY_ID_INDEX = 7,
+ KEY_METADATA = 8,
+ KEY_METADATA_KEY_ENTRY_ID_INDEX = 9,
+ GRANT = 10,
+ AUTH_TOKEN = 11,
+ BLOB_METADATA = 12,
+ BLOB_METADATA_BLOB_ENTRY_ID_INDEX = 13,
+ METADATA = 14,
+ DATABASE = 15,
+ LEGACY_STORAGE = 16,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/metrics/StorageStats.aidl b/keystore2/aidl/android/security/metrics/StorageStats.aidl
new file mode 100644
index 0000000..6822e86
--- /dev/null
+++ b/keystore2/aidl/android/security/metrics/StorageStats.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.metrics;
+
+import android.security.metrics.Storage;
+
+/**
+ * Atom that encapsulates the size and unused space of a Keystore storage type.
+ * @hide
+ */
+@RustDerive(Clone=true, Eq=true, PartialEq=true, Ord=true, PartialOrd=true, Hash=true)
+parcelable StorageStats {
+ Storage storage_type;
+ int size;
+ int unused_size;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
index 4a092af..ecdc790 100644
--- a/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
+++ b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
@@ -20,6 +20,7 @@
import android.hardware.security.keymint.ProtectedData;
import android.hardware.security.keymint.SecurityLevel;
import android.security.remoteprovisioning.AttestationPoolStatus;
+import android.security.remoteprovisioning.ImplInfo;
/**
* `IRemoteProvisioning` is the interface provided to use the remote provisioning functionality
@@ -127,13 +128,14 @@
void generateKeyPair(in boolean is_test_mode, in SecurityLevel secLevel);
/**
- * This method returns the SecurityLevels of whichever instances of
+ * This method returns implementation information for whichever instances of
* IRemotelyProvisionedComponent are running on the device. The RemoteProvisioner app needs to
- * know which KM instances it should be generating and managing attestation keys for.
+ * know which KM instances it should be generating and managing attestation keys for, and which
+ * EC curves are supported in those instances.
*
- * @return The array of security levels.
+ * @return The array of ImplInfo parcelables.
*/
- SecurityLevel[] getSecurityLevels();
+ ImplInfo[] getImplementationInfo();
/**
* This method deletes all remotely provisioned attestation keys in the database, regardless
diff --git a/keystore2/aidl/android/security/remoteprovisioning/ImplInfo.aidl b/keystore2/aidl/android/security/remoteprovisioning/ImplInfo.aidl
new file mode 100644
index 0000000..9baeb24
--- /dev/null
+++ b/keystore2/aidl/android/security/remoteprovisioning/ImplInfo.aidl
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.remoteprovisioning;
+
+import android.hardware.security.keymint.SecurityLevel;
+
+/**
+ * This parcelable provides information about the underlying IRemotelyProvisionedComponent
+ * implementation.
+ * @hide
+ */
+parcelable ImplInfo {
+ /**
+ * The security level of the underlying implementation: TEE or StrongBox.
+ */
+ SecurityLevel secLevel;
+ /**
+ * An integer denoting which EC curve is supported in the underlying implementation. The current
+ * options are either P256 or 25519, with values defined in
+ * hardware/interfaces/security/keymint/aidl/.../RpcHardwareInfo.aidl
+ */
+ int supportedCurve;
+}
diff --git a/keystore2/aidl/android/security/vpnprofilestore/IVpnProfileStore.aidl b/keystore2/aidl/android/security/vpnprofilestore/IVpnProfileStore.aidl
deleted file mode 100644
index 8375b7b..0000000
--- a/keystore2/aidl/android/security/vpnprofilestore/IVpnProfileStore.aidl
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.security.vpnprofilestore;
-
-/**
- * Internal interface for accessing and storing VPN profiles.
- * @hide
- */
-interface IVpnProfileStore {
- /**
- * Service specific error code indicating that the profile was not found.
- */
- const int ERROR_PROFILE_NOT_FOUND = 1;
-
- /**
- * Service specific error code indicating that an unexpected system error occurred.
- */
- const int ERROR_SYSTEM_ERROR = 2;
-
- /**
- * Returns the profile stored under the given alias.
- *
- * @param alias name of the profile.
- * @return The unstructured blob that was passed as profile parameter into put()
- */
- byte[] get(in String alias);
-
- /**
- * Stores one profile as unstructured blob under the given alias.
- */
- void put(in String alias, in byte[] profile);
-
- /**
- * Deletes the profile under the given alias.
- */
- void remove(in String alias);
-
- /**
- * Returns a list of aliases of profiles stored. The list is filtered by prefix.
- * The resulting strings are the full aliases including the prefix.
- */
- String[] list(in String prefix);
-}
\ No newline at end of file
diff --git a/keystore2/apc_compat/Android.bp b/keystore2/apc_compat/Android.bp
index bf21675..df7521e 100644
--- a/keystore2/apc_compat/Android.bp
+++ b/keystore2/apc_compat/Android.bp
@@ -63,3 +63,13 @@
"libkeystore2_apc_compat",
],
}
+
+rust_test {
+ name: "libkeystore2_apc_compat_bindgen_test",
+ srcs: [":libkeystore2_apc_compat_bindgen"],
+ crate_name: "keystore2_apc_compat_bindgen_test",
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ clippy_lints: "none",
+ lints: "none",
+}
diff --git a/keystore2/vpnprofilestore/Android.bp b/keystore2/legacykeystore/Android.bp
similarity index 83%
rename from keystore2/vpnprofilestore/Android.bp
rename to keystore2/legacykeystore/Android.bp
index 7ddf0d6..da6aa8a 100644
--- a/keystore2/vpnprofilestore/Android.bp
+++ b/keystore2/legacykeystore/Android.bp
@@ -22,36 +22,38 @@
}
rust_library {
- name: "libvpnprofilestore-rust",
- crate_name: "vpnprofilestore",
+ name: "liblegacykeystore-rust",
+ crate_name: "legacykeystore",
srcs: [
"lib.rs",
],
rustlibs: [
- "android.security.vpnprofilestore-rust",
+ "android.security.legacykeystore-rust",
"libanyhow",
"libbinder_rs",
"libkeystore2",
"liblog_rust",
"librusqlite",
+ "librustutils",
"libthiserror",
],
}
rust_test {
- name: "vpnprofilestore_test",
- crate_name: "vpnprofilestore",
+ name: "legacykeystore_test",
+ crate_name: "legacykeystore",
srcs: ["lib.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
rustlibs: [
- "android.security.vpnprofilestore-rust",
+ "android.security.legacykeystore-rust",
"libanyhow",
"libbinder_rs",
"libkeystore2",
"libkeystore2_test_utils",
"liblog_rust",
"librusqlite",
+ "librustutils",
"libthiserror",
],
}
diff --git a/keystore2/legacykeystore/TEST_MAPPING b/keystore2/legacykeystore/TEST_MAPPING
new file mode 100644
index 0000000..37d1439
--- /dev/null
+++ b/keystore2/legacykeystore/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+ "presubmit": [
+ {
+ "name": "legacykeystore_test"
+ }
+ ]
+}
diff --git a/keystore2/legacykeystore/lib.rs b/keystore2/legacykeystore/lib.rs
new file mode 100644
index 0000000..da60297
--- /dev/null
+++ b/keystore2/legacykeystore/lib.rs
@@ -0,0 +1,724 @@
+// Copyright 2020, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implements the android.security.legacykeystore interface.
+
+use android_security_legacykeystore::aidl::android::security::legacykeystore::{
+ ILegacyKeystore::BnLegacyKeystore, ILegacyKeystore::ILegacyKeystore,
+ ILegacyKeystore::ERROR_ENTRY_NOT_FOUND, ILegacyKeystore::ERROR_PERMISSION_DENIED,
+ ILegacyKeystore::ERROR_SYSTEM_ERROR, ILegacyKeystore::UID_SELF,
+};
+use android_security_legacykeystore::binder::{
+ BinderFeatures, ExceptionCode, Result as BinderResult, Status as BinderStatus, Strong,
+ ThreadState,
+};
+use anyhow::{Context, Result};
+use keystore2::{
+ async_task::AsyncTask, legacy_blob::LegacyBlobLoader, maintenance::DeleteListener,
+ maintenance::Domain, utils::watchdog as wd,
+};
+use rusqlite::{
+ params, Connection, OptionalExtension, Transaction, TransactionBehavior, NO_PARAMS,
+};
+use std::sync::Arc;
+use std::{
+ collections::HashSet,
+ path::{Path, PathBuf},
+};
+
+struct DB {
+ conn: Connection,
+}
+
+impl DB {
+ fn new(db_file: &Path) -> Result<Self> {
+ let mut db = Self {
+ conn: Connection::open(db_file).context("Failed to initialize SQLite connection.")?,
+ };
+
+ db.init_tables().context("Trying to initialize legacy keystore db.")?;
+ Ok(db)
+ }
+
+ fn with_transaction<T, F>(&mut self, behavior: TransactionBehavior, f: F) -> Result<T>
+ where
+ F: Fn(&Transaction) -> Result<T>,
+ {
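+ // Run `f` inside a transaction with the requested behavior. If SQLite reports the
+ // database as busy or locked, back off briefly and retry the whole closure; any other
+ // error is returned to the caller unchanged.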
+ loop {
+ match self
+ .conn
+ .transaction_with_behavior(behavior)
+ .context("In with_transaction.")
+ .and_then(|tx| f(&tx).map(|result| (result, tx)))
+ .and_then(|(result, tx)| {
+ tx.commit().context("In with_transaction: Failed to commit transaction.")?;
+ Ok(result)
+ }) {
+ Ok(result) => break Ok(result),
+ Err(e) => {
+ if Self::is_locked_error(&e) {
+ std::thread::sleep(std::time::Duration::from_micros(500));
+ continue;
+ } else {
+ return Err(e).context("In with_transaction.");
+ }
+ }
+ }
+ }
+ }
+
+ fn is_locked_error(e: &anyhow::Error) -> bool {
+ matches!(
+ e.root_cause().downcast_ref::<rusqlite::ffi::Error>(),
+ Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseBusy, .. })
+ | Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseLocked, .. })
+ )
+ }
+
+ fn init_tables(&mut self) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
+ "CREATE TABLE IF NOT EXISTS profiles (
+ owner INTEGER,
+ alias BLOB,
+ profile BLOB,
+ UNIQUE(owner, alias));",
+ NO_PARAMS,
+ )
+ .context("Failed to initialize \"profiles\" table.")?;
+ Ok(())
+ })
+ }
+
+ fn list(&mut self, caller_uid: u32) -> Result<Vec<String>> {
+ self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ let mut stmt = tx
+ .prepare("SELECT alias FROM profiles WHERE owner = ? ORDER BY alias ASC;")
+ .context("In list: Failed to prepare statement.")?;
+
+ let aliases = stmt
+ .query_map(params![caller_uid], |row| row.get(0))?
+ .collect::<rusqlite::Result<Vec<String>>>()
+ .context("In list: query_map failed.");
+ aliases
+ })
+ }
+
+ fn put(&mut self, caller_uid: u32, alias: &str, entry: &[u8]) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
+ "INSERT OR REPLACE INTO profiles (owner, alias, profile) values (?, ?, ?)",
+ params![caller_uid, alias, entry,],
+ )
+ .context("In put: Failed to insert or replace.")?;
+ Ok(())
+ })
+ }
+
+ fn get(&mut self, caller_uid: u32, alias: &str) -> Result<Option<Vec<u8>>> {
+ self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ tx.query_row(
+ "SELECT profile FROM profiles WHERE owner = ? AND alias = ?;",
+ params![caller_uid, alias],
+ |row| row.get(0),
+ )
+ .optional()
+ .context("In get: failed loading entry.")
+ })
+ }
+
+ fn remove(&mut self, caller_uid: u32, alias: &str) -> Result<bool> {
+ let removed = self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
+ "DELETE FROM profiles WHERE owner = ? AND alias = ?;",
+ params![caller_uid, alias],
+ )
+ .context("In remove: Failed to delete row.")
+ })?;
+ Ok(removed == 1)
+ }
+
+ fn remove_uid(&mut self, uid: u32) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute("DELETE FROM profiles WHERE owner = ?;", params![uid])
+ .context("In remove_uid: Failed to delete.")
+ })?;
+ Ok(())
+ }
+
+ fn remove_user(&mut self, user_id: u32) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
+ "DELETE FROM profiles WHERE cast ( ( owner/? ) as int) = ?;",
+ params![rustutils::users::AID_USER_OFFSET, user_id],
+ )
+            .context("In remove_user: Failed to delete.")
+ })?;
+ Ok(())
+ }
+}
+
+/// This is the main LegacyKeystore error type, it wraps binder exceptions and the
+/// LegacyKeystore errors.
+#[derive(Debug, thiserror::Error, PartialEq)]
+pub enum Error {
+ /// Wraps a LegacyKeystore error code.
+ #[error("Error::Error({0:?})")]
+ Error(i32),
+ /// Wraps a Binder exception code other than a service specific exception.
+ #[error("Binder exception code {0:?}, {1:?}")]
+ Binder(ExceptionCode, i32),
+}
+
+impl Error {
+ /// Short hand for `Error::Error(ERROR_SYSTEM_ERROR)`
+ pub fn sys() -> Self {
+ Error::Error(ERROR_SYSTEM_ERROR)
+ }
+
+ /// Short hand for `Error::Error(ERROR_ENTRY_NOT_FOUND)`
+ pub fn not_found() -> Self {
+ Error::Error(ERROR_ENTRY_NOT_FOUND)
+ }
+
+ /// Short hand for `Error::Error(ERROR_PERMISSION_DENIED)`
+ pub fn perm() -> Self {
+ Error::Error(ERROR_PERMISSION_DENIED)
+ }
+}
+
+/// This function should be used by legacykeystore service calls to translate error conditions
+/// into service specific exceptions.
+///
+/// All error conditions get logged by this function, except for the ERROR_ENTRY_NOT_FOUND error.
+///
+/// `Error::Error(x)` variants get mapped onto a service specific error code of `x`.
+///
+/// All non `Error` error conditions get mapped onto `ERROR_SYSTEM_ERROR`.
+///
+/// `handle_ok` is called if `result` is `Ok(value)`, with `value` passed as its
+/// argument. `handle_ok` must produce a `BinderResult<T>`, but it typically
+/// returns `Ok(value)`.
+fn map_or_log_err<T, U, F>(result: Result<U>, handle_ok: F) -> BinderResult<T>
+where
+ F: FnOnce(U) -> BinderResult<T>,
+{
+ result.map_or_else(
+ |e| {
+ let root_cause = e.root_cause();
+ let (rc, log_error) = match root_cause.downcast_ref::<Error>() {
+ // Make the entry not found errors silent.
+ Some(Error::Error(ERROR_ENTRY_NOT_FOUND)) => (ERROR_ENTRY_NOT_FOUND, false),
+ Some(Error::Error(e)) => (*e, true),
+ Some(Error::Binder(_, _)) | None => (ERROR_SYSTEM_ERROR, true),
+ };
+ if log_error {
+ log::error!("{:?}", e);
+ }
+ Err(BinderStatus::new_service_specific_error(rc, None))
+ },
+ handle_ok,
+ )
+}
+
+struct LegacyKeystoreDeleteListener {
+ legacy_keystore: Arc<LegacyKeystore>,
+}
+
+impl DeleteListener for LegacyKeystoreDeleteListener {
+ fn delete_namespace(&self, domain: Domain, namespace: i64) -> Result<()> {
+ self.legacy_keystore.delete_namespace(domain, namespace)
+ }
+ fn delete_user(&self, user_id: u32) -> Result<()> {
+ self.legacy_keystore.delete_user(user_id)
+ }
+}
+
+/// Implements ILegacyKeystore AIDL interface.
+pub struct LegacyKeystore {
+ db_path: PathBuf,
+ async_task: AsyncTask,
+}
+
+struct AsyncState {
+ recently_imported: HashSet<(u32, String)>,
+ legacy_loader: LegacyBlobLoader,
+ db_path: PathBuf,
+}
+
+impl LegacyKeystore {
+ /// Note: The filename was chosen before the purpose of this module was extended.
+ /// It is kept for backward compatibility with early adopters.
+ const LEGACY_KEYSTORE_FILE_NAME: &'static str = "vpnprofilestore.sqlite";
+
+ const WIFI_NAMESPACE: i64 = 102;
+ const AID_WIFI: u32 = 1010;
+
+ /// Creates a new LegacyKeystore instance.
+ pub fn new_native_binder(
+ path: &Path,
+ ) -> (Box<dyn DeleteListener + Send + Sync + 'static>, Strong<dyn ILegacyKeystore>) {
+ let mut db_path = path.to_path_buf();
+ db_path.push(Self::LEGACY_KEYSTORE_FILE_NAME);
+
+ let legacy_keystore = Arc::new(Self { db_path, async_task: Default::default() });
+ legacy_keystore.init_shelf(path);
+ let service = LegacyKeystoreService { legacy_keystore: legacy_keystore.clone() };
+ (
+ Box::new(LegacyKeystoreDeleteListener { legacy_keystore }),
+ BnLegacyKeystore::new_binder(service, BinderFeatures::default()),
+ )
+ }
+
+ fn open_db(&self) -> Result<DB> {
+ DB::new(&self.db_path).context("In open_db: Failed to open db.")
+ }
+
+ fn get_effective_uid(uid: i32) -> Result<u32> {
+ const AID_SYSTEM: u32 = 1000;
+ let calling_uid = ThreadState::get_calling_uid();
+ let uid = uid as u32;
+
+ if uid == UID_SELF as u32 || uid == calling_uid {
+ Ok(calling_uid)
+ } else if calling_uid == AID_SYSTEM && uid == Self::AID_WIFI {
+ // The only exception for legacy reasons is allowing SYSTEM to access
+ // the WIFI namespace.
+ // IMPORTANT: If you attempt to add more exceptions, it means you are adding
+ // more callers to this deprecated feature. DON'T!
+ Ok(Self::AID_WIFI)
+ } else {
+ Err(Error::perm()).with_context(|| {
+ format!("In get_effective_uid: caller: {}, requested uid: {}.", calling_uid, uid)
+ })
+ }
+ }
+
+ fn get(&self, alias: &str, uid: i32) -> Result<Vec<u8>> {
+ let mut db = self.open_db().context("In get.")?;
+ let uid = Self::get_effective_uid(uid).context("In get.")?;
+
+ if let Some(entry) = db.get(uid, alias).context("In get: Trying to load entry from DB.")? {
+ return Ok(entry);
+ }
+ if self.get_legacy(uid, alias).context("In get: Trying to migrate legacy blob.")? {
+ // If we were able to migrate a legacy blob try again.
+ if let Some(entry) =
+ db.get(uid, alias).context("In get: Trying to load entry from DB.")?
+ {
+ return Ok(entry);
+ }
+ }
+ Err(Error::not_found()).context("In get: No such entry.")
+ }
+
+ fn put(&self, alias: &str, uid: i32, entry: &[u8]) -> Result<()> {
+ let uid = Self::get_effective_uid(uid).context("In put.")?;
+        // To avoid leaving stale legacy entries behind, make sure they are
+        // migrated before replacing them.
+ let _ = self.get_legacy(uid, alias);
+ let mut db = self.open_db().context("In put.")?;
+ db.put(uid, alias, entry).context("In put: Trying to insert entry into DB.")
+ }
+
+ fn remove(&self, alias: &str, uid: i32) -> Result<()> {
+ let uid = Self::get_effective_uid(uid).context("In remove.")?;
+ let mut db = self.open_db().context("In remove.")?;
+        // To avoid leaving stale legacy entries behind, make sure they are
+        // migrated before removing them.
+ let _ = self.get_legacy(uid, alias);
+ let removed =
+ db.remove(uid, alias).context("In remove: Trying to remove entry from DB.")?;
+ if removed {
+ Ok(())
+ } else {
+ Err(Error::not_found()).context("In remove: No such entry.")
+ }
+ }
+
+ fn delete_namespace(&self, domain: Domain, namespace: i64) -> Result<()> {
+ let uid = match domain {
+ Domain::APP => namespace as u32,
+ Domain::SELINUX => {
+ if namespace == Self::WIFI_NAMESPACE {
+ // Namespace WIFI gets mapped to AID_WIFI.
+ Self::AID_WIFI
+ } else {
+ // Nothing to do for any other namespace.
+ return Ok(());
+ }
+ }
+ _ => return Ok(()),
+ };
+
+ if let Err(e) = self.bulk_delete_uid(uid) {
+ log::warn!("In LegacyKeystore::delete_namespace: {:?}", e);
+ }
+ let mut db = self.open_db().context("In LegacyKeystore::delete_namespace.")?;
+ db.remove_uid(uid).context("In LegacyKeystore::delete_namespace.")
+ }
+
+ fn delete_user(&self, user_id: u32) -> Result<()> {
+ if let Err(e) = self.bulk_delete_user(user_id) {
+ log::warn!("In LegacyKeystore::delete_user: {:?}", e);
+ }
+ let mut db = self.open_db().context("In LegacyKeystore::delete_user.")?;
+ db.remove_user(user_id).context("In LegacyKeystore::delete_user.")
+ }
+
+ fn list(&self, prefix: &str, uid: i32) -> Result<Vec<String>> {
+ let mut db = self.open_db().context("In list.")?;
+ let uid = Self::get_effective_uid(uid).context("In list.")?;
+ let mut result = self.list_legacy(uid).context("In list.")?;
+ result.append(&mut db.list(uid).context("In list: Trying to get list of entries.")?);
+ result = result.into_iter().filter(|s| s.starts_with(prefix)).collect();
+ result.sort_unstable();
+ result.dedup();
+ Ok(result)
+ }
+
+ fn init_shelf(&self, path: &Path) {
+ let mut db_path = path.to_path_buf();
+ self.async_task.queue_hi(move |shelf| {
+ let legacy_loader = LegacyBlobLoader::new(&db_path);
+ db_path.push(Self::LEGACY_KEYSTORE_FILE_NAME);
+
+ shelf.put(AsyncState { legacy_loader, db_path, recently_imported: Default::default() });
+ })
+ }
+
+ fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Result<T>
+ where
+ F: FnOnce(&mut AsyncState) -> Result<T> + Send + 'static,
+ {
+ let (sender, receiver) = std::sync::mpsc::channel::<Result<T>>();
+ self.async_task.queue_hi(move |shelf| {
+ let state = shelf.get_downcast_mut::<AsyncState>().expect("Failed to get shelf.");
+ sender.send(f(state)).expect("Failed to send result.");
+ });
+ receiver.recv().context("In do_serialized: Failed to receive result.")?
+ }
+
+ fn list_legacy(&self, uid: u32) -> Result<Vec<String>> {
+ self.do_serialized(move |state| {
+ state
+ .legacy_loader
+ .list_legacy_keystore_entries_for_uid(uid)
+ .context("Trying to list legacy keystore entries.")
+ })
+ .context("In list_legacy.")
+ }
+
+ fn get_legacy(&self, uid: u32, alias: &str) -> Result<bool> {
+ let alias = alias.to_string();
+ self.do_serialized(move |state| {
+ if state.recently_imported.contains(&(uid, alias.clone())) {
+ return Ok(true);
+ }
+            let mut db = DB::new(&state.db_path).context("In get_legacy: Failed to open db.")?;
+ let migrated =
+ Self::migrate_one_legacy_entry(uid, &alias, &state.legacy_loader, &mut db)
+ .context("Trying to migrate legacy keystore entries.")?;
+ if migrated {
+ state.recently_imported.insert((uid, alias));
+ }
+ Ok(migrated)
+ })
+ .context("In get_legacy.")
+ }
+
+ fn bulk_delete_uid(&self, uid: u32) -> Result<()> {
+ self.do_serialized(move |state| {
+ let entries = state
+ .legacy_loader
+ .list_legacy_keystore_entries_for_uid(uid)
+ .context("In bulk_delete_uid: Trying to list entries.")?;
+ for alias in entries.iter() {
+ if let Err(e) = state.legacy_loader.remove_legacy_keystore_entry(uid, alias) {
+ log::warn!("In bulk_delete_uid: Failed to delete legacy entry. {:?}", e);
+ }
+ }
+ Ok(())
+ })
+ }
+
+ fn bulk_delete_user(&self, user_id: u32) -> Result<()> {
+ self.do_serialized(move |state| {
+ let entries = state
+ .legacy_loader
+ .list_legacy_keystore_entries_for_user(user_id)
+ .context("In bulk_delete_user: Trying to list entries.")?;
+ for (uid, entries) in entries.iter() {
+ for alias in entries.iter() {
+ if let Err(e) = state.legacy_loader.remove_legacy_keystore_entry(*uid, alias) {
+ log::warn!("In bulk_delete_user: Failed to delete legacy entry. {:?}", e);
+ }
+ }
+ }
+ Ok(())
+ })
+ }
+
+ fn migrate_one_legacy_entry(
+ uid: u32,
+ alias: &str,
+ legacy_loader: &LegacyBlobLoader,
+ db: &mut DB,
+ ) -> Result<bool> {
+ let blob = legacy_loader
+ .read_legacy_keystore_entry(uid, alias)
+ .context("In migrate_one_legacy_entry: Trying to read legacy keystore entry.")?;
+ if let Some(entry) = blob {
+ db.put(uid, alias, &entry)
+ .context("In migrate_one_legacy_entry: Trying to insert entry into DB.")?;
+ legacy_loader
+ .remove_legacy_keystore_entry(uid, alias)
+ .context("In migrate_one_legacy_entry: Trying to delete legacy keystore entry.")?;
+ Ok(true)
+ } else {
+ Ok(false)
+ }
+ }
+}
+
+struct LegacyKeystoreService {
+ legacy_keystore: Arc<LegacyKeystore>,
+}
+
+impl binder::Interface for LegacyKeystoreService {}
+
+impl ILegacyKeystore for LegacyKeystoreService {
+ fn get(&self, alias: &str, uid: i32) -> BinderResult<Vec<u8>> {
+ let _wp = wd::watch_millis("ILegacyKeystore::get", 500);
+ map_or_log_err(self.legacy_keystore.get(alias, uid), Ok)
+ }
+ fn put(&self, alias: &str, uid: i32, entry: &[u8]) -> BinderResult<()> {
+ let _wp = wd::watch_millis("ILegacyKeystore::put", 500);
+ map_or_log_err(self.legacy_keystore.put(alias, uid, entry), Ok)
+ }
+ fn remove(&self, alias: &str, uid: i32) -> BinderResult<()> {
+ let _wp = wd::watch_millis("ILegacyKeystore::remove", 500);
+ map_or_log_err(self.legacy_keystore.remove(alias, uid), Ok)
+ }
+ fn list(&self, prefix: &str, uid: i32) -> BinderResult<Vec<String>> {
+ let _wp = wd::watch_millis("ILegacyKeystore::list", 500);
+ map_or_log_err(self.legacy_keystore.list(prefix, uid), Ok)
+ }
+}
+
+#[cfg(test)]
+mod db_test {
+ use super::*;
+ use keystore2_test_utils::TempDir;
+ use std::sync::Arc;
+ use std::thread;
+ use std::time::Duration;
+ use std::time::Instant;
+
+ static TEST_ALIAS: &str = "test_alias";
+ static TEST_BLOB1: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 0];
+ static TEST_BLOB2: &[u8] = &[2, 2, 3, 4, 5, 6, 7, 8, 9, 0];
+ static TEST_BLOB3: &[u8] = &[3, 2, 3, 4, 5, 6, 7, 8, 9, 0];
+ static TEST_BLOB4: &[u8] = &[3, 2, 3, 4, 5, 6, 7, 8, 9, 0];
+
+ #[test]
+ fn test_entry_db() {
+ let test_dir = TempDir::new("entrydb_test_").expect("Failed to create temp dir.");
+ let mut db = DB::new(&test_dir.build().push(LegacyKeystore::LEGACY_KEYSTORE_FILE_NAME))
+ .expect("Failed to open database.");
+
+ // Insert three entries for owner 2.
+ db.put(2, "test1", TEST_BLOB1).expect("Failed to insert test1.");
+ db.put(2, "test2", TEST_BLOB2).expect("Failed to insert test2.");
+ db.put(2, "test3", TEST_BLOB3).expect("Failed to insert test3.");
+
+ // Check list returns all inserted aliases.
+ assert_eq!(
+ vec!["test1".to_string(), "test2".to_string(), "test3".to_string(),],
+ db.list(2).expect("Failed to list entries.")
+ );
+
+ // There should be no entries for owner 1.
+ assert_eq!(Vec::<String>::new(), db.list(1).expect("Failed to list entries."));
+
+ // Check the content of the three entries.
+ assert_eq!(Some(TEST_BLOB1), db.get(2, "test1").expect("Failed to get entry.").as_deref());
+ assert_eq!(Some(TEST_BLOB2), db.get(2, "test2").expect("Failed to get entry.").as_deref());
+ assert_eq!(Some(TEST_BLOB3), db.get(2, "test3").expect("Failed to get entry.").as_deref());
+
+        // Remove test2 and check that it is no longer retrievable.
+ assert!(db.remove(2, "test2").expect("Failed to remove entry."));
+ assert!(db.get(2, "test2").expect("Failed to get entry.").is_none());
+
+ // test2 should now no longer be in the list.
+ assert_eq!(
+ vec!["test1".to_string(), "test3".to_string(),],
+ db.list(2).expect("Failed to list entries.")
+ );
+
+ // Put on existing alias replaces it.
+ // Verify test1 is TEST_BLOB1.
+ assert_eq!(Some(TEST_BLOB1), db.get(2, "test1").expect("Failed to get entry.").as_deref());
+ db.put(2, "test1", TEST_BLOB4).expect("Failed to replace test1.");
+ // Verify test1 is TEST_BLOB4.
+ assert_eq!(Some(TEST_BLOB4), db.get(2, "test1").expect("Failed to get entry.").as_deref());
+ }
+
+ #[test]
+ fn test_delete_uid() {
+ let test_dir = TempDir::new("test_delete_uid_").expect("Failed to create temp dir.");
+ let mut db = DB::new(&test_dir.build().push(LegacyKeystore::LEGACY_KEYSTORE_FILE_NAME))
+ .expect("Failed to open database.");
+
+        // Insert two entries for owner 2 and one for owner 3.
+ db.put(2, "test1", TEST_BLOB1).expect("Failed to insert test1.");
+ db.put(2, "test2", TEST_BLOB2).expect("Failed to insert test2.");
+ db.put(3, "test3", TEST_BLOB3).expect("Failed to insert test3.");
+
+ db.remove_uid(2).expect("Failed to remove uid 2");
+
+ assert_eq!(Vec::<String>::new(), db.list(2).expect("Failed to list entries."));
+
+ assert_eq!(vec!["test3".to_string(),], db.list(3).expect("Failed to list entries."));
+ }
+
+ #[test]
+ fn test_delete_user() {
+ let test_dir = TempDir::new("test_delete_user_").expect("Failed to create temp dir.");
+ let mut db = DB::new(&test_dir.build().push(LegacyKeystore::LEGACY_KEYSTORE_FILE_NAME))
+ .expect("Failed to open database.");
+
+        // Insert two entries for user 2 and one for user 0.
+ db.put(2 + 2 * rustutils::users::AID_USER_OFFSET, "test1", TEST_BLOB1)
+ .expect("Failed to insert test1.");
+ db.put(4 + 2 * rustutils::users::AID_USER_OFFSET, "test2", TEST_BLOB2)
+ .expect("Failed to insert test2.");
+ db.put(3, "test3", TEST_BLOB3).expect("Failed to insert test3.");
+
+ db.remove_user(2).expect("Failed to remove user 2");
+
+ assert_eq!(
+ Vec::<String>::new(),
+ db.list(2 + 2 * rustutils::users::AID_USER_OFFSET).expect("Failed to list entries.")
+ );
+
+ assert_eq!(
+ Vec::<String>::new(),
+ db.list(4 + 2 * rustutils::users::AID_USER_OFFSET).expect("Failed to list entries.")
+ );
+
+ assert_eq!(vec!["test3".to_string(),], db.list(3).expect("Failed to list entries."));
+ }
+
+ #[test]
+ fn concurrent_legacy_keystore_entry_test() -> Result<()> {
+ let temp_dir = Arc::new(
+ TempDir::new("concurrent_legacy_keystore_entry_test_")
+ .expect("Failed to create temp dir."),
+ );
+
+ let db_path = temp_dir.build().push(LegacyKeystore::LEGACY_KEYSTORE_FILE_NAME).to_owned();
+
+ let test_begin = Instant::now();
+
+ let mut db = DB::new(&db_path).expect("Failed to open database.");
+ const ENTRY_COUNT: u32 = 5000u32;
+ const ENTRY_DB_COUNT: u32 = 5000u32;
+
+ let mut actual_entry_count = ENTRY_COUNT;
+ // First insert ENTRY_COUNT entries.
+ for count in 0..ENTRY_COUNT {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(15) {
+ actual_entry_count = count;
+ break;
+ }
+ let alias = format!("test_alias_{}", count);
+ db.put(1, &alias, TEST_BLOB1).expect("Failed to add entry (1).");
+ }
+
+ // Insert more keys from a different thread and into a different namespace.
+        // Insert more entries from a different thread and under a different owner.
+ let handle1 = thread::spawn(move || {
+ let mut db = DB::new(&db_path1).expect("Failed to open database.");
+
+ for count in 0..actual_entry_count {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let alias = format!("test_alias_{}", count);
+ db.put(2, &alias, TEST_BLOB2).expect("Failed to add entry (2).");
+ }
+
+ // Then delete them again.
+ for count in 0..actual_entry_count {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let alias = format!("test_alias_{}", count);
+ db.remove(2, &alias).expect("Remove Failed (2).");
+ }
+ });
+
+ // And start deleting the first set of entries.
+ let db_path2 = db_path.clone();
+ let handle2 = thread::spawn(move || {
+ let mut db = DB::new(&db_path2).expect("Failed to open database.");
+
+ for count in 0..actual_entry_count {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let alias = format!("test_alias_{}", count);
+                db.remove(1, &alias).expect("Remove Failed (1).");
+ }
+ });
+
+ // While a lot of inserting and deleting is going on we have to open database connections
+ // successfully and then insert and delete a specific entry.
+ let db_path3 = db_path.clone();
+ let handle3 = thread::spawn(move || {
+ for _count in 0..ENTRY_DB_COUNT {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let mut db = DB::new(&db_path3).expect("Failed to open database.");
+
+ db.put(3, TEST_ALIAS, TEST_BLOB3).expect("Failed to add entry (3).");
+
+ db.remove(3, TEST_ALIAS).expect("Remove failed (3).");
+ }
+ });
+
+ // While thread 3 is inserting and deleting TEST_ALIAS, we try to get the alias.
+ // This may yield an entry or none, but it must not fail.
+ let handle4 = thread::spawn(move || {
+ for _count in 0..ENTRY_DB_COUNT {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let mut db = DB::new(&db_path).expect("Failed to open database.");
+
+ // This may return Some or None but it must not fail.
+ db.get(3, TEST_ALIAS).expect("Failed to get entry (4).");
+ }
+ });
+
+ handle1.join().expect("Thread 1 panicked.");
+ handle2.join().expect("Thread 2 panicked.");
+ handle3.join().expect("Thread 3 panicked.");
+ handle4.join().expect("Thread 4 panicked.");
+
+ Ok(())
+ }
+}
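
The core of the new module is DB::with_transaction, which runs a closure inside an IMMEDIATE or DEFERRED transaction and simply retries while SQLite reports the database as busy or locked; the concurrent test above exercises exactly this behaviour. Below is a minimal standalone sketch of that retry pattern against an in-memory database; it is not part of the patch and assumes the same rusqlite and anyhow versions the patch builds with (NO_PARAMS still available).

use anyhow::{Context, Result};
use rusqlite::{params, Connection, Transaction, TransactionBehavior, NO_PARAMS};

// Sketch of the retry-on-busy strategy that DB::with_transaction implements:
// run the closure inside an IMMEDIATE transaction and retry while SQLite
// reports the database as busy or locked.
fn with_immediate_tx<T, F>(conn: &mut Connection, f: F) -> Result<T>
where
    F: Fn(&Transaction) -> Result<T>,
{
    loop {
        match conn
            .transaction_with_behavior(TransactionBehavior::Immediate)
            .context("Failed to start transaction.")
            .and_then(|tx| f(&tx).map(|result| (result, tx)))
            .and_then(|(result, tx)| {
                tx.commit().context("Failed to commit transaction.")?;
                Ok(result)
            }) {
            Ok(result) => break Ok(result),
            Err(e) => {
                let busy = matches!(
                    e.root_cause().downcast_ref::<rusqlite::ffi::Error>(),
                    Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseBusy, .. })
                        | Some(rusqlite::ffi::Error {
                            code: rusqlite::ErrorCode::DatabaseLocked,
                            ..
                        })
                );
                if !busy {
                    break Err(e);
                }
                // Brief back-off before retrying, mirroring the patch.
                std::thread::sleep(std::time::Duration::from_micros(500));
            }
        }
    }
}

fn main() -> Result<()> {
    let mut conn = Connection::open_in_memory()?;
    with_immediate_tx(&mut conn, |tx| {
        tx.execute(
            "CREATE TABLE IF NOT EXISTS profiles (
                 owner INTEGER,
                 alias BLOB,
                 profile BLOB,
                 UNIQUE(owner, alias));",
            NO_PARAMS,
        )
        .context("Failed to create table.")?;
        tx.execute(
            "INSERT OR REPLACE INTO profiles (owner, alias, profile) VALUES (?, ?, ?)",
            params![1u32, "some_alias", &[1u8, 2, 3][..]],
        )
        .context("Failed to insert entry.")?;
        Ok(())
    })
}
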
diff --git a/keystore2/selinux/src/lib.rs b/keystore2/selinux/src/lib.rs
index 5197cf6..cf6dfd3 100644
--- a/keystore2/selinux/src/lib.rs
+++ b/keystore2/selinux/src/lib.rs
@@ -130,7 +130,7 @@
fn deref(&self) -> &Self::Target {
match self {
Self::Raw(p) => unsafe { CStr::from_ptr(*p) },
- Self::CString(cstr) => &cstr,
+ Self::CString(cstr) => cstr,
}
}
}
diff --git a/keystore2/src/async_task.rs b/keystore2/src/async_task.rs
index 45f0274..0515c8f 100644
--- a/keystore2/src/async_task.rs
+++ b/keystore2/src/async_task.rs
@@ -19,7 +19,6 @@
//! processed all tasks before it terminates.
//! Note that low priority tasks are processed only when the high priority queue is empty.
-use crate::utils::watchdog as wd;
use std::{any::Any, any::TypeId, time::Duration};
use std::{
collections::{HashMap, VecDeque},
@@ -241,14 +240,11 @@
// Now that the lock has been dropped, perform the action.
match action {
Action::QueuedFn(f) => {
- let _wd = wd::watch_millis("async_task thread: calling queued fn", 500);
f(&mut shelf);
done_idle = false;
}
Action::IdleFns(idle_fns) => {
for idle_fn in idle_fns {
- let _wd =
- wd::watch_millis("async_task thread: calling idle_fn", 500);
idle_fn(&mut shelf);
}
done_idle = true;
@@ -421,7 +417,9 @@
Err(RecvTimeoutError::Timeout)
);
done_receiver.recv().unwrap();
- idle_done_receiver.recv_timeout(Duration::from_millis(1)).unwrap();
+ // Now that the last low-priority job has completed, the idle task should
+ // fire pretty much immediately.
+ idle_done_receiver.recv_timeout(Duration::from_millis(50)).unwrap();
// Idle callback not executed again even if we wait for a while.
assert_eq!(
@@ -442,7 +440,7 @@
Err(RecvTimeoutError::Timeout)
);
done_receiver.recv().unwrap();
- idle_done_receiver.recv_timeout(Duration::from_millis(1)).unwrap();
+ idle_done_receiver.recv_timeout(Duration::from_millis(50)).unwrap();
}
#[test]
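
The test change above widens the idle-callback recv_timeout from 1 ms to 50 ms: the idle callback only runs once the worker thread has been scheduled after the last job completes, so a 1 ms bound races the scheduler. A small std-only sketch of that timing assumption (the channel and spawned thread merely stand in for the async_task worker; none of this is part of the patch):

use std::sync::mpsc;
use std::thread;
use std::time::Duration;

fn main() {
    let (idle_done_sender, idle_done_receiver) = mpsc::channel();

    // Stand-in for the async_task worker thread: it can only signal "idle done"
    // once the scheduler has run it, which may take more than a millisecond on a
    // loaded machine.
    thread::spawn(move || {
        idle_done_sender.send(()).unwrap();
    });

    // 50 ms is still "pretty much immediately" for a test, but no longer races
    // the scheduler the way a 1 ms bound does.
    idle_done_receiver
        .recv_timeout(Duration::from_millis(50))
        .expect("idle callback did not run in time");
}
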
diff --git a/keystore2/src/attestation_key_utils.rs b/keystore2/src/attestation_key_utils.rs
index 425eec6..b6a8e31 100644
--- a/keystore2/src/attestation_key_utils.rs
+++ b/keystore2/src/attestation_key_utils.rs
@@ -22,7 +22,7 @@
use crate::remote_provisioning::RemProvState;
use crate::utils::check_key_permission;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- AttestationKey::AttestationKey, Certificate::Certificate, KeyParameter::KeyParameter,
+ AttestationKey::AttestationKey, Certificate::Certificate, KeyParameter::KeyParameter, Tag::Tag,
};
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, KeyDescriptor::KeyDescriptor,
@@ -47,8 +47,8 @@
}
/// This function loads and, optionally, assigns the caller's remote provisioned
-/// attestation key or, if `attest_key_descriptor` is given, it loads the user
-/// generated attestation key from the database.
+/// attestation key if a challenge is present. Alternatively, if `attest_key_descriptor` is given,
+/// it loads the user generated attestation key from the database.
pub fn get_attest_key_info(
key: &KeyDescriptor,
caller_uid: u32,
@@ -57,9 +57,10 @@
rem_prov_state: &RemProvState,
db: &mut KeystoreDB,
) -> Result<Option<AttestationKeyInfo>> {
+ let challenge_present = params.iter().any(|kp| kp.tag == Tag::ATTESTATION_CHALLENGE);
match attest_key_descriptor {
- None => rem_prov_state
- .get_remotely_provisioned_attestation_key_and_certs(&key, caller_uid, params, db)
+ None if challenge_present => rem_prov_state
+ .get_remotely_provisioned_attestation_key_and_certs(key, caller_uid, params, db)
.context(concat!(
"In get_attest_key_and_cert_chain: ",
"Trying to get remotely provisioned attestation key."
@@ -69,7 +70,8 @@
AttestationKeyInfo::RemoteProvisioned { attestation_key, attestation_certs }
})
}),
- Some(attest_key) => get_user_generated_attestation_key(&attest_key, caller_uid, db)
+ None => Ok(None),
+ Some(attest_key) => get_user_generated_attestation_key(attest_key, caller_uid, db)
.context("In get_attest_key_and_cert_chain: Trying to load attest key")
.map(Some),
}
@@ -81,7 +83,7 @@
db: &mut KeystoreDB,
) -> Result<AttestationKeyInfo> {
let (key_id_guard, blob, cert, blob_metadata) =
- load_attest_key_blob_and_cert(&key, caller_uid, db)
+ load_attest_key_blob_and_cert(key, caller_uid, db)
.context("In get_user_generated_attestation_key: Failed to load blob and cert")?;
let issuer_subject: Vec<u8> = parse_subject_from_certificate(&cert).context(
@@ -103,7 +105,7 @@
_ => {
let (key_id_guard, mut key_entry) = db
.load_key_entry(
- &key,
+ key,
KeyType::Client,
KeyEntryLoadBits::BOTH,
caller_uid,
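
The behavioural change in get_attest_key_info is that a remotely provisioned attestation key is now only looked up when the request actually carries an ATTESTATION_CHALLENGE; with no challenge and no user-supplied attest key, no attestation key info is returned at all. A stripped-down sketch of that selection logic, not part of the patch, with stand-in types in place of the AIDL KeyParameter and KeyDescriptor:

// Stand-ins for the AIDL types used by get_attest_key_info.
#[derive(PartialEq)]
enum Tag {
    AttestationChallenge,
    Other,
}

struct KeyParameter {
    tag: Tag,
}

#[derive(Debug)]
enum AttestationKeyInfo {
    RemoteProvisioned,
    UserGenerated,
}

fn select_attest_key(
    attest_key_descriptor: Option<&str>, // stand-in for Option<&KeyDescriptor>
    params: &[KeyParameter],
) -> Option<AttestationKeyInfo> {
    // New in this patch: only consult remote provisioning when the request
    // carries an attestation challenge.
    let challenge_present = params.iter().any(|kp| kp.tag == Tag::AttestationChallenge);
    match attest_key_descriptor {
        None if challenge_present => Some(AttestationKeyInfo::RemoteProvisioned),
        None => None,
        Some(_user_key) => Some(AttestationKeyInfo::UserGenerated),
    }
}

fn main() {
    let with_challenge = [KeyParameter { tag: Tag::AttestationChallenge }];
    let without_challenge = [KeyParameter { tag: Tag::Other }];

    println!("{:?}", select_attest_key(None, &with_challenge)); // RemoteProvisioned
    println!("{:?}", select_attest_key(None, &without_challenge)); // None
    println!("{:?}", select_attest_key(Some("my_attest_key"), &without_challenge)); // UserGenerated
}
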
diff --git a/keystore2/src/boot_level_keys.rs b/keystore2/src/boot_level_keys.rs
index 1110caf..08c52af 100644
--- a/keystore2/src/boot_level_keys.rs
+++ b/keystore2/src/boot_level_keys.rs
@@ -243,40 +243,40 @@
fn test_output_is_consistent() -> Result<()> {
let initial_key = b"initial key";
let mut blkc = BootLevelKeyCache::new(ZVec::try_from(initial_key as &[u8])?);
- assert_eq!(true, blkc.level_accessible(0));
- assert_eq!(true, blkc.level_accessible(9));
- assert_eq!(true, blkc.level_accessible(10));
- assert_eq!(true, blkc.level_accessible(100));
+ assert!(blkc.level_accessible(0));
+ assert!(blkc.level_accessible(9));
+ assert!(blkc.level_accessible(10));
+ assert!(blkc.level_accessible(100));
let v0 = blkc.aes_key(0).unwrap().unwrap();
let v10 = blkc.aes_key(10).unwrap().unwrap();
assert_eq!(Some(&v0), blkc.aes_key(0)?.as_ref());
assert_eq!(Some(&v10), blkc.aes_key(10)?.as_ref());
blkc.advance_boot_level(5)?;
- assert_eq!(false, blkc.level_accessible(0));
- assert_eq!(true, blkc.level_accessible(9));
- assert_eq!(true, blkc.level_accessible(10));
- assert_eq!(true, blkc.level_accessible(100));
+ assert!(!blkc.level_accessible(0));
+ assert!(blkc.level_accessible(9));
+ assert!(blkc.level_accessible(10));
+ assert!(blkc.level_accessible(100));
assert_eq!(None, blkc.aes_key(0)?);
assert_eq!(Some(&v10), blkc.aes_key(10)?.as_ref());
blkc.advance_boot_level(10)?;
- assert_eq!(false, blkc.level_accessible(0));
- assert_eq!(false, blkc.level_accessible(9));
- assert_eq!(true, blkc.level_accessible(10));
- assert_eq!(true, blkc.level_accessible(100));
+ assert!(!blkc.level_accessible(0));
+ assert!(!blkc.level_accessible(9));
+ assert!(blkc.level_accessible(10));
+ assert!(blkc.level_accessible(100));
assert_eq!(None, blkc.aes_key(0)?);
assert_eq!(Some(&v10), blkc.aes_key(10)?.as_ref());
blkc.advance_boot_level(0)?;
- assert_eq!(false, blkc.level_accessible(0));
- assert_eq!(false, blkc.level_accessible(9));
- assert_eq!(true, blkc.level_accessible(10));
- assert_eq!(true, blkc.level_accessible(100));
+ assert!(!blkc.level_accessible(0));
+ assert!(!blkc.level_accessible(9));
+ assert!(blkc.level_accessible(10));
+ assert!(blkc.level_accessible(100));
assert_eq!(None, blkc.aes_key(0)?);
assert_eq!(Some(v10), blkc.aes_key(10)?);
blkc.finish();
- assert_eq!(false, blkc.level_accessible(0));
- assert_eq!(false, blkc.level_accessible(9));
- assert_eq!(false, blkc.level_accessible(10));
- assert_eq!(false, blkc.level_accessible(100));
+ assert!(!blkc.level_accessible(0));
+ assert!(!blkc.level_accessible(9));
+ assert!(!blkc.level_accessible(10));
+ assert!(!blkc.level_accessible(100));
assert_eq!(None, blkc.aes_key(0)?);
assert_eq!(None, blkc.aes_key(10)?);
Ok(())
diff --git a/keystore2/src/crypto/Android.bp b/keystore2/src/crypto/Android.bp
index 3ba47cd..4e76507 100644
--- a/keystore2/src/crypto/Android.bp
+++ b/keystore2/src/crypto/Android.bp
@@ -125,3 +125,13 @@
"libcrypto",
],
}
+
+rust_test {
+ name: "libkeystore2_crypto_bindgen_test",
+ srcs: [":libkeystore2_crypto_bindgen"],
+ crate_name: "keystore2_crypto_bindgen_test",
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ clippy_lints: "none",
+ lints: "none",
+}
diff --git a/keystore2/src/crypto/crypto.cpp b/keystore2/src/crypto/crypto.cpp
index e4a1ac3..5d360a1 100644
--- a/keystore2/src/crypto/crypto.cpp
+++ b/keystore2/src/crypto/crypto.cpp
@@ -225,7 +225,7 @@
EC_KEY* ECKEYGenerateKey() {
EC_KEY* key = EC_KEY_new();
- EC_GROUP* group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
+ EC_GROUP* group = EC_GROUP_new_by_curve_name(NID_secp521r1);
EC_KEY_set_group(key, group);
auto result = EC_KEY_generate_key(key);
if (result == 0) {
@@ -251,7 +251,7 @@
EC_KEY* ECKEYParsePrivateKey(const uint8_t* buf, size_t len) {
CBS cbs;
CBS_init(&cbs, buf, len);
- EC_GROUP* group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
+ EC_GROUP* group = EC_GROUP_new_by_curve_name(NID_secp521r1);
auto result = EC_KEY_parse_private_key(&cbs, group);
EC_GROUP_free(group);
if (result != nullptr && CBS_len(&cbs) != 0) {
@@ -262,7 +262,7 @@
}
size_t ECPOINTPoint2Oct(const EC_POINT* point, uint8_t* buf, size_t len) {
- EC_GROUP* group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
+ EC_GROUP* group = EC_GROUP_new_by_curve_name(NID_secp521r1);
point_conversion_form_t form = POINT_CONVERSION_UNCOMPRESSED;
auto result = EC_POINT_point2oct(group, point, form, buf, len, nullptr);
EC_GROUP_free(group);
@@ -270,7 +270,7 @@
}
EC_POINT* ECPOINTOct2Point(const uint8_t* buf, size_t len) {
- EC_GROUP* group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
+ EC_GROUP* group = EC_GROUP_new_by_curve_name(NID_secp521r1);
EC_POINT* point = EC_POINT_new(group);
auto result = EC_POINT_oct2point(group, point, buf, len, nullptr);
EC_GROUP_free(group);
diff --git a/keystore2/src/crypto/lib.rs b/keystore2/src/crypto/lib.rs
index db23d1f..5f8a2ef 100644
--- a/keystore2/src/crypto/lib.rs
+++ b/keystore2/src/crypto/lib.rs
@@ -346,7 +346,7 @@
/// Calls the boringssl EC_KEY_marshal_private_key function.
pub fn ec_key_marshal_private_key(key: &ECKey) -> Result<ZVec, Error> {
- let len = 39; // Empirically observed length of private key
+ let len = 73; // Empirically observed length of private key
let mut buf = ZVec::new(len)?;
// Safety: the key is valid.
// This will not write past the specified length of the buffer; if the
@@ -381,8 +381,8 @@
/// Calls the boringssl EC_POINT_point2oct.
pub fn ec_point_point_to_oct(point: &EC_POINT) -> Result<Vec<u8>, Error> {
- // We fix the length to 65 (1 + 2 * field_elem_size), as we get an error if it's too small.
- let len = 65;
+ // We fix the length to 133 (1 + 2 * field_elem_size), as we get an error if it's too small.
+ let len = 133;
let mut buf = vec![0; len];
// Safety: EC_POINT_point2oct writes at most len bytes. The point is valid.
let result = unsafe { ECPOINTPoint2Oct(point, buf.as_mut_ptr(), len) };
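
The new buffer sizes follow from the curve switch in crypto.cpp from NID_X9_62_prime256v1 to NID_secp521r1: a P-521 field element is ceil(521 / 8) = 66 bytes, so an uncompressed point (one form byte plus two coordinates) grows from 65 to 133 bytes, while the 73-byte private-key length remains, as its comment says, empirically observed. A quick standalone check of the point-size arithmetic (not part of the patch):

// Uncompressed EC point: 0x04 form byte followed by the affine X and Y
// coordinates, each one field element of ceil(curve_bits / 8) bytes.
fn uncompressed_point_len(curve_bits: usize) -> usize {
    let field_elem_size = (curve_bits + 7) / 8;
    1 + 2 * field_elem_size
}

fn main() {
    assert_eq!(65, uncompressed_point_len(256)); // old NID_X9_62_prime256v1 buffer
    assert_eq!(133, uncompressed_point_len(521)); // new NID_secp521r1 buffer
    println!(
        "P-256 point: {} bytes, P-521 point: {} bytes",
        uncompressed_point_len(256),
        uncompressed_point_len(521)
    );
}
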
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index b4122bb..ae2875c 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -47,6 +47,7 @@
use crate::impl_metadata; // This is in db_utils.rs
use crate::key_parameter::{KeyParameter, Tag};
+use crate::metrics_store::log_rkp_error_stats;
use crate::permission::KeyPermSet;
use crate::utils::{get_current_time_in_milliseconds, watchdog as wd, AID_USER_OFFSET};
use crate::{
@@ -69,8 +70,10 @@
use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
AttestationPoolStatus::AttestationPoolStatus,
};
-use statslog_rust::keystore2_storage_stats::{
- Keystore2StorageStats, StorageType as StatsdStorageType,
+use android_security_metrics::aidl::android::security::metrics::{
+ StorageStats::StorageStats,
+ Storage::Storage as MetricsStorage,
+ RkpError::RkpError as MetricsRkpError,
};
use keystore2_crypto::ZVec;
@@ -139,7 +142,7 @@
let db_tag: i64 = row.get(0).context("Failed to read tag.")?;
metadata.insert(
db_tag,
- KeyMetaEntry::new_from_sql(db_tag, &SqlField::new(1, &row))
+ KeyMetaEntry::new_from_sql(db_tag, &SqlField::new(1, row))
.context("Failed to read KeyMetaEntry.")?,
);
Ok(())
@@ -214,7 +217,7 @@
let db_tag: i64 = row.get(0).context("Failed to read tag.")?;
metadata.insert(
db_tag,
- BlobMetaEntry::new_from_sql(db_tag, &SqlField::new(1, &row))
+ BlobMetaEntry::new_from_sql(db_tag, &SqlField::new(1, row))
.context("Failed to read BlobMetaEntry.")?,
);
Ok(())
@@ -385,12 +388,12 @@
}
/// Returns unix epoch time in milliseconds.
- pub fn to_millis_epoch(&self) -> i64 {
+ pub fn to_millis_epoch(self) -> i64 {
self.0
}
/// Returns unix epoch time in seconds.
- pub fn to_secs_epoch(&self) -> i64 {
+ pub fn to_secs_epoch(self) -> i64 {
self.0 / 1000
}
}
@@ -829,21 +832,7 @@
const UPGRADERS: &'static [fn(&Transaction) -> Result<u32>] = &[Self::from_0_to_1];
/// Name of the file that holds the cross-boot persistent database.
- pub const PERSISTENT_DB_FILENAME: &'static str = &"persistent.sqlite";
-
- /// Set write-ahead logging mode on the persistent database found in `db_root`.
- pub fn set_wal_mode(db_root: &Path) -> Result<()> {
- let path = Self::make_persistent_path(&db_root)?;
- let conn =
- Connection::open(path).context("In KeystoreDB::set_wal_mode: Failed to open DB")?;
- let mode: String = conn
- .pragma_update_and_check(None, "journal_mode", &"WAL", |row| row.get(0))
- .context("In KeystoreDB::set_wal_mode: Failed to set journal_mode")?;
- match mode.as_str() {
- "wal" => Ok(()),
- _ => Err(anyhow!("Unable to set WAL mode, db is still in {} mode.", mode)),
- }
- }
+ pub const PERSISTENT_DB_FILENAME: &'static str = "persistent.sqlite";
/// This will create a new database connection connecting the two
/// files persistent.sqlite and perboot.sqlite in the given directory.
@@ -853,7 +842,7 @@
pub fn new(db_root: &Path, gc: Option<Arc<Gc>>) -> Result<Self> {
let _wp = wd::watch_millis("KeystoreDB::new", 500);
- let persistent_path = Self::make_persistent_path(&db_root)?;
+ let persistent_path = Self::make_persistent_path(db_root)?;
let conn = Self::make_connection(&persistent_path)?;
let mut db = Self { conn, gc, perboot: perboot::PERBOOT_DB.clone() };
@@ -1035,23 +1024,23 @@
fn do_table_size_query(
&mut self,
- storage_type: StatsdStorageType,
+ storage_type: MetricsStorage,
query: &str,
params: &[&str],
- ) -> Result<Keystore2StorageStats> {
+ ) -> Result<StorageStats> {
let (total, unused) = self.with_transaction(TransactionBehavior::Deferred, |tx| {
tx.query_row(query, params_from_iter(params), |row| Ok((row.get(0)?, row.get(1)?)))
.with_context(|| {
- format!("get_storage_stat: Error size of storage type {}", storage_type as i32)
+ format!("get_storage_stat: Error size of storage type {}", storage_type.0)
})
.no_gc()
})?;
- Ok(Keystore2StorageStats { storage_type, size: total, unused_size: unused })
+ Ok(StorageStats { storage_type, size: total, unused_size: unused })
}
- fn get_total_size(&mut self) -> Result<Keystore2StorageStats> {
+ fn get_total_size(&mut self) -> Result<StorageStats> {
self.do_table_size_query(
- StatsdStorageType::Database,
+ MetricsStorage::DATABASE,
"SELECT page_count * page_size, freelist_count * page_size
FROM pragma_page_count('persistent'),
pragma_page_size('persistent'),
@@ -1062,10 +1051,10 @@
fn get_table_size(
&mut self,
- storage_type: StatsdStorageType,
+ storage_type: MetricsStorage,
schema: &str,
table: &str,
- ) -> Result<Keystore2StorageStats> {
+ ) -> Result<StorageStats> {
self.do_table_size_query(
storage_type,
"SELECT pgsize,unused FROM dbstat(?1)
@@ -1077,63 +1066,57 @@
/// Fetches a storage statistics atom for a given storage type. For storage
/// types that map to a table, information about the table's storage is
/// returned. Requests for storage types that are not DB tables return None.
- pub fn get_storage_stat(
- &mut self,
- storage_type: StatsdStorageType,
- ) -> Result<Keystore2StorageStats> {
+ pub fn get_storage_stat(&mut self, storage_type: MetricsStorage) -> Result<StorageStats> {
let _wp = wd::watch_millis("KeystoreDB::get_storage_stat", 500);
match storage_type {
- StatsdStorageType::Database => self.get_total_size(),
- StatsdStorageType::KeyEntry => {
+ MetricsStorage::DATABASE => self.get_total_size(),
+ MetricsStorage::KEY_ENTRY => {
self.get_table_size(storage_type, "persistent", "keyentry")
}
- StatsdStorageType::KeyEntryIdIndex => {
+ MetricsStorage::KEY_ENTRY_ID_INDEX => {
self.get_table_size(storage_type, "persistent", "keyentry_id_index")
}
- StatsdStorageType::KeyEntryDomainNamespaceIndex => {
+ MetricsStorage::KEY_ENTRY_DOMAIN_NAMESPACE_INDEX => {
self.get_table_size(storage_type, "persistent", "keyentry_domain_namespace_index")
}
- StatsdStorageType::BlobEntry => {
+ MetricsStorage::BLOB_ENTRY => {
self.get_table_size(storage_type, "persistent", "blobentry")
}
- StatsdStorageType::BlobEntryKeyEntryIdIndex => {
+ MetricsStorage::BLOB_ENTRY_KEY_ENTRY_ID_INDEX => {
self.get_table_size(storage_type, "persistent", "blobentry_keyentryid_index")
}
- StatsdStorageType::KeyParameter => {
+ MetricsStorage::KEY_PARAMETER => {
self.get_table_size(storage_type, "persistent", "keyparameter")
}
- StatsdStorageType::KeyParameterKeyEntryIdIndex => {
+ MetricsStorage::KEY_PARAMETER_KEY_ENTRY_ID_INDEX => {
self.get_table_size(storage_type, "persistent", "keyparameter_keyentryid_index")
}
- StatsdStorageType::KeyMetadata => {
+ MetricsStorage::KEY_METADATA => {
self.get_table_size(storage_type, "persistent", "keymetadata")
}
- StatsdStorageType::KeyMetadataKeyEntryIdIndex => {
+ MetricsStorage::KEY_METADATA_KEY_ENTRY_ID_INDEX => {
self.get_table_size(storage_type, "persistent", "keymetadata_keyentryid_index")
}
- StatsdStorageType::Grant => self.get_table_size(storage_type, "persistent", "grant"),
- StatsdStorageType::AuthToken => {
+ MetricsStorage::GRANT => self.get_table_size(storage_type, "persistent", "grant"),
+ MetricsStorage::AUTH_TOKEN => {
// Since the table is actually a BTreeMap now, unused_size is not meaningfully
// reportable
// Size provided is only an approximation
- Ok(Keystore2StorageStats {
+ Ok(StorageStats {
storage_type,
size: (self.perboot.auth_tokens_len() * std::mem::size_of::<AuthTokenEntry>())
- as i64,
+ as i32,
unused_size: 0,
})
}
- StatsdStorageType::BlobMetadata => {
+ MetricsStorage::BLOB_METADATA => {
self.get_table_size(storage_type, "persistent", "blobmetadata")
}
- StatsdStorageType::BlobMetadataBlobEntryIdIndex => {
+ MetricsStorage::BLOB_METADATA_BLOB_ENTRY_ID_INDEX => {
self.get_table_size(storage_type, "persistent", "blobmetadata_blobentryid_index")
}
- _ => Err(anyhow::Error::msg(format!(
- "Unsupported storage type: {}",
- storage_type as i32
- ))),
+ _ => Err(anyhow::Error::msg(format!("Unsupported storage type: {}", storage_type.0))),
}
}
@@ -1261,7 +1244,7 @@
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let key_descriptor =
KeyDescriptor { domain, nspace, alias: Some(alias.to_string()), blob: None };
- let result = Self::load_key_entry_id(&tx, &key_descriptor, key_type);
+ let result = Self::load_key_entry_id(tx, &key_descriptor, key_type);
match result {
Ok(_) => Ok(true),
Err(error) => match error.root_cause().downcast_ref::<KsError>() {
@@ -1307,7 +1290,7 @@
key_metadata.store_in_db(key_id, tx).context("KeyMetaData::store_in_db failed")?;
Self::set_blob_internal(
- &tx,
+ tx,
key_id,
SubComponentType::KEY_BLOB,
Some(blob),
@@ -1337,10 +1320,10 @@
alias: Some(key_type.alias.into()),
blob: None,
};
- let id = Self::load_key_entry_id(&tx, &key_descriptor, KeyType::Super);
+ let id = Self::load_key_entry_id(tx, &key_descriptor, KeyType::Super);
match id {
Ok(id) => {
- let key_entry = Self::load_key_components(&tx, KeyEntryLoadBits::KM, id)
+ let key_entry = Self::load_key_components(tx, KeyEntryLoadBits::KM, id)
.context("In load_super_key. Failed to load key entry.")?;
Ok(Some((KEY_ID_LOCK.get(id), key_entry)))
}
@@ -1400,7 +1383,7 @@
let (id, entry) = match id {
Some(id) => (
id,
- Self::load_key_components(&tx, KeyEntryLoadBits::KM, id)
+ Self::load_key_components(tx, KeyEntryLoadBits::KM, id)
.context("In get_or_create_key_with.")?,
),
@@ -1426,7 +1409,7 @@
let (blob, metadata) =
create_new_key().context("In get_or_create_key_with.")?;
Self::set_blob_internal(
- &tx,
+ tx,
id,
SubComponentType::KEY_BLOB,
Some(&blob),
@@ -1577,7 +1560,7 @@
.context("In create_key_entry")?,
);
Self::set_blob_internal(
- &tx,
+ tx,
key_id.0,
SubComponentType::KEY_BLOB,
Some(private_key),
@@ -1586,7 +1569,7 @@
let mut metadata = KeyMetaData::new();
metadata.add(KeyMetaEntry::AttestationMacedPublicKey(maced_public_key.to_vec()));
metadata.add(KeyMetaEntry::AttestationRawPubKey(raw_public_key.to_vec()));
- metadata.store_in_db(key_id.0, &tx)?;
+ metadata.store_in_db(key_id.0, tx)?;
Ok(()).no_gc()
})
.context("In create_attestation_key_entry")
@@ -1609,7 +1592,7 @@
let _wp = wd::watch_millis("KeystoreDB::set_blob", 500);
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::set_blob_internal(&tx, key_id.0, sc_type, blob, blob_metadata).need_gc()
+ Self::set_blob_internal(tx, key_id.0, sc_type, blob, blob_metadata).need_gc()
})
.context("In set_blob.")
}
@@ -1623,7 +1606,7 @@
self.with_transaction(TransactionBehavior::Immediate, |tx| {
Self::set_blob_internal(
- &tx,
+ tx,
Self::UNASSIGNED_KEY_ID,
SubComponentType::KEY_BLOB,
Some(blob),
@@ -1716,7 +1699,7 @@
#[cfg(test)]
fn insert_key_metadata(&mut self, key_id: &KeyIdGuard, metadata: &KeyMetaData) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- metadata.store_in_db(key_id.0, &tx).no_gc()
+ metadata.store_in_db(key_id.0, tx).no_gc()
})
.context("In insert_key_metadata.")
}
@@ -1778,16 +1761,16 @@
metadata.add(KeyMetaEntry::AttestationExpirationDate(DateTime::from_millis_epoch(
expiration_date,
)));
- metadata.store_in_db(key_id, &tx).context("Failed to insert key metadata.")?;
+ metadata.store_in_db(key_id, tx).context("Failed to insert key metadata.")?;
Self::set_blob_internal(
- &tx,
+ tx,
key_id,
SubComponentType::CERT_CHAIN,
Some(cert_chain),
None,
)
.context("Failed to insert cert chain")?;
- Self::set_blob_internal(&tx, key_id, SubComponentType::CERT, Some(batch_cert), None)
+ Self::set_blob_internal(tx, key_id, SubComponentType::CERT, Some(batch_cert), None)
.context("Failed to insert cert")?;
Ok(()).no_gc()
})
@@ -1848,6 +1831,7 @@
)
.context("Failed to assign attestation key")?;
if result == 0 {
+ log_rkp_error_stats(MetricsRkpError::OUT_OF_KEYS);
return Err(KsError::Rc(ResponseCode::OUT_OF_KEYS)).context("Out of keys.");
} else if result > 1 {
return Err(KsError::sys())
@@ -1930,7 +1914,7 @@
);
let mut num_deleted = 0;
for id in key_ids_to_check.iter().filter(|kt| kt.1 < curr_time).map(|kt| kt.0) {
- if Self::mark_unreferenced(&tx, id)? {
+ if Self::mark_unreferenced(tx, id)? {
num_deleted += 1;
}
}
@@ -1957,7 +1941,7 @@
.context("Failed to execute statement")?;
let num_deleted = keys_to_delete
.iter()
- .map(|id| Self::mark_unreferenced(&tx, *id))
+ .map(|id| Self::mark_unreferenced(tx, *id))
.collect::<Result<Vec<bool>>>()
.context("Failed to execute mark_unreferenced on a keyid")?
.into_iter()
@@ -2243,7 +2227,7 @@
/// fields, and rebinds the given alias to the new key.
/// The boolean returned is a hint for the garbage collector. If true, a key was replaced,
/// is now unreferenced and needs to be collected.
- #[allow(clippy::clippy::too_many_arguments)]
+ #[allow(clippy::too_many_arguments)]
pub fn store_new_key(
&mut self,
key: &KeyDescriptor,
@@ -2275,11 +2259,11 @@
key_id.id(),
SubComponentType::KEY_BLOB,
Some(blob),
- Some(&blob_metadata),
+ Some(blob_metadata),
)
.context("Trying to insert the key blob.")?;
if let Some(cert) = &cert_info.cert {
- Self::set_blob_internal(tx, key_id.id(), SubComponentType::CERT, Some(&cert), None)
+ Self::set_blob_internal(tx, key_id.id(), SubComponentType::CERT, Some(cert), None)
.context("Trying to insert the certificate.")?;
}
if let Some(cert_chain) = &cert_info.cert_chain {
@@ -2287,7 +2271,7 @@
tx,
key_id.id(),
SubComponentType::CERT_CHAIN,
- Some(&cert_chain),
+ Some(cert_chain),
None,
)
.context("Trying to insert the certificate chain.")?;
@@ -2295,7 +2279,7 @@
Self::insert_keyparameter_internal(tx, &key_id, params)
.context("Trying to insert key parameters.")?;
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
- let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace, key_type)
+ let need_gc = Self::rebind_alias(tx, &key_id, alias, &domain, namespace, key_type)
.context("Trying to rebind alias.")?;
Ok(key_id).do_gc(need_gc)
})
@@ -2345,7 +2329,7 @@
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
- let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace, key_type)
+ let need_gc = Self::rebind_alias(tx, &key_id, alias, &domain, namespace, key_type)
.context("Trying to rebind alias.")?;
Ok(key_id).do_gc(need_gc)
})
@@ -2414,7 +2398,7 @@
if access_key.domain == Domain::APP {
access_key.nspace = caller_uid as i64;
}
- let key_id = Self::load_key_entry_id(&tx, &access_key, key_type)
+ let key_id = Self::load_key_entry_id(tx, &access_key, key_type)
.with_context(|| format!("With key.domain = {:?}.", access_key.domain))?;
Ok((key_id, access_key, None))
@@ -2579,7 +2563,7 @@
let tag = Tag(row.get(0).context("Failed to read tag.")?);
let sec_level = SecurityLevel(row.get(2).context("Failed to read sec_level.")?);
parameters.push(
- KeyParameter::new_from_sql(tag, &SqlField::new(1, &row), sec_level)
+ KeyParameter::new_from_sql(tag, &SqlField::new(1, row), sec_level)
.context("Failed to read KeyParameter.")?,
);
Ok(())
@@ -2957,7 +2941,7 @@
}
}
}
- notify_gc = Self::mark_unreferenced(&tx, key_id)
+ notify_gc = Self::mark_unreferenced(tx, key_id)
.context("In unbind_keys_for_user.")?
|| notify_gc;
}
@@ -2971,16 +2955,15 @@
load_bits: KeyEntryLoadBits,
key_id: i64,
) -> Result<KeyEntry> {
- let metadata = KeyMetaData::load_from_db(key_id, &tx).context("In load_key_components.")?;
+ let metadata = KeyMetaData::load_from_db(key_id, tx).context("In load_key_components.")?;
let (has_km_blob, key_blob_info, cert_blob, cert_chain_blob) =
- Self::load_blob_components(key_id, load_bits, &tx)
- .context("In load_key_components.")?;
+ Self::load_blob_components(key_id, load_bits, tx).context("In load_key_components.")?;
- let parameters = Self::load_key_parameters(key_id, &tx)
+ let parameters = Self::load_key_parameters(key_id, tx)
.context("In load_key_components: Trying to load key parameters.")?;
- let km_uuid = Self::get_key_km_uuid(&tx, key_id)
+ let km_uuid = Self::get_key_km_uuid(tx, key_id)
.context("In load_key_components: Trying to get KM uuid.")?;
Ok(KeyEntry {
@@ -3064,7 +3047,7 @@
// But even if we load the access tuple by grant here, the permission
// check denies the attempt to create a grant by grant descriptor.
let (key_id, access_key_descriptor, _) =
- Self::load_access_tuple(&tx, key, KeyType::Client, caller_uid)
+ Self::load_access_tuple(tx, key, KeyType::Client, caller_uid)
.context("In grant")?;
// Perform access control. It is vital that we return here if the permission
@@ -3124,7 +3107,7 @@
// Load the key_id and complete the access control tuple.
// We ignore the access vector here because grants cannot be granted.
let (key_id, access_key_descriptor, _) =
- Self::load_access_tuple(&tx, key, KeyType::Client, caller_uid)
+ Self::load_access_tuple(tx, key, KeyType::Client, caller_uid)
.context("In ungrant.")?;
// Perform access control. We must return here if the permission
@@ -3244,7 +3227,6 @@
use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
Timestamp::Timestamp,
};
- use rusqlite::DatabaseName::Attached;
use rusqlite::NO_PARAMS;
use rusqlite::TransactionBehavior;
use std::cell::RefCell;
@@ -3483,7 +3465,7 @@
load_attestation_key_pool(&mut db, expiration_date, namespace, base_byte)?;
let chain =
db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
- assert_eq!(true, chain.is_some());
+ assert!(chain.is_some());
let cert_chain = chain.unwrap();
assert_eq!(cert_chain.private_key.to_vec(), loaded_values.priv_key);
assert_eq!(cert_chain.batch_cert, loaded_values.batch_cert);
@@ -4326,8 +4308,8 @@
let mut db = new_test_db()?;
const SOURCE_UID: u32 = 1u32;
const DESTINATION_UID: u32 = 2u32;
- static SOURCE_ALIAS: &str = &"SOURCE_ALIAS";
- static DESTINATION_ALIAS: &str = &"DESTINATION_ALIAS";
+ static SOURCE_ALIAS: &str = "SOURCE_ALIAS";
+ static DESTINATION_ALIAS: &str = "DESTINATION_ALIAS";
let key_id_guard =
make_test_key_entry(&mut db, Domain::APP, SOURCE_UID as i64, SOURCE_ALIAS, None)
.context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
@@ -4395,8 +4377,8 @@
const SOURCE_UID: u32 = 1u32;
const DESTINATION_UID: u32 = 2u32;
const DESTINATION_NAMESPACE: i64 = 1000i64;
- static SOURCE_ALIAS: &str = &"SOURCE_ALIAS";
- static DESTINATION_ALIAS: &str = &"DESTINATION_ALIAS";
+ static SOURCE_ALIAS: &str = "SOURCE_ALIAS";
+ static DESTINATION_ALIAS: &str = "DESTINATION_ALIAS";
let key_id_guard =
make_test_key_entry(&mut db, Domain::APP, SOURCE_UID as i64, SOURCE_ALIAS, None)
.context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
@@ -4463,8 +4445,8 @@
let mut db = new_test_db()?;
const SOURCE_UID: u32 = 1u32;
const DESTINATION_UID: u32 = 2u32;
- static SOURCE_ALIAS: &str = &"SOURCE_ALIAS";
- static DESTINATION_ALIAS: &str = &"DESTINATION_ALIAS";
+ static SOURCE_ALIAS: &str = "SOURCE_ALIAS";
+ static DESTINATION_ALIAS: &str = "DESTINATION_ALIAS";
let key_id_guard =
make_test_key_entry(&mut db, Domain::APP, SOURCE_UID as i64, SOURCE_ALIAS, None)
.context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
@@ -4496,9 +4478,9 @@
#[test]
fn test_upgrade_0_to_1() {
- const ALIAS1: &str = &"test_upgrade_0_to_1_1";
- const ALIAS2: &str = &"test_upgrade_0_to_1_2";
- const ALIAS3: &str = &"test_upgrade_0_to_1_3";
+ const ALIAS1: &str = "test_upgrade_0_to_1_1";
+ const ALIAS2: &str = "test_upgrade_0_to_1_2";
+ const ALIAS3: &str = "test_upgrade_0_to_1_3";
const UID: u32 = 33;
let temp_dir = Arc::new(TempDir::new("test_upgrade_0_to_1").unwrap());
let mut db = KeystoreDB::new(temp_dir.path(), None).unwrap();
@@ -4758,8 +4740,9 @@
let test_begin = Instant::now();
- let mut db = KeystoreDB::new(temp_dir.path()).expect("Failed to open database.");
const KEY_COUNT: u32 = 500u32;
+ let mut db =
+ new_test_db_with_gc(temp_dir.path(), |_, _| Ok(())).expect("Failed to open database.");
const OPEN_DB_COUNT: u32 = 50u32;
let mut actual_key_count = KEY_COUNT;
@@ -4777,7 +4760,8 @@
// Insert more keys from a different thread and into a different namespace.
let temp_dir1 = temp_dir.clone();
let handle1 = thread::spawn(move || {
- let mut db = KeystoreDB::new(temp_dir1.path()).expect("Failed to open database.");
+ let mut db = new_test_db_with_gc(temp_dir1.path(), |_, _| Ok(()))
+ .expect("Failed to open database.");
for count in 0..actual_key_count {
if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
@@ -4806,7 +4790,8 @@
// And start unbinding the first set of keys.
let temp_dir2 = temp_dir.clone();
let handle2 = thread::spawn(move || {
- let mut db = KeystoreDB::new(temp_dir2.path()).expect("Failed to open database.");
+ let mut db = new_test_db_with_gc(temp_dir2.path(), |_, _| Ok(()))
+ .expect("Failed to open database.");
for count in 0..actual_key_count {
if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
@@ -4822,27 +4807,6 @@
}
});
- let stop_deleting = Arc::new(AtomicU8::new(0));
- let stop_deleting2 = stop_deleting.clone();
-
- // And delete anything that is unreferenced keys.
- let temp_dir3 = temp_dir.clone();
- let handle3 = thread::spawn(move || {
- let mut db = KeystoreDB::new(temp_dir3.path()).expect("Failed to open database.");
-
- while stop_deleting2.load(Ordering::Relaxed) != 1 {
- while let Some((key_guard, _key)) =
- db.get_unreferenced_key().expect("Failed to get unreferenced Key.")
- {
- if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
- return;
- }
- db.purge_key_entry(key_guard).expect("Failed to purge key.");
- }
- std::thread::sleep(std::time::Duration::from_millis(100));
- }
- });
-
// While a lot of inserting and deleting is going on we have to open database connections
// successfully and use them.
// This clone is not redundant, because temp_dir needs to be kept alive until db goes
@@ -4854,7 +4818,8 @@
if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
return;
}
- let mut db = KeystoreDB::new(temp_dir4.path()).expect("Failed to open database.");
+ let mut db = new_test_db_with_gc(temp_dir4.path(), |_, _| Ok(()))
+ .expect("Failed to open database.");
let alias = format!("test_alias_{}", count);
make_test_key_entry(&mut db, Domain::APP, 3, &alias, None)
@@ -4873,9 +4838,6 @@
handle2.join().expect("Thread 2 panicked.");
handle4.join().expect("Thread 4 panicked.");
- stop_deleting.store(1, Ordering::Relaxed);
- handle3.join().expect("Thread 3 panicked.");
-
Ok(())
}
@@ -5003,10 +4965,7 @@
Ok(KeyEntryRow {
id: row.get(0)?,
key_type: row.get(1)?,
- domain: match row.get(2)? {
- Some(i) => Some(Domain(i)),
- None => None,
- },
+ domain: row.get::<_, Option<_>>(2)?.map(Domain),
namespace: row.get(3)?,
alias: row.get(4)?,
state: row.get(5)?,
@@ -5516,7 +5475,7 @@
)?;
//check if super key exists
- assert!(db.key_exists(Domain::APP, 1, &USER_SUPER_KEY.alias, KeyType::Super)?);
+ assert!(db.key_exists(Domain::APP, 1, USER_SUPER_KEY.alias, KeyType::Super)?);
let (_, key_entry) = db.load_super_key(&USER_SUPER_KEY, 1)?.unwrap();
let loaded_super_key = SuperKeyManager::extract_super_key_from_key_entry(
@@ -5532,21 +5491,21 @@
Ok(())
}
- fn get_valid_statsd_storage_types() -> Vec<StatsdStorageType> {
+ fn get_valid_statsd_storage_types() -> Vec<MetricsStorage> {
vec![
- StatsdStorageType::KeyEntry,
- StatsdStorageType::KeyEntryIdIndex,
- StatsdStorageType::KeyEntryDomainNamespaceIndex,
- StatsdStorageType::BlobEntry,
- StatsdStorageType::BlobEntryKeyEntryIdIndex,
- StatsdStorageType::KeyParameter,
- StatsdStorageType::KeyParameterKeyEntryIdIndex,
- StatsdStorageType::KeyMetadata,
- StatsdStorageType::KeyMetadataKeyEntryIdIndex,
- StatsdStorageType::Grant,
- StatsdStorageType::AuthToken,
- StatsdStorageType::BlobMetadata,
- StatsdStorageType::BlobMetadataBlobEntryIdIndex,
+ MetricsStorage::KEY_ENTRY,
+ MetricsStorage::KEY_ENTRY_ID_INDEX,
+ MetricsStorage::KEY_ENTRY_DOMAIN_NAMESPACE_INDEX,
+ MetricsStorage::BLOB_ENTRY,
+ MetricsStorage::BLOB_ENTRY_KEY_ENTRY_ID_INDEX,
+ MetricsStorage::KEY_PARAMETER,
+ MetricsStorage::KEY_PARAMETER_KEY_ENTRY_ID_INDEX,
+ MetricsStorage::KEY_METADATA,
+ MetricsStorage::KEY_METADATA_KEY_ENTRY_ID_INDEX,
+ MetricsStorage::GRANT,
+ MetricsStorage::AUTH_TOKEN,
+ MetricsStorage::BLOB_METADATA,
+ MetricsStorage::BLOB_METADATA_BLOB_ENTRY_ID_INDEX,
]
}
@@ -5554,7 +5513,7 @@
/// that are supported by the DB. Check for reasonable values.
#[test]
fn test_query_all_valid_table_sizes() -> Result<()> {
- const PAGE_SIZE: i64 = 4096;
+ const PAGE_SIZE: i32 = 4096;
let mut db = new_test_db()?;
@@ -5562,7 +5521,7 @@
let stat = db.get_storage_stat(t)?;
// AuthToken can be less than a page since it's in a btree, not sqlite
// TODO(b/187474736) stop using if-let here
- if let StatsdStorageType::AuthToken = t {
+ if let MetricsStorage::AUTH_TOKEN = t {
} else {
assert!(stat.size >= PAGE_SIZE);
}
@@ -5572,35 +5531,35 @@
Ok(())
}
- fn get_storage_stats_map(db: &mut KeystoreDB) -> BTreeMap<i32, Keystore2StorageStats> {
+ fn get_storage_stats_map(db: &mut KeystoreDB) -> BTreeMap<i32, StorageStats> {
get_valid_statsd_storage_types()
.into_iter()
- .map(|t| (t as i32, db.get_storage_stat(t).unwrap()))
+ .map(|t| (t.0, db.get_storage_stat(t).unwrap()))
.collect()
}
fn assert_storage_increased(
db: &mut KeystoreDB,
- increased_storage_types: Vec<StatsdStorageType>,
- baseline: &mut BTreeMap<i32, Keystore2StorageStats>,
+ increased_storage_types: Vec<MetricsStorage>,
+ baseline: &mut BTreeMap<i32, StorageStats>,
) {
for storage in increased_storage_types {
// Verify the expected storage increased.
let new = db.get_storage_stat(storage).unwrap();
- let storage = storage as i32;
- let old = &baseline[&storage];
- assert!(new.size >= old.size, "{}: {} >= {}", storage, new.size, old.size);
+ let old = &baseline[&storage.0];
+ assert!(new.size >= old.size, "{}: {} >= {}", storage.0, new.size, old.size);
assert!(
new.unused_size <= old.unused_size,
"{}: {} <= {}",
- storage,
+ storage.0,
new.unused_size,
old.unused_size
);
// Update the baseline with the new value so that it succeeds in the
// later comparison.
- baseline.insert(storage, new);
+ baseline.insert(storage.0, new);
}
// Get an updated map of the storage and verify there were no unexpected changes.
@@ -5608,7 +5567,7 @@
assert_eq!(updated_stats.len(), baseline.len());
for &k in baseline.keys() {
- let stringify = |map: &BTreeMap<i32, Keystore2StorageStats>| -> String {
+ let stringify = |map: &BTreeMap<i32, StorageStats>| -> String {
let mut s = String::new();
for &k in map.keys() {
writeln!(&mut s, " {}: {}, {}", &k, map[&k].size, map[&k].unused_size)
@@ -5622,7 +5581,7 @@
&& updated_stats[&k].unused_size == baseline[&k].unused_size,
"updated_stats:\n{}\nbaseline:\n{}",
stringify(&updated_stats),
- stringify(&baseline)
+ stringify(baseline)
);
}
}
@@ -5636,9 +5595,9 @@
assert_storage_increased(
&mut db,
vec![
- StatsdStorageType::KeyEntry,
- StatsdStorageType::KeyEntryIdIndex,
- StatsdStorageType::KeyEntryDomainNamespaceIndex,
+ MetricsStorage::KEY_ENTRY,
+ MetricsStorage::KEY_ENTRY_ID_INDEX,
+ MetricsStorage::KEY_ENTRY_DOMAIN_NAMESPACE_INDEX,
],
&mut working_stats,
);
@@ -5649,10 +5608,10 @@
assert_storage_increased(
&mut db,
vec![
- StatsdStorageType::BlobEntry,
- StatsdStorageType::BlobEntryKeyEntryIdIndex,
- StatsdStorageType::BlobMetadata,
- StatsdStorageType::BlobMetadataBlobEntryIdIndex,
+ MetricsStorage::BLOB_ENTRY,
+ MetricsStorage::BLOB_ENTRY_KEY_ENTRY_ID_INDEX,
+ MetricsStorage::BLOB_METADATA,
+ MetricsStorage::BLOB_METADATA_BLOB_ENTRY_ID_INDEX,
],
&mut working_stats,
);
@@ -5661,7 +5620,7 @@
db.insert_keyparameter(&key_id, ¶ms)?;
assert_storage_increased(
&mut db,
- vec![StatsdStorageType::KeyParameter, StatsdStorageType::KeyParameterKeyEntryIdIndex],
+ vec![MetricsStorage::KEY_PARAMETER, MetricsStorage::KEY_PARAMETER_KEY_ENTRY_ID_INDEX],
&mut working_stats,
);
@@ -5670,7 +5629,7 @@
db.insert_key_metadata(&key_id, &metadata)?;
assert_storage_increased(
&mut db,
- vec![StatsdStorageType::KeyMetadata, StatsdStorageType::KeyMetadataKeyEntryIdIndex],
+ vec![MetricsStorage::KEY_METADATA, MetricsStorage::KEY_METADATA_KEY_ENTRY_ID_INDEX],
&mut working_stats,
);
@@ -5678,7 +5637,7 @@
for stat in working_stats.values() {
sum += stat.size;
}
- let total = db.get_storage_stat(StatsdStorageType::Database)?.size;
+ let total = db.get_storage_stat(MetricsStorage::DATABASE)?.size;
assert!(sum <= total, "Expected sum <= total. sum: {}, total: {}", sum, total);
Ok(())
@@ -5696,7 +5655,7 @@
timestamp: Timestamp { milliSeconds: 10 },
mac: b"mac".to_vec(),
});
- assert_storage_increased(&mut db, vec![StatsdStorageType::AuthToken], &mut working_stats);
+ assert_storage_increased(&mut db, vec![MetricsStorage::AUTH_TOKEN], &mut working_stats);
Ok(())
}
@@ -5720,7 +5679,7 @@
|_, _| Ok(()),
)?;
- assert_storage_increased(&mut db, vec![StatsdStorageType::Grant], &mut working_stats);
+ assert_storage_increased(&mut db, vec![MetricsStorage::GRANT], &mut working_stats);
Ok(())
}
@@ -5762,28 +5721,6 @@
}
#[test]
- fn test_set_wal_mode() -> Result<()> {
- let temp_dir = TempDir::new("test_set_wal_mode")?;
- let mut db = KeystoreDB::new(temp_dir.path(), None)?;
- let mode: String =
- db.conn.pragma_query_value(Some(Attached("persistent")), "journal_mode", |row| {
- row.get(0)
- })?;
- assert_eq!(mode, "delete");
- db.conn.close().expect("Close didn't work");
-
- KeystoreDB::set_wal_mode(temp_dir.path())?;
-
- db = KeystoreDB::new(temp_dir.path(), None)?;
- let mode: String =
- db.conn.pragma_query_value(Some(Attached("persistent")), "journal_mode", |row| {
- row.get(0)
- })?;
- assert_eq!(mode, "wal");
- Ok(())
- }
-
- #[test]
fn test_load_key_descriptor() -> Result<()> {
let mut db = new_test_db()?;
let key_id = make_test_key_entry(&mut db, Domain::APP, 1, TEST_ALIAS, None)?.0;
diff --git a/keystore2/src/database/utils.rs b/keystore2/src/database/utils.rs
index 90f5616..b4590da 100644
--- a/keystore2/src/database/utils.rs
+++ b/keystore2/src/database/utils.rs
@@ -44,7 +44,7 @@
loop {
match rows.next().context("In with_rows_extract_all: Failed to unpack row")? {
Some(row) => {
- row_extractor(&row).context("In with_rows_extract_all.")?;
+ row_extractor(row).context("In with_rows_extract_all.")?;
}
None => break Ok(()),
}
diff --git a/keystore2/src/enforcements.rs b/keystore2/src/enforcements.rs
index 29a3f0b..e9a58f9 100644
--- a/keystore2/src/enforcements.rs
+++ b/keystore2/src/enforcements.rs
@@ -28,14 +28,13 @@
KeyParameter::KeyParameter as KmKeyParameter, KeyPurpose::KeyPurpose, Tag::Tag,
};
use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
- ISecureClock::ISecureClock, TimeStampToken::TimeStampToken,
+ TimeStampToken::TimeStampToken,
};
use android_security_authorization::aidl::android::security::authorization::ResponseCode::ResponseCode as AuthzResponseCode;
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, IKeystoreSecurityLevel::KEY_FLAG_AUTH_BOUND_WITHOUT_CRYPTOGRAPHIC_LSKF_BINDING,
OperationChallenge::OperationChallenge,
};
-use android_system_keystore2::binder::Strong;
use anyhow::{Context, Result};
use std::{
collections::{HashMap, HashSet},
@@ -219,13 +218,10 @@
}
fn get_timestamp_token(challenge: i64) -> Result<TimeStampToken, Error> {
- let dev: Strong<dyn ISecureClock> = get_timestamp_service()
- .expect(concat!(
- "Secure Clock service must be present ",
- "if TimeStampTokens are required."
- ))
- .get_interface()
- .expect("Fatal: Timestamp service does not implement ISecureClock.");
+ let dev = get_timestamp_service().expect(concat!(
+ "Secure Clock service must be present ",
+ "if TimeStampTokens are required."
+ ));
map_binder_status(dev.generateTimeStamp(challenge))
}
diff --git a/keystore2/src/fuzzers/Android.bp b/keystore2/src/fuzzers/Android.bp
new file mode 100644
index 0000000..384ab77
--- /dev/null
+++ b/keystore2/src/fuzzers/Android.bp
@@ -0,0 +1,29 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_fuzz {
+ name: "legacy_blob_fuzzer",
+ srcs: ["legacy_blob_fuzzer.rs"],
+ rustlibs: [
+ "libkeystore2",
+ ],
+ fuzz_config: {
+ fuzz_on_haiku_device: true,
+ fuzz_on_haiku_host: false,
+ },
+}
diff --git a/keystore2/src/fuzzers/legacy_blob_fuzzer.rs b/keystore2/src/fuzzers/legacy_blob_fuzzer.rs
new file mode 100644
index 0000000..5c89ca4
--- /dev/null
+++ b/keystore2/src/fuzzers/legacy_blob_fuzzer.rs
@@ -0,0 +1,24 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![allow(missing_docs)]
+#![no_main]
+#[macro_use]
+extern crate libfuzzer_sys;
+use keystore2::legacy_blob::LegacyBlobLoader;
+
+fuzz_target!(|data: &[u8]| {
+ let string = data.iter().filter_map(|c| std::char::from_u32(*c as u32)).collect::<String>();
+ let _res = LegacyBlobLoader::decode_alias(&string);
+});
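Note: the fuzz target maps each input byte to a char individually rather than UTF-8-decoding the input, so every byte sequence yields some string for decode_alias. A std-only illustration of that conversion (the helper name is hypothetical):

    fn bytes_to_string(data: &[u8]) -> String {
        // Each byte becomes its own char; from_u32 never fails for values 0..=255,
        // so no input bytes are dropped and the conversion is total.
        data.iter().filter_map(|c| std::char::from_u32(*c as u32)).collect()
    }

    fn main() {
        assert_eq!(bytes_to_string(b"abc"), "abc");
        // Bytes are mapped individually, not UTF-8 decoded: 0xC3 0xA9 (UTF-8 for U+00E9)
        // becomes the two chars U+00C3 and U+00A9.
        assert_eq!(bytes_to_string(&[0xC3, 0xA9]), "\u{C3}\u{A9}");
    }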
diff --git a/keystore2/src/gc.rs b/keystore2/src/gc.rs
index 2010c79..25f08c8 100644
--- a/keystore2/src/gc.rs
+++ b/keystore2/src/gc.rs
@@ -123,7 +123,7 @@
.super_key
.unwrap_key_if_required(&blob_metadata, &blob)
.context("In process_one_key: Trying to unwrap to-be-deleted blob.")?;
- (self.invalidate_key)(&uuid, &*blob)
+ (self.invalidate_key)(uuid, &*blob)
.context("In process_one_key: Trying to invalidate key.")?;
}
}
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 89114a6..a03a61c 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -21,7 +21,6 @@
use crate::legacy_migrator::LegacyMigrator;
use crate::super_key::SuperKeyManager;
use crate::utils::watchdog as wd;
-use crate::utils::Asp;
use crate::{async_task::AsyncTask, database::MonotonicRawTime};
use crate::{
database::KeystoreDB,
@@ -33,6 +32,9 @@
IKeyMintDevice::IKeyMintDevice, IRemotelyProvisionedComponent::IRemotelyProvisionedComponent,
KeyMintHardwareInfo::KeyMintHardwareInfo, SecurityLevel::SecurityLevel,
};
+use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
+ ISecureClock::ISecureClock,
+};
use android_hardware_security_keymint::binder::{StatusCode, Strong};
use android_security_compat::aidl::android::security::compat::IKeystoreCompatService::IKeystoreCompatService;
use anyhow::{Context, Result};
@@ -44,7 +46,6 @@
use std::{collections::HashMap, path::Path, path::PathBuf};
static DB_INIT: Once = Once::new();
-static DB_SET_WAL_MODE: Once = Once::new();
/// Open a connection to the Keystore 2.0 database. This is called during the initialization of
/// the thread local DB field. It should never be called directly. The first time this is called
@@ -57,12 +58,6 @@
pub fn create_thread_local_db() -> KeystoreDB {
let db_path = DB_PATH.read().expect("Could not get the database directory.");
- DB_SET_WAL_MODE.call_once(|| {
- log::info!("Setting Keystore 2.0 database to WAL mode first time since boot.");
- KeystoreDB::set_wal_mode(&db_path)
- .expect("In create_thread_local_db: Could not set WAL mode.");
- });
-
let mut db = KeystoreDB::new(&db_path, Some(GC.clone())).expect("Failed to open database.");
DB_INIT.call_once(|| {
@@ -92,34 +87,33 @@
RefCell::new(create_thread_local_db());
}
-#[derive(Default)]
-struct DevicesMap {
- devices_by_uuid: HashMap<Uuid, (Asp, KeyMintHardwareInfo)>,
+struct DevicesMap<T: FromIBinder + ?Sized> {
+ devices_by_uuid: HashMap<Uuid, (Strong<T>, KeyMintHardwareInfo)>,
uuid_by_sec_level: HashMap<SecurityLevel, Uuid>,
}
-impl DevicesMap {
+impl<T: FromIBinder + ?Sized> DevicesMap<T> {
fn dev_by_sec_level(
&self,
sec_level: &SecurityLevel,
- ) -> Option<(Asp, KeyMintHardwareInfo, Uuid)> {
+ ) -> Option<(Strong<T>, KeyMintHardwareInfo, Uuid)> {
self.uuid_by_sec_level.get(sec_level).and_then(|uuid| self.dev_by_uuid(uuid))
}
- fn dev_by_uuid(&self, uuid: &Uuid) -> Option<(Asp, KeyMintHardwareInfo, Uuid)> {
+ fn dev_by_uuid(&self, uuid: &Uuid) -> Option<(Strong<T>, KeyMintHardwareInfo, Uuid)> {
self.devices_by_uuid
.get(uuid)
.map(|(dev, hw_info)| ((*dev).clone(), (*hw_info).clone(), *uuid))
}
- fn devices<T: FromIBinder + ?Sized>(&self) -> Vec<Strong<T>> {
- self.devices_by_uuid.values().filter_map(|(asp, _)| asp.get_interface::<T>().ok()).collect()
+ fn devices(&self) -> Vec<Strong<T>> {
+ self.devices_by_uuid.values().map(|(dev, _)| dev.clone()).collect()
}
/// The requested security level and the security level of the actual implementation may
/// differ. So we map the requested security level to the uuid of the implementation
/// so that there cannot be any confusion as to which KeyMint instance is requested.
- fn insert(&mut self, sec_level: SecurityLevel, dev: Asp, hw_info: KeyMintHardwareInfo) {
+ fn insert(&mut self, sec_level: SecurityLevel, dev: Strong<T>, hw_info: KeyMintHardwareInfo) {
// For now we use the reported security level of the KM instance as UUID.
         // TODO update this section once the UUID has been added to the KM hardware info.
let uuid: Uuid = sec_level.into();
@@ -128,17 +122,31 @@
}
}
-#[derive(Default)]
-struct RemotelyProvisionedDevicesMap {
- devices_by_sec_level: HashMap<SecurityLevel, Asp>,
+impl<T: FromIBinder + ?Sized> Default for DevicesMap<T> {
+ fn default() -> Self {
+ Self {
+ devices_by_uuid: HashMap::<Uuid, (Strong<T>, KeyMintHardwareInfo)>::new(),
+ uuid_by_sec_level: Default::default(),
+ }
+ }
}
-impl RemotelyProvisionedDevicesMap {
- fn dev_by_sec_level(&self, sec_level: &SecurityLevel) -> Option<Asp> {
+struct RemotelyProvisionedDevicesMap<T: FromIBinder + ?Sized> {
+ devices_by_sec_level: HashMap<SecurityLevel, Strong<T>>,
+}
+
+impl<T: FromIBinder + ?Sized> Default for RemotelyProvisionedDevicesMap<T> {
+ fn default() -> Self {
+ Self { devices_by_sec_level: HashMap::<SecurityLevel, Strong<T>>::new() }
+ }
+}
+
+impl<T: FromIBinder + ?Sized> RemotelyProvisionedDevicesMap<T> {
+ fn dev_by_sec_level(&self, sec_level: &SecurityLevel) -> Option<Strong<T>> {
self.devices_by_sec_level.get(sec_level).map(|dev| (*dev).clone())
}
- fn insert(&mut self, sec_level: SecurityLevel, dev: Asp) {
+ fn insert(&mut self, sec_level: SecurityLevel, dev: Strong<T>) {
self.devices_by_sec_level.insert(sec_level, dev);
}
}
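Note: DevicesMap and RemotelyProvisionedDevicesMap are now generic over the binder interface they cache, which is why Default is implemented by hand: a derive would add a `T: Default` bound that a trait object can never satisfy. A std-only analogue of that design point, with `Arc` standing in for binder's `Strong` and a toy `Level` enum for SecurityLevel (all names illustrative, not from the patch):

    use std::collections::HashMap;
    use std::sync::Arc;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    enum Level {
        Tee,
        Strongbox,
    }

    trait Device {
        fn name(&self) -> &'static str;
    }

    // Generic over an unsized trait object, mirroring DevicesMap<T: FromIBinder + ?Sized>.
    struct DeviceCache<T: ?Sized> {
        by_level: HashMap<Level, Arc<T>>,
    }

    // Manual Default impl: deriving it would require T: Default, which dyn Device cannot provide.
    impl<T: ?Sized> Default for DeviceCache<T> {
        fn default() -> Self {
            Self { by_level: HashMap::new() }
        }
    }

    impl<T: ?Sized> DeviceCache<T> {
        fn insert(&mut self, level: Level, dev: Arc<T>) {
            self.by_level.insert(level, dev);
        }
        fn get(&self, level: Level) -> Option<Arc<T>> {
            self.by_level.get(&level).cloned()
        }
    }

    struct Tee;
    impl Device for Tee {
        fn name(&self) -> &'static str {
            "tee"
        }
    }

    fn main() {
        let mut cache: DeviceCache<dyn Device> = Default::default();
        cache.insert(Level::Tee, Arc::new(Tee));
        assert_eq!(cache.get(Level::Tee).unwrap().name(), "tee");
    }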
@@ -150,11 +158,13 @@
/// Runtime database of unwrapped super keys.
pub static ref SUPER_KEY: Arc<SuperKeyManager> = Default::default();
/// Map of KeyMint devices.
- static ref KEY_MINT_DEVICES: Mutex<DevicesMap> = Default::default();
+ static ref KEY_MINT_DEVICES: Mutex<DevicesMap<dyn IKeyMintDevice>> = Default::default();
/// Timestamp service.
- static ref TIME_STAMP_DEVICE: Mutex<Option<Asp>> = Default::default();
+ static ref TIME_STAMP_DEVICE: Mutex<Option<Strong<dyn ISecureClock>>> = Default::default();
/// RemotelyProvisionedComponent HAL devices.
- static ref REMOTELY_PROVISIONED_COMPONENT_DEVICES: Mutex<RemotelyProvisionedDevicesMap> = Default::default();
+ static ref REMOTELY_PROVISIONED_COMPONENT_DEVICES:
+ Mutex<RemotelyProvisionedDevicesMap<dyn IRemotelyProvisionedComponent>> =
+ Default::default();
/// A single on-demand worker thread that handles deferred tasks with two different
/// priorities.
pub static ref ASYNC_TASK: Arc<AsyncTask> = Default::default();
@@ -173,8 +183,7 @@
static ref GC: Arc<Gc> = Arc::new(Gc::new_init_with(ASYNC_TASK.clone(), || {
(
Box::new(|uuid, blob| {
- let km_dev: Strong<dyn IKeyMintDevice> =
- get_keymint_dev_by_uuid(uuid).map(|(dev, _)| dev)?.get_interface()?;
+ let km_dev = get_keymint_dev_by_uuid(uuid).map(|(dev, _)| dev)?;
let _wp = wd::watch_millis("In invalidate key closure: calling deleteKey", 500);
map_km_error(km_dev.deleteKey(&*blob))
.context("In invalidate key closure: Trying to invalidate key blob.")
@@ -191,7 +200,9 @@
/// Make a new connection to a KeyMint device of the given security level.
/// If no native KeyMint device can be found this function also brings
/// up the compatibility service and attempts to connect to the legacy wrapper.
-fn connect_keymint(security_level: &SecurityLevel) -> Result<(Asp, KeyMintHardwareInfo)> {
+fn connect_keymint(
+ security_level: &SecurityLevel,
+) -> Result<(Strong<dyn IKeyMintDevice>, KeyMintHardwareInfo)> {
let keymint_instances =
get_aidl_instances("android.hardware.security.keymint", 1, "IKeyMintDevice");
@@ -216,9 +227,12 @@
}
};
- let keymint = if let Some(service_name) = service_name {
- map_binder_status_code(binder::get_interface(&service_name))
- .context("In connect_keymint: Trying to connect to genuine KeyMint service.")
+ let (keymint, hal_version) = if let Some(service_name) = service_name {
+ (
+ map_binder_status_code(binder::get_interface(&service_name))
+ .context("In connect_keymint: Trying to connect to genuine KeyMint service.")?,
+ Some(100i32), // The HAL version code for KeyMint V1 is 100.
+ )
} else {
// This is a no-op if it was called before.
keystore2_km_compat::add_keymint_device_service();
@@ -226,22 +240,36 @@
let keystore_compat_service: Strong<dyn IKeystoreCompatService> =
map_binder_status_code(binder::get_interface("android.security.compat"))
.context("In connect_keymint: Trying to connect to compat service.")?;
- map_binder_status(keystore_compat_service.getKeyMintDevice(*security_level))
- .map_err(|e| match e {
- Error::BinderTransaction(StatusCode::NAME_NOT_FOUND) => {
- Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)
- }
- e => e,
- })
- .context("In connect_keymint: Trying to get Legacy wrapper.")
- }?;
+ (
+ map_binder_status(keystore_compat_service.getKeyMintDevice(*security_level))
+ .map_err(|e| match e {
+ Error::BinderTransaction(StatusCode::NAME_NOT_FOUND) => {
+ Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)
+ }
+ e => e,
+ })
+ .context("In connect_keymint: Trying to get Legacy wrapper.")?,
+ None,
+ )
+ };
let wp = wd::watch_millis("In connect_keymint: calling getHardwareInfo()", 500);
- let hw_info = map_km_error(keymint.getHardwareInfo())
+ let mut hw_info = map_km_error(keymint.getHardwareInfo())
.context("In connect_keymint: Failed to get hardware info.")?;
drop(wp);
- Ok((Asp::new(keymint.as_binder()), hw_info))
+ // The legacy wrapper sets hw_info.versionNumber to the underlying HAL version like so:
+ // 10 * <major> + <minor>, e.g., KM 3.0 = 30. So 30, 40, and 41 are the only viable values.
+ // For KeyMint the versionNumber is implementation defined and thus completely meaningless
+ // to Keystore 2.0. So at this point the versionNumber field is set to the HAL version, so
+ // that higher levels have a meaningful guide as to which feature set to expect from the
+ // implementation. As of this writing the only meaningful version number is 100 for KeyMint V1,
+ // and future AIDL versions should follow the pattern <AIDL version> * 100.
+ if let Some(hal_version) = hal_version {
+ hw_info.versionNumber = hal_version;
+ }
+
+ Ok((keymint, hw_info))
}
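Note: the version-number convention described in the comment above reduces to two small mappings; the helpers below are illustrative only (the real values come from the HAL and the hard-coded 100 in connect_keymint):

    // Keymaster HALs encode their version as 10 * major + minor, while KeyMint
    // AIDL versions are reported as <AIDL version> * 100.
    fn keymaster_version_code(major: i32, minor: i32) -> i32 {
        10 * major + minor
    }

    fn keymint_version_code(aidl_version: i32) -> i32 {
        aidl_version * 100
    }

    fn main() {
        assert_eq!(keymaster_version_code(4, 1), 41); // KM 4.1
        assert_eq!(keymint_version_code(1), 100); // KeyMint V1
    }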
/// Get a keymint device for the given security level either from our cache or
@@ -249,9 +277,9 @@
/// TODO the latter can be removed when the uuid is part of the hardware info.
pub fn get_keymint_device(
security_level: &SecurityLevel,
-) -> Result<(Asp, KeyMintHardwareInfo, Uuid)> {
+) -> Result<(Strong<dyn IKeyMintDevice>, KeyMintHardwareInfo, Uuid)> {
let mut devices_map = KEY_MINT_DEVICES.lock().unwrap();
- if let Some((dev, hw_info, uuid)) = devices_map.dev_by_sec_level(&security_level) {
+ if let Some((dev, hw_info, uuid)) = devices_map.dev_by_sec_level(security_level) {
Ok((dev, hw_info, uuid))
} else {
let (dev, hw_info) = connect_keymint(security_level).context("In get_keymint_device.")?;
@@ -265,7 +293,9 @@
/// attempt to establish a new connection. It is assumed that the cache is already populated
/// when this is called. This is a fair assumption, because service.rs iterates through all
/// security levels when it gets instantiated.
-pub fn get_keymint_dev_by_uuid(uuid: &Uuid) -> Result<(Asp, KeyMintHardwareInfo)> {
+pub fn get_keymint_dev_by_uuid(
+ uuid: &Uuid,
+) -> Result<(Strong<dyn IKeyMintDevice>, KeyMintHardwareInfo)> {
let devices_map = KEY_MINT_DEVICES.lock().unwrap();
if let Some((dev, hw_info, _)) = devices_map.dev_by_uuid(uuid) {
Ok((dev, hw_info))
@@ -284,7 +314,7 @@
/// Make a new connection to a secure clock service.
/// If no native SecureClock device can be found brings up the compatibility service and attempts
/// to connect to the legacy wrapper.
-fn connect_secureclock() -> Result<Asp> {
+fn connect_secureclock() -> Result<Strong<dyn ISecureClock>> {
let secureclock_instances =
get_aidl_instances("android.hardware.security.secureclock", 1, "ISecureClock");
@@ -315,12 +345,12 @@
.context("In connect_secureclock: Trying to get Legacy wrapper.")
}?;
- Ok(Asp::new(secureclock.as_binder()))
+ Ok(secureclock)
}
/// Get the timestamp service that verifies auth token timeliness towards security levels with
/// different clocks.
-pub fn get_timestamp_service() -> Result<Asp> {
+pub fn get_timestamp_service() -> Result<Strong<dyn ISecureClock>> {
let mut ts_device = TIME_STAMP_DEVICE.lock().unwrap();
if let Some(dev) = &*ts_device {
Ok(dev.clone())
@@ -334,7 +364,9 @@
static REMOTE_PROVISIONING_HAL_SERVICE_NAME: &str =
"android.hardware.security.keymint.IRemotelyProvisionedComponent";
-fn connect_remotely_provisioned_component(security_level: &SecurityLevel) -> Result<Asp> {
+fn connect_remotely_provisioned_component(
+ security_level: &SecurityLevel,
+) -> Result<Strong<dyn IRemotelyProvisionedComponent>> {
let remotely_prov_instances =
get_aidl_instances("android.hardware.security.keymint", 1, "IRemotelyProvisionedComponent");
@@ -365,14 +397,16 @@
" RemotelyProvisionedComponent service."
))
.map_err(|e| e)?;
- Ok(Asp::new(rem_prov_hal.as_binder()))
+ Ok(rem_prov_hal)
}
 /// Get a remote provisioning component device for the given security level either from the cache or
/// by making a new connection. Returns the device.
-pub fn get_remotely_provisioned_component(security_level: &SecurityLevel) -> Result<Asp> {
+pub fn get_remotely_provisioned_component(
+ security_level: &SecurityLevel,
+) -> Result<Strong<dyn IRemotelyProvisionedComponent>> {
let mut devices_map = REMOTELY_PROVISIONED_COMPONENT_DEVICES.lock().unwrap();
- if let Some(dev) = devices_map.dev_by_sec_level(&security_level) {
+ if let Some(dev) = devices_map.dev_by_sec_level(security_level) {
Ok(dev)
} else {
let dev = connect_remotely_provisioned_component(security_level)
diff --git a/keystore2/src/id_rotation.rs b/keystore2/src/id_rotation.rs
index dbf0fc9..e3992d8 100644
--- a/keystore2/src/id_rotation.rs
+++ b/keystore2/src/id_rotation.rs
@@ -27,7 +27,7 @@
use std::time::Duration;
const ID_ROTATION_PERIOD: Duration = Duration::from_secs(30 * 24 * 60 * 60); // Thirty days.
-static TIMESTAMP_FILE_NAME: &str = &"timestamp";
+static TIMESTAMP_FILE_NAME: &str = "timestamp";
/// The IdRotationState stores the path to the timestamp file for deferred usage. The data
/// partition is usually not available when Keystore 2.0 starts up. So this object is created
@@ -83,7 +83,7 @@
fn test_had_factory_reset_since_id_rotation() -> Result<()> {
let temp_dir = TempDir::new("test_had_factory_reset_since_id_rotation_")
.expect("Failed to create temp dir.");
- let id_rotation_state = IdRotationState::new(&temp_dir.path());
+ let id_rotation_state = IdRotationState::new(temp_dir.path());
let mut temp_file_path = temp_dir.path().to_owned();
temp_file_path.push(TIMESTAMP_FILE_NAME);
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index 53461da..f1f01c6 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -17,21 +17,23 @@
use keystore2::entropy;
use keystore2::globals::ENFORCEMENTS;
use keystore2::maintenance::Maintenance;
-use keystore2::metrics;
+use keystore2::metrics::Metrics;
+use keystore2::metrics_store;
use keystore2::remote_provisioning::RemoteProvisioningService;
use keystore2::service::KeystoreService;
use keystore2::{apc::ApcManager, shared_secret_negotiation};
use keystore2::{authorization::AuthorizationManager, id_rotation::IdRotationState};
+use legacykeystore::LegacyKeystore;
use log::{error, info};
use std::{panic, path::Path, sync::mpsc::channel};
-use vpnprofilestore::VpnProfileStore;
static KS2_SERVICE_NAME: &str = "android.system.keystore2.IKeystoreService/default";
static APC_SERVICE_NAME: &str = "android.security.apc";
static AUTHORIZATION_SERVICE_NAME: &str = "android.security.authorization";
+static METRICS_SERVICE_NAME: &str = "android.security.metrics";
static REMOTE_PROVISIONING_SERVICE_NAME: &str = "android.security.remoteprovisioning";
static USER_MANAGER_SERVICE_NAME: &str = "android.security.maintenance";
-static VPNPROFILESTORE_SERVICE_NAME: &str = "android.security.vpnprofilestore";
+static LEGACY_KEYSTORE_SERVICE_NAME: &str = "android.security.legacykeystore";
/// Keystore 2.0 takes one argument which is a path indicating its designated working directory.
fn main() {
@@ -50,6 +52,9 @@
let mut args = std::env::args();
args.next().expect("That's odd. How is there not even a first argument?");
+ // Write/update keystore.crash_count system property.
+ metrics_store::update_keystore_crash_sysprop();
+
// Keystore 2.0 cannot change to the database directory (typically /data/misc/keystore) on
// startup as Keystore 1.0 did because Keystore 2.0 is intended to run much earlier than
// Keystore 1.0. Instead we set a global variable to the database path.
@@ -58,7 +63,7 @@
let db_path = Path::new(&dir);
*keystore2::globals::DB_PATH.write().expect("Could not lock DB_PATH.") =
db_path.to_path_buf();
- IdRotationState::new(&db_path)
+ IdRotationState::new(db_path)
} else {
panic!("Must specify a database directory.");
};
@@ -96,7 +101,11 @@
panic!("Failed to register service {} because of {:?}.", AUTHORIZATION_SERVICE_NAME, e);
});
- let maintenance_service = Maintenance::new_native_binder().unwrap_or_else(|e| {
+ let (delete_listener, legacykeystore) = LegacyKeystore::new_native_binder(
+ &keystore2::globals::DB_PATH.read().expect("Could not get DB_PATH."),
+ );
+
+ let maintenance_service = Maintenance::new_native_binder(delete_listener).unwrap_or_else(|e| {
panic!("Failed to create service {} because of {:?}.", USER_MANAGER_SERVICE_NAME, e);
});
binder::add_service(USER_MANAGER_SERVICE_NAME, maintenance_service.as_binder()).unwrap_or_else(
@@ -105,6 +114,13 @@
},
);
+ let metrics_service = Metrics::new_native_binder().unwrap_or_else(|e| {
+ panic!("Failed to create service {} because of {:?}.", METRICS_SERVICE_NAME, e);
+ });
+ binder::add_service(METRICS_SERVICE_NAME, metrics_service.as_binder()).unwrap_or_else(|e| {
+ panic!("Failed to register service {} because of {:?}.", METRICS_SERVICE_NAME, e);
+ });
+
// Devices with KS2 and KM 1.0 may not have any IRemotelyProvisionedComponent HALs at all. Do
// not panic if new_native_binder returns failure because it could not find the TEE HAL.
if let Ok(remote_provisioning_service) = RemoteProvisioningService::new_native_binder() {
@@ -120,25 +136,15 @@
});
}
- let vpnprofilestore = VpnProfileStore::new_native_binder(
- &keystore2::globals::DB_PATH.read().expect("Could not get DB_PATH."),
- );
- binder::add_service(VPNPROFILESTORE_SERVICE_NAME, vpnprofilestore.as_binder()).unwrap_or_else(
+ binder::add_service(LEGACY_KEYSTORE_SERVICE_NAME, legacykeystore.as_binder()).unwrap_or_else(
|e| {
panic!(
"Failed to register service {} because of {:?}.",
- VPNPROFILESTORE_SERVICE_NAME, e
+ LEGACY_KEYSTORE_SERVICE_NAME, e
);
},
);
- std::thread::spawn(|| {
- match metrics::register_pull_metrics_callbacks() {
- Err(e) => error!("register_pull_metrics_callbacks failed: {:?}.", e),
- _ => info!("Pull metrics callbacks successfully registered."),
- };
- });
-
info!("Successfully registered Keystore 2.0 service.");
info!("Joining thread pool now.");
diff --git a/keystore2/src/km_compat/Android.bp b/keystore2/src/km_compat/Android.bp
index 541788e..32406ae 100644
--- a/keystore2/src/km_compat/Android.bp
+++ b/keystore2/src/km_compat/Android.bp
@@ -57,11 +57,11 @@
"android.hardware.keymaster@3.0",
"android.hardware.keymaster@4.0",
"android.hardware.keymaster@4.1",
- "android.hardware.security.keymint-V1-ndk_platform",
- "android.hardware.security.secureclock-V1-ndk_platform",
- "android.hardware.security.sharedsecret-V1-ndk_platform",
- "android.security.compat-ndk_platform",
- "android.system.keystore2-V1-ndk_platform",
+ "android.hardware.security.keymint-V1-ndk",
+ "android.hardware.security.secureclock-V1-ndk",
+ "android.hardware.security.sharedsecret-V1-ndk",
+ "android.security.compat-ndk",
+ "android.system.keystore2-V1-ndk",
"libbase",
"libbinder_ndk",
"libcrypto",
@@ -78,10 +78,10 @@
name: "libkm_compat_service",
srcs: ["km_compat_service.cpp"],
shared_libs: [
- "android.hardware.security.keymint-V1-ndk_platform",
- "android.hardware.security.secureclock-V1-ndk_platform",
- "android.hardware.security.sharedsecret-V1-ndk_platform",
- "android.security.compat-ndk_platform",
+ "android.hardware.security.keymint-V1-ndk",
+ "android.hardware.security.secureclock-V1-ndk",
+ "android.hardware.security.sharedsecret-V1-ndk",
+ "android.security.compat-ndk",
"libbinder_ndk",
"libcrypto",
"libkm_compat",
@@ -107,11 +107,11 @@
"android.hardware.keymaster@3.0",
"android.hardware.keymaster@4.0",
"android.hardware.keymaster@4.1",
- "android.hardware.security.keymint-V1-ndk_platform",
- "android.hardware.security.secureclock-V1-ndk_platform",
- "android.hardware.security.sharedsecret-V1-ndk_platform",
- "android.security.compat-ndk_platform",
- "android.system.keystore2-V1-ndk_platform",
+ "android.hardware.security.keymint-V1-ndk",
+ "android.hardware.security.secureclock-V1-ndk",
+ "android.hardware.security.sharedsecret-V1-ndk",
+ "android.security.compat-ndk",
+ "android.system.keystore2-V1-ndk",
"libbase",
"libbinder_ndk",
"libcrypto",
diff --git a/keystore2/src/km_compat/km_compat.cpp b/keystore2/src/km_compat/km_compat.cpp
index f6f8bfe..40ca554 100644
--- a/keystore2/src/km_compat/km_compat.cpp
+++ b/keystore2/src/km_compat/km_compat.cpp
@@ -304,33 +304,36 @@
static std::vector<KeyCharacteristics>
processLegacyCharacteristics(KeyMintSecurityLevel securityLevel,
const std::vector<KeyParameter>& genParams,
- const V4_0_KeyCharacteristics& legacyKc, bool hwEnforcedOnly = false) {
+ const V4_0_KeyCharacteristics& legacyKc, bool kmEnforcedOnly = false) {
- KeyCharacteristics hwEnforced{securityLevel,
- convertKeyParametersFromLegacy(legacyKc.hardwareEnforced)};
+ KeyCharacteristics kmEnforced{securityLevel, convertKeyParametersFromLegacy(
+ securityLevel == KeyMintSecurityLevel::SOFTWARE
+ ? legacyKc.softwareEnforced
+ : legacyKc.hardwareEnforced)};
- if (hwEnforcedOnly) {
- return {hwEnforced};
+ if (securityLevel == KeyMintSecurityLevel::SOFTWARE && legacyKc.hardwareEnforced.size() > 0) {
+ LOG(WARNING) << "Unexpected hardware enforced parameters.";
}
- KeyCharacteristics keystoreEnforced{KeyMintSecurityLevel::KEYSTORE,
- convertKeyParametersFromLegacy(legacyKc.softwareEnforced)};
+ if (kmEnforcedOnly) {
+ return {kmEnforced};
+ }
+
+ KeyCharacteristics keystoreEnforced{KeyMintSecurityLevel::KEYSTORE, {}};
+
+ if (securityLevel != KeyMintSecurityLevel::SOFTWARE) {
+ // Don't include these tags on software backends, else they'd end up duplicated
+ // across both the keystore-enforced and software keymaster-enforced tags.
+ keystoreEnforced.authorizations = convertKeyParametersFromLegacy(legacyKc.softwareEnforced);
+ }
// Add all parameters that we know can be enforced by keystore but not by the legacy backend.
auto unsupported_requested = extractNewAndKeystoreEnforceableParams(genParams);
- std::copy(unsupported_requested.begin(), unsupported_requested.end(),
- std::back_insert_iterator(keystoreEnforced.authorizations));
+ keystoreEnforced.authorizations.insert(keystoreEnforced.authorizations.end(),
+ std::begin(unsupported_requested),
+ std::end(unsupported_requested));
- if (securityLevel == KeyMintSecurityLevel::SOFTWARE) {
- // If the security level of the backend is `software` we expect the hardware enforced list
- // to be empty. Log a warning otherwise.
- if (legacyKc.hardwareEnforced.size() != 0) {
- LOG(WARNING) << "Unexpected hardware enforced parameters.";
- }
- return {keystoreEnforced};
- }
-
- return {hwEnforced, keystoreEnforced};
+ return {kmEnforced, keystoreEnforced};
}
static V4_0_KeyFormat convertKeyFormatToLegacy(const KeyFormat& kf) {
@@ -722,7 +725,7 @@
km_error = convert(errorCode);
*keyCharacteristics =
processLegacyCharacteristics(securityLevel_, {} /* getParams */,
- v40KeyCharacteristics, true /* hwEnforcedOnly */);
+ v40KeyCharacteristics, true /* kmEnforcedOnly */);
});
if (!ret.isOk()) {
@@ -1311,7 +1314,7 @@
CHECK(serviceManager.get()) << "Failed to get ServiceManager";
auto result = enumerateKeymasterDevices<Keymaster4>(serviceManager.get());
auto softKeymaster = result[SecurityLevel::SOFTWARE];
- if (!result[SecurityLevel::TRUSTED_ENVIRONMENT]) {
+ if ((!result[SecurityLevel::TRUSTED_ENVIRONMENT]) && (!result[SecurityLevel::STRONGBOX])) {
result = enumerateKeymasterDevices<Keymaster3>(serviceManager.get());
}
if (softKeymaster) result[SecurityLevel::SOFTWARE] = softKeymaster;
diff --git a/keystore2/src/km_compat/lib.rs b/keystore2/src/km_compat/lib.rs
index 56c35bf..8d7310b 100644
--- a/keystore2/src/km_compat/lib.rs
+++ b/keystore2/src/km_compat/lib.rs
@@ -260,7 +260,7 @@
if let Some(mut extras) = extra_params {
kps.append(&mut extras);
}
- let result = legacy.begin(purpose, &blob, &kps, None);
+ let result = legacy.begin(purpose, blob, &kps, None);
assert!(result.is_ok(), "{:?}", result);
result.unwrap()
}
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 9eebb36..7454cca 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -416,14 +416,14 @@
BlobValue::Encrypted { iv, tag, data } => Ok(Blob {
flags: blob.flags,
value: BlobValue::Decrypted(
- decrypt(&data, &iv, &tag, None, None)
+ decrypt(data, iv, tag, None, None)
.context("In new_from_stream_decrypt_with.")?,
),
}),
BlobValue::PwEncrypted { iv, tag, data, salt, key_size } => Ok(Blob {
flags: blob.flags,
value: BlobValue::Decrypted(
- decrypt(&data, &iv, &tag, Some(salt), Some(*key_size))
+ decrypt(data, iv, tag, Some(salt), Some(*key_size))
.context("In new_from_stream_decrypt_with.")?,
),
}),
@@ -599,6 +599,15 @@
// * USRCERT was used for public certificates of USRPKEY entries. But KeyChain also
// used this for user installed certificates without private key material.
+ const KNOWN_KEYSTORE_PREFIXES: &'static [&'static str] =
+ &["USRPKEY_", "USRSKEY_", "USRCERT_", "CACERT_"];
+
+ fn is_keystore_alias(encoded_alias: &str) -> bool {
+ // We can check the encoded alias because the prefixes we are interested
+ // in are all in the printable range that don't get mangled.
+ Self::KNOWN_KEYSTORE_PREFIXES.iter().any(|prefix| encoded_alias.starts_with(prefix))
+ }
+
fn read_km_blob_file(&self, uid: u32, alias: &str) -> Result<Option<(Blob, String)>> {
let mut iter = ["USRPKEY", "USRSKEY"].iter();
@@ -630,28 +639,28 @@
Ok(Some(Self::new_from_stream(&mut file).context("In read_generic_blob.")?))
}
- /// Read a legacy vpn profile blob.
- pub fn read_vpn_profile(&self, uid: u32, alias: &str) -> Result<Option<Vec<u8>>> {
- let path = match self.make_vpn_profile_filename(uid, alias) {
+ /// Read a legacy keystore entry blob.
+ pub fn read_legacy_keystore_entry(&self, uid: u32, alias: &str) -> Result<Option<Vec<u8>>> {
+ let path = match self.make_legacy_keystore_entry_filename(uid, alias) {
Some(path) => path,
None => return Ok(None),
};
- let blob =
- Self::read_generic_blob(&path).context("In read_vpn_profile: Failed to read blob.")?;
+ let blob = Self::read_generic_blob(&path)
+ .context("In read_legacy_keystore_entry: Failed to read blob.")?;
Ok(blob.and_then(|blob| match blob.value {
BlobValue::Generic(blob) => Some(blob),
_ => {
- log::info!("Unexpected vpn profile blob type. Ignoring");
+ log::info!("Unexpected legacy keystore entry blob type. Ignoring");
None
}
}))
}
- /// Remove a vpn profile by the name alias with owner uid.
- pub fn remove_vpn_profile(&self, uid: u32, alias: &str) -> Result<()> {
- let path = match self.make_vpn_profile_filename(uid, alias) {
+ /// Remove a legacy keystore entry by the name alias with owner uid.
+ pub fn remove_legacy_keystore_entry(&self, uid: u32, alias: &str) -> Result<()> {
+ let path = match self.make_legacy_keystore_entry_filename(uid, alias) {
Some(path) => path,
None => return Ok(()),
};
@@ -659,25 +668,17 @@
if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
match e.kind() {
ErrorKind::NotFound => return Ok(()),
- _ => return Err(e).context("In remove_vpn_profile."),
+ _ => return Err(e).context("In remove_legacy_keystore_entry."),
}
}
let user_id = uid_to_android_user(uid);
self.remove_user_dir_if_empty(user_id)
- .context("In remove_vpn_profile: Trying to remove empty user dir.")
+ .context("In remove_legacy_keystore_entry: Trying to remove empty user dir.")
}
- fn is_vpn_profile(encoded_alias: &str) -> bool {
- // We can check the encoded alias because the prefixes we are interested
- // in are all in the printable range that don't get mangled.
- encoded_alias.starts_with("VPN_")
- || encoded_alias.starts_with("PLATFORM_VPN_")
- || encoded_alias == "LOCKDOWN_VPN"
- }
-
- /// List all profiles belonging to the given uid.
- pub fn list_vpn_profiles(&self, uid: u32) -> Result<Vec<String>> {
+ /// List all entries belonging to the given uid.
+ pub fn list_legacy_keystore_entries_for_uid(&self, uid: u32) -> Result<Vec<String>> {
let mut path = self.path.clone();
let user_id = uid_to_android_user(uid);
path.push(format!("user_{}", user_id));
@@ -688,7 +689,10 @@
ErrorKind::NotFound => return Ok(Default::default()),
_ => {
return Err(e).context(format!(
- "In list_vpn_profiles: Failed to open legacy blob database. {:?}",
+ concat!(
+                        "In list_legacy_keystore_entries_for_uid: ",
+ "Failed to open legacy blob database: {:?}"
+ ),
path
))
}
@@ -696,27 +700,64 @@
};
let mut result: Vec<String> = Vec::new();
for entry in dir {
- let file_name =
- entry.context("In list_vpn_profiles: Trying to access dir entry")?.file_name();
+ let file_name = entry
+ .context("In list_legacy_keystore_entries_for_uid: Trying to access dir entry")?
+ .file_name();
if let Some(f) = file_name.to_str() {
let encoded_alias = &f[uid_str.len() + 1..];
- if f.starts_with(&uid_str) && Self::is_vpn_profile(encoded_alias) {
- result.push(
- Self::decode_alias(encoded_alias)
- .context("In list_vpn_profiles: Trying to decode alias.")?,
- )
+ if f.starts_with(&uid_str) && !Self::is_keystore_alias(encoded_alias) {
+ result.push(Self::decode_alias(encoded_alias).context(
+ "In list_legacy_keystore_entries_for_uid: Trying to decode alias.",
+ )?)
}
}
}
Ok(result)
}
- /// This function constructs the vpn_profile file name which has the form:
- /// user_<android user id>/<uid>_<alias>.
- fn make_vpn_profile_filename(&self, uid: u32, alias: &str) -> Option<PathBuf> {
- // legacy vpn entries must start with VPN_ or PLATFORM_VPN_ or are literally called
- // LOCKDOWN_VPN.
- if !Self::is_vpn_profile(alias) {
+ fn extract_legacy_alias(encoded_alias: &str) -> Option<String> {
+ if !Self::is_keystore_alias(encoded_alias) {
+ Self::decode_alias(encoded_alias).ok()
+ } else {
+ None
+ }
+ }
+
+ /// Lists all keystore entries belonging to the given user. Returns a map of UIDs
+ /// to sets of decoded aliases. Only returns entries that do not begin with
+ /// KNOWN_KEYSTORE_PREFIXES.
+ pub fn list_legacy_keystore_entries_for_user(
+ &self,
+ user_id: u32,
+ ) -> Result<HashMap<u32, HashSet<String>>> {
+ let user_entries = self
+ .list_user(user_id)
+ .context("In list_legacy_keystore_entries_for_user: Trying to list user.")?;
+
+ let result =
+ user_entries.into_iter().fold(HashMap::<u32, HashSet<String>>::new(), |mut acc, v| {
+ if let Some(sep_pos) = v.find('_') {
+ if let Ok(uid) = v[0..sep_pos].parse::<u32>() {
+ if let Some(alias) = Self::extract_legacy_alias(&v[sep_pos + 1..]) {
+ let entry = acc.entry(uid).or_default();
+ entry.insert(alias);
+ }
+ }
+ }
+ acc
+ });
+ Ok(result)
+ }
+
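+
+    // Note (illustration, not part of this file): list_legacy_keystore_entries_for_user above
+    // folds "<uid>_<encoded alias>" file names into a map of UID to alias set. The same fold,
+    // stripped of alias decoding and prefix filtering, as a std-only sketch:
+    //
+    //     use std::collections::{HashMap, HashSet};
+    //
+    //     fn group_by_uid(entries: Vec<String>) -> HashMap<u32, HashSet<String>> {
+    //         entries.into_iter().fold(HashMap::new(), |mut acc, v| {
+    //             if let Some(sep_pos) = v.find('_') {
+    //                 if let Ok(uid) = v[0..sep_pos].parse::<u32>() {
+    //                     let alias = v[sep_pos + 1..].to_string();
+    //                     acc.entry(uid).or_default().insert(alias);
+    //                 }
+    //             }
+    //             acc
+    //         })
+    //     }
+    //
+    //     fn main() {
+    //         let entries =
+    //             vec!["10001_wifi".to_string(), "10001_vpn".to_string(), "10002_foo".to_string()];
+    //         let map = group_by_uid(entries);
+    //         assert_eq!(map[&10001].len(), 2);
+    //         assert!(map[&10002].contains("foo"));
+    //     }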
+ /// This function constructs the legacy blob file name which has the form:
+ /// user_<android user id>/<uid>_<alias>. Legacy blob file names must not use
+ /// known keystore prefixes.
+ fn make_legacy_keystore_entry_filename(&self, uid: u32, alias: &str) -> Option<PathBuf> {
+ // Legacy entries must not use known keystore prefixes.
+ if Self::is_keystore_alias(alias) {
+ log::warn!(
+ "Known keystore prefixes cannot be used with legacy keystore -> ignoring request."
+ );
return None;
}
@@ -790,12 +831,12 @@
.is_none())
}
- fn extract_alias(encoded_alias: &str) -> Option<String> {
+ fn extract_keystore_alias(encoded_alias: &str) -> Option<String> {
// We can check the encoded alias because the prefixes we are interested
// in are all in the printable range that don't get mangled.
- for prefix in &["USRPKEY_", "USRSKEY_", "USRCERT_", "CACERT_"] {
+ for prefix in Self::KNOWN_KEYSTORE_PREFIXES {
if let Some(alias) = encoded_alias.strip_prefix(prefix) {
- return Self::decode_alias(&alias).ok();
+ return Self::decode_alias(alias).ok();
}
}
None
@@ -841,7 +882,7 @@
user_entries.into_iter().fold(HashMap::<u32, HashSet<String>>::new(), |mut acc, v| {
if let Some(sep_pos) = v.find('_') {
if let Ok(uid) = v[0..sep_pos].parse::<u32>() {
- if let Some(alias) = Self::extract_alias(&v[sep_pos + 1..]) {
+ if let Some(alias) = Self::extract_keystore_alias(&v[sep_pos + 1..]) {
let entry = acc.entry(uid).or_default();
entry.insert(alias);
}
@@ -869,7 +910,7 @@
return None;
}
let encoded_alias = &v[uid_str.len()..];
- Self::extract_alias(encoded_alias)
+ Self::extract_keystore_alias(encoded_alias)
})
.collect();
@@ -1376,11 +1417,11 @@
}
#[test]
- fn list_vpn_profiles_on_non_existing_user() -> Result<()> {
- let temp_dir = TempDir::new("list_vpn_profiles_on_non_existing_user")?;
+ fn list_legacy_keystore_entries_on_non_existing_user() -> Result<()> {
+ let temp_dir = TempDir::new("list_legacy_keystore_entries_on_non_existing_user")?;
let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
- assert!(legacy_blob_loader.list_vpn_profiles(20)?.is_empty());
+ assert!(legacy_blob_loader.list_legacy_keystore_entries_for_user(20)?.is_empty());
Ok(())
}
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_migrator.rs
index f92fd45..65f4b0b 100644
--- a/keystore2/src/legacy_migrator.rs
+++ b/keystore2/src/legacy_migrator.rs
@@ -567,7 +567,7 @@
if let Some(super_key) = self
.legacy_loader
- .load_super_key(user_id, &pw)
+ .load_super_key(user_id, pw)
.context("In check_and_migrate_super_key: Trying to load legacy super key.")?
{
let (blob, blob_metadata) =
@@ -724,8 +724,8 @@
fn deref(&self) -> &Self::Target {
match self {
- Self::Vec(v) => &v,
- Self::ZVec(v) => &v,
+ Self::Vec(v) => v,
+ Self::ZVec(v) => v,
}
}
}
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index 51316d7..8b629b1 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -32,6 +32,7 @@
pub mod legacy_migrator;
pub mod maintenance;
pub mod metrics;
+pub mod metrics_store;
pub mod operation;
pub mod permission;
pub mod raw_device;
diff --git a/keystore2/src/maintenance.rs b/keystore2/src/maintenance.rs
index 0633bc1..7ce9042 100644
--- a/keystore2/src/maintenance.rs
+++ b/keystore2/src/maintenance.rs
@@ -23,8 +23,9 @@
use crate::permission::{KeyPerm, KeystorePerm};
use crate::super_key::UserState;
use crate::utils::{check_key_permission, check_keystore_permission, watchdog as wd};
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::IKeyMintDevice::IKeyMintDevice;
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ IKeyMintDevice::IKeyMintDevice, SecurityLevel::SecurityLevel,
+};
use android_security_maintenance::aidl::android::security::maintenance::{
IKeystoreMaintenance::{BnKeystoreMaintenance, IKeystoreMaintenance},
UserState::UserState as AidlUserState,
@@ -32,22 +33,35 @@
use android_security_maintenance::binder::{
BinderFeatures, Interface, Result as BinderResult, Strong, ThreadState,
};
+use android_system_keystore2::aidl::android::system::keystore2::KeyDescriptor::KeyDescriptor;
use android_system_keystore2::aidl::android::system::keystore2::ResponseCode::ResponseCode;
-use android_system_keystore2::aidl::android::system::keystore2::{
- Domain::Domain, KeyDescriptor::KeyDescriptor,
-};
use anyhow::{Context, Result};
use keystore2_crypto::Password;
+/// Reexport Domain for the benefit of DeleteListener
+pub use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
+
+/// The Maintenance module takes a delete listener argument which observes user and namespace
+/// deletion events.
+pub trait DeleteListener {
+ /// Called by the maintenance module when an app/namespace is deleted.
+ fn delete_namespace(&self, domain: Domain, namespace: i64) -> Result<()>;
+ /// Called by the maintenance module when a user is deleted.
+ fn delete_user(&self, user_id: u32) -> Result<()>;
+}
+
/// This struct is defined to implement the aforementioned AIDL interface.
-/// As of now, it is an empty struct.
-pub struct Maintenance;
+pub struct Maintenance {
+ delete_listener: Box<dyn DeleteListener + Send + Sync + 'static>,
+}
impl Maintenance {
- /// Create a new instance of Keystore User Manager service.
- pub fn new_native_binder() -> Result<Strong<dyn IKeystoreMaintenance>> {
+ /// Create a new instance of Keystore Maintenance service.
+ pub fn new_native_binder(
+ delete_listener: Box<dyn DeleteListener + Send + Sync + 'static>,
+ ) -> Result<Strong<dyn IKeystoreMaintenance>> {
Ok(BnKeystoreMaintenance::new_binder(
- Self,
+ Self { delete_listener },
BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
))
}
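Note: the DeleteListener indirection lets Maintenance notify legacy keystore about user/namespace removal without depending on it directly. A reduced std-only sketch of that wiring (the String error type and the logging listener are illustrative; the real listener is LegacyKeystore):

    trait DeleteListener {
        fn delete_namespace(&self, namespace: i64) -> Result<(), String>;
        fn delete_user(&self, user_id: u32) -> Result<(), String>;
    }

    struct Maintenance {
        delete_listener: Box<dyn DeleteListener + Send + Sync + 'static>,
    }

    impl Maintenance {
        fn new(delete_listener: Box<dyn DeleteListener + Send + Sync + 'static>) -> Self {
            Self { delete_listener }
        }

        fn clear_namespace(&self, namespace: i64) -> Result<(), String> {
            // ... delete the keys owned by the namespace here, then notify the listener ...
            self.delete_listener.delete_namespace(namespace)
        }
    }

    struct LoggingListener;

    impl DeleteListener for LoggingListener {
        fn delete_namespace(&self, namespace: i64) -> Result<(), String> {
            println!("namespace {} deleted", namespace);
            Ok(())
        }
        fn delete_user(&self, user_id: u32) -> Result<(), String> {
            println!("user {} deleted", user_id);
            Ok(())
        }
    }

    fn main() {
        let maintenance = Maintenance::new(Box::new(LoggingListener));
        maintenance.clear_namespace(42).unwrap();
    }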
@@ -89,7 +103,7 @@
}
}
- fn add_or_remove_user(user_id: i32) -> Result<()> {
+ fn add_or_remove_user(&self, user_id: i32) -> Result<()> {
// Check permission. Function should return if this failed. Therefore having '?' at the end
// is very important.
check_keystore_permission(KeystorePerm::change_user()).context("In add_or_remove_user.")?;
@@ -102,10 +116,13 @@
false,
)
})
- .context("In add_or_remove_user: Trying to delete keys from db.")
+ .context("In add_or_remove_user: Trying to delete keys from db.")?;
+ self.delete_listener
+ .delete_user(user_id as u32)
+ .context("In add_or_remove_user: While invoking the delete listener.")
}
- fn clear_namespace(domain: Domain, nspace: i64) -> Result<()> {
+ fn clear_namespace(&self, domain: Domain, nspace: i64) -> Result<()> {
// Permission check. Must return on error. Do not touch the '?'.
check_keystore_permission(KeystorePerm::clear_uid()).context("In clear_namespace.")?;
@@ -113,7 +130,10 @@
.bulk_delete_uid(domain, nspace)
.context("In clear_namespace: Trying to delete legacy keys.")?;
DB.with(|db| db.borrow_mut().unbind_keys_for_namespace(domain, nspace))
- .context("In clear_namespace: Trying to delete keys from db.")
+ .context("In clear_namespace: Trying to delete keys from db.")?;
+ self.delete_listener
+ .delete_namespace(domain, nspace)
+ .context("In clear_namespace: While invoking the delete listener.")
}
fn get_state(user_id: i32) -> Result<AidlUserState> {
@@ -133,22 +153,47 @@
}
}
- fn early_boot_ended_help(sec_level: SecurityLevel) -> Result<()> {
- let (dev, _, _) = get_keymint_device(&sec_level)
- .context("In early_boot_ended: getting keymint device")?;
- let km_dev: Strong<dyn IKeyMintDevice> =
- dev.get_interface().context("In early_boot_ended: getting keymint device interface")?;
+ fn call_with_watchdog<F>(sec_level: SecurityLevel, name: &'static str, op: &F) -> Result<()>
+ where
+ F: Fn(Strong<dyn IKeyMintDevice>) -> binder::public_api::Result<()>,
+ {
+ let (km_dev, _, _) = get_keymint_device(&sec_level)
+ .context("In call_with_watchdog: getting keymint device")?;
- let _wp = wd::watch_millis_with(
- "In early_boot_ended_help: calling earlyBootEnded()",
- 500,
- move || format!("Seclevel: {:?}", sec_level),
- );
- map_km_error(km_dev.earlyBootEnded())
- .context("In keymint device: calling earlyBootEnded")?;
+ let _wp = wd::watch_millis_with("In call_with_watchdog", 500, move || {
+ format!("Seclevel: {:?} Op: {}", sec_level, name)
+ });
+ map_km_error(op(km_dev)).with_context(|| format!("In keymint device: calling {}", name))?;
Ok(())
}
+ fn call_on_all_security_levels<F>(name: &'static str, op: F) -> Result<()>
+ where
+ F: Fn(Strong<dyn IKeyMintDevice>) -> binder::public_api::Result<()>,
+ {
+ let sec_levels = [
+ (SecurityLevel::TRUSTED_ENVIRONMENT, "TRUSTED_ENVIRONMENT"),
+ (SecurityLevel::STRONGBOX, "STRONGBOX"),
+ ];
+ sec_levels.iter().fold(Ok(()), move |result, (sec_level, sec_level_string)| {
+ let curr_result = Maintenance::call_with_watchdog(*sec_level, name, &op);
+ match curr_result {
+ Ok(()) => log::info!(
+ "Call to {} succeeded for security level {}.",
+ name,
+ &sec_level_string
+ ),
+ Err(ref e) => log::error!(
+ "Call to {} failed for security level {}: {}.",
+ name,
+ &sec_level_string,
+ e
+ ),
+ }
+ result.and(curr_result)
+ })
+ }
+
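+
+    // Note (illustration, not part of this file): call_on_all_security_levels runs the operation
+    // on every security level, logs each outcome, and still reports a failure because
+    // `result.and(curr_result)` keeps the first error. The same aggregation pattern, std-only,
+    // with string names standing in for security levels:
+    //
+    //     fn call_on_all(names: &[&str], op: impl Fn(&str) -> Result<(), String>) -> Result<(), String> {
+    //         names.iter().copied().fold(Ok(()), |result, name| {
+    //             let curr = op(name);
+    //             match &curr {
+    //                 Ok(()) => println!("{}: ok", name),
+    //                 Err(e) => println!("{}: failed: {}", name, e),
+    //             }
+    //             // Keep the first error; a later success cannot mask an earlier failure.
+    //             result.and(curr)
+    //         })
+    //     }
+    //
+    //     fn main() {
+    //         let outcome = call_on_all(&["TRUSTED_ENVIRONMENT", "STRONGBOX"], |name| {
+    //             if name == "STRONGBOX" {
+    //                 Err("no StrongBox on this device".to_string())
+    //             } else {
+    //                 Ok(())
+    //             }
+    //         });
+    //         assert!(outcome.is_err());
+    //     }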
fn early_boot_ended() -> Result<()> {
check_keystore_permission(KeystorePerm::early_boot_ended())
.context("In early_boot_ended. Checking permission")?;
@@ -157,21 +202,7 @@
if let Err(e) = DB.with(|db| SUPER_KEY.set_up_boot_level_cache(&mut db.borrow_mut())) {
log::error!("SUPER_KEY.set_up_boot_level_cache failed:\n{:?}\n:(", e);
}
-
- let sec_levels = [
- (SecurityLevel::TRUSTED_ENVIRONMENT, "TRUSTED_ENVIRONMENT"),
- (SecurityLevel::STRONGBOX, "STRONGBOX"),
- ];
- sec_levels.iter().fold(Ok(()), |result, (sec_level, sec_level_string)| {
- let curr_result = Maintenance::early_boot_ended_help(*sec_level);
- if curr_result.is_err() {
- log::error!(
- "Call to earlyBootEnded failed for security level {}.",
- &sec_level_string
- );
- }
- result.and(curr_result)
- })
+ Maintenance::call_on_all_security_levels("earlyBootEnded", |dev| dev.earlyBootEnded())
}
fn on_device_off_body() -> Result<()> {
@@ -190,9 +221,9 @@
let key_id_guard = match source.domain {
Domain::APP | Domain::SELINUX | Domain::KEY_ID => {
let (key_id_guard, _) = LEGACY_MIGRATOR
- .with_try_migrate(&source, caller_uid, || {
+ .with_try_migrate(source, caller_uid, || {
db.borrow_mut().load_key_entry(
- &source,
+ source,
KeyType::Client,
KeyEntryLoadBits::NONE,
caller_uid,
@@ -219,6 +250,15 @@
})
})
}
+
+ fn delete_all_keys() -> Result<()> {
+ // Security critical permission check. This statement must return on fail.
+ check_keystore_permission(KeystorePerm::delete_all_keys())
+ .context("In delete_all_keys. Checking permission")?;
+ log::info!("In delete_all_keys.");
+
+ Maintenance::call_on_all_security_levels("deleteAllKeys", |dev| dev.deleteAllKeys())
+ }
}
impl Interface for Maintenance {}
@@ -231,17 +271,17 @@
fn onUserAdded(&self, user_id: i32) -> BinderResult<()> {
let _wp = wd::watch_millis("IKeystoreMaintenance::onUserAdded", 500);
- map_or_log_err(Self::add_or_remove_user(user_id), Ok)
+ map_or_log_err(self.add_or_remove_user(user_id), Ok)
}
fn onUserRemoved(&self, user_id: i32) -> BinderResult<()> {
let _wp = wd::watch_millis("IKeystoreMaintenance::onUserRemoved", 500);
- map_or_log_err(Self::add_or_remove_user(user_id), Ok)
+ map_or_log_err(self.add_or_remove_user(user_id), Ok)
}
fn clearNamespace(&self, domain: Domain, nspace: i64) -> BinderResult<()> {
let _wp = wd::watch_millis("IKeystoreMaintenance::clearNamespace", 500);
- map_or_log_err(Self::clear_namespace(domain, nspace), Ok)
+ map_or_log_err(self.clear_namespace(domain, nspace), Ok)
}
fn getState(&self, user_id: i32) -> BinderResult<AidlUserState> {
@@ -267,4 +307,9 @@
let _wp = wd::watch_millis("IKeystoreMaintenance::migrateKeyNamespace", 500);
map_or_log_err(Self::migrate_key_namespace(source, destination), Ok)
}
+
+ fn deleteAllKeys(&self) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IKeystoreMaintenance::deleteAllKeys", 500);
+ map_or_log_err(Self::delete_all_keys(), Ok)
+ }
}
diff --git a/keystore2/src/metrics.rs b/keystore2/src/metrics.rs
index 07c3d64..42295b7 100644
--- a/keystore2/src/metrics.rs
+++ b/keystore2/src/metrics.rs
@@ -12,496 +12,45 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//! This module provides convenience functions for keystore2 logging.
-use crate::error::get_error_code;
-use crate::globals::{DB, LOGS_HANDLER};
-use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
-use crate::operation::Outcome;
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- Algorithm::Algorithm, BlockMode::BlockMode, Digest::Digest, EcCurve::EcCurve,
- HardwareAuthenticatorType::HardwareAuthenticatorType, KeyOrigin::KeyOrigin,
- KeyParameter::KeyParameter, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
- SecurityLevel::SecurityLevel,
+//! This module implements the IKeystoreMetrics AIDL interface, which exposes the API method for the
+//! proxy in the system server to pull the aggregated metrics in keystore.
+use crate::error::map_or_log_err;
+use crate::metrics_store::METRICS_STORE;
+use crate::permission::KeystorePerm;
+use crate::utils::{check_keystore_permission, watchdog as wd};
+use android_security_metrics::aidl::android::security::metrics::{
+ AtomID::AtomID,
+ IKeystoreMetrics::{BnKeystoreMetrics, IKeystoreMetrics},
+ KeystoreAtom::KeystoreAtom,
};
-use anyhow::Result;
-use keystore2_system_property::PropertyWatcher;
-use statslog_rust::{
- keystore2_key_creation_event_reported::{
- Algorithm as StatsdAlgorithm, EcCurve as StatsdEcCurve, KeyOrigin as StatsdKeyOrigin,
- Keystore2KeyCreationEventReported, SecurityLevel as StatsdKeyCreationSecurityLevel,
- UserAuthType as StatsdUserAuthType,
- },
- keystore2_key_operation_event_reported::{
- Keystore2KeyOperationEventReported, Outcome as StatsdOutcome, Purpose as StatsdKeyPurpose,
- SecurityLevel as StatsdKeyOperationSecurityLevel,
- },
- keystore2_storage_stats::StorageType as StatsdStorageType,
-};
-use statslog_rust_header::Atoms;
-use statspull_rust::{set_pull_atom_callback, StatsPullResult};
+use android_security_metrics::binder::{BinderFeatures, Interface, Result as BinderResult, Strong};
+use anyhow::{Context, Result};
-fn create_default_key_creation_atom() -> Keystore2KeyCreationEventReported {
- // If a value is not present, fields represented by bitmaps and i32 fields
- // will take 0, except error_code which defaults to 1 indicating NO_ERROR and key_size,
- // and auth_time_out which default to -1.
- // The boolean fields are set to false by default.
- // Some keymint enums do have 0 as an enum variant value. In such cases, the corresponding
- // enum variant value in atoms.proto is incremented by 1, in order to have 0 as the reserved
- // value for unspecified fields.
- Keystore2KeyCreationEventReported {
- algorithm: StatsdAlgorithm::AlgorithmUnspecified,
- key_size: -1,
- key_origin: StatsdKeyOrigin::OriginUnspecified,
- user_auth_type: StatsdUserAuthType::AuthTypeUnspecified,
- user_auth_key_timeout_seconds: -1,
- padding_mode_bitmap: 0,
- digest_bitmap: 0,
- block_mode_bitmap: 0,
- purpose_bitmap: 0,
- ec_curve: StatsdEcCurve::EcCurveUnspecified,
- // as per keystore2/ResponseCode.aidl, 1 is reserved for NO_ERROR
- error_code: 1,
- attestation_requested: false,
- security_level: StatsdKeyCreationSecurityLevel::SecurityLevelUnspecified,
+/// This struct is defined to implement IKeystoreMetrics AIDL interface.
+pub struct Metrics;
+
+impl Metrics {
+ /// Create a new instance of Keystore Metrics service.
+ pub fn new_native_binder() -> Result<Strong<dyn IKeystoreMetrics>> {
+ Ok(BnKeystoreMetrics::new_binder(
+ Self,
+ BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+ ))
+ }
+
+ fn pull_metrics(&self, atom_id: AtomID) -> Result<Vec<KeystoreAtom>> {
+ // Check permission. Function should return if this failed. Therefore having '?' at the end
+ // is very important.
+ check_keystore_permission(KeystorePerm::pull_metrics()).context("In pull_metrics.")?;
+ METRICS_STORE.get_atoms(atom_id)
}
}
-fn create_default_key_operation_atom() -> Keystore2KeyOperationEventReported {
- Keystore2KeyOperationEventReported {
- purpose: StatsdKeyPurpose::KeyPurposeUnspecified,
- padding_mode_bitmap: 0,
- digest_bitmap: 0,
- block_mode_bitmap: 0,
- outcome: StatsdOutcome::OutcomeUnspecified,
- error_code: 1,
- key_upgraded: false,
- security_level: StatsdKeyOperationSecurityLevel::SecurityLevelUnspecified,
+impl Interface for Metrics {}
+
+impl IKeystoreMetrics for Metrics {
+ fn pullMetrics(&self, atom_id: AtomID) -> BinderResult<Vec<KeystoreAtom>> {
+ let _wp = wd::watch_millis("IKeystoreMetrics::pullMetrics", 500);
+ map_or_log_err(self.pull_metrics(atom_id), Ok)
}
}
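Note: pull_metrics checks the pull_metrics permission and then hands back whatever METRICS_STORE has aggregated for the requested atom. A reduced std-only model of that store/pull split (AtomId, Atom, and the store below are simplified stand-ins, not the AIDL types or the real METRICS_STORE):

    use std::collections::HashMap;
    use std::sync::Mutex;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct AtomId(i32);

    #[derive(Clone)]
    struct Atom {
        count: u32,
    }

    #[derive(Default)]
    struct MetricsStore {
        atoms: Mutex<HashMap<AtomId, Vec<Atom>>>,
    }

    impl MetricsStore {
        fn record(&self, id: AtomId, atom: Atom) {
            self.atoms.lock().unwrap().entry(id).or_default().push(atom);
        }

        // The real service performs a keystore permission check before this
        // point and only then reads the store.
        fn get_atoms(&self, id: AtomId) -> Vec<Atom> {
            self.atoms.lock().unwrap().get(&id).cloned().unwrap_or_default()
        }
    }

    fn main() {
        let store = MetricsStore::default();
        store.record(AtomId(1), Atom { count: 3 });
        assert_eq!(store.get_atoms(AtomId(1)).len(), 1);
        assert!(store.get_atoms(AtomId(2)).is_empty());
    }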
-
-/// Log key creation events via statsd API.
-pub fn log_key_creation_event_stats<U>(
- sec_level: SecurityLevel,
- key_params: &[KeyParameter],
- result: &Result<U>,
-) {
- let key_creation_event_stats =
- construct_key_creation_event_stats(sec_level, key_params, result);
-
- LOGS_HANDLER.queue_lo(move |_| {
- let logging_result = key_creation_event_stats.stats_write();
-
- if let Err(e) = logging_result {
- log::error!("Error in logging key creation event in the async task. {:?}", e);
- }
- });
-}
-
-/// Log key operation events via statsd API.
-pub fn log_key_operation_event_stats(
- sec_level: SecurityLevel,
- key_purpose: KeyPurpose,
- op_params: &[KeyParameter],
- op_outcome: &Outcome,
- key_upgraded: bool,
-) {
- let key_operation_event_stats = construct_key_operation_event_stats(
- sec_level,
- key_purpose,
- op_params,
- op_outcome,
- key_upgraded,
- );
-
- LOGS_HANDLER.queue_lo(move |_| {
- let logging_result = key_operation_event_stats.stats_write();
-
- if let Err(e) = logging_result {
- log::error!("Error in logging key operation event in the async task. {:?}", e);
- }
- });
-}
-
-fn construct_key_creation_event_stats<U>(
- sec_level: SecurityLevel,
- key_params: &[KeyParameter],
- result: &Result<U>,
-) -> Keystore2KeyCreationEventReported {
- let mut key_creation_event_atom = create_default_key_creation_atom();
-
- if let Err(ref e) = result {
- key_creation_event_atom.error_code = get_error_code(e);
- }
-
- key_creation_event_atom.security_level = match sec_level {
- SecurityLevel::SOFTWARE => StatsdKeyCreationSecurityLevel::SecurityLevelSoftware,
- SecurityLevel::TRUSTED_ENVIRONMENT => {
- StatsdKeyCreationSecurityLevel::SecurityLevelTrustedEnvironment
- }
- SecurityLevel::STRONGBOX => StatsdKeyCreationSecurityLevel::SecurityLevelStrongbox,
- //KEYSTORE is not a valid variant here
- _ => StatsdKeyCreationSecurityLevel::SecurityLevelUnspecified,
- };
-
- for key_param in key_params.iter().map(KsKeyParamValue::from) {
- match key_param {
- KsKeyParamValue::Algorithm(a) => {
- key_creation_event_atom.algorithm = match a {
- Algorithm::RSA => StatsdAlgorithm::Rsa,
- Algorithm::EC => StatsdAlgorithm::Ec,
- Algorithm::AES => StatsdAlgorithm::Aes,
- Algorithm::TRIPLE_DES => StatsdAlgorithm::TripleDes,
- Algorithm::HMAC => StatsdAlgorithm::Hmac,
- _ => StatsdAlgorithm::AlgorithmUnspecified,
- }
- }
- KsKeyParamValue::KeySize(s) => {
- key_creation_event_atom.key_size = s;
- }
- KsKeyParamValue::KeyOrigin(o) => {
- key_creation_event_atom.key_origin = match o {
- KeyOrigin::GENERATED => StatsdKeyOrigin::Generated,
- KeyOrigin::DERIVED => StatsdKeyOrigin::Derived,
- KeyOrigin::IMPORTED => StatsdKeyOrigin::Imported,
- KeyOrigin::RESERVED => StatsdKeyOrigin::Reserved,
- KeyOrigin::SECURELY_IMPORTED => StatsdKeyOrigin::SecurelyImported,
- _ => StatsdKeyOrigin::OriginUnspecified,
- }
- }
- KsKeyParamValue::HardwareAuthenticatorType(a) => {
- key_creation_event_atom.user_auth_type = match a {
- HardwareAuthenticatorType::NONE => StatsdUserAuthType::None,
- HardwareAuthenticatorType::PASSWORD => StatsdUserAuthType::Password,
- HardwareAuthenticatorType::FINGERPRINT => StatsdUserAuthType::Fingerprint,
- HardwareAuthenticatorType::ANY => StatsdUserAuthType::Any,
- _ => StatsdUserAuthType::AuthTypeUnspecified,
- }
- }
- KsKeyParamValue::AuthTimeout(t) => {
- key_creation_event_atom.user_auth_key_timeout_seconds = t;
- }
- KsKeyParamValue::PaddingMode(p) => {
- key_creation_event_atom.padding_mode_bitmap =
- compute_padding_mode_bitmap(&key_creation_event_atom.padding_mode_bitmap, p);
- }
- KsKeyParamValue::Digest(d) => {
- key_creation_event_atom.digest_bitmap =
- compute_digest_bitmap(&key_creation_event_atom.digest_bitmap, d);
- }
- KsKeyParamValue::BlockMode(b) => {
- key_creation_event_atom.block_mode_bitmap =
- compute_block_mode_bitmap(&key_creation_event_atom.block_mode_bitmap, b);
- }
- KsKeyParamValue::KeyPurpose(k) => {
- key_creation_event_atom.purpose_bitmap =
- compute_purpose_bitmap(&key_creation_event_atom.purpose_bitmap, k);
- }
- KsKeyParamValue::EcCurve(e) => {
- key_creation_event_atom.ec_curve = match e {
- EcCurve::P_224 => StatsdEcCurve::P224,
- EcCurve::P_256 => StatsdEcCurve::P256,
- EcCurve::P_384 => StatsdEcCurve::P384,
- EcCurve::P_521 => StatsdEcCurve::P521,
- _ => StatsdEcCurve::EcCurveUnspecified,
- }
- }
- KsKeyParamValue::AttestationChallenge(_) => {
- key_creation_event_atom.attestation_requested = true;
- }
- _ => {}
- }
- }
- key_creation_event_atom
-}
-
-fn construct_key_operation_event_stats(
- sec_level: SecurityLevel,
- key_purpose: KeyPurpose,
- op_params: &[KeyParameter],
- op_outcome: &Outcome,
- key_upgraded: bool,
-) -> Keystore2KeyOperationEventReported {
- let mut key_operation_event_atom = create_default_key_operation_atom();
-
- key_operation_event_atom.security_level = match sec_level {
- SecurityLevel::SOFTWARE => StatsdKeyOperationSecurityLevel::SecurityLevelSoftware,
- SecurityLevel::TRUSTED_ENVIRONMENT => {
- StatsdKeyOperationSecurityLevel::SecurityLevelTrustedEnvironment
- }
- SecurityLevel::STRONGBOX => StatsdKeyOperationSecurityLevel::SecurityLevelStrongbox,
- //KEYSTORE is not a valid variant here
- _ => StatsdKeyOperationSecurityLevel::SecurityLevelUnspecified,
- };
-
- key_operation_event_atom.key_upgraded = key_upgraded;
-
- key_operation_event_atom.purpose = match key_purpose {
- KeyPurpose::ENCRYPT => StatsdKeyPurpose::Encrypt,
- KeyPurpose::DECRYPT => StatsdKeyPurpose::Decrypt,
- KeyPurpose::SIGN => StatsdKeyPurpose::Sign,
- KeyPurpose::VERIFY => StatsdKeyPurpose::Verify,
- KeyPurpose::WRAP_KEY => StatsdKeyPurpose::WrapKey,
- KeyPurpose::AGREE_KEY => StatsdKeyPurpose::AgreeKey,
- KeyPurpose::ATTEST_KEY => StatsdKeyPurpose::AttestKey,
- _ => StatsdKeyPurpose::KeyPurposeUnspecified,
- };
-
- key_operation_event_atom.outcome = match op_outcome {
- Outcome::Unknown | Outcome::Dropped => StatsdOutcome::Dropped,
- Outcome::Success => StatsdOutcome::Success,
- Outcome::Abort => StatsdOutcome::Abort,
- Outcome::Pruned => StatsdOutcome::Pruned,
- Outcome::ErrorCode(e) => {
- key_operation_event_atom.error_code = e.0;
- StatsdOutcome::Error
- }
- };
-
- for key_param in op_params.iter().map(KsKeyParamValue::from) {
- match key_param {
- KsKeyParamValue::PaddingMode(p) => {
- key_operation_event_atom.padding_mode_bitmap =
- compute_padding_mode_bitmap(&key_operation_event_atom.padding_mode_bitmap, p);
- }
- KsKeyParamValue::Digest(d) => {
- key_operation_event_atom.digest_bitmap =
- compute_digest_bitmap(&key_operation_event_atom.digest_bitmap, d);
- }
- KsKeyParamValue::BlockMode(b) => {
- key_operation_event_atom.block_mode_bitmap =
- compute_block_mode_bitmap(&key_operation_event_atom.block_mode_bitmap, b);
- }
- _ => {}
- }
- }
-
- key_operation_event_atom
-}
-
-fn compute_purpose_bitmap(purpose_bitmap: &i32, purpose: KeyPurpose) -> i32 {
- let mut bitmap = *purpose_bitmap;
- match purpose {
- KeyPurpose::ENCRYPT => {
- bitmap |= 1 << KeyPurposeBitPosition::ENCRYPT_BIT_POS as i32;
- }
- KeyPurpose::DECRYPT => {
- bitmap |= 1 << KeyPurposeBitPosition::DECRYPT_BIT_POS as i32;
- }
- KeyPurpose::SIGN => {
- bitmap |= 1 << KeyPurposeBitPosition::SIGN_BIT_POS as i32;
- }
- KeyPurpose::VERIFY => {
- bitmap |= 1 << KeyPurposeBitPosition::VERIFY_BIT_POS as i32;
- }
- KeyPurpose::WRAP_KEY => {
- bitmap |= 1 << KeyPurposeBitPosition::WRAP_KEY_BIT_POS as i32;
- }
- KeyPurpose::AGREE_KEY => {
- bitmap |= 1 << KeyPurposeBitPosition::AGREE_KEY_BIT_POS as i32;
- }
- KeyPurpose::ATTEST_KEY => {
- bitmap |= 1 << KeyPurposeBitPosition::ATTEST_KEY_BIT_POS as i32;
- }
- _ => {}
- }
- bitmap
-}
-
-fn compute_padding_mode_bitmap(padding_mode_bitmap: &i32, padding_mode: PaddingMode) -> i32 {
- let mut bitmap = *padding_mode_bitmap;
- match padding_mode {
- PaddingMode::NONE => {
- bitmap |= 1 << PaddingModeBitPosition::NONE_BIT_POSITION as i32;
- }
- PaddingMode::RSA_OAEP => {
- bitmap |= 1 << PaddingModeBitPosition::RSA_OAEP_BIT_POS as i32;
- }
- PaddingMode::RSA_PSS => {
- bitmap |= 1 << PaddingModeBitPosition::RSA_PSS_BIT_POS as i32;
- }
- PaddingMode::RSA_PKCS1_1_5_ENCRYPT => {
- bitmap |= 1 << PaddingModeBitPosition::RSA_PKCS1_1_5_ENCRYPT_BIT_POS as i32;
- }
- PaddingMode::RSA_PKCS1_1_5_SIGN => {
- bitmap |= 1 << PaddingModeBitPosition::RSA_PKCS1_1_5_SIGN_BIT_POS as i32;
- }
- PaddingMode::PKCS7 => {
- bitmap |= 1 << PaddingModeBitPosition::PKCS7_BIT_POS as i32;
- }
- _ => {}
- }
- bitmap
-}
-
-fn compute_digest_bitmap(digest_bitmap: &i32, digest: Digest) -> i32 {
- let mut bitmap = *digest_bitmap;
- match digest {
- Digest::NONE => {
- bitmap |= 1 << DigestBitPosition::NONE_BIT_POSITION as i32;
- }
- Digest::MD5 => {
- bitmap |= 1 << DigestBitPosition::MD5_BIT_POS as i32;
- }
- Digest::SHA1 => {
- bitmap |= 1 << DigestBitPosition::SHA_1_BIT_POS as i32;
- }
- Digest::SHA_2_224 => {
- bitmap |= 1 << DigestBitPosition::SHA_2_224_BIT_POS as i32;
- }
- Digest::SHA_2_256 => {
- bitmap |= 1 << DigestBitPosition::SHA_2_256_BIT_POS as i32;
- }
- Digest::SHA_2_384 => {
- bitmap |= 1 << DigestBitPosition::SHA_2_384_BIT_POS as i32;
- }
- Digest::SHA_2_512 => {
- bitmap |= 1 << DigestBitPosition::SHA_2_512_BIT_POS as i32;
- }
- _ => {}
- }
- bitmap
-}
-
-fn compute_block_mode_bitmap(block_mode_bitmap: &i32, block_mode: BlockMode) -> i32 {
- let mut bitmap = *block_mode_bitmap;
- match block_mode {
- BlockMode::ECB => {
- bitmap |= 1 << BlockModeBitPosition::ECB_BIT_POS as i32;
- }
- BlockMode::CBC => {
- bitmap |= 1 << BlockModeBitPosition::CBC_BIT_POS as i32;
- }
- BlockMode::CTR => {
- bitmap |= 1 << BlockModeBitPosition::CTR_BIT_POS as i32;
- }
- BlockMode::GCM => {
- bitmap |= 1 << BlockModeBitPosition::GCM_BIT_POS as i32;
- }
- _ => {}
- }
- bitmap
-}
-
-/// Registers pull metrics callbacks
-pub fn register_pull_metrics_callbacks() -> Result<()> {
- // Before registering the callbacks with statsd, we have to wait for the system to finish
- // booting up. This avoids possible races that may occur at startup. For example, statsd
- // depends on a companion service, and if registration happens too soon it will fail since
- // the companion service isn't up yet.
- let mut watcher = PropertyWatcher::new("sys.boot_completed")?;
- loop {
- watcher.wait()?;
- let value = watcher.read(|_name, value| Ok(value.trim().to_string()));
- if value? == "1" {
- set_pull_atom_callback(Atoms::Keystore2StorageStats, None, pull_metrics_callback);
- break;
- }
- }
- Ok(())
-}
-
-fn pull_metrics_callback() -> StatsPullResult {
- let mut result = StatsPullResult::new();
- let mut append = |stat| {
- match stat {
- Ok(s) => result.push(Box::new(s)),
- Err(error) => {
- log::error!("pull_metrics_callback: Error getting storage stat: {}", error)
- }
- };
- };
- DB.with(|db| {
- let mut db = db.borrow_mut();
- append(db.get_storage_stat(StatsdStorageType::Database));
- append(db.get_storage_stat(StatsdStorageType::KeyEntry));
- append(db.get_storage_stat(StatsdStorageType::KeyEntryIdIndex));
- append(db.get_storage_stat(StatsdStorageType::KeyEntryDomainNamespaceIndex));
- append(db.get_storage_stat(StatsdStorageType::BlobEntry));
- append(db.get_storage_stat(StatsdStorageType::BlobEntryKeyEntryIdIndex));
- append(db.get_storage_stat(StatsdStorageType::KeyParameter));
- append(db.get_storage_stat(StatsdStorageType::KeyParameterKeyEntryIdIndex));
- append(db.get_storage_stat(StatsdStorageType::KeyMetadata));
- append(db.get_storage_stat(StatsdStorageType::KeyMetadataKeyEntryIdIndex));
- append(db.get_storage_stat(StatsdStorageType::Grant));
- append(db.get_storage_stat(StatsdStorageType::AuthToken));
- append(db.get_storage_stat(StatsdStorageType::BlobMetadata));
- append(db.get_storage_stat(StatsdStorageType::BlobMetadataBlobEntryIdIndex));
- });
- result
-}
-
-/// Enum defining the bit position for each padding mode. Since padding mode can be repeatable, it
-/// is represented using a bitmap.
-#[allow(non_camel_case_types)]
-#[repr(i32)]
-pub enum PaddingModeBitPosition {
- ///Bit position in the PaddingMode bitmap for NONE.
- NONE_BIT_POSITION = 0,
- ///Bit position in the PaddingMode bitmap for RSA_OAEP.
- RSA_OAEP_BIT_POS = 1,
- ///Bit position in the PaddingMode bitmap for RSA_PSS.
- RSA_PSS_BIT_POS = 2,
- ///Bit position in the PaddingMode bitmap for RSA_PKCS1_1_5_ENCRYPT.
- RSA_PKCS1_1_5_ENCRYPT_BIT_POS = 3,
- ///Bit position in the PaddingMode bitmap for RSA_PKCS1_1_5_SIGN.
- RSA_PKCS1_1_5_SIGN_BIT_POS = 4,
- ///Bit position in the PaddingMode bitmap for RSA_PKCS7.
- PKCS7_BIT_POS = 5,
-}
-
-/// Enum defining the bit position for each digest type. Since digest can be repeatable in
-/// key parameters, it is represented using a bitmap.
-#[allow(non_camel_case_types)]
-#[repr(i32)]
-pub enum DigestBitPosition {
- ///Bit position in the Digest bitmap for NONE.
- NONE_BIT_POSITION = 0,
- ///Bit position in the Digest bitmap for MD5.
- MD5_BIT_POS = 1,
- ///Bit position in the Digest bitmap for SHA1.
- SHA_1_BIT_POS = 2,
- ///Bit position in the Digest bitmap for SHA_2_224.
- SHA_2_224_BIT_POS = 3,
- ///Bit position in the Digest bitmap for SHA_2_256.
- SHA_2_256_BIT_POS = 4,
- ///Bit position in the Digest bitmap for SHA_2_384.
- SHA_2_384_BIT_POS = 5,
- ///Bit position in the Digest bitmap for SHA_2_512.
- SHA_2_512_BIT_POS = 6,
-}
-
-/// Enum defining the bit position for each block mode type. Since block mode can be repeatable in
-/// key parameters, it is represented using a bitmap.
-#[allow(non_camel_case_types)]
-#[repr(i32)]
-enum BlockModeBitPosition {
- ///Bit position in the BlockMode bitmap for ECB.
- ECB_BIT_POS = 1,
- ///Bit position in the BlockMode bitmap for CBC.
- CBC_BIT_POS = 2,
- ///Bit position in the BlockMode bitmap for CTR.
- CTR_BIT_POS = 3,
- ///Bit position in the BlockMode bitmap for GCM.
- GCM_BIT_POS = 4,
-}
-
-/// Enum defining the bit position for each key purpose. Since key purpose can be repeatable in
-/// key parameters, it is represented using a bitmap.
-#[allow(non_camel_case_types)]
-#[repr(i32)]
-enum KeyPurposeBitPosition {
- ///Bit position in the KeyPurpose bitmap for Encrypt.
- ENCRYPT_BIT_POS = 1,
- ///Bit position in the KeyPurpose bitmap for Decrypt.
- DECRYPT_BIT_POS = 2,
- ///Bit position in the KeyPurpose bitmap for Sign.
- SIGN_BIT_POS = 3,
- ///Bit position in the KeyPurpose bitmap for Verify.
- VERIFY_BIT_POS = 4,
- ///Bit position in the KeyPurpose bitmap for Wrap Key.
- WRAP_KEY_BIT_POS = 5,
- ///Bit position in the KeyPurpose bitmap for Agree Key.
- AGREE_KEY_BIT_POS = 6,
- ///Bit position in the KeyPurpose bitmap for Attest Key.
- ATTEST_KEY_BIT_POS = 7,
-}
diff --git a/keystore2/src/metrics_store.rs b/keystore2/src/metrics_store.rs
new file mode 100644
index 0000000..741d65e
--- /dev/null
+++ b/keystore2/src/metrics_store.rs
@@ -0,0 +1,724 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This is the metrics store module of keystore. It does the following tasks:
+//! 1. Processes the data about keystore events asynchronously, and
+//! stores them in an in-memory store.
+//! 2. Returns the collected metrics when requested by the statsd proxy.
+
+use crate::error::get_error_code;
+use crate::globals::DB;
+use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
+use crate::operation::Outcome;
+use crate::remote_provisioning::get_pool_status;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, BlockMode::BlockMode, Digest::Digest, EcCurve::EcCurve,
+ HardwareAuthenticatorType::HardwareAuthenticatorType, KeyOrigin::KeyOrigin,
+ KeyParameter::KeyParameter, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
+ SecurityLevel::SecurityLevel,
+};
+use android_security_metrics::aidl::android::security::metrics::{
+ Algorithm::Algorithm as MetricsAlgorithm, AtomID::AtomID, CrashStats::CrashStats,
+ EcCurve::EcCurve as MetricsEcCurve,
+ HardwareAuthenticatorType::HardwareAuthenticatorType as MetricsHardwareAuthenticatorType,
+ KeyCreationWithAuthInfo::KeyCreationWithAuthInfo,
+ KeyCreationWithGeneralInfo::KeyCreationWithGeneralInfo,
+ KeyCreationWithPurposeAndModesInfo::KeyCreationWithPurposeAndModesInfo,
+ KeyOperationWithGeneralInfo::KeyOperationWithGeneralInfo,
+ KeyOperationWithPurposeAndModesInfo::KeyOperationWithPurposeAndModesInfo,
+ KeyOrigin::KeyOrigin as MetricsKeyOrigin, Keystore2AtomWithOverflow::Keystore2AtomWithOverflow,
+ KeystoreAtom::KeystoreAtom, KeystoreAtomPayload::KeystoreAtomPayload,
+ Outcome::Outcome as MetricsOutcome, Purpose::Purpose as MetricsPurpose,
+ RkpError::RkpError as MetricsRkpError, RkpErrorStats::RkpErrorStats,
+ RkpPoolStats::RkpPoolStats, SecurityLevel::SecurityLevel as MetricsSecurityLevel,
+ Storage::Storage as MetricsStorage,
+};
+use anyhow::{Context, Result};
+use lazy_static::lazy_static;
+use rustutils::system_properties::PropertyWatcherError;
+use std::collections::HashMap;
+use std::sync::Mutex;
+use std::time::{Duration, SystemTime, UNIX_EPOCH};
+
+// Note: Crash events are recorded at keystore restarts, based on the assumption that keystore only
+// gets restarted after a crash, during a boot cycle.
+const KEYSTORE_CRASH_COUNT_PROPERTY: &str = "keystore.crash_count";
+
+lazy_static! {
+ /// Singleton for MetricsStore.
+ pub static ref METRICS_STORE: MetricsStore = Default::default();
+}
+
+/// MetricsStore maintains an outer hash map indexed by atom id; each entry is an inner hash map
+/// that stores <atom object, count> pairs.
+/// Different atom objects can share the same atom id, depending on the values assigned to the
+/// fields of the atom objects. When an atom object with a particular combination of field values is
+/// inserted, we first check if that atom object is in the inner hash map. If one exists, count
+/// is incremented. Otherwise, the atom object is inserted with count = 1. Note that the count field
+/// of the atom object itself is set to 0 while the object is stored in the hash map. When the atom
+/// objects are queried by the atom id, the corresponding atom objects are retrieved, cloned, and
+/// the count field of the cloned objects is set to the corresponding value field in the inner hash
+/// map before the query result is returned.
+#[derive(Default)]
+pub struct MetricsStore {
+ metrics_store: Mutex<HashMap<AtomID, HashMap<KeystoreAtomPayload, i32>>>,
+}
+
+impl MetricsStore {
+ /// There are some atoms whose maximum cardinality exceeds the cardinality limits tolerated
+    /// by statsd. Statsd tolerates a cardinality between 200 and 300. Therefore, the in-memory
+    /// storage limit for a single atom is set to 250. If the number of atom objects created for a
+ /// particular atom exceeds this limit, an overflow atom object is created to track the ID of
+ /// such atoms.
+ const SINGLE_ATOM_STORE_MAX_SIZE: usize = 250;
+
+ /// Return a vector of atom objects with the given atom ID, if one exists in the metrics_store.
+    /// If no atom object exists in the metrics_store for the given atom ID, return an
+    /// empty vector.
+ pub fn get_atoms(&self, atom_id: AtomID) -> Result<Vec<KeystoreAtom>> {
+ // StorageStats is an original pulled atom (i.e. not a pushed atom converted to a
+        // pulled atom). Therefore, it is handled separately.
+ if AtomID::STORAGE_STATS == atom_id {
+ return pull_storage_stats();
+ }
+
+ // Process and return RKP pool stats.
+ if AtomID::RKP_POOL_STATS == atom_id {
+ return pull_attestation_pool_stats();
+ }
+
+ // Process keystore crash stats.
+ if AtomID::CRASH_STATS == atom_id {
+ return Ok(vec![KeystoreAtom {
+ payload: KeystoreAtomPayload::CrashStats(CrashStats {
+ count_of_crash_events: read_keystore_crash_count()?,
+ }),
+ ..Default::default()
+ }]);
+ }
+
+        // It is safe to unwrap here: the lock cannot be poisoned given its usage in this module,
+        // and it is not already held by this thread.
+ let metrics_store_guard = self.metrics_store.lock().unwrap();
+ metrics_store_guard.get(&atom_id).map_or(Ok(Vec::<KeystoreAtom>::new()), |atom_count_map| {
+ Ok(atom_count_map
+ .iter()
+ .map(|(atom, count)| KeystoreAtom { payload: atom.clone(), count: *count })
+ .collect())
+ })
+ }
+
+    /// Insert an atom object into the metrics_store, indexed by the atom ID.
+ fn insert_atom(&self, atom_id: AtomID, atom: KeystoreAtomPayload) {
+        // It is ok to unwrap here: the mutex cannot be poisoned given the way it is used in this
+        // module, and it is not already held by this thread.
+ let mut metrics_store_guard = self.metrics_store.lock().unwrap();
+ let atom_count_map = metrics_store_guard.entry(atom_id).or_insert_with(HashMap::new);
+ if atom_count_map.len() < MetricsStore::SINGLE_ATOM_STORE_MAX_SIZE {
+ let atom_count = atom_count_map.entry(atom).or_insert(0);
+ *atom_count += 1;
+ } else {
+ // Insert an overflow atom
+ let overflow_atom_count_map = metrics_store_guard
+ .entry(AtomID::KEYSTORE2_ATOM_WITH_OVERFLOW)
+ .or_insert_with(HashMap::new);
+
+ if overflow_atom_count_map.len() < MetricsStore::SINGLE_ATOM_STORE_MAX_SIZE {
+ let overflow_atom = Keystore2AtomWithOverflow { atom_id };
+ let atom_count = overflow_atom_count_map
+ .entry(KeystoreAtomPayload::Keystore2AtomWithOverflow(overflow_atom))
+ .or_insert(0);
+ *atom_count += 1;
+ } else {
+                // This should rarely, if ever, happen.
+ log::error!("In insert_atom: Maximum storage limit reached for overflow atom.")
+ }
+ }
+ }
+}
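+
+// A minimal illustrative sketch (editor's addition, not part of the original change): these
+// hypothetical tests exercise the <atom object, count> bookkeeping and the overflow behaviour
+// described above. Test names and field values are invented for illustration only.
+#[cfg(test)]
+mod metrics_store_sketch_tests {
+    use super::*;
+
+    #[test]
+    fn duplicate_payloads_are_counted_not_duplicated() {
+        let store = MetricsStore::default();
+        let payload = KeystoreAtomPayload::KeyCreationWithGeneralInfo(KeyCreationWithGeneralInfo {
+            algorithm: MetricsAlgorithm::RSA,
+            key_size: 2048,
+            key_origin: MetricsKeyOrigin::GENERATED,
+            error_code: 1,
+            ..Default::default()
+        });
+        // Inserting an identical payload twice yields a single stored entry whose count is bumped.
+        store.insert_atom(AtomID::KEY_CREATION_WITH_GENERAL_INFO, payload.clone());
+        store.insert_atom(AtomID::KEY_CREATION_WITH_GENERAL_INFO, payload);
+        let atoms = store.get_atoms(AtomID::KEY_CREATION_WITH_GENERAL_INFO).unwrap();
+        assert_eq!(atoms.len(), 1);
+        assert_eq!(atoms[0].count, 2);
+    }
+
+    #[test]
+    fn exceeding_the_per_atom_limit_records_an_overflow_atom() {
+        let store = MetricsStore::default();
+        // Insert one more distinct payload than SINGLE_ATOM_STORE_MAX_SIZE allows.
+        for key_size in 0..=(MetricsStore::SINGLE_ATOM_STORE_MAX_SIZE as i32) {
+            store.insert_atom(
+                AtomID::KEY_CREATION_WITH_GENERAL_INFO,
+                KeystoreAtomPayload::KeyCreationWithGeneralInfo(KeyCreationWithGeneralInfo {
+                    key_size,
+                    ..Default::default()
+                }),
+            );
+        }
+        let overflow = store.get_atoms(AtomID::KEYSTORE2_ATOM_WITH_OVERFLOW).unwrap();
+        assert_eq!(overflow.len(), 1);
+    }
+}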
+
+/// Log key creation events to be sent to statsd.
+pub fn log_key_creation_event_stats<U>(
+ sec_level: SecurityLevel,
+ key_params: &[KeyParameter],
+ result: &Result<U>,
+) {
+ let (
+ key_creation_with_general_info,
+ key_creation_with_auth_info,
+ key_creation_with_purpose_and_modes_info,
+ ) = process_key_creation_event_stats(sec_level, key_params, result);
+
+ METRICS_STORE
+ .insert_atom(AtomID::KEY_CREATION_WITH_GENERAL_INFO, key_creation_with_general_info);
+ METRICS_STORE.insert_atom(AtomID::KEY_CREATION_WITH_AUTH_INFO, key_creation_with_auth_info);
+ METRICS_STORE.insert_atom(
+ AtomID::KEY_CREATION_WITH_PURPOSE_AND_MODES_INFO,
+ key_creation_with_purpose_and_modes_info,
+ );
+}
+
+// Process the statistics related to key creations and return the three atom objects related to key
+// creations: i) KeyCreationWithGeneralInfo ii) KeyCreationWithAuthInfo
+// iii) KeyCreationWithPurposeAndModesInfo
+fn process_key_creation_event_stats<U>(
+ sec_level: SecurityLevel,
+ key_params: &[KeyParameter],
+ result: &Result<U>,
+) -> (KeystoreAtomPayload, KeystoreAtomPayload, KeystoreAtomPayload) {
+ // In the default atom objects, fields represented by bitmaps and i32 fields
+    // will take 0, except error_code, which defaults to 1 (indicating NO_ERROR), and key_size
+    // and auth_time_out, which default to -1.
+ // The boolean fields are set to false by default.
+ // Some keymint enums do have 0 as an enum variant value. In such cases, the corresponding
+ // enum variant value in atoms.proto is incremented by 1, in order to have 0 as the reserved
+ // value for unspecified fields.
+ let mut key_creation_with_general_info = KeyCreationWithGeneralInfo {
+ algorithm: MetricsAlgorithm::ALGORITHM_UNSPECIFIED,
+ key_size: -1,
+ ec_curve: MetricsEcCurve::EC_CURVE_UNSPECIFIED,
+ key_origin: MetricsKeyOrigin::ORIGIN_UNSPECIFIED,
+ error_code: 1,
+ // Default for bool is false (for attestation_requested field).
+ ..Default::default()
+ };
+
+ let mut key_creation_with_auth_info = KeyCreationWithAuthInfo {
+ user_auth_type: MetricsHardwareAuthenticatorType::AUTH_TYPE_UNSPECIFIED,
+ log10_auth_key_timeout_seconds: -1,
+ security_level: MetricsSecurityLevel::SECURITY_LEVEL_UNSPECIFIED,
+ };
+
+ let mut key_creation_with_purpose_and_modes_info = KeyCreationWithPurposeAndModesInfo {
+ algorithm: MetricsAlgorithm::ALGORITHM_UNSPECIFIED,
+ // Default for i32 is 0 (for the remaining bitmap fields).
+ ..Default::default()
+ };
+
+ if let Err(ref e) = result {
+ key_creation_with_general_info.error_code = get_error_code(e);
+ }
+
+ key_creation_with_auth_info.security_level = process_security_level(sec_level);
+
+ for key_param in key_params.iter().map(KsKeyParamValue::from) {
+ match key_param {
+ KsKeyParamValue::Algorithm(a) => {
+ let algorithm = match a {
+ Algorithm::RSA => MetricsAlgorithm::RSA,
+ Algorithm::EC => MetricsAlgorithm::EC,
+ Algorithm::AES => MetricsAlgorithm::AES,
+ Algorithm::TRIPLE_DES => MetricsAlgorithm::TRIPLE_DES,
+ Algorithm::HMAC => MetricsAlgorithm::HMAC,
+ _ => MetricsAlgorithm::ALGORITHM_UNSPECIFIED,
+ };
+ key_creation_with_general_info.algorithm = algorithm;
+ key_creation_with_purpose_and_modes_info.algorithm = algorithm;
+ }
+ KsKeyParamValue::KeySize(s) => {
+ key_creation_with_general_info.key_size = s;
+ }
+ KsKeyParamValue::KeyOrigin(o) => {
+ key_creation_with_general_info.key_origin = match o {
+ KeyOrigin::GENERATED => MetricsKeyOrigin::GENERATED,
+ KeyOrigin::DERIVED => MetricsKeyOrigin::DERIVED,
+ KeyOrigin::IMPORTED => MetricsKeyOrigin::IMPORTED,
+ KeyOrigin::RESERVED => MetricsKeyOrigin::RESERVED,
+ KeyOrigin::SECURELY_IMPORTED => MetricsKeyOrigin::SECURELY_IMPORTED,
+ _ => MetricsKeyOrigin::ORIGIN_UNSPECIFIED,
+ }
+ }
+ KsKeyParamValue::HardwareAuthenticatorType(a) => {
+ key_creation_with_auth_info.user_auth_type = match a {
+ HardwareAuthenticatorType::NONE => MetricsHardwareAuthenticatorType::NONE,
+ HardwareAuthenticatorType::PASSWORD => {
+ MetricsHardwareAuthenticatorType::PASSWORD
+ }
+ HardwareAuthenticatorType::FINGERPRINT => {
+ MetricsHardwareAuthenticatorType::FINGERPRINT
+ }
+ HardwareAuthenticatorType::ANY => MetricsHardwareAuthenticatorType::ANY,
+ _ => MetricsHardwareAuthenticatorType::AUTH_TYPE_UNSPECIFIED,
+ }
+ }
+ KsKeyParamValue::AuthTimeout(t) => {
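+                // The raw timeout is bucketed to its order of magnitude to keep statsd
+                // cardinality low; e.g. a 300 second timeout is recorded as 2, since
+                // log10(300) ~= 2.48 is truncated by the cast below. (Editor's note.)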
+ key_creation_with_auth_info.log10_auth_key_timeout_seconds =
+ f32::log10(t as f32) as i32;
+ }
+ KsKeyParamValue::PaddingMode(p) => {
+ compute_padding_mode_bitmap(
+ &mut key_creation_with_purpose_and_modes_info.padding_mode_bitmap,
+ p,
+ );
+ }
+ KsKeyParamValue::Digest(d) => {
+ compute_digest_bitmap(
+ &mut key_creation_with_purpose_and_modes_info.digest_bitmap,
+ d,
+ );
+ }
+ KsKeyParamValue::BlockMode(b) => {
+ compute_block_mode_bitmap(
+ &mut key_creation_with_purpose_and_modes_info.block_mode_bitmap,
+ b,
+ );
+ }
+ KsKeyParamValue::KeyPurpose(k) => {
+ compute_purpose_bitmap(
+ &mut key_creation_with_purpose_and_modes_info.purpose_bitmap,
+ k,
+ );
+ }
+ KsKeyParamValue::EcCurve(e) => {
+ key_creation_with_general_info.ec_curve = match e {
+ EcCurve::P_224 => MetricsEcCurve::P_224,
+ EcCurve::P_256 => MetricsEcCurve::P_256,
+ EcCurve::P_384 => MetricsEcCurve::P_384,
+ EcCurve::P_521 => MetricsEcCurve::P_521,
+ _ => MetricsEcCurve::EC_CURVE_UNSPECIFIED,
+ }
+ }
+ KsKeyParamValue::AttestationChallenge(_) => {
+ key_creation_with_general_info.attestation_requested = true;
+ }
+ _ => {}
+ }
+ }
+ if key_creation_with_general_info.algorithm == MetricsAlgorithm::EC {
+ // Do not record key sizes if Algorithm = EC, in order to reduce cardinality.
+ key_creation_with_general_info.key_size = -1;
+ }
+
+ (
+ KeystoreAtomPayload::KeyCreationWithGeneralInfo(key_creation_with_general_info),
+ KeystoreAtomPayload::KeyCreationWithAuthInfo(key_creation_with_auth_info),
+ KeystoreAtomPayload::KeyCreationWithPurposeAndModesInfo(
+ key_creation_with_purpose_and_modes_info,
+ ),
+ )
+}
+
+/// Log key operation events to be sent to statsd.
+pub fn log_key_operation_event_stats(
+ sec_level: SecurityLevel,
+ key_purpose: KeyPurpose,
+ op_params: &[KeyParameter],
+ op_outcome: &Outcome,
+ key_upgraded: bool,
+) {
+ let (key_operation_with_general_info, key_operation_with_purpose_and_modes_info) =
+ process_key_operation_event_stats(
+ sec_level,
+ key_purpose,
+ op_params,
+ op_outcome,
+ key_upgraded,
+ );
+ METRICS_STORE
+ .insert_atom(AtomID::KEY_OPERATION_WITH_GENERAL_INFO, key_operation_with_general_info);
+ METRICS_STORE.insert_atom(
+ AtomID::KEY_OPERATION_WITH_PURPOSE_AND_MODES_INFO,
+ key_operation_with_purpose_and_modes_info,
+ );
+}
+
+// Process the statistics related to key operations and return the two atom objects related to key
+// operations: i) KeyOperationWithGeneralInfo ii) KeyOperationWithPurposeAndModesInfo
+fn process_key_operation_event_stats(
+ sec_level: SecurityLevel,
+ key_purpose: KeyPurpose,
+ op_params: &[KeyParameter],
+ op_outcome: &Outcome,
+ key_upgraded: bool,
+) -> (KeystoreAtomPayload, KeystoreAtomPayload) {
+ let mut key_operation_with_general_info = KeyOperationWithGeneralInfo {
+ outcome: MetricsOutcome::OUTCOME_UNSPECIFIED,
+ error_code: 1,
+ security_level: MetricsSecurityLevel::SECURITY_LEVEL_UNSPECIFIED,
+ // Default for bool is false (for key_upgraded field).
+ ..Default::default()
+ };
+
+ let mut key_operation_with_purpose_and_modes_info = KeyOperationWithPurposeAndModesInfo {
+ purpose: MetricsPurpose::KEY_PURPOSE_UNSPECIFIED,
+ // Default for i32 is 0 (for the remaining bitmap fields).
+ ..Default::default()
+ };
+
+ key_operation_with_general_info.security_level = process_security_level(sec_level);
+
+ key_operation_with_general_info.key_upgraded = key_upgraded;
+
+ key_operation_with_purpose_and_modes_info.purpose = match key_purpose {
+ KeyPurpose::ENCRYPT => MetricsPurpose::ENCRYPT,
+ KeyPurpose::DECRYPT => MetricsPurpose::DECRYPT,
+ KeyPurpose::SIGN => MetricsPurpose::SIGN,
+ KeyPurpose::VERIFY => MetricsPurpose::VERIFY,
+ KeyPurpose::WRAP_KEY => MetricsPurpose::WRAP_KEY,
+ KeyPurpose::AGREE_KEY => MetricsPurpose::AGREE_KEY,
+ KeyPurpose::ATTEST_KEY => MetricsPurpose::ATTEST_KEY,
+ _ => MetricsPurpose::KEY_PURPOSE_UNSPECIFIED,
+ };
+
+ key_operation_with_general_info.outcome = match op_outcome {
+ Outcome::Unknown | Outcome::Dropped => MetricsOutcome::DROPPED,
+ Outcome::Success => MetricsOutcome::SUCCESS,
+ Outcome::Abort => MetricsOutcome::ABORT,
+ Outcome::Pruned => MetricsOutcome::PRUNED,
+ Outcome::ErrorCode(e) => {
+ key_operation_with_general_info.error_code = e.0;
+ MetricsOutcome::ERROR
+ }
+ };
+
+ for key_param in op_params.iter().map(KsKeyParamValue::from) {
+ match key_param {
+ KsKeyParamValue::PaddingMode(p) => {
+ compute_padding_mode_bitmap(
+ &mut key_operation_with_purpose_and_modes_info.padding_mode_bitmap,
+ p,
+ );
+ }
+ KsKeyParamValue::Digest(d) => {
+ compute_digest_bitmap(
+ &mut key_operation_with_purpose_and_modes_info.digest_bitmap,
+ d,
+ );
+ }
+ KsKeyParamValue::BlockMode(b) => {
+ compute_block_mode_bitmap(
+ &mut key_operation_with_purpose_and_modes_info.block_mode_bitmap,
+ b,
+ );
+ }
+ _ => {}
+ }
+ }
+
+ (
+ KeystoreAtomPayload::KeyOperationWithGeneralInfo(key_operation_with_general_info),
+ KeystoreAtomPayload::KeyOperationWithPurposeAndModesInfo(
+ key_operation_with_purpose_and_modes_info,
+ ),
+ )
+}
+
+fn process_security_level(sec_level: SecurityLevel) -> MetricsSecurityLevel {
+ match sec_level {
+ SecurityLevel::SOFTWARE => MetricsSecurityLevel::SECURITY_LEVEL_SOFTWARE,
+ SecurityLevel::TRUSTED_ENVIRONMENT => {
+ MetricsSecurityLevel::SECURITY_LEVEL_TRUSTED_ENVIRONMENT
+ }
+ SecurityLevel::STRONGBOX => MetricsSecurityLevel::SECURITY_LEVEL_STRONGBOX,
+ SecurityLevel::KEYSTORE => MetricsSecurityLevel::SECURITY_LEVEL_KEYSTORE,
+ _ => MetricsSecurityLevel::SECURITY_LEVEL_UNSPECIFIED,
+ }
+}
+
+fn compute_padding_mode_bitmap(padding_mode_bitmap: &mut i32, padding_mode: PaddingMode) {
+ match padding_mode {
+ PaddingMode::NONE => {
+ *padding_mode_bitmap |= 1 << PaddingModeBitPosition::NONE_BIT_POSITION as i32;
+ }
+ PaddingMode::RSA_OAEP => {
+ *padding_mode_bitmap |= 1 << PaddingModeBitPosition::RSA_OAEP_BIT_POS as i32;
+ }
+ PaddingMode::RSA_PSS => {
+ *padding_mode_bitmap |= 1 << PaddingModeBitPosition::RSA_PSS_BIT_POS as i32;
+ }
+ PaddingMode::RSA_PKCS1_1_5_ENCRYPT => {
+ *padding_mode_bitmap |=
+ 1 << PaddingModeBitPosition::RSA_PKCS1_1_5_ENCRYPT_BIT_POS as i32;
+ }
+ PaddingMode::RSA_PKCS1_1_5_SIGN => {
+ *padding_mode_bitmap |= 1 << PaddingModeBitPosition::RSA_PKCS1_1_5_SIGN_BIT_POS as i32;
+ }
+ PaddingMode::PKCS7 => {
+ *padding_mode_bitmap |= 1 << PaddingModeBitPosition::PKCS7_BIT_POS as i32;
+ }
+ _ => {}
+ }
+}
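+
+// Example (editor's illustration): calling compute_padding_mode_bitmap above for both
+// PaddingMode::NONE and PaddingMode::PKCS7 sets bits 0 and 5, leaving the bitmap at
+// 0b10_0001 = 33.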
+
+fn compute_digest_bitmap(digest_bitmap: &mut i32, digest: Digest) {
+ match digest {
+ Digest::NONE => {
+ *digest_bitmap |= 1 << DigestBitPosition::NONE_BIT_POSITION as i32;
+ }
+ Digest::MD5 => {
+ *digest_bitmap |= 1 << DigestBitPosition::MD5_BIT_POS as i32;
+ }
+ Digest::SHA1 => {
+ *digest_bitmap |= 1 << DigestBitPosition::SHA_1_BIT_POS as i32;
+ }
+ Digest::SHA_2_224 => {
+ *digest_bitmap |= 1 << DigestBitPosition::SHA_2_224_BIT_POS as i32;
+ }
+ Digest::SHA_2_256 => {
+ *digest_bitmap |= 1 << DigestBitPosition::SHA_2_256_BIT_POS as i32;
+ }
+ Digest::SHA_2_384 => {
+ *digest_bitmap |= 1 << DigestBitPosition::SHA_2_384_BIT_POS as i32;
+ }
+ Digest::SHA_2_512 => {
+ *digest_bitmap |= 1 << DigestBitPosition::SHA_2_512_BIT_POS as i32;
+ }
+ _ => {}
+ }
+}
+
+fn compute_block_mode_bitmap(block_mode_bitmap: &mut i32, block_mode: BlockMode) {
+ match block_mode {
+ BlockMode::ECB => {
+ *block_mode_bitmap |= 1 << BlockModeBitPosition::ECB_BIT_POS as i32;
+ }
+ BlockMode::CBC => {
+ *block_mode_bitmap |= 1 << BlockModeBitPosition::CBC_BIT_POS as i32;
+ }
+ BlockMode::CTR => {
+ *block_mode_bitmap |= 1 << BlockModeBitPosition::CTR_BIT_POS as i32;
+ }
+ BlockMode::GCM => {
+ *block_mode_bitmap |= 1 << BlockModeBitPosition::GCM_BIT_POS as i32;
+ }
+ _ => {}
+ }
+}
+
+fn compute_purpose_bitmap(purpose_bitmap: &mut i32, purpose: KeyPurpose) {
+ match purpose {
+ KeyPurpose::ENCRYPT => {
+ *purpose_bitmap |= 1 << KeyPurposeBitPosition::ENCRYPT_BIT_POS as i32;
+ }
+ KeyPurpose::DECRYPT => {
+ *purpose_bitmap |= 1 << KeyPurposeBitPosition::DECRYPT_BIT_POS as i32;
+ }
+ KeyPurpose::SIGN => {
+ *purpose_bitmap |= 1 << KeyPurposeBitPosition::SIGN_BIT_POS as i32;
+ }
+ KeyPurpose::VERIFY => {
+ *purpose_bitmap |= 1 << KeyPurposeBitPosition::VERIFY_BIT_POS as i32;
+ }
+ KeyPurpose::WRAP_KEY => {
+ *purpose_bitmap |= 1 << KeyPurposeBitPosition::WRAP_KEY_BIT_POS as i32;
+ }
+ KeyPurpose::AGREE_KEY => {
+ *purpose_bitmap |= 1 << KeyPurposeBitPosition::AGREE_KEY_BIT_POS as i32;
+ }
+ KeyPurpose::ATTEST_KEY => {
+ *purpose_bitmap |= 1 << KeyPurposeBitPosition::ATTEST_KEY_BIT_POS as i32;
+ }
+ _ => {}
+ }
+}
+
+fn pull_storage_stats() -> Result<Vec<KeystoreAtom>> {
+ let mut atom_vec: Vec<KeystoreAtom> = Vec::new();
+ let mut append = |stat| {
+ match stat {
+ Ok(s) => atom_vec.push(KeystoreAtom {
+ payload: KeystoreAtomPayload::StorageStats(s),
+ ..Default::default()
+ }),
+ Err(error) => {
+                log::error!("In pull_storage_stats: Error getting storage stat: {}", error)
+ }
+ };
+ };
+ DB.with(|db| {
+ let mut db = db.borrow_mut();
+ append(db.get_storage_stat(MetricsStorage::DATABASE));
+ append(db.get_storage_stat(MetricsStorage::KEY_ENTRY));
+ append(db.get_storage_stat(MetricsStorage::KEY_ENTRY_ID_INDEX));
+ append(db.get_storage_stat(MetricsStorage::KEY_ENTRY_DOMAIN_NAMESPACE_INDEX));
+ append(db.get_storage_stat(MetricsStorage::BLOB_ENTRY));
+ append(db.get_storage_stat(MetricsStorage::BLOB_ENTRY_KEY_ENTRY_ID_INDEX));
+ append(db.get_storage_stat(MetricsStorage::KEY_PARAMETER));
+ append(db.get_storage_stat(MetricsStorage::KEY_PARAMETER_KEY_ENTRY_ID_INDEX));
+ append(db.get_storage_stat(MetricsStorage::KEY_METADATA));
+ append(db.get_storage_stat(MetricsStorage::KEY_METADATA_KEY_ENTRY_ID_INDEX));
+ append(db.get_storage_stat(MetricsStorage::GRANT));
+ append(db.get_storage_stat(MetricsStorage::AUTH_TOKEN));
+ append(db.get_storage_stat(MetricsStorage::BLOB_METADATA));
+ append(db.get_storage_stat(MetricsStorage::BLOB_METADATA_BLOB_ENTRY_ID_INDEX));
+ });
+ Ok(atom_vec)
+}
+
+fn pull_attestation_pool_stats() -> Result<Vec<KeystoreAtom>> {
+ let mut atoms = Vec::<KeystoreAtom>::new();
+ for sec_level in &[SecurityLevel::TRUSTED_ENVIRONMENT, SecurityLevel::STRONGBOX] {
+ let expired_by = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .unwrap_or_else(|_| Duration::new(0, 0))
+ .as_secs() as i64;
+
+ let result = get_pool_status(expired_by, *sec_level);
+
+ if let Ok(pool_status) = result {
+ let rkp_pool_stats = RkpPoolStats {
+ security_level: process_security_level(*sec_level),
+ expiring: pool_status.expiring,
+ unassigned: pool_status.unassigned,
+ attested: pool_status.attested,
+ total: pool_status.total,
+ };
+ atoms.push(KeystoreAtom {
+ payload: KeystoreAtomPayload::RkpPoolStats(rkp_pool_stats),
+ ..Default::default()
+ });
+ } else {
+ log::error!(
+ concat!(
+ "In pull_attestation_pool_stats: Failed to retrieve pool status",
+ " for security level: {:?}"
+ ),
+ sec_level
+ );
+ }
+ }
+ Ok(atoms)
+}
+
+/// Log error events related to Remote Key Provisioning (RKP).
+pub fn log_rkp_error_stats(rkp_error: MetricsRkpError) {
+ let rkp_error_stats = KeystoreAtomPayload::RkpErrorStats(RkpErrorStats { rkpError: rkp_error });
+ METRICS_STORE.insert_atom(AtomID::RKP_ERROR_STATS, rkp_error_stats);
+}
+
+/// This function tries to read and update the system property: keystore.crash_count.
+/// If the property is absent, it sets the property with value 0. If the property is present, it
+/// increments the value. This helps track keystore crashes internally.
+pub fn update_keystore_crash_sysprop() {
+ let crash_count = read_keystore_crash_count();
+ let new_count = match crash_count {
+ Ok(count) => count + 1,
+ Err(error) => {
+            // If the property is absent, this is the first startup during this boot cycle.
+            // Proceed to write the system property with value 0. Otherwise, log and return.
+ if !matches!(
+ error.root_cause().downcast_ref::<PropertyWatcherError>(),
+ Some(PropertyWatcherError::SystemPropertyAbsent)
+ ) {
+ log::warn!(
+ concat!(
+ "In update_keystore_crash_sysprop: ",
+                        "Failed to read the existing system property due to: {:?}. ",
+ "Therefore, keystore crashes will not be logged."
+ ),
+ error
+ );
+ return;
+ }
+ 0
+ }
+ };
+
+ if let Err(e) =
+ rustutils::system_properties::write(KEYSTORE_CRASH_COUNT_PROPERTY, &new_count.to_string())
+ {
+ log::error!(
+ concat!(
+                "In update_keystore_crash_sysprop: ",
+ "Failed to write the system property due to error: {:?}"
+ ),
+ e
+ );
+ }
+}
+
+/// Read the system property: keystore.crash_count.
+pub fn read_keystore_crash_count() -> Result<i32> {
+    rustutils::system_properties::read(KEYSTORE_CRASH_COUNT_PROPERTY)
+        .context("In read_keystore_crash_count: Failed to read property.")?
+ .parse::<i32>()
+ .map_err(std::convert::Into::into)
+}
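+
+// Editor's illustration of the lifecycle above: on the first start of a boot cycle the property
+// is absent, so 0 is written; if keystore is restarted later in the same boot (assumed to be due
+// to a crash), the stored value is read and incremented, so the property then holds 1, and so on.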
+
+/// Enum defining the bit position for each padding mode. Since padding mode can be repeatable, it
+/// is represented using a bitmap.
+#[allow(non_camel_case_types)]
+#[repr(i32)]
+enum PaddingModeBitPosition {
+ ///Bit position in the PaddingMode bitmap for NONE.
+ NONE_BIT_POSITION = 0,
+ ///Bit position in the PaddingMode bitmap for RSA_OAEP.
+ RSA_OAEP_BIT_POS = 1,
+ ///Bit position in the PaddingMode bitmap for RSA_PSS.
+ RSA_PSS_BIT_POS = 2,
+ ///Bit position in the PaddingMode bitmap for RSA_PKCS1_1_5_ENCRYPT.
+ RSA_PKCS1_1_5_ENCRYPT_BIT_POS = 3,
+ ///Bit position in the PaddingMode bitmap for RSA_PKCS1_1_5_SIGN.
+ RSA_PKCS1_1_5_SIGN_BIT_POS = 4,
+    ///Bit position in the PaddingMode bitmap for PKCS7.
+ PKCS7_BIT_POS = 5,
+}
+
+/// Enum defining the bit position for each digest type. Since digest can be repeatable in
+/// key parameters, it is represented using a bitmap.
+#[allow(non_camel_case_types)]
+#[repr(i32)]
+enum DigestBitPosition {
+ ///Bit position in the Digest bitmap for NONE.
+ NONE_BIT_POSITION = 0,
+ ///Bit position in the Digest bitmap for MD5.
+ MD5_BIT_POS = 1,
+ ///Bit position in the Digest bitmap for SHA1.
+ SHA_1_BIT_POS = 2,
+ ///Bit position in the Digest bitmap for SHA_2_224.
+ SHA_2_224_BIT_POS = 3,
+ ///Bit position in the Digest bitmap for SHA_2_256.
+ SHA_2_256_BIT_POS = 4,
+ ///Bit position in the Digest bitmap for SHA_2_384.
+ SHA_2_384_BIT_POS = 5,
+ ///Bit position in the Digest bitmap for SHA_2_512.
+ SHA_2_512_BIT_POS = 6,
+}
+
+/// Enum defining the bit position for each block mode type. Since block mode can be repeatable in
+/// key parameters, it is represented using a bitmap.
+#[allow(non_camel_case_types)]
+#[repr(i32)]
+enum BlockModeBitPosition {
+ ///Bit position in the BlockMode bitmap for ECB.
+ ECB_BIT_POS = 1,
+ ///Bit position in the BlockMode bitmap for CBC.
+ CBC_BIT_POS = 2,
+ ///Bit position in the BlockMode bitmap for CTR.
+ CTR_BIT_POS = 3,
+ ///Bit position in the BlockMode bitmap for GCM.
+ GCM_BIT_POS = 4,
+}
+
+/// Enum defining the bit position for each key purpose. Since key purpose can be repeatable in
+/// key parameters, it is represented using a bitmap.
+#[allow(non_camel_case_types)]
+#[repr(i32)]
+enum KeyPurposeBitPosition {
+ ///Bit position in the KeyPurpose bitmap for Encrypt.
+ ENCRYPT_BIT_POS = 1,
+ ///Bit position in the KeyPurpose bitmap for Decrypt.
+ DECRYPT_BIT_POS = 2,
+ ///Bit position in the KeyPurpose bitmap for Sign.
+ SIGN_BIT_POS = 3,
+ ///Bit position in the KeyPurpose bitmap for Verify.
+ VERIFY_BIT_POS = 4,
+ ///Bit position in the KeyPurpose bitmap for Wrap Key.
+ WRAP_KEY_BIT_POS = 5,
+ ///Bit position in the KeyPurpose bitmap for Agree Key.
+ AGREE_KEY_BIT_POS = 6,
+ ///Bit position in the KeyPurpose bitmap for Attest Key.
+ ATTEST_KEY_BIT_POS = 7,
+}
diff --git a/keystore2/src/operation.rs b/keystore2/src/operation.rs
index 8d7ad0a..7e08f4e 100644
--- a/keystore2/src/operation.rs
+++ b/keystore2/src/operation.rs
@@ -127,13 +127,13 @@
use crate::enforcements::AuthInfo;
use crate::error::{map_err_with, map_km_error, map_or_log_err, Error, ErrorCode, ResponseCode};
-use crate::metrics::log_key_operation_event_stats;
-use crate::utils::{watchdog as wd, Asp};
+use crate::metrics_store::log_key_operation_event_stats;
+use crate::utils::watchdog as wd;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
IKeyMintOperation::IKeyMintOperation, KeyParameter::KeyParameter, KeyPurpose::KeyPurpose,
SecurityLevel::SecurityLevel,
};
-use android_hardware_security_keymint::binder::BinderFeatures;
+use android_hardware_security_keymint::binder::{BinderFeatures, Strong};
use android_system_keystore2::aidl::android::system::keystore2::{
IKeystoreOperation::BnKeystoreOperation, IKeystoreOperation::IKeystoreOperation,
};
@@ -170,7 +170,7 @@
pub struct Operation {
// The index of this operation in the OperationDb.
index: usize,
- km_op: Asp,
+ km_op: Strong<dyn IKeyMintOperation>,
last_usage: Mutex<Instant>,
outcome: Mutex<Outcome>,
owner: u32, // Uid of the operation's owner.
@@ -222,7 +222,7 @@
) -> Self {
Self {
index,
- km_op: Asp::new(km_op.as_binder()),
+ km_op,
last_usage: Mutex::new(Instant::now()),
outcome: Mutex::new(Outcome::Unknown),
owner,
@@ -282,19 +282,10 @@
}
*locked_outcome = Outcome::Pruned;
- let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
- match self.km_op.get_interface() {
- Ok(km_op) => km_op,
- Err(e) => {
- log::error!("In prune: Failed to get KeyMintOperation interface.\n {:?}", e);
- return Err(Error::sys());
- }
- };
-
let _wp = wd::watch_millis("In Operation::prune: calling abort()", 500);
// We abort the operation. If there was an error we log it but ignore it.
- if let Err(e) = map_km_error(km_op.abort()) {
+ if let Err(e) = map_km_error(self.km_op.abort()) {
log::error!("In prune: KeyMint::abort failed with {:?}.", e);
}
@@ -362,9 +353,6 @@
Self::check_input_length(aad_input).context("In update_aad")?;
self.touch();
- let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
- self.km_op.get_interface().context("In update: Failed to get KeyMintOperation.")?;
-
let (hat, tst) = self
.auth_info
.lock()
@@ -374,7 +362,7 @@
self.update_outcome(&mut *outcome, {
let _wp = wd::watch_millis("Operation::update_aad: calling updateAad", 500);
- map_km_error(km_op.updateAad(aad_input, hat.as_ref(), tst.as_ref()))
+ map_km_error(self.km_op.updateAad(aad_input, hat.as_ref(), tst.as_ref()))
})
.context("In update_aad: KeyMint::update failed.")?;
@@ -388,9 +376,6 @@
Self::check_input_length(input).context("In update")?;
self.touch();
- let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
- self.km_op.get_interface().context("In update: Failed to get KeyMintOperation.")?;
-
let (hat, tst) = self
.auth_info
.lock()
@@ -401,7 +386,7 @@
let output = self
.update_outcome(&mut *outcome, {
let _wp = wd::watch_millis("Operation::update: calling update", 500);
- map_km_error(km_op.update(input, hat.as_ref(), tst.as_ref()))
+ map_km_error(self.km_op.update(input, hat.as_ref(), tst.as_ref()))
})
.context("In update: KeyMint::update failed.")?;
@@ -421,9 +406,6 @@
}
self.touch();
- let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
- self.km_op.get_interface().context("In finish: Failed to get KeyMintOperation.")?;
-
let (hat, tst, confirmation_token) = self
.auth_info
.lock()
@@ -434,7 +416,7 @@
let output = self
.update_outcome(&mut *outcome, {
let _wp = wd::watch_millis("Operation::finish: calling finish", 500);
- map_km_error(km_op.finish(
+ map_km_error(self.km_op.finish(
input,
signature,
hat.as_ref(),
@@ -462,12 +444,10 @@
fn abort(&self, outcome: Outcome) -> Result<()> {
let mut locked_outcome = self.check_active().context("In abort")?;
*locked_outcome = outcome;
- let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
- self.km_op.get_interface().context("In abort: Failed to get KeyMintOperation.")?;
{
let _wp = wd::watch_millis("Operation::abort: calling abort", 500);
- map_km_error(km_op.abort()).context("In abort: KeyMint::abort failed.")
+ map_km_error(self.km_op.abort()).context("In abort: KeyMint::abort failed.")
}
}
}
diff --git a/keystore2/src/permission.rs b/keystore2/src/permission.rs
index e7999bc..4392acf 100644
--- a/keystore2/src/permission.rs
+++ b/keystore2/src/permission.rs
@@ -158,7 +158,7 @@
impl $name {
/// Returns a string representation of the permission as required by
/// `selinux::check_access`.
- pub fn to_selinux(&self) -> &'static str {
+ pub fn to_selinux(self) -> &'static str {
match self {
Self($aidl_name::$def_name) => stringify!($def_selinux_name),
$(Self($aidl_name::$element_name) => stringify!($selinux_name),)*
@@ -266,7 +266,7 @@
impl $name {
/// Returns a string representation of the permission as required by
/// `selinux::check_access`.
- pub fn to_selinux(&self) -> &'static str {
+ pub fn to_selinux(self) -> &'static str {
match self {
Self::$def_name => stringify!($def_selinux_name),
$(Self::$element_name => stringify!($selinux_name),)*
@@ -315,6 +315,10 @@
EarlyBootEnded = 0x800, selinux name: early_boot_ended;
/// Checked when IKeystoreMaintenance::onDeviceOffBody is called.
ReportOffBody = 0x1000, selinux name: report_off_body;
+    /// Checked when IKeystoreMetrics::pullMetrics is called.
+ PullMetrics = 0x2000, selinux name: pull_metrics;
+ /// Checked when IKeystoreMaintenance::deleteAllKeys is called.
+ DeleteAllKeys = 0x4000, selinux name: delete_all_keys;
}
);
@@ -850,23 +854,19 @@
blob: None,
};
+ assert!(check_key_permission(0, &sctx, KeyPerm::use_(), &key, &None).is_ok());
+ assert!(check_key_permission(0, &sctx, KeyPerm::delete(), &key, &None).is_ok());
+ assert!(check_key_permission(0, &sctx, KeyPerm::get_info(), &key, &None).is_ok());
+ assert!(check_key_permission(0, &sctx, KeyPerm::rebind(), &key, &None).is_ok());
+ assert!(check_key_permission(0, &sctx, KeyPerm::update(), &key, &None).is_ok());
+
if is_su {
- assert!(check_key_permission(0, &sctx, KeyPerm::use_(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::delete(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::get_info(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::rebind(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::update(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::grant(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::manage_blob(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::use_dev_id(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::gen_unique_id(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::req_forced_op(), &key, &None).is_ok());
} else {
- assert!(check_key_permission(0, &sctx, KeyPerm::use_(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::delete(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::get_info(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::rebind(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::update(), &key, &None).is_ok());
assert_perm_failed!(check_key_permission(0, &sctx, KeyPerm::grant(), &key, &None));
assert_perm_failed!(check_key_permission(
0,
diff --git a/keystore2/src/raw_device.rs b/keystore2/src/raw_device.rs
index cd54915..991535f 100644
--- a/keystore2/src/raw_device.rs
+++ b/keystore2/src/raw_device.rs
@@ -62,11 +62,11 @@
/// Get a [`KeyMintDevice`] for the given [`SecurityLevel`]
pub fn get(security_level: SecurityLevel) -> Result<KeyMintDevice> {
- let (asp, hw_info, km_uuid) = get_keymint_device(&security_level)
+ let (km_dev, hw_info, km_uuid) = get_keymint_device(&security_level)
.context("In KeyMintDevice::get: get_keymint_device failed")?;
Ok(KeyMintDevice {
- km_dev: asp.get_interface()?,
+ km_dev,
km_uuid,
version: hw_info.versionNumber,
security_level: hw_info.securityLevel,
@@ -120,7 +120,7 @@
blob_metadata.add(BlobMetaEntry::KmUuid(self.km_uuid));
db.store_new_key(
- &key_desc,
+ key_desc,
key_type,
&key_parameters,
&(&creation_result.keyBlob, &blob_metadata),
@@ -148,7 +148,7 @@
key_desc: &KeyDescriptor,
key_type: KeyType,
) -> Result<(KeyIdGuard, KeyEntry)> {
- db.load_key_entry(&key_desc, key_type, KeyEntryLoadBits::KM, AID_KEYSTORE, |_, _| Ok(()))
+ db.load_key_entry(key_desc, key_type, KeyEntryLoadBits::KM, AID_KEYSTORE, |_, _| Ok(()))
.context("In lookup_from_desc: load_key_entry failed.")
}
@@ -228,8 +228,8 @@
};
}
- self.create_and_store_key(db, &key_desc, key_type, |km_dev| {
- km_dev.generateKey(¶ms, None)
+ self.create_and_store_key(db, key_desc, key_type, |km_dev| {
+ km_dev.generateKey(params, None)
})
.context("In lookup_or_generate_key: generate_and_store_key failed")?;
Self::lookup_from_desc(db, key_desc, key_type)
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index 1f3f8e8..a19462b 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -30,7 +30,7 @@
};
use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
AttestationPoolStatus::AttestationPoolStatus, IRemoteProvisioning::BnRemoteProvisioning,
- IRemoteProvisioning::IRemoteProvisioning,
+ IRemoteProvisioning::IRemoteProvisioning, ImplInfo::ImplInfo,
};
use android_security_remoteprovisioning::binder::{BinderFeatures, Strong};
use android_system_keystore2::aidl::android::system::keystore2::{
@@ -43,7 +43,9 @@
use crate::database::{CertificateChain, KeystoreDB, Uuid};
use crate::error::{self, map_or_log_err, map_rem_prov_error, Error};
use crate::globals::{get_keymint_device, get_remotely_provisioned_component, DB};
-use crate::utils::{watchdog as wd, Asp};
+use crate::metrics_store::log_rkp_error_stats;
+use crate::utils::watchdog as wd;
+use android_security_metrics::aidl::android::security::metrics::RkpError::RkpError as MetricsRkpError;
/// Contains helper functions to check if remote provisioning is enabled on the system and, if so,
/// to assign and retrieve attestation keys and certificate chains.
@@ -180,23 +182,35 @@
// and therefore will not be attested.
Ok(None)
} else {
- match self.get_rem_prov_attest_key(&key, caller_uid, db).context(concat!(
- "In get_remote_provisioning_key_and_certs: Failed to get ",
- "attestation key"
- ))? {
- Some(cert_chain) => Ok(Some((
- AttestationKey {
- keyBlob: cert_chain.private_key.to_vec(),
- attestKeyParams: vec![],
- issuerSubjectName: parse_subject_from_certificate(&cert_chain.batch_cert)
+ match self.get_rem_prov_attest_key(key, caller_uid, db) {
+ Err(e) => {
+ log::error!(
+ concat!(
+ "In get_remote_provisioning_key_and_certs: Failed to get ",
+ "attestation key. {:?}"
+ ),
+ e
+ );
+ log_rkp_error_stats(MetricsRkpError::FALL_BACK_DURING_HYBRID);
+ Ok(None)
+ }
+ Ok(v) => match v {
+ Some(cert_chain) => Ok(Some((
+ AttestationKey {
+ keyBlob: cert_chain.private_key.to_vec(),
+ attestKeyParams: vec![],
+ issuerSubjectName: parse_subject_from_certificate(
+ &cert_chain.batch_cert,
+ )
.context(concat!(
- "In get_remote_provisioning_key_and_certs: Failed to ",
- "parse subject."
- ))?,
- },
- Certificate { encodedCertificate: cert_chain.cert_chain },
- ))),
- None => Ok(None),
+ "In get_remote_provisioning_key_and_certs: Failed to ",
+ "parse subject."
+ ))?,
+ },
+ Certificate { encodedCertificate: cert_chain.cert_chain },
+ ))),
+ None => Ok(None),
+ },
}
}
}
@@ -204,7 +218,8 @@
/// Implementation of the IRemoteProvisioning service.
#[derive(Default)]
pub struct RemoteProvisioningService {
- device_by_sec_level: HashMap<SecurityLevel, Asp>,
+ device_by_sec_level: HashMap<SecurityLevel, Strong<dyn IRemotelyProvisionedComponent>>,
+ curve_by_sec_level: HashMap<SecurityLevel, i32>,
}
impl RemoteProvisioningService {
@@ -213,7 +228,7 @@
sec_level: &SecurityLevel,
) -> Result<Strong<dyn IRemotelyProvisionedComponent>> {
if let Some(dev) = self.device_by_sec_level.get(sec_level) {
- dev.get_interface().context("In get_dev_by_sec_level.")
+ Ok(dev.clone())
} else {
Err(error::Error::sys()).context(concat!(
"In get_dev_by_sec_level: Remote instance for requested security level",
@@ -227,33 +242,25 @@
let mut result: Self = Default::default();
let dev = get_remotely_provisioned_component(&SecurityLevel::TRUSTED_ENVIRONMENT)
.context("In new_native_binder: Failed to get TEE Remote Provisioner instance.")?;
+ result.curve_by_sec_level.insert(
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ dev.getHardwareInfo()
+ .context("In new_native_binder: Failed to get hardware info for the TEE.")?
+ .supportedEekCurve,
+ );
result.device_by_sec_level.insert(SecurityLevel::TRUSTED_ENVIRONMENT, dev);
if let Ok(dev) = get_remotely_provisioned_component(&SecurityLevel::STRONGBOX) {
+ result.curve_by_sec_level.insert(
+ SecurityLevel::STRONGBOX,
+ dev.getHardwareInfo()
+ .context("In new_native_binder: Failed to get hardware info for StrongBox.")?
+ .supportedEekCurve,
+ );
result.device_by_sec_level.insert(SecurityLevel::STRONGBOX, dev);
}
Ok(BnRemoteProvisioning::new_binder(result, BinderFeatures::default()))
}
- /// Populates the AttestationPoolStatus parcelable with information about how many
- /// certs will be expiring by the date provided in `expired_by` along with how many
- /// keys have not yet been assigned.
- pub fn get_pool_status(
- &self,
- expired_by: i64,
- sec_level: SecurityLevel,
- ) -> Result<AttestationPoolStatus> {
- let (_, _, uuid) = get_keymint_device(&sec_level)?;
- DB.with::<_, Result<AttestationPoolStatus>>(|db| {
- let mut db = db.borrow_mut();
- // delete_expired_attestation_keys is always safe to call, and will remove anything
- // older than the date at the time of calling. No work should be done on the
- // attestation keys unless the pool status is checked first, so this call should be
- // enough to routinely clean out expired keys.
- db.delete_expired_attestation_keys()?;
- db.get_attestation_pool_status(expired_by, &uuid)
- })
- }
-
/// Generates a CBOR blob which will be assembled by the calling code into a larger
/// CBOR blob intended for delivery to a provisioning serever. This blob will contain
/// `num_csr` certificate signing requests for attestation keys generated in the TEE,
@@ -302,9 +309,17 @@
(mac.len() as u8),
];
cose_mac_0.append(&mut mac);
+        // If this is a test mode key, an extra 6-byte entry is added to the COSE_Key struct to
+        // denote that.
+ let test_mode_entry_shift = if test_mode { 0 } else { 6 };
+ let byte_dist_mac0_payload = 8;
+ let cose_key_size = 83 - test_mode_entry_shift;
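+        // As the constants above encode, a test-mode COSE_Key is 83 bytes and a production one
+        // is 77 bytes, and the MAC0 payload starts 8 bytes into each macedKey. (Editor's note.)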
for maced_public_key in keys_to_sign {
- if maced_public_key.macedKey.len() > 83 + 8 {
- cose_mac_0.extend_from_slice(&maced_public_key.macedKey[8..83 + 8]);
+ if maced_public_key.macedKey.len() > cose_key_size + byte_dist_mac0_payload {
+ cose_mac_0.extend_from_slice(
+ &maced_public_key.macedKey
+ [byte_dist_mac0_payload..cose_key_size + byte_dist_mac0_payload],
+ );
}
}
Ok(cose_mac_0)
@@ -367,8 +382,12 @@
/// Checks the security level of each available IRemotelyProvisionedComponent hal and returns
/// all levels in an array to the caller.
- pub fn get_security_levels(&self) -> Result<Vec<SecurityLevel>> {
- Ok(self.device_by_sec_level.keys().cloned().collect())
+ pub fn get_implementation_info(&self) -> Result<Vec<ImplInfo>> {
+ Ok(self
+ .curve_by_sec_level
+ .iter()
+ .map(|(sec_level, curve)| ImplInfo { secLevel: *sec_level, supportedCurve: *curve })
+ .collect())
}
/// Deletes all attestation keys generated by the IRemotelyProvisionedComponent from the device,
@@ -381,6 +400,22 @@
}
}
+/// Populates the AttestationPoolStatus parcelable with information about how many
+/// certs will be expiring by the date provided in `expired_by` along with how many
+/// keys have not yet been assigned.
+pub fn get_pool_status(expired_by: i64, sec_level: SecurityLevel) -> Result<AttestationPoolStatus> {
+ let (_, _, uuid) = get_keymint_device(&sec_level)?;
+ DB.with::<_, Result<AttestationPoolStatus>>(|db| {
+ let mut db = db.borrow_mut();
+ // delete_expired_attestation_keys is always safe to call, and will remove anything
+ // older than the date at the time of calling. No work should be done on the
+ // attestation keys unless the pool status is checked first, so this call should be
+ // enough to routinely clean out expired keys.
+ db.delete_expired_attestation_keys()?;
+ db.get_attestation_pool_status(expired_by, &uuid)
+ })
+}
+
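// Hypothetical caller (illustration only, relying on this module's existing
// imports): because get_pool_status is now a free function, it no longer needs
// a RemoteProvisioningService instance.
fn tee_pool_status(expired_by: i64) -> Result<AttestationPoolStatus> {
    // Expired keys are purged as a side effect before the status is computed.
    get_pool_status(expired_by, SecurityLevel::TRUSTED_ENVIRONMENT)
}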
impl binder::Interface for RemoteProvisioningService {}
// Implementation of IRemoteProvisioning. See AIDL spec at
@@ -392,7 +427,7 @@
sec_level: SecurityLevel,
) -> binder::public_api::Result<AttestationPoolStatus> {
let _wp = wd::watch_millis("IRemoteProvisioning::getPoolStatus", 500);
- map_or_log_err(self.get_pool_status(expired_by, sec_level), Ok)
+ map_or_log_err(get_pool_status(expired_by, sec_level), Ok)
}
fn generateCsr(
@@ -444,9 +479,9 @@
map_or_log_err(self.generate_key_pair(is_test_mode, sec_level), Ok)
}
- fn getSecurityLevels(&self) -> binder::public_api::Result<Vec<SecurityLevel>> {
+ fn getImplementationInfo(&self) -> binder::public_api::Result<Vec<ImplInfo>> {
let _wp = wd::watch_millis("IRemoteProvisioning::getSecurityLevels", 500);
- map_or_log_err(self.get_security_levels(), Ok)
+ map_or_log_err(self.get_implementation_info(), Ok)
}
fn deleteAllKeys(&self) -> binder::public_api::Result<i64> {
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index f78d98b..74aba3c 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -23,12 +23,12 @@
use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
use crate::key_parameter::KeyParameter as KsKeyParam;
use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
-use crate::metrics::log_key_creation_event_stats;
+use crate::metrics_store::log_key_creation_event_stats;
use crate::remote_provisioning::RemProvState;
use crate::super_key::{KeyBlob, SuperKeyManager};
use crate::utils::{
check_device_attestation_permissions, check_key_permission, is_device_id_attestation_tag,
- key_characteristics_to_internal, uid_to_android_user, watchdog as wd, Asp,
+ key_characteristics_to_internal, uid_to_android_user, watchdog as wd,
};
use crate::{
database::{
@@ -61,7 +61,7 @@
/// Implementation of the IKeystoreSecurityLevel Interface.
pub struct KeystoreSecurityLevel {
security_level: SecurityLevel,
- keymint: Asp,
+ keymint: Strong<dyn IKeyMintDevice>,
hw_info: KeyMintHardwareInfo,
km_uuid: Uuid,
operation_db: OperationDb,
@@ -241,9 +241,9 @@
_ => {
let (key_id_guard, mut key_entry) = DB
.with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
db.borrow_mut().load_key_entry(
- &key,
+ key,
KeyType::Client,
KeyEntryLoadBits::KM,
caller_uid,
@@ -304,25 +304,25 @@
.unwrap_key_if_required(&blob_metadata, km_blob)
.context("In create_operation. Failed to handle super encryption.")?;
- let km_dev: Strong<dyn IKeyMintDevice> = self
- .keymint
- .get_interface()
- .context("In create_operation: Failed to get KeyMint device")?;
-
let (begin_result, upgraded_blob) = self
.upgrade_keyblob_if_required_with(
- &*km_dev,
+ &*self.keymint,
key_id_guard,
&km_blob,
&blob_metadata,
- &operation_parameters,
+ operation_parameters,
|blob| loop {
match map_km_error({
let _wp = self.watch_millis(
"In KeystoreSecurityLevel::create_operation: calling begin",
500,
);
- km_dev.begin(purpose, blob, &operation_parameters, immediate_hat.as_ref())
+ self.keymint.begin(
+ purpose,
+ blob,
+ operation_parameters,
+ immediate_hat.as_ref(),
+ )
}) {
Err(Error::Km(ErrorCode::TOO_MANY_OPERATIONS)) => {
self.operation_db.prune(caller_uid, forced)?;
@@ -508,8 +508,6 @@
.add_certificate_parameters(caller_uid, params, &key)
.context("In generate_key: Trying to get aaid.")?;
- let km_dev: Strong<dyn IKeyMintDevice> = self.keymint.get_interface()?;
-
let creation_result = match attestation_key_info {
Some(AttestationKeyInfo::UserGenerated {
key_id_guard,
@@ -518,7 +516,7 @@
issuer_subject,
}) => self
.upgrade_keyblob_if_required_with(
- &*km_dev,
+ &*self.keymint,
Some(key_id_guard),
&KeyBlob::Ref(&blob),
&blob_metadata,
@@ -537,7 +535,7 @@
),
5000, // Generate can take a little longer.
);
- km_dev.generateKey(¶ms, attest_key.as_ref())
+ self.keymint.generateKey(¶ms, attest_key.as_ref())
})
},
)
@@ -552,7 +550,7 @@
),
5000, // Generate can take a little longer.
);
- km_dev.generateKey(¶ms, Some(&attestation_key))
+ self.keymint.generateKey(¶ms, Some(&attestation_key))
})
.context("While generating Key with remote provisioned attestation key.")
.map(|mut creation_result| {
@@ -568,7 +566,7 @@
),
5000, // Generate can take a little longer.
);
- km_dev.generateKey(¶ms, None)
+ self.keymint.generateKey(¶ms, None)
})
.context("While generating Key without explicit attestation key."),
}
@@ -625,8 +623,7 @@
})
.context("In import_key.")?;
- let km_dev: Strong<dyn IKeyMintDevice> =
- self.keymint.get_interface().context("In import_key: Trying to get the KM device")?;
+ let km_dev = &self.keymint;
let creation_result = map_km_error({
let _wp =
self.watch_millis("In KeystoreSecurityLevel::import_key: calling importKey.", 500);
@@ -694,7 +691,7 @@
.with(|db| {
LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
db.borrow_mut().load_key_entry(
- &wrapping_key,
+ wrapping_key,
KeyType::Client,
KeyEntryLoadBits::KM,
caller_uid,
@@ -736,10 +733,9 @@
let masking_key = masking_key.unwrap_or(ZERO_BLOB_32);
- let km_dev: Strong<dyn IKeyMintDevice> = self.keymint.get_interface()?;
let (creation_result, _) = self
.upgrade_keyblob_if_required_with(
- &*km_dev,
+ &*self.keymint,
Some(wrapping_key_id_guard),
&wrapping_key_blob,
&wrapping_blob_metadata,
@@ -749,11 +745,11 @@
"In KeystoreSecurityLevel::import_wrapped_key: calling importWrappedKey.",
500,
);
- let creation_result = map_km_error(km_dev.importWrappedKey(
+ let creation_result = map_km_error(self.keymint.importWrappedKey(
wrapped_data,
wrapping_blob,
masking_key,
- ¶ms,
+ params,
pw_sid,
fp_sid,
))?;
@@ -773,7 +769,7 @@
upgraded_blob: &[u8],
) -> Result<()> {
let (upgraded_blob_to_be_stored, new_blob_metadata) =
- SuperKeyManager::reencrypt_if_required(key_blob, &upgraded_blob)
+ SuperKeyManager::reencrypt_if_required(key_blob, upgraded_blob)
.context("In store_upgraded_keyblob: Failed to handle super encryption.")?;
let mut new_blob_metadata = new_blob_metadata.unwrap_or_default();
@@ -883,10 +879,7 @@
check_key_permission(KeyPerm::convert_storage_key_to_ephemeral(), storage_key, &None)
.context("In convert_storage_key_to_ephemeral: Check permission")?;
- let km_dev: Strong<dyn IKeyMintDevice> = self.keymint.get_interface().context(concat!(
- "In IKeystoreSecurityLevel convert_storage_key_to_ephemeral: ",
- "Getting keymint device interface"
- ))?;
+ let km_dev = &self.keymint;
match {
let _wp = self.watch_millis(
concat!(
@@ -914,7 +907,7 @@
"In convert_storage_key_to_ephemeral: calling convertStorageKeyToEphemeral (2)",
500,
);
- map_km_error(km_dev.convertStorageKeyToEphemeral(key_blob))
+ map_km_error(km_dev.convertStorageKeyToEphemeral(&upgraded_blob))
}
.context(concat!(
"In convert_storage_key_to_ephemeral: ",
@@ -945,14 +938,11 @@
check_key_permission(KeyPerm::delete(), key, &None)
.context("In IKeystoreSecurityLevel delete_key: Checking delete permissions")?;
- let km_dev: Strong<dyn IKeyMintDevice> = self
- .keymint
- .get_interface()
- .context("In IKeystoreSecurityLevel delete_key: Getting keymint device interface")?;
+ let km_dev = &self.keymint;
{
let _wp =
self.watch_millis("In KeystoreSecurityLevel::delete_key: calling deleteKey", 500);
- map_km_error(km_dev.deleteKey(&key_blob)).context("In keymint device deleteKey")
+ map_km_error(km_dev.deleteKey(key_blob)).context("In keymint device deleteKey")
}
}
}
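// Most edits in this file follow a single pattern (shown here for reference,
// paraphrasing the hunks above): the per-call interface lookup through the
// removed Asp wrapper,
//
//     let km_dev: Strong<dyn IKeyMintDevice> = self.keymint.get_interface()?;
//     km_dev.deleteKey(key_blob)
//
// becomes a direct call on the strong binder handle stored in the struct:
//
//     self.keymint.deleteKey(key_blob)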
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index d65743d..b35fe36 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -22,7 +22,7 @@
use crate::security_level::KeystoreSecurityLevel;
use crate::utils::{
check_grant_permission, check_key_permission, check_keystore_permission,
- key_parameters_to_authorizations, watchdog as wd, Asp,
+ key_parameters_to_authorizations, watchdog as wd,
};
use crate::{
database::Uuid,
@@ -51,7 +51,7 @@
/// Implementation of the IKeystoreService.
#[derive(Default)]
pub struct KeystoreService {
- i_sec_level_by_uuid: HashMap<Uuid, Asp>,
+ i_sec_level_by_uuid: HashMap<Uuid, Strong<dyn IKeystoreSecurityLevel>>,
uuid_by_sec_level: HashMap<SecurityLevel, Uuid>,
}
@@ -68,15 +68,13 @@
.context(concat!(
"In KeystoreService::new_native_binder: ",
"Trying to construct mandatory security level TEE."
- ))
- .map(|(dev, uuid)| (Asp::new(dev.as_binder()), uuid))?;
+ ))?;
result.i_sec_level_by_uuid.insert(uuid, dev);
result.uuid_by_sec_level.insert(SecurityLevel::TRUSTED_ENVIRONMENT, uuid);
// Strongbox is optional, so we ignore errors and turn the result into an Option.
if let Ok((dev, uuid)) =
KeystoreSecurityLevel::new_native_binder(SecurityLevel::STRONGBOX, id_rotation_state)
- .map(|(dev, uuid)| (Asp::new(dev.as_binder()), uuid))
{
result.i_sec_level_by_uuid.insert(uuid, dev);
result.uuid_by_sec_level.insert(SecurityLevel::STRONGBOX, uuid);
@@ -107,7 +105,7 @@
fn get_i_sec_level_by_uuid(&self, uuid: &Uuid) -> Result<Strong<dyn IKeystoreSecurityLevel>> {
if let Some(dev) = self.i_sec_level_by_uuid.get(uuid) {
- dev.get_interface().context("In get_i_sec_level_by_uuid.")
+ Ok(dev.clone())
} else {
Err(error::Error::sys())
.context("In get_i_sec_level_by_uuid: KeyMint instance for key not found.")
@@ -123,7 +121,7 @@
.get(&sec_level)
.and_then(|uuid| self.i_sec_level_by_uuid.get(uuid))
{
- dev.get_interface().context("In get_security_level.")
+ Ok(dev.clone())
} else {
Err(error::Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE))
.context("In get_security_level: No such security level.")
@@ -134,9 +132,9 @@
let caller_uid = ThreadState::get_calling_uid();
let (key_id_guard, mut key_entry) = DB
.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
db.borrow_mut().load_key_entry(
- &key,
+ key,
KeyType::Client,
KeyEntryLoadBits::PUBLIC,
caller_uid,
@@ -185,9 +183,9 @@
) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
DB.with::<_, Result<()>>(|db| {
- let entry = match LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ let entry = match LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
db.borrow_mut().load_key_entry(
- &key,
+ key,
KeyType::Client,
KeyEntryLoadBits::NONE,
caller_uid,
@@ -309,8 +307,8 @@
fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
DB.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
- db.borrow_mut().unbind_key(&key, KeyType::Client, caller_uid, |k, av| {
+ LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+ db.borrow_mut().unbind_key(key, KeyType::Client, caller_uid, |k, av| {
check_key_permission(KeyPerm::delete(), k, &av).context("During delete_key.")
})
})
@@ -327,9 +325,9 @@
) -> Result<KeyDescriptor> {
let caller_uid = ThreadState::get_calling_uid();
DB.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
db.borrow_mut().grant(
- &key,
+ key,
caller_uid,
grantee_uid as u32,
access_vector,
@@ -342,7 +340,7 @@
fn ungrant(&self, key: &KeyDescriptor, grantee_uid: i32) -> Result<()> {
DB.with(|db| {
- db.borrow_mut().ungrant(&key, ThreadState::get_calling_uid(), grantee_uid as u32, |k| {
+ db.borrow_mut().ungrant(key, ThreadState::get_calling_uid(), grantee_uid as u32, |k| {
check_key_permission(KeyPerm::grant(), k, &None)
})
})
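// The recurring `&key` -> `key` edits in this file (and in security_level.rs
// above) are clippy `needless_borrow` fixes: `key` already has type
// `&KeyDescriptor`, so `&key` creates a `&&KeyDescriptor` that is only
// deref-coerced back again. No behavior changes. A generic illustration:
fn needs_ref(_: &str) {}
fn needless_borrow_demo(s: &str) {
    needs_ref(s); // idiomatic
    needs_ref(&s); // `&&str`, deref-coerced; clippy::needless_borrow warns here
}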
diff --git a/keystore2/src/shared_secret_negotiation.rs b/keystore2/src/shared_secret_negotiation.rs
index 64bc2c3..e32b675 100644
--- a/keystore2/src/shared_secret_negotiation.rs
+++ b/keystore2/src/shared_secret_negotiation.rs
@@ -149,14 +149,15 @@
.collect::<Result<Vec<_>>>()
.map(|v| v.into_iter().flatten())
.and_then(|i| {
- let participants_aidl: Vec<SharedSecretParticipant> =
+ Ok(i.chain(
get_aidl_instances(SHARED_SECRET_PACKAGE_NAME, 1, SHARED_SECRET_INTERFACE_NAME)
.as_vec()
.context("In list_participants: Trying to convert KM1.0 names to vector.")?
.into_iter()
.map(|name| SharedSecretParticipant::Aidl(name.to_string()))
- .collect();
- Ok(i.chain(participants_aidl.into_iter()))
+ .collect::<Vec<_>>()
+ .into_iter(),
+ ))
})
.context("In list_participants.")?
.collect())
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 9fb267a..a1e4c48 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -46,7 +46,7 @@
aes_gcm_decrypt, aes_gcm_encrypt, generate_aes256_key, generate_salt, Password, ZVec,
AES_256_KEY_LENGTH,
};
-use keystore2_system_property::PropertyWatcher;
+use rustutils::system_properties::PropertyWatcher;
use std::{
collections::HashMap,
sync::Arc,
@@ -68,8 +68,8 @@
pub enum SuperEncryptionAlgorithm {
/// Symmetric encryption with AES-256-GCM
Aes256Gcm,
- /// Public-key encryption with ECDH P-256
- EcdhP256,
+ /// Public-key encryption with ECDH P-521
+ EcdhP521,
}
/// A particular user may have several superencryption keys in the database, each for a
@@ -96,9 +96,9 @@
/// Key used for ScreenLockBound keys; the corresponding superencryption key is loaded in memory
/// each time the user enters their LSKF, and cleared from memory each time the device is locked.
/// Asymmetric, so keys can be encrypted when the device is locked.
-pub const USER_SCREEN_LOCK_BOUND_ECDH_KEY: SuperKeyType = SuperKeyType {
- alias: "USER_SCREEN_LOCK_BOUND_ECDH_KEY",
- algorithm: SuperEncryptionAlgorithm::EcdhP256,
+pub const USER_SCREEN_LOCK_BOUND_P521_KEY: SuperKeyType = SuperKeyType {
+ alias: "USER_SCREEN_LOCK_BOUND_P521_KEY",
+ algorithm: SuperEncryptionAlgorithm::EcdhP521,
};
/// Superencryption to apply to a new key.
@@ -126,10 +126,8 @@
fn from_metadata(metadata: &BlobMetaData) -> Option<Self> {
if let Some(EncryptedBy::KeyId(key_id)) = metadata.encrypted_by() {
Some(SuperKeyIdentifier::DatabaseId(*key_id))
- } else if let Some(boot_level) = metadata.max_boot_level() {
- Some(SuperKeyIdentifier::BootLevel(*boot_level))
} else {
- None
+ metadata.max_boot_level().map(|boot_level| SuperKeyIdentifier::BootLevel(*boot_level))
}
}
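// The rewrite above is clippy's `manual_map` suggestion: an
//
//     if let Some(x) = opt { Some(f(x)) } else { None }
//
// chain that only wraps the inner value is equivalent to `opt.map(f)`, which
// is what `metadata.max_boot_level().map(...)` now does.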
@@ -398,7 +396,7 @@
.get_or_create_key_with(
Domain::APP,
user as u64 as i64,
- &USER_SUPER_KEY.alias,
+ USER_SUPER_KEY.alias,
crate::database::KEYSTORE_UUID,
|| {
// For backward compatibility we need to check if there is a super key present.
@@ -468,7 +466,7 @@
tag.is_some(),
)),
},
- SuperEncryptionAlgorithm::EcdhP256 => {
+ SuperEncryptionAlgorithm::EcdhP521 => {
match (metadata.public_key(), metadata.salt(), metadata.iv(), metadata.aead_tag()) {
(Some(public_key), Some(salt), Some(iv), Some(aead_tag)) => {
ECDHPrivateKey::from_private_key(&key.key)
@@ -501,7 +499,7 @@
user_id: UserId,
) -> Result<bool> {
let key_in_db = db
- .key_exists(Domain::APP, user_id as u64 as i64, &USER_SUPER_KEY.alias, KeyType::Super)
+ .key_exists(Domain::APP, user_id as u64 as i64, USER_SUPER_KEY.alias, KeyType::Super)
.context("In super_key_exists_in_db_for_user.")?;
if key_in_db {
@@ -737,7 +735,7 @@
match Enforcements::super_encryption_required(domain, key_parameters, flags) {
SuperEncryptionType::None => Ok((key_blob.to_vec(), BlobMetaData::new())),
SuperEncryptionType::LskfBound => self
- .super_encrypt_on_key_init(db, legacy_migrator, user_id, &key_blob)
+ .super_encrypt_on_key_init(db, legacy_migrator, user_id, key_blob)
.context(concat!(
"In handle_super_encryption_on_key_init. ",
"Failed to super encrypt with LskfBound key."
@@ -746,14 +744,14 @@
let mut data = self.data.lock().unwrap();
let entry = data.user_keys.entry(user_id).or_default();
if let Some(super_key) = entry.screen_lock_bound.as_ref() {
- Self::encrypt_with_aes_super_key(key_blob, &super_key).context(concat!(
+ Self::encrypt_with_aes_super_key(key_blob, super_key).context(concat!(
"In handle_super_encryption_on_key_init. ",
"Failed to encrypt with ScreenLockBound key."
))
} else {
// Symmetric key is not available, use public key encryption
let loaded =
- db.load_super_key(&USER_SCREEN_LOCK_BOUND_ECDH_KEY, user_id).context(
+ db.load_super_key(&USER_SCREEN_LOCK_BOUND_P521_KEY, user_id).context(
"In handle_super_encryption_on_key_init: load_super_key failed.",
)?;
let (key_id_guard, key_entry) = loaded.ok_or_else(Error::sys).context(
@@ -836,7 +834,7 @@
.context("In get_or_create_super_key: Failed to generate AES 256 key.")?,
None,
),
- SuperEncryptionAlgorithm::EcdhP256 => {
+ SuperEncryptionAlgorithm::EcdhP521 => {
let key = ECDHPrivateKey::generate()
.context("In get_or_create_super_key: Failed to generate ECDH key")?;
(
@@ -903,7 +901,7 @@
Self::get_or_create_super_key(
db,
user_id,
- &USER_SCREEN_LOCK_BOUND_ECDH_KEY,
+ &USER_SCREEN_LOCK_BOUND_P521_KEY,
password,
Some(aes.clone()),
)
@@ -1215,8 +1213,8 @@
fn deref(&self) -> &Self::Target {
match self {
- Self::Sensitive { key, .. } => &key,
- Self::NonSensitive(key) => &key,
+ Self::Sensitive { key, .. } => key,
+ Self::NonSensitive(key) => key,
Self::Ref(key) => key,
}
}
diff --git a/keystore2/src/utils.rs b/keystore2/src/utils.rs
index a110c64..f6d92ee 100644
--- a/keystore2/src/utils.rs
+++ b/keystore2/src/utils.rs
@@ -29,14 +29,13 @@
use android_system_keystore2::aidl::android::system::keystore2::{
Authorization::Authorization, KeyDescriptor::KeyDescriptor,
};
-use anyhow::{anyhow, Context};
-use binder::{FromIBinder, SpIBinder, ThreadState};
+use anyhow::Context;
+use binder::{Strong, ThreadState};
use keystore2_apc_compat::{
ApcCompatUiOptions, APC_COMPAT_ERROR_ABORTED, APC_COMPAT_ERROR_CANCELLED,
APC_COMPAT_ERROR_IGNORED, APC_COMPAT_ERROR_OK, APC_COMPAT_ERROR_OPERATION_PENDING,
APC_COMPAT_ERROR_SYSTEM_ERROR,
};
-use std::sync::Mutex;
/// This function uses its namesake in the permission module and in
/// combination with with_calling_sid from the binder crate to check
@@ -44,7 +43,7 @@
pub fn check_keystore_permission(perm: KeystorePerm) -> anyhow::Result<()> {
ThreadState::with_calling_sid(|calling_sid| {
permission::check_keystore_permission(
- &calling_sid.ok_or_else(Error::sys).context(
+ calling_sid.ok_or_else(Error::sys).context(
"In check_keystore_permission: Cannot check permission without calling_sid.",
)?,
perm,
@@ -58,7 +57,7 @@
pub fn check_grant_permission(access_vec: KeyPermSet, key: &KeyDescriptor) -> anyhow::Result<()> {
ThreadState::with_calling_sid(|calling_sid| {
permission::check_grant_permission(
- &calling_sid.ok_or_else(Error::sys).context(
+ calling_sid.ok_or_else(Error::sys).context(
"In check_grant_permission: Cannot check permission without calling_sid.",
)?,
access_vec,
@@ -78,7 +77,7 @@
ThreadState::with_calling_sid(|calling_sid| {
permission::check_key_permission(
ThreadState::get_calling_uid(),
- &calling_sid
+ calling_sid
.ok_or_else(Error::sys)
.context("In check_key_permission: Cannot check permission without calling_sid.")?,
perm,
@@ -103,7 +102,7 @@
/// identifiers. It throws an error if the permissions cannot be verified, or if the caller doesn't
/// have the right permissions, and returns silently otherwise.
pub fn check_device_attestation_permissions() -> anyhow::Result<()> {
- let permission_controller: binder::Strong<dyn IPermissionController::IPermissionController> =
+ let permission_controller: Strong<dyn IPermissionController::IPermissionController> =
binder::get_interface("permission")?;
let binder_result = {
@@ -128,39 +127,6 @@
}
}
-/// Thread safe wrapper around SpIBinder. It is safe to have SpIBinder smart pointers to the
-/// same object in multiple threads, but cloning a SpIBinder is not thread safe.
-/// Keystore frequently hands out binder tokens to the security level interface. If this
-/// is to happen from a multi threaded thread pool, the SpIBinder needs to be protected by a
-/// Mutex.
-#[derive(Debug)]
-pub struct Asp(Mutex<SpIBinder>);
-
-impl Asp {
- /// Creates a new instance owning a SpIBinder wrapped in a Mutex.
- pub fn new(i: SpIBinder) -> Self {
- Self(Mutex::new(i))
- }
-
- /// Clones the owned SpIBinder and attempts to convert it into the requested interface.
- pub fn get_interface<T: FromIBinder + ?Sized>(&self) -> anyhow::Result<binder::Strong<T>> {
- // We can use unwrap here because we never panic when locked, so the mutex
- // can never be poisoned.
- let lock = self.0.lock().unwrap();
- (*lock)
- .clone()
- .into_interface()
- .map_err(|e| anyhow!(format!("get_interface failed with error code {:?}", e)))
- }
-}
-
-impl Clone for Asp {
- fn clone(&self) -> Self {
- let lock = self.0.lock().unwrap();
- Self(Mutex::new((*lock).clone()))
- }
-}
-
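// Why Asp could be deleted (inferred from this change, not stated in it): the
// rest of the patch holds `Strong<dyn ...>` handles directly and clones them
// where needed, which suggests the binder crate's `Strong` proxies can now be
// cloned and shared across threads safely, making the Mutex-guarded SpIBinder
// plus the per-call `get_interface()` round trip unnecessary.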
/// Converts a set of key characteristics as returned from KeyMint into the internal
/// representation of the keystore service.
pub fn key_characteristics_to_internal(
@@ -222,16 +188,15 @@
}
/// AID offset for uid space partitioning.
-pub const AID_USER_OFFSET: u32 = cutils_bindgen::AID_USER_OFFSET;
+pub const AID_USER_OFFSET: u32 = rustutils::users::AID_USER_OFFSET;
/// AID of the keystore process itself, used for keys that
/// keystore generates for its own use.
-pub const AID_KEYSTORE: u32 = cutils_bindgen::AID_KEYSTORE;
+pub const AID_KEYSTORE: u32 = rustutils::users::AID_KEYSTORE;
/// Extracts the android user from the given uid.
pub fn uid_to_android_user(uid: u32) -> u32 {
- // Safety: No memory access
- unsafe { cutils_bindgen::multiuser_get_user_id(uid) }
+ rustutils::users::multiuser_get_user_id(uid)
}
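// Quick illustration; assumes the conventional Android per-user uid stride
// (AID_USER_OFFSET == 100_000) re-exported by rustutils::users.
#[test]
fn uid_to_android_user_examples() {
    assert_eq!(uid_to_android_user(10_123), 0); // an app uid in user 0
    assert_eq!(uid_to_android_user(AID_USER_OFFSET + 10_123), 1); // same app, user 1
}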
/// This module provides helpers for simplified use of the watchdog module.
diff --git a/keystore2/src/vintf/Android.bp b/keystore2/src/vintf/Android.bp
index 3ab0ec5..352f2be 100644
--- a/keystore2/src/vintf/Android.bp
+++ b/keystore2/src/vintf/Android.bp
@@ -78,3 +78,13 @@
"libvintf",
],
}
+
+rust_test {
+ name: "libkeystore2_vintf_bindgen_test",
+ srcs: [":libkeystore2_vintf_bindgen"],
+ crate_name: "keystore2_vintf_bindgen_test",
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ clippy_lints: "none",
+ lints: "none",
+}
diff --git a/keystore2/system_property/Android.bp b/keystore2/system_property/Android.bp
deleted file mode 100644
index 9e7b056..0000000
--- a/keystore2/system_property/Android.bp
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package {
- // See: http://go/android-license-faq
- // A large-scale-change added 'default_applicable_licenses' to import
- // all of the 'license_kinds' from "system_security_license"
- // to get the below license kinds:
- // SPDX-license-identifier-Apache-2.0
- default_applicable_licenses: ["system_security_license"],
-}
-
-rust_bindgen {
- name: "libkeystore2_system_property_bindgen",
- wrapper_src: "system_property_bindgen.hpp",
- crate_name: "keystore2_system_property_bindgen",
- source_stem: "bindings",
-
- bindgen_flags: [
- "--size_t-is-usize",
- "--allowlist-function=__system_property_find",
- "--allowlist-function=__system_property_read_callback",
- "--allowlist-function=__system_property_wait",
- ],
-}
-
-rust_library {
- name: "libkeystore2_system_property-rust",
- crate_name: "keystore2_system_property",
- srcs: [
- "lib.rs",
- ],
- rustlibs: [
- "libanyhow",
- "libkeystore2_system_property_bindgen",
- "libthiserror",
- ],
- shared_libs: [
- "libbase",
- ],
-}
diff --git a/keystore2/system_property/lib.rs b/keystore2/system_property/lib.rs
deleted file mode 100644
index be13c88..0000000
--- a/keystore2/system_property/lib.rs
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! This crate provides the PropertyWatcher type, which watches for changes
-//! in Android system properties.
-
-use keystore2_system_property_bindgen::prop_info as PropInfo;
-use std::os::raw::c_char;
-use std::ptr::null;
-use std::{
- ffi::{c_void, CStr, CString},
- str::Utf8Error,
-};
-use thiserror::Error;
-
-/// Errors this crate can generate
-#[derive(Error, Debug)]
-pub enum PropertyWatcherError {
- /// We can't watch for a property whose name contains a NUL character.
- #[error("Cannot convert name to C string")]
- BadNameError(#[from] std::ffi::NulError),
- /// We can only watch for properties that exist when the watcher is created.
- #[error("System property is absent")]
- SystemPropertyAbsent,
- /// __system_property_wait timed out despite being given no timeout.
- #[error("Wait failed")]
- WaitFailed,
- /// read callback was not called
- #[error("__system_property_read_callback did not call callback")]
- ReadCallbackNotCalled,
- /// read callback gave us a NULL pointer
- #[error("__system_property_read_callback gave us a NULL pointer instead of a string")]
- MissingCString,
- /// read callback gave us a bad C string
- #[error("__system_property_read_callback gave us a non-UTF8 C string")]
- BadCString(#[from] Utf8Error),
- /// read callback returned an error
- #[error("Callback failed")]
- CallbackError(#[from] anyhow::Error),
-}
-
-/// Result type specific for this crate.
-pub type Result<T> = std::result::Result<T, PropertyWatcherError>;
-
-/// PropertyWatcher takes the name of an Android system property such
-/// as `keystore.boot_level`; it can report the current value of this
-/// property, or wait for it to change.
-pub struct PropertyWatcher {
- prop_name: CString,
- prop_info: *const PropInfo,
- serial: keystore2_system_property_bindgen::__uint32_t,
-}
-
-impl PropertyWatcher {
- /// Create a PropertyWatcher for the named system property.
- pub fn new(name: &str) -> Result<Self> {
- Ok(Self { prop_name: CString::new(name)?, prop_info: null(), serial: 0 })
- }
-
- // Lazy-initializing accessor for self.prop_info.
- fn get_prop_info(&mut self) -> Option<*const PropInfo> {
- if self.prop_info.is_null() {
- // Unsafe required for FFI call. Input and output are both const.
- // The returned pointer is valid for the lifetime of the program.
- self.prop_info = unsafe {
- keystore2_system_property_bindgen::__system_property_find(self.prop_name.as_ptr())
- };
- }
- if self.prop_info.is_null() {
- None
- } else {
- Some(self.prop_info)
- }
- }
-
- fn read_raw(prop_info: *const PropInfo, mut f: impl FnOnce(Option<&CStr>, Option<&CStr>)) {
- // Unsafe function converts values passed to us by
- // __system_property_read_callback to Rust form
- // and pass them to inner callback.
- unsafe extern "C" fn callback(
- res_p: *mut c_void,
- name: *const c_char,
- value: *const c_char,
- _: keystore2_system_property_bindgen::__uint32_t,
- ) {
- let name = if name.is_null() { None } else { Some(CStr::from_ptr(name)) };
- let value = if value.is_null() { None } else { Some(CStr::from_ptr(value)) };
- let f = &mut *res_p.cast::<&mut dyn FnMut(Option<&CStr>, Option<&CStr>)>();
- f(name, value);
- }
-
- let mut f: &mut dyn FnOnce(Option<&CStr>, Option<&CStr>) = &mut f;
-
- // Unsafe block for FFI call. We convert the FnOnce
- // to a void pointer, and unwrap it in our callback.
- unsafe {
- keystore2_system_property_bindgen::__system_property_read_callback(
- prop_info,
- Some(callback),
- &mut f as *mut _ as *mut c_void,
- )
- }
- }
-
- /// Call the passed function, passing it the name and current value
- /// of this system property. See documentation for
- /// `__system_property_read_callback` for details.
- /// Returns an error if the property is empty or doesn't exist.
- pub fn read<T, F>(&mut self, f: F) -> Result<T>
- where
- F: FnOnce(&str, &str) -> anyhow::Result<T>,
- {
- let prop_info = self.get_prop_info().ok_or(PropertyWatcherError::SystemPropertyAbsent)?;
- let mut result = Err(PropertyWatcherError::ReadCallbackNotCalled);
- Self::read_raw(prop_info, |name, value| {
- // use a wrapping closure as an ersatz try block.
- result = (|| {
- let name = name.ok_or(PropertyWatcherError::MissingCString)?.to_str()?;
- let value = value.ok_or(PropertyWatcherError::MissingCString)?.to_str()?;
- f(name, value).map_err(PropertyWatcherError::CallbackError)
- })()
- });
- result
- }
-
- // Waits for the property that self is watching to be created. Returns immediately if the
- // property already exists.
- fn wait_for_property_creation(&mut self) -> Result<()> {
- let mut global_serial = 0;
- loop {
- match self.get_prop_info() {
- Some(_) => return Ok(()),
- None => {
- // Unsafe call for FFI. The function modifies only global_serial, and has
- // no side-effects.
- if !unsafe {
- // Wait for a global serial number change, then try again. On success,
- // the function will update global_serial with the last version seen.
- keystore2_system_property_bindgen::__system_property_wait(
- null(),
- global_serial,
- &mut global_serial,
- null(),
- )
- } {
- return Err(PropertyWatcherError::WaitFailed);
- }
- }
- }
- }
- }
-
- /// Wait for the system property to change. This
- /// records the serial number of the last change, so
- /// race conditions are avoided.
- pub fn wait(&mut self) -> Result<()> {
- // If the property is null, then wait for it to be created. Subsequent waits will
- // skip this step and wait for our specific property to change.
- if self.prop_info.is_null() {
- return self.wait_for_property_creation();
- }
-
- let mut new_serial = self.serial;
- // Unsafe block to call __system_property_wait.
- // All arguments are private to PropertyWatcher so we
- // can be confident they are valid.
- if !unsafe {
- keystore2_system_property_bindgen::__system_property_wait(
- self.prop_info,
- self.serial,
- &mut new_serial,
- null(),
- )
- } {
- return Err(PropertyWatcherError::WaitFailed);
- }
- self.serial = new_serial;
- Ok(())
- }
-}
diff --git a/keystore2/vpnprofilestore/lib.rs b/keystore2/vpnprofilestore/lib.rs
deleted file mode 100644
index df2731a..0000000
--- a/keystore2/vpnprofilestore/lib.rs
+++ /dev/null
@@ -1,578 +0,0 @@
-// Copyright 2020, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Implements the android.security.vpnprofilestore interface.
-
-use android_security_vpnprofilestore::aidl::android::security::vpnprofilestore::{
- IVpnProfileStore::BnVpnProfileStore, IVpnProfileStore::IVpnProfileStore,
- IVpnProfileStore::ERROR_PROFILE_NOT_FOUND, IVpnProfileStore::ERROR_SYSTEM_ERROR,
-};
-use android_security_vpnprofilestore::binder::{
- BinderFeatures, ExceptionCode, Result as BinderResult, Status as BinderStatus, Strong,
- ThreadState,
-};
-use anyhow::{anyhow, Context, Result};
-use keystore2::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader, utils::watchdog as wd};
-use rusqlite::{
- params, Connection, OptionalExtension, Transaction, TransactionBehavior, NO_PARAMS,
-};
-use std::{
- collections::HashSet,
- path::{Path, PathBuf},
- sync::Once,
-};
-
-static DB_SET_WAL_MODE: Once = Once::new();
-
-struct DB {
- conn: Connection,
-}
-
-impl DB {
- fn new(db_file: &Path) -> Result<Self> {
- DB_SET_WAL_MODE.call_once(|| {
- log::info!("Setting VpnProfileStore database to WAL mode first time since boot.");
- Self::set_wal_mode(&db_file).expect("In vpnprofilestore: Could not set WAL mode.");
- });
-
- let mut db = Self {
- conn: Connection::open(db_file).context("Failed to initialize SQLite connection.")?,
- };
-
- db.init_tables().context("Trying to initialize vpnstore db.")?;
- Ok(db)
- }
-
- fn set_wal_mode(db_file: &Path) -> Result<()> {
- let conn = Connection::open(db_file)
- .context("In VpnProfileStore set_wal_mode: Failed to open DB.")?;
- let mode: String = conn
- .pragma_update_and_check(None, "journal_mode", &"WAL", |row| row.get(0))
- .context("In VpnProfileStore set_wal_mode: Failed to set journal_mode")?;
- match mode.as_str() {
- "wal" => Ok(()),
- _ => Err(anyhow!("Unable to set WAL mode, db is still in {} mode.", mode)),
- }
- }
-
- fn with_transaction<T, F>(&mut self, behavior: TransactionBehavior, f: F) -> Result<T>
- where
- F: Fn(&Transaction) -> Result<T>,
- {
- loop {
- match self
- .conn
- .transaction_with_behavior(behavior)
- .context("In with_transaction.")
- .and_then(|tx| f(&tx).map(|result| (result, tx)))
- .and_then(|(result, tx)| {
- tx.commit().context("In with_transaction: Failed to commit transaction.")?;
- Ok(result)
- }) {
- Ok(result) => break Ok(result),
- Err(e) => {
- if Self::is_locked_error(&e) {
- std::thread::sleep(std::time::Duration::from_micros(500));
- continue;
- } else {
- return Err(e).context("In with_transaction.");
- }
- }
- }
- }
- }
-
- fn is_locked_error(e: &anyhow::Error) -> bool {
- matches!(
- e.root_cause().downcast_ref::<rusqlite::ffi::Error>(),
- Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseBusy, .. })
- | Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseLocked, .. })
- )
- }
-
- fn init_tables(&mut self) -> Result<()> {
- self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute(
- "CREATE TABLE IF NOT EXISTS profiles (
- owner INTEGER,
- alias BLOB,
- profile BLOB,
- UNIQUE(owner, alias));",
- NO_PARAMS,
- )
- .context("Failed to initialize \"profiles\" table.")?;
- Ok(())
- })
- }
-
- fn list(&mut self, caller_uid: u32) -> Result<Vec<String>> {
- self.with_transaction(TransactionBehavior::Deferred, |tx| {
- let mut stmt = tx
- .prepare("SELECT alias FROM profiles WHERE owner = ? ORDER BY alias ASC;")
- .context("In list: Failed to prepare statement.")?;
-
- let aliases = stmt
- .query_map(params![caller_uid], |row| row.get(0))?
- .collect::<rusqlite::Result<Vec<String>>>()
- .context("In list: query_map failed.");
- aliases
- })
- }
-
- fn put(&mut self, caller_uid: u32, alias: &str, profile: &[u8]) -> Result<()> {
- self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute(
- "INSERT OR REPLACE INTO profiles (owner, alias, profile) values (?, ?, ?)",
- params![caller_uid, alias, profile,],
- )
- .context("In put: Failed to insert or replace.")?;
- Ok(())
- })
- }
-
- fn get(&mut self, caller_uid: u32, alias: &str) -> Result<Option<Vec<u8>>> {
- self.with_transaction(TransactionBehavior::Deferred, |tx| {
- tx.query_row(
- "SELECT profile FROM profiles WHERE owner = ? AND alias = ?;",
- params![caller_uid, alias],
- |row| row.get(0),
- )
- .optional()
- .context("In get: failed loading profile.")
- })
- }
-
- fn remove(&mut self, caller_uid: u32, alias: &str) -> Result<bool> {
- let removed = self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute(
- "DELETE FROM profiles WHERE owner = ? AND alias = ?;",
- params![caller_uid, alias],
- )
- .context("In remove: Failed to delete row.")
- })?;
- Ok(removed == 1)
- }
-}
-
-/// This is the main VpnProfileStore error type; it wraps binder exceptions and the
-/// VpnProfileStore errors.
-#[derive(Debug, thiserror::Error, PartialEq)]
-pub enum Error {
- /// Wraps a VpnProfileStore error code.
- #[error("Error::Error({0:?})")]
- Error(i32),
- /// Wraps a Binder exception code other than a service specific exception.
- #[error("Binder exception code {0:?}, {1:?}")]
- Binder(ExceptionCode, i32),
-}
-
-impl Error {
- /// Short hand for `Error::Error(ERROR_SYSTEM_ERROR)`
- pub fn sys() -> Self {
- Error::Error(ERROR_SYSTEM_ERROR)
- }
-
- /// Short hand for `Error::Error(ERROR_PROFILE_NOT_FOUND)`
- pub fn not_found() -> Self {
- Error::Error(ERROR_PROFILE_NOT_FOUND)
- }
-}
-
-/// This function should be used by vpnprofilestore service calls to translate error conditions
-/// into service specific exceptions.
-///
-/// All error conditions get logged by this function, except for ERROR_PROFILE_NOT_FOUND error.
-///
-/// `Error::Error(x)` variants get mapped onto a service specific error code of `x`.
-///
-/// All non `Error` error conditions get mapped onto `ERROR_SYSTEM_ERROR`.
-///
-/// `handle_ok` will be called if `result` is `Ok(value)` where `value` will be passed
-/// as argument to `handle_ok`. `handle_ok` must generate a `BinderResult<T>`, but it
-/// typically returns Ok(value).
-fn map_or_log_err<T, U, F>(result: Result<U>, handle_ok: F) -> BinderResult<T>
-where
- F: FnOnce(U) -> BinderResult<T>,
-{
- result.map_or_else(
- |e| {
- let root_cause = e.root_cause();
- let (rc, log_error) = match root_cause.downcast_ref::<Error>() {
- // Make the profile not found errors silent.
- Some(Error::Error(ERROR_PROFILE_NOT_FOUND)) => (ERROR_PROFILE_NOT_FOUND, false),
- Some(Error::Error(e)) => (*e, true),
- Some(Error::Binder(_, _)) | None => (ERROR_SYSTEM_ERROR, true),
- };
- if log_error {
- log::error!("{:?}", e);
- }
- Err(BinderStatus::new_service_specific_error(rc, None))
- },
- handle_ok,
- )
-}
-
-/// Implements IVpnProfileStore AIDL interface.
-pub struct VpnProfileStore {
- db_path: PathBuf,
- async_task: AsyncTask,
-}
-
-struct AsyncState {
- recently_imported: HashSet<(u32, String)>,
- legacy_loader: LegacyBlobLoader,
- db_path: PathBuf,
-}
-
-impl VpnProfileStore {
- /// Creates a new VpnProfileStore instance.
- pub fn new_native_binder(path: &Path) -> Strong<dyn IVpnProfileStore> {
- let mut db_path = path.to_path_buf();
- db_path.push("vpnprofilestore.sqlite");
-
- let result = Self { db_path, async_task: Default::default() };
- result.init_shelf(path);
- BnVpnProfileStore::new_binder(result, BinderFeatures::default())
- }
-
- fn open_db(&self) -> Result<DB> {
- DB::new(&self.db_path).context("In open_db: Failed to open db.")
- }
-
- fn get(&self, alias: &str) -> Result<Vec<u8>> {
- let mut db = self.open_db().context("In get.")?;
- let calling_uid = ThreadState::get_calling_uid();
-
- if let Some(profile) =
- db.get(calling_uid, alias).context("In get: Trying to load profile from DB.")?
- {
- return Ok(profile);
- }
- if self.get_legacy(calling_uid, alias).context("In get: Trying to migrate legacy blob.")? {
- // If we were able to migrate a legacy blob try again.
- if let Some(profile) =
- db.get(calling_uid, alias).context("In get: Trying to load profile from DB.")?
- {
- return Ok(profile);
- }
- }
- Err(Error::not_found()).context("In get: No such profile.")
- }
-
- fn put(&self, alias: &str, profile: &[u8]) -> Result<()> {
- let calling_uid = ThreadState::get_calling_uid();
- // In order to make sure that we don't have stale legacy profiles, make sure they are
- // migrated before replacing them.
- let _ = self.get_legacy(calling_uid, alias);
- let mut db = self.open_db().context("In put.")?;
- db.put(calling_uid, alias, profile).context("In put: Trying to insert profile into DB.")
- }
-
- fn remove(&self, alias: &str) -> Result<()> {
- let calling_uid = ThreadState::get_calling_uid();
- let mut db = self.open_db().context("In remove.")?;
- // In order to make sure that we don't have stale legacy profiles, make sure they are
- // migrated before removing them.
- let _ = self.get_legacy(calling_uid, alias);
- let removed = db
- .remove(calling_uid, alias)
- .context("In remove: Trying to remove profile from DB.")?;
- if removed {
- Ok(())
- } else {
- Err(Error::not_found()).context("In remove: No such profile.")
- }
- }
-
- fn list(&self, prefix: &str) -> Result<Vec<String>> {
- let mut db = self.open_db().context("In list.")?;
- let calling_uid = ThreadState::get_calling_uid();
- let mut result = self.list_legacy(calling_uid).context("In list.")?;
- result
- .append(&mut db.list(calling_uid).context("In list: Trying to get list of profiles.")?);
- result = result.into_iter().filter(|s| s.starts_with(prefix)).collect();
- result.sort_unstable();
- result.dedup();
- Ok(result)
- }
-
- fn init_shelf(&self, path: &Path) {
- let mut db_path = path.to_path_buf();
- self.async_task.queue_hi(move |shelf| {
- let legacy_loader = LegacyBlobLoader::new(&db_path);
- db_path.push("vpnprofilestore.sqlite");
-
- shelf.put(AsyncState { legacy_loader, db_path, recently_imported: Default::default() });
- })
- }
-
- fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Result<T>
- where
- F: FnOnce(&mut AsyncState) -> Result<T> + Send + 'static,
- {
- let (sender, receiver) = std::sync::mpsc::channel::<Result<T>>();
- self.async_task.queue_hi(move |shelf| {
- let state = shelf.get_downcast_mut::<AsyncState>().expect("Failed to get shelf.");
- sender.send(f(state)).expect("Failed to send result.");
- });
- receiver.recv().context("In do_serialized: Failed to receive result.")?
- }
-
- fn list_legacy(&self, uid: u32) -> Result<Vec<String>> {
- self.do_serialized(move |state| {
- state
- .legacy_loader
- .list_vpn_profiles(uid)
- .context("Trying to list legacy vpn profiles.")
- })
- .context("In list_legacy.")
- }
-
- fn get_legacy(&self, uid: u32, alias: &str) -> Result<bool> {
- let alias = alias.to_string();
- self.do_serialized(move |state| {
- if state.recently_imported.contains(&(uid, alias.clone())) {
- return Ok(true);
- }
- let mut db = DB::new(&state.db_path).context("In open_db: Failed to open db.")?;
- let migrated =
- Self::migrate_one_legacy_profile(uid, &alias, &state.legacy_loader, &mut db)
- .context("Trying to migrate legacy vpn profile.")?;
- if migrated {
- state.recently_imported.insert((uid, alias));
- }
- Ok(migrated)
- })
- .context("In get_legacy.")
- }
-
- fn migrate_one_legacy_profile(
- uid: u32,
- alias: &str,
- legacy_loader: &LegacyBlobLoader,
- db: &mut DB,
- ) -> Result<bool> {
- let blob = legacy_loader
- .read_vpn_profile(uid, alias)
- .context("In migrate_one_legacy_profile: Trying to read legacy vpn profile.")?;
- if let Some(profile) = blob {
- db.put(uid, alias, &profile)
- .context("In migrate_one_legacy_profile: Trying to insert profile into DB.")?;
- legacy_loader
- .remove_vpn_profile(uid, alias)
- .context("In migrate_one_legacy_profile: Trying to delete legacy profile.")?;
- Ok(true)
- } else {
- Ok(false)
- }
- }
-}
-
-impl binder::Interface for VpnProfileStore {}
-
-impl IVpnProfileStore for VpnProfileStore {
- fn get(&self, alias: &str) -> BinderResult<Vec<u8>> {
- let _wp = wd::watch_millis("IVpnProfileStore::get", 500);
- map_or_log_err(self.get(alias), Ok)
- }
- fn put(&self, alias: &str, profile: &[u8]) -> BinderResult<()> {
- let _wp = wd::watch_millis("IVpnProfileStore::put", 500);
- map_or_log_err(self.put(alias, profile), Ok)
- }
- fn remove(&self, alias: &str) -> BinderResult<()> {
- let _wp = wd::watch_millis("IVpnProfileStore::remove", 500);
- map_or_log_err(self.remove(alias), Ok)
- }
- fn list(&self, prefix: &str) -> BinderResult<Vec<String>> {
- let _wp = wd::watch_millis("IVpnProfileStore::list", 500);
- map_or_log_err(self.list(prefix), Ok)
- }
-}
-
-#[cfg(test)]
-mod db_test {
- use super::*;
- use keystore2_test_utils::TempDir;
- use std::sync::Arc;
- use std::thread;
- use std::time::Duration;
- use std::time::Instant;
-
- static TEST_ALIAS: &str = &"test_alias";
- static TEST_BLOB1: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 0];
- static TEST_BLOB2: &[u8] = &[2, 2, 3, 4, 5, 6, 7, 8, 9, 0];
- static TEST_BLOB3: &[u8] = &[3, 2, 3, 4, 5, 6, 7, 8, 9, 0];
- static TEST_BLOB4: &[u8] = &[3, 2, 3, 4, 5, 6, 7, 8, 9, 0];
-
- #[test]
- fn test_profile_db() {
- let test_dir = TempDir::new("profiledb_test_").expect("Failed to create temp dir.");
- let mut db =
- DB::new(&test_dir.build().push("vpnprofile.sqlite")).expect("Failed to open database.");
-
- // Insert three profiles for owner 2.
- db.put(2, "test1", TEST_BLOB1).expect("Failed to insert test1.");
- db.put(2, "test2", TEST_BLOB2).expect("Failed to insert test2.");
- db.put(2, "test3", TEST_BLOB3).expect("Failed to insert test3.");
-
- // Check list returns all inserted aliases.
- assert_eq!(
- vec!["test1".to_string(), "test2".to_string(), "test3".to_string(),],
- db.list(2).expect("Failed to list profiles.")
- );
-
- // There should be no profiles for owner 1.
- assert_eq!(Vec::<String>::new(), db.list(1).expect("Failed to list profiles."));
-
- // Check the content of the three entries.
- assert_eq!(
- Some(TEST_BLOB1),
- db.get(2, "test1").expect("Failed to get profile.").as_deref()
- );
- assert_eq!(
- Some(TEST_BLOB2),
- db.get(2, "test2").expect("Failed to get profile.").as_deref()
- );
- assert_eq!(
- Some(TEST_BLOB3),
- db.get(2, "test3").expect("Failed to get profile.").as_deref()
- );
-
- // Remove test2 and check that it is no longer retrievable.
- assert!(db.remove(2, "test2").expect("Failed to remove profile."));
- assert!(db.get(2, "test2").expect("Failed to get profile.").is_none());
-
- // test2 should now no longer be in the list.
- assert_eq!(
- vec!["test1".to_string(), "test3".to_string(),],
- db.list(2).expect("Failed to list profiles.")
- );
-
- // Put on existing alias replaces it.
- // Verify test1 is TEST_BLOB1.
- assert_eq!(
- Some(TEST_BLOB1),
- db.get(2, "test1").expect("Failed to get profile.").as_deref()
- );
- db.put(2, "test1", TEST_BLOB4).expect("Failed to replace test1.");
- // Verify test1 is TEST_BLOB4.
- assert_eq!(
- Some(TEST_BLOB4),
- db.get(2, "test1").expect("Failed to get profile.").as_deref()
- );
- }
-
- #[test]
- fn concurrent_vpn_profile_test() -> Result<()> {
- let temp_dir = Arc::new(
- TempDir::new("concurrent_vpn_profile_test_").expect("Failed to create temp dir."),
- );
-
- let db_path = temp_dir.build().push("vpnprofile.sqlite").to_owned();
-
- let test_begin = Instant::now();
-
- let mut db = DB::new(&db_path).expect("Failed to open database.");
- const PROFILE_COUNT: u32 = 5000u32;
- const PROFILE_DB_COUNT: u32 = 5000u32;
-
- let mode: String = db.conn.pragma_query_value(None, "journal_mode", |row| row.get(0))?;
- assert_eq!(mode, "wal");
-
- let mut actual_profile_count = PROFILE_COUNT;
- // First insert PROFILE_COUNT profiles.
- for count in 0..PROFILE_COUNT {
- if Instant::now().duration_since(test_begin) >= Duration::from_secs(15) {
- actual_profile_count = count;
- break;
- }
- let alias = format!("test_alias_{}", count);
- db.put(1, &alias, TEST_BLOB1).expect("Failed to add profile (1).");
- }
-
- // Insert more keys from a different thread and into a different namespace.
- let db_path1 = db_path.clone();
- let handle1 = thread::spawn(move || {
- let mut db = DB::new(&db_path1).expect("Failed to open database.");
-
- for count in 0..actual_profile_count {
- if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
- return;
- }
- let alias = format!("test_alias_{}", count);
- db.put(2, &alias, TEST_BLOB2).expect("Failed to add profile (2).");
- }
-
- // Then delete them again.
- for count in 0..actual_profile_count {
- if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
- return;
- }
- let alias = format!("test_alias_{}", count);
- db.remove(2, &alias).expect("Remove Failed (2).");
- }
- });
-
- // And start deleting the first set of profiles.
- let db_path2 = db_path.clone();
- let handle2 = thread::spawn(move || {
- let mut db = DB::new(&db_path2).expect("Failed to open database.");
-
- for count in 0..actual_profile_count {
- if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
- return;
- }
- let alias = format!("test_alias_{}", count);
- db.remove(1, &alias).expect("Remove Failed (1).");
- }
- });
-
- // While a lot of inserting and deleting is going on we have to open database connections
- // successfully and then insert and delete a specific profile.
- let db_path3 = db_path.clone();
- let handle3 = thread::spawn(move || {
- for _count in 0..PROFILE_DB_COUNT {
- if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
- return;
- }
- let mut db = DB::new(&db_path3).expect("Failed to open database.");
-
- db.put(3, &TEST_ALIAS, TEST_BLOB3).expect("Failed to add profile (3).");
-
- db.remove(3, &TEST_ALIAS).expect("Remove failed (3).");
- }
- });
-
- // While thread 3 is inserting and deleting TEST_ALIAS, we try to get the alias.
- // This may yield an entry or none, but it must not fail.
- let handle4 = thread::spawn(move || {
- for _count in 0..PROFILE_DB_COUNT {
- if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
- return;
- }
- let mut db = DB::new(&db_path).expect("Failed to open database.");
-
- // This may return Some or None but it must not fail.
- db.get(3, &TEST_ALIAS).expect("Failed to get profile (4).");
- }
- });
-
- handle1.join().expect("Thread 1 panicked.");
- handle2.join().expect("Thread 2 panicked.");
- handle3.join().expect("Thread 3 panicked.");
- handle4.join().expect("Thread 4 panicked.");
-
- Ok(())
- }
-}
diff --git a/ondevice-signing/Android.bp b/ondevice-signing/Android.bp
index 432e585..5219240 100644
--- a/ondevice-signing/Android.bp
+++ b/ondevice-signing/Android.bp
@@ -11,8 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-// List of clang-tidy checks that are reported as errors.
-// Please keep this list ordered lexicographically.
package {
// See: http://go/android-license-faq
@@ -23,6 +21,8 @@
default_applicable_licenses: ["system_security_license"],
}
+// List of clang-tidy checks that are reported as errors.
+// Please keep this list ordered lexicographically.
tidy_errors = [
"cert-err34-c",
"google-default-arguments",
@@ -84,6 +84,7 @@
srcs: [
"odsign_main.cpp",
"CertUtils.cpp",
+ "FakeCompOs.cpp",
"KeystoreKey.cpp",
"KeystoreHmacKey.cpp",
"VerityUtils.cpp",
@@ -94,6 +95,7 @@
static_libs: [
"libc++fs",
"lib_odsign_proto",
+ "lib_compos_proto",
],
shared_libs: [
@@ -105,7 +107,7 @@
"libcrypto_utils",
"libfsverity",
"liblogwrap",
- "libprotobuf-cpp-full",
+ "libprotobuf-cpp-lite",
"libutils",
],
}
diff --git a/ondevice-signing/CertUtils.cpp b/ondevice-signing/CertUtils.cpp
index 9867f62..d67bea6 100644
--- a/ondevice-signing/CertUtils.cpp
+++ b/ondevice-signing/CertUtils.cpp
@@ -26,40 +26,58 @@
#include <openssl/x509.h>
#include <openssl/x509v3.h>
-#include <fcntl.h>
+#include <optional>
#include <vector>
#include "KeyConstants.h"
-const char kBasicConstraints[] = "CA:TRUE";
-const char kKeyUsage[] = "critical,keyCertSign,cRLSign,digitalSignature";
-const char kSubjectKeyIdentifier[] = "hash";
-const char kAuthorityKeyIdentifier[] = "keyid:always";
+// Common properties for all of our certificates.
constexpr int kCertLifetimeSeconds = 10 * 365 * 24 * 60 * 60;
+const char* const kIssuerCountry = "US";
+const char* const kIssuerOrg = "Android";
-using android::base::Result;
-// using android::base::ErrnoError;
+using android::base::ErrnoError;
using android::base::Error;
+using android::base::Result;
-static bool add_ext(X509* cert, int nid, const char* value) {
- size_t len = strlen(value) + 1;
- std::vector<char> mutableValue(value, value + len);
- X509V3_CTX context;
+static Result<bssl::UniquePtr<X509>> loadX509(const std::string& path) {
+ X509* rawCert;
+ auto f = fopen(path.c_str(), "re");
+ if (f == nullptr) {
+ return Error() << "Failed to open " << path;
+ }
+ if (!d2i_X509_fp(f, &rawCert)) {
+ fclose(f);
+ return Error() << "Unable to decode x509 cert at " << path;
+ }
+ bssl::UniquePtr<X509> cert(rawCert);
- X509V3_set_ctx_nodb(&context);
+ fclose(f);
+ return cert;
+}
- X509V3_set_ctx(&context, cert, cert, nullptr, nullptr, 0);
- X509_EXTENSION* ex = X509V3_EXT_nconf_nid(nullptr, &context, nid, mutableValue.data());
+static X509V3_CTX makeContext(X509* issuer, X509* subject) {
+ X509V3_CTX context = {};
+ X509V3_set_ctx(&context, issuer, subject, nullptr, nullptr, 0);
+ return context;
+}
+
+static bool add_ext(X509V3_CTX* context, X509* cert, int nid, const char* value) {
+ bssl::UniquePtr<X509_EXTENSION> ex(X509V3_EXT_nconf_nid(nullptr, context, nid, value));
if (!ex) {
return false;
}
- X509_add_ext(cert, ex, -1);
- X509_EXTENSION_free(ex);
+ X509_add_ext(cert, ex.get(), -1);
return true;
}
-Result<bssl::UniquePtr<RSA>> getRsa(const std::vector<uint8_t>& publicKey) {
+static void addNameEntry(X509_NAME* name, const char* field, const char* value) {
+ X509_NAME_add_entry_by_txt(name, field, MBSTRING_ASC,
+ reinterpret_cast<const unsigned char*>(value), -1, -1, 0);
+}
+
+static Result<bssl::UniquePtr<RSA>> getRsaFromModulus(const std::vector<uint8_t>& publicKey) {
bssl::UniquePtr<BIGNUM> n(BN_new());
bssl::UniquePtr<BIGNUM> e(BN_new());
bssl::UniquePtr<RSA> rsaPubkey(RSA_new());
@@ -75,29 +93,24 @@
return rsaPubkey;
}
-Result<void> verifySignature(const std::string& message, const std::string& signature,
- const std::vector<uint8_t>& publicKey) {
- auto rsaKey = getRsa(publicKey);
- if (!rsaKey.ok()) {
- return rsaKey.error();
+static Result<bssl::UniquePtr<RSA>>
+getRsaFromRsaPublicKey(const std::vector<uint8_t>& rsaPublicKey) {
+ auto derBytes = rsaPublicKey.data();
+ bssl::UniquePtr<RSA> rsaKey(d2i_RSAPublicKey(nullptr, &derBytes, rsaPublicKey.size()));
+ if (rsaKey.get() == nullptr) {
+ return Error() << "Failed to parse RsaPublicKey";
}
- uint8_t hashBuf[SHA256_DIGEST_LENGTH];
- SHA256(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(message.c_str())),
- message.length(), hashBuf);
-
- bool success = RSA_verify(NID_sha256, hashBuf, sizeof(hashBuf),
- (const uint8_t*)signature.c_str(), signature.length(), rsaKey->get());
-
- if (!success) {
- return Error() << "Failed to verify signature.";
+ if (derBytes != rsaPublicKey.data() + rsaPublicKey.size()) {
+ return Error() << "Key has unexpected trailing data";
}
- return {};
+
+ return rsaKey;
}
-static Result<bssl::UniquePtr<EVP_PKEY>> toRsaPkey(const std::vector<uint8_t>& publicKey) {
+static Result<bssl::UniquePtr<EVP_PKEY>> modulusToRsaPkey(const std::vector<uint8_t>& publicKey) {
// "publicKey" corresponds to the raw public key bytes - need to create
// a new RSA key with the correct exponent.
- auto rsaPubkey = getRsa(publicKey);
+ auto rsaPubkey = getRsaFromModulus(publicKey);
if (!rsaPubkey.ok()) {
return rsaPubkey.error();
}
@@ -109,47 +122,76 @@
return public_key;
}
-Result<void> createSelfSignedCertificate(
- const std::vector<uint8_t>& publicKey,
- const std::function<Result<std::string>(const std::string&)>& signFunction,
- const std::string& path) {
+static Result<bssl::UniquePtr<EVP_PKEY>>
+rsaPublicKeyToRsaPkey(const std::vector<uint8_t>& rsaPublicKey) {
+ // rsaPublicKey contains both modulus and exponent, DER-encoded.
+ auto rsaKey = getRsaFromRsaPublicKey(rsaPublicKey);
+ if (!rsaKey.ok()) {
+ return rsaKey.error();
+ }
+
+ bssl::UniquePtr<EVP_PKEY> public_key(EVP_PKEY_new());
+ if (!EVP_PKEY_assign_RSA(public_key.get(), rsaKey->release())) {
+ return Error() << "Failed to assign key";
+ }
+ return public_key;
+}
+
+Result<void> verifySignature(const std::string& message, const std::string& signature,
+ const std::vector<uint8_t>& publicKey) {
+ auto rsaKey = getRsaFromModulus(publicKey);
+ if (!rsaKey.ok()) {
+ return rsaKey.error();
+ }
+ uint8_t hashBuf[SHA256_DIGEST_LENGTH];
+ SHA256(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(message.c_str())),
+ message.length(), hashBuf);
+
+ bool success = RSA_verify(NID_sha256, hashBuf, sizeof(hashBuf),
+ (const uint8_t*)signature.c_str(), signature.length(), rsaKey->get());
+
+ if (!success) {
+ return Error() << "Failed to verify signature";
+ }
+ return {};
+}
+
+Result<void> verifyRsaPublicKeySignature(const std::string& message, const std::string& signature,
+ const std::vector<uint8_t>& rsaPublicKey) {
+ auto rsaKey = getRsaFromRsaPublicKey(rsaPublicKey);
+ if (!rsaKey.ok()) {
+ return rsaKey.error();
+ }
+
+ uint8_t hashBuf[SHA256_DIGEST_LENGTH];
+ SHA256(reinterpret_cast<const uint8_t*>(message.data()), message.size(), hashBuf);
+
+ bool success = RSA_verify(NID_sha256, hashBuf, sizeof(hashBuf),
+ reinterpret_cast<const uint8_t*>(signature.data()), signature.size(),
+ rsaKey->get());
+ if (!success) {
+ return Error() << "Failed to verify signature";
+ }
+ return {};
+}
+
+static Result<void> createCertificate(
+ const CertSubject& subject, EVP_PKEY* publicKey,
+ const std::function<android::base::Result<std::string>(const std::string&)>& signFunction,
+ const std::optional<std::string>& issuerCertPath, const std::string& path) {
+
+ // If an issuer cert is specified, we are signing someone else's key.
+ // Otherwise we are signing our key - a self-signed certificate.
+ bool selfSigned = !issuerCertPath;
+
bssl::UniquePtr<X509> x509(X509_new());
if (!x509) {
return Error() << "Unable to allocate x509 container";
}
X509_set_version(x509.get(), 2);
-
- ASN1_INTEGER_set(X509_get_serialNumber(x509.get()), 1);
X509_gmtime_adj(X509_get_notBefore(x509.get()), 0);
X509_gmtime_adj(X509_get_notAfter(x509.get()), kCertLifetimeSeconds);
-
- auto public_key = toRsaPkey(publicKey);
- if (!public_key.ok()) {
- return public_key.error();
- }
-
- if (!X509_set_pubkey(x509.get(), public_key.value().get())) {
- return Error() << "Unable to set x509 public key";
- }
-
- X509_NAME* name = X509_get_subject_name(x509.get());
- if (!name) {
- return Error() << "Unable to get x509 subject name";
- }
- X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC,
- reinterpret_cast<const unsigned char*>("US"), -1, -1, 0);
- X509_NAME_add_entry_by_txt(name, "O", MBSTRING_ASC,
- reinterpret_cast<const unsigned char*>("Android"), -1, -1, 0);
- X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
- reinterpret_cast<const unsigned char*>("ODS"), -1, -1, 0);
- if (!X509_set_issuer_name(x509.get(), name)) {
- return Error() << "Unable to set x509 issuer name";
- }
-
- add_ext(x509.get(), NID_basic_constraints, kBasicConstraints);
- add_ext(x509.get(), NID_key_usage, kKeyUsage);
- add_ext(x509.get(), NID_subject_key_identifier, kSubjectKeyIdentifier);
- add_ext(x509.get(), NID_authority_key_identifier, kAuthorityKeyIdentifier);
+ ASN1_INTEGER_set(X509_get_serialNumber(x509.get()), subject.serialNumber);
bssl::UniquePtr<X509_ALGOR> algor(X509_ALGOR_new());
if (!algor ||
@@ -159,6 +201,57 @@
return Error() << "Unable to set x509 signature algorithm";
}
+ if (!X509_set_pubkey(x509.get(), publicKey)) {
+ return Error() << "Unable to set x509 public key";
+ }
+
+ X509_NAME* subjectName = X509_get_subject_name(x509.get());
+ if (!subjectName) {
+ return Error() << "Unable to get x509 subject name";
+ }
+ addNameEntry(subjectName, "C", kIssuerCountry);
+ addNameEntry(subjectName, "O", kIssuerOrg);
+ addNameEntry(subjectName, "CN", subject.commonName);
+
+ if (selfSigned) {
+ if (!X509_set_issuer_name(x509.get(), subjectName)) {
+ return Error() << "Unable to set x509 issuer name";
+ }
+ } else {
+ X509_NAME* issuerName = X509_get_issuer_name(x509.get());
+ if (!issuerName) {
+ return Error() << "Unable to get x509 issuer name";
+ }
+ addNameEntry(issuerName, "C", kIssuerCountry);
+ addNameEntry(issuerName, "O", kIssuerOrg);
+ addNameEntry(issuerName, "CN", kRootSubject.commonName);
+ }
+
+ // Beware: context contains a pointer to issuerCert, so we need to keep it alive.
+ bssl::UniquePtr<X509> issuerCert;
+ X509V3_CTX context;
+
+ if (selfSigned) {
+ context = makeContext(x509.get(), x509.get());
+ } else {
+ auto certStatus = loadX509(*issuerCertPath);
+ if (!certStatus.ok()) {
+ return Error() << "Unable to load issuer cert: " << certStatus.error();
+ }
+ issuerCert = std::move(certStatus.value());
+ context = makeContext(issuerCert.get(), x509.get());
+ }
+
+ // If it's a self-signed cert we use it for signing certs, otherwise only for signing data.
+ const char* basicConstraints = selfSigned ? "CA:TRUE" : "CA:FALSE";
+ const char* keyUsage =
+ selfSigned ? "critical,keyCertSign,cRLSign,digitalSignature" : "critical,digitalSignature";
+
+ add_ext(&context, x509.get(), NID_basic_constraints, basicConstraints);
+ add_ext(&context, x509.get(), NID_key_usage, keyUsage);
+ add_ext(&context, x509.get(), NID_subject_key_identifier, "hash");
+ add_ext(&context, x509.get(), NID_authority_key_identifier, "keyid:always");
+
// Get the data to be signed
unsigned char* to_be_signed_buf(nullptr);
size_t to_be_signed_length = i2d_re_X509_tbs(x509.get(), &to_be_signed_buf);
@@ -177,14 +270,40 @@
auto f = fopen(path.c_str(), "wbe");
if (f == nullptr) {
- return Error() << "Failed to open " << path;
+ return ErrnoError() << "Failed to open " << path;
}
i2d_X509_fp(f, x509.get());
- fclose(f);
+ if (fclose(f) != 0) {
+ return ErrnoError() << "Failed to close " << path;
+ }
return {};
}
+Result<void> createSelfSignedCertificate(
+ const std::vector<uint8_t>& publicKey,
+ const std::function<Result<std::string>(const std::string&)>& signFunction,
+ const std::string& path) {
+ auto rsa_pkey = modulusToRsaPkey(publicKey);
+ if (!rsa_pkey.ok()) {
+ return rsa_pkey.error();
+ }
+
+ return createCertificate(kRootSubject, rsa_pkey.value().get(), signFunction, {}, path);
+}
+
+android::base::Result<void> createLeafCertificate(
+ const CertSubject& subject, const std::vector<uint8_t>& rsaPublicKey,
+ const std::function<android::base::Result<std::string>(const std::string&)>& signFunction,
+ const std::string& issuerCertPath, const std::string& path) {
+ auto rsa_pkey = rsaPublicKeyToRsaPkey(rsaPublicKey);
+ if (!rsa_pkey.ok()) {
+ return rsa_pkey.error();
+ }
+
+ return createCertificate(subject, rsa_pkey.value().get(), signFunction, issuerCertPath, path);
+}
+
Result<std::vector<uint8_t>> extractPublicKey(EVP_PKEY* pkey) {
if (pkey == nullptr) {
return Error() << "Failed to extract public key from x509 cert";
@@ -225,22 +344,6 @@
return extractPublicKey(decoded_pkey.get());
}
-static Result<bssl::UniquePtr<X509>> loadX509(const std::string& path) {
- X509* rawCert;
- auto f = fopen(path.c_str(), "re");
- if (f == nullptr) {
- return Error() << "Failed to open " << path;
- }
- if (!d2i_X509_fp(f, &rawCert)) {
- fclose(f);
- return Error() << "Unable to decode x509 cert at " << path;
- }
- bssl::UniquePtr<X509> cert(rawCert);
-
- fclose(f);
- return cert;
-}
-
Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::string& path) {
auto cert = loadX509(path);
if (!cert.ok()) {
@@ -249,9 +352,26 @@
return extractPublicKey(X509_get_pubkey(cert.value().get()));
}
+Result<std::vector<uint8_t>> extractRsaPublicKey(EVP_PKEY* pkey) {
+ RSA* rsa = EVP_PKEY_get0_RSA(pkey);
+ if (rsa == nullptr) {
+ return Error() << "The public key is not an RSA key";
+ }
+
+ uint8_t* out = nullptr;
+ int size = i2d_RSAPublicKey(rsa, &out);
+ if (size < 0 || !out) {
+ return Error() << "Failed to convert to RSAPublicKey";
+ }
+
+ bssl::UniquePtr<uint8_t> buffer(out);
+ std::vector<uint8_t> result(out, out + size);
+ return result;
+}
+
Result<CertInfo> verifyAndExtractCertInfoFromX509(const std::string& path,
const std::vector<uint8_t>& publicKey) {
- auto public_key = toRsaPkey(publicKey);
+ auto public_key = modulusToRsaPkey(publicKey);
if (!public_key.ok()) {
return public_key.error();
}
@@ -268,7 +388,7 @@
}
bssl::UniquePtr<EVP_PKEY> pkey(X509_get_pubkey(x509));
- auto subject_key = extractPublicKey(pkey.get());
+ auto subject_key = extractRsaPublicKey(pkey.get());
if (!subject_key.ok()) {
return subject_key.error();
}
@@ -292,7 +412,8 @@
return cert_info;
}
-Result<std::vector<uint8_t>> createPkcs7(const std::vector<uint8_t>& signed_digest) {
+Result<std::vector<uint8_t>> createPkcs7(const std::vector<uint8_t>& signed_digest,
+ const CertSubject& signer) {
CBB out, outer_seq, wrapped_seq, seq, digest_algos_set, digest_algo, null;
CBB content_info, issuer_and_serial, signer_infos, signer_info, sign_algo, signature;
uint8_t *pkcs7_data, *name_der;
@@ -300,19 +421,20 @@
BIGNUM* serial = BN_new();
int sig_nid = NID_rsaEncryption;
- X509_NAME* name = X509_NAME_new();
- if (!name) {
- return Error() << "Unable to get x509 subject name";
+ X509_NAME* issuer_name = X509_NAME_new();
+ if (!issuer_name) {
+ return Error() << "Unable to create x509 subject name";
}
- X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC,
- reinterpret_cast<const unsigned char*>("US"), -1, -1, 0);
- X509_NAME_add_entry_by_txt(name, "O", MBSTRING_ASC,
- reinterpret_cast<const unsigned char*>("Android"), -1, -1, 0);
- X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
- reinterpret_cast<const unsigned char*>("ODS"), -1, -1, 0);
+ X509_NAME_add_entry_by_txt(issuer_name, "C", MBSTRING_ASC,
+ reinterpret_cast<const unsigned char*>(kIssuerCountry), -1, -1, 0);
+ X509_NAME_add_entry_by_txt(issuer_name, "O", MBSTRING_ASC,
+ reinterpret_cast<const unsigned char*>(kIssuerOrg), -1, -1, 0);
+ X509_NAME_add_entry_by_txt(issuer_name, "CN", MBSTRING_ASC,
+ reinterpret_cast<const unsigned char*>(kRootSubject.commonName), -1,
+ -1, 0);
- BN_set_word(serial, 1);
- name_der_len = i2d_X509_NAME(name, &name_der);
+ BN_set_word(serial, signer.serialNumber);
+ name_der_len = i2d_X509_NAME(issuer_name, &name_der);
CBB_init(&out, 1024);
if (!CBB_add_asn1(&out, &outer_seq, CBS_ASN1_SEQUENCE) ||
diff --git a/ondevice-signing/CertUtils.h b/ondevice-signing/CertUtils.h
index d202fbc..fe703fa 100644
--- a/ondevice-signing/CertUtils.h
+++ b/ondevice-signing/CertUtils.h
@@ -16,18 +16,43 @@
#pragma once
+#include <functional>
+#include <string>
+#include <vector>
+
#include <android-base/result.h>
+// Information extracted from a certificate.
struct CertInfo {
std::string subjectCn;
- std::vector<uint8_t> subjectKey;
+ std::vector<uint8_t> subjectRsaPublicKey;
};
+// Subjects of certificates we issue.
+struct CertSubject {
+ const char* commonName;
+ unsigned serialNumber;
+};
+
+// These are all the certificates we ever sign (the first one being our
+// self-signed cert). We shouldn't really re-use serial numbers for different
+// certificates for the same subject, but we do; only one should be in use at a
+// time, though.
+inline const CertSubject kRootSubject{"ODS", 1};
+inline const CertSubject kCompOsSubject{"CompOs", 2};
+
android::base::Result<void> createSelfSignedCertificate(
const std::vector<uint8_t>& publicKey,
const std::function<android::base::Result<std::string>(const std::string&)>& signFunction,
const std::string& path);
-android::base::Result<std::vector<uint8_t>> createPkcs7(const std::vector<uint8_t>& signedData);
+
+android::base::Result<void> createLeafCertificate(
+ const CertSubject& subject, const std::vector<uint8_t>& publicKey,
+ const std::function<android::base::Result<std::string>(const std::string&)>& signFunction,
+ const std::string& issuerCertPath, const std::string& outPath);
+
+android::base::Result<std::vector<uint8_t>> createPkcs7(const std::vector<uint8_t>& signedData,
+ const CertSubject& signer);
android::base::Result<std::vector<uint8_t>>
extractPublicKeyFromX509(const std::vector<uint8_t>& x509);
@@ -41,3 +66,7 @@
android::base::Result<void> verifySignature(const std::string& message,
const std::string& signature,
const std::vector<uint8_t>& publicKey);
+
+android::base::Result<void> verifyRsaPublicKeySignature(const std::string& message,
+ const std::string& signature,
+ const std::vector<uint8_t>& rsaPublicKey);
diff --git a/ondevice-signing/FakeCompOs.cpp b/ondevice-signing/FakeCompOs.cpp
new file mode 100644
index 0000000..596d6e2
--- /dev/null
+++ b/ondevice-signing/FakeCompOs.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FakeCompOs.h"
+
+#include "CertUtils.h"
+#include "KeyConstants.h"
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/result.h>
+#include <android-base/scopeguard.h>
+
+#include <binder/IServiceManager.h>
+
+#include <openssl/nid.h>
+#include <openssl/rand.h>
+#include <openssl/rsa.h>
+#include <openssl/sha.h>
+
+using android::String16;
+
+using android::hardware::security::keymint::Algorithm;
+using android::hardware::security::keymint::Digest;
+using android::hardware::security::keymint::KeyParameter;
+using android::hardware::security::keymint::KeyParameterValue;
+using android::hardware::security::keymint::KeyPurpose;
+using android::hardware::security::keymint::PaddingMode;
+using android::hardware::security::keymint::SecurityLevel;
+using android::hardware::security::keymint::Tag;
+
+using android::system::keystore2::CreateOperationResponse;
+using android::system::keystore2::Domain;
+
+using android::base::Error;
+using android::base::Result;
+
+using android::binder::Status;
+
+// TODO: Allocate a namespace for CompOS
+const int64_t kCompOsNamespace = 101;
+
+Result<std::unique_ptr<FakeCompOs>>
+FakeCompOs::startInstance(const std::string& /*instanceImagePath*/) {
+ std::unique_ptr<FakeCompOs> compOs(new FakeCompOs);
+ auto init = compOs->initialize();
+ if (init.ok()) {
+ return compOs;
+ } else {
+ return init.error();
+ }
+}
+
+FakeCompOs::FakeCompOs() {}
+
+Result<void> FakeCompOs::initialize() {
+ auto sm = android::defaultServiceManager();
+ if (!sm) {
+ return Error() << "No ServiceManager";
+ }
+ auto rawService = sm->getService(String16("android.system.keystore2.IKeystoreService/default"));
+ if (!rawService) {
+ return Error() << "No Keystore service";
+ }
+ mService = interface_cast<android::system::keystore2::IKeystoreService>(rawService);
+ if (!mService) {
+ return Error() << "Bad Keystore service";
+ }
+
+ // TODO: We probably want SecurityLevel::SOFTWARE here, in the VM, but Keystore doesn't do it
+ auto status = mService->getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT, &mSecurityLevel);
+ if (!status.isOk()) {
+ return Error() << status;
+ }
+
+ return {};
+}
+
+Result<FakeCompOs::ByteVector> FakeCompOs::signData(const ByteVector& keyBlob,
+ const ByteVector& data) const {
+ KeyDescriptor descriptor;
+ descriptor.domain = Domain::BLOB;
+ descriptor.nspace = kCompOsNamespace;
+ descriptor.blob = keyBlob;
+
+ std::vector<KeyParameter> parameters;
+
+ {
+ KeyParameter algo;
+ algo.tag = Tag::ALGORITHM;
+ algo.value = KeyParameterValue::make<KeyParameterValue::algorithm>(Algorithm::RSA);
+ parameters.push_back(algo);
+
+ KeyParameter digest;
+ digest.tag = Tag::DIGEST;
+ digest.value = KeyParameterValue::make<KeyParameterValue::digest>(Digest::SHA_2_256);
+ parameters.push_back(digest);
+
+ KeyParameter padding;
+ padding.tag = Tag::PADDING;
+ padding.value = KeyParameterValue::make<KeyParameterValue::paddingMode>(
+ PaddingMode::RSA_PKCS1_1_5_SIGN);
+ parameters.push_back(padding);
+
+ KeyParameter purpose;
+ purpose.tag = Tag::PURPOSE;
+ purpose.value = KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::SIGN);
+ parameters.push_back(purpose);
+ }
+
+ Status status;
+
+ CreateOperationResponse response;
+ status = mSecurityLevel->createOperation(descriptor, parameters, /*forced=*/false, &response);
+ if (!status.isOk()) {
+ return Error() << "Failed to create operation: " << status;
+ }
+
+ auto operation = response.iOperation;
+ auto abort_guard = android::base::make_scope_guard([&] { operation->abort(); });
+
+ if (response.operationChallenge.has_value()) {
+ return Error() << "Key requires user authorization";
+ }
+
+ std::optional<ByteVector> signature;
+ status = operation->finish(data, {}, &signature);
+ if (!status.isOk()) {
+ return Error() << "Failed to sign data: " << status;
+ }
+
+ abort_guard.Disable();
+
+ if (!signature.has_value()) {
+ return Error() << "No signature received from keystore.";
+ }
+
+ return signature.value();
+}
+
+Result<void> FakeCompOs::loadAndVerifyKey(const ByteVector& keyBlob,
+ const ByteVector& publicKey) const {
+ // To verify the key is valid, we use it to sign some data, and then verify the signature using
+ // the supplied public key.
+
+ ByteVector data(32);
+ if (RAND_bytes(data.data(), data.size()) != 1) {
+ return Error() << "No random bytes";
+ }
+
+ auto signature = signData(keyBlob, data);
+ if (!signature.ok()) {
+ return signature.error();
+ }
+
+ std::string signatureString(reinterpret_cast<char*>(signature.value().data()),
+ signature.value().size());
+ std::string dataString(reinterpret_cast<char*>(data.data()), data.size());
+ return verifyRsaPublicKeySignature(dataString, signatureString, publicKey);
+}
diff --git a/ondevice-signing/FakeCompOs.h b/ondevice-signing/FakeCompOs.h
new file mode 100644
index 0000000..6c7a445
--- /dev/null
+++ b/ondevice-signing/FakeCompOs.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <android-base/result.h>
+
+#include <utils/StrongPointer.h>
+
+#include <android/system/keystore2/IKeystoreService.h>
+
+class FakeCompOs {
+ using IKeystoreService = ::android::system::keystore2::IKeystoreService;
+ using IKeystoreSecurityLevel = ::android::system::keystore2::IKeystoreSecurityLevel;
+ using KeyDescriptor = ::android::system::keystore2::KeyDescriptor;
+ using KeyMetadata = ::android::system::keystore2::KeyMetadata;
+
+ public:
+ using ByteVector = std::vector<uint8_t>;
+
+ static android::base::Result<std::unique_ptr<FakeCompOs>>
+ startInstance(const std::string& instanceImagePath);
+
+ android::base::Result<void> loadAndVerifyKey(const ByteVector& keyBlob,
+ const ByteVector& publicKey) const;
+
+ private:
+ FakeCompOs();
+
+ android::base::Result<void> initialize();
+
+ android::base::Result<ByteVector> signData(const ByteVector& keyBlob,
+ const ByteVector& data) const;
+
+ KeyDescriptor mDescriptor;
+ android::sp<IKeystoreService> mService;
+ android::sp<IKeystoreSecurityLevel> mSecurityLevel;
+};
diff --git a/ondevice-signing/KeystoreHmacKey.cpp b/ondevice-signing/KeystoreHmacKey.cpp
index a2208ce..09677d7 100644
--- a/ondevice-signing/KeystoreHmacKey.cpp
+++ b/ondevice-signing/KeystoreHmacKey.cpp
@@ -112,7 +112,7 @@
KeyMetadata metadata;
auto status = mSecurityLevel->generateKey(mDescriptor, {}, params, 0, {}, &metadata);
if (!status.isOk()) {
- return Error() << "Failed to create new HMAC key";
+ return Error() << "Failed to create new HMAC key: " << status;
}
return {};
@@ -209,8 +209,7 @@
auto status = mSecurityLevel->createOperation(mDescriptor, params, false, &opResponse);
if (!status.isOk()) {
- return Error() << "Failed to create keystore signing operation: "
- << status.serviceSpecificErrorCode();
+ return Error() << "Failed to create keystore signing operation: " << status;
}
auto operation = opResponse.iOperation;
@@ -240,8 +239,7 @@
auto status = mSecurityLevel->createOperation(mDescriptor, params, false, &opResponse);
if (!status.isOk()) {
- return Error() << "Failed to create keystore verification operation: "
- << status.serviceSpecificErrorCode();
+ return Error() << "Failed to create keystore verification operation: " << status;
}
auto operation = opResponse.iOperation;
@@ -260,3 +258,12 @@
return {};
}
+
+Result<void> KeystoreHmacKey::deleteKey() const {
+ auto status = mService->deleteKey(mDescriptor);
+ if (!status.isOk()) {
+ return Error() << "Failed to delete HMAC key: " << status;
+ }
+
+ return {};
+}
diff --git a/ondevice-signing/KeystoreHmacKey.h b/ondevice-signing/KeystoreHmacKey.h
index fbad0fd..782969a 100644
--- a/ondevice-signing/KeystoreHmacKey.h
+++ b/ondevice-signing/KeystoreHmacKey.h
@@ -37,6 +37,7 @@
android::base::Result<std::string> sign(const std::string& message) const;
android::base::Result<void> verify(const std::string& message,
const std::string& signature) const;
+ android::base::Result<void> deleteKey() const;
private:
android::base::Result<void> createKey();
diff --git a/ondevice-signing/KeystoreKey.cpp b/ondevice-signing/KeystoreKey.cpp
index 0951d92..03bb6d5 100644
--- a/ondevice-signing/KeystoreKey.cpp
+++ b/ondevice-signing/KeystoreKey.cpp
@@ -119,7 +119,7 @@
KeyMetadata metadata;
auto status = mSecurityLevel->generateKey(mDescriptor, {}, params, 0, {}, &metadata);
if (!status.isOk()) {
- return Error() << "Failed to create new key";
+ return Error() << "Failed to create new key: " << status;
}
// Extract the public key from the certificate, HMAC it and store the signature
@@ -172,11 +172,13 @@
auto key = getOrCreateKey();
if (!key.ok()) {
+ // Delete the HMAC key, just in case signing failed and we could recover by recreating it.
+ mHmacKey.deleteKey();
LOG(ERROR) << key.error().message();
return false;
}
mPublicKey = *key;
- LOG(ERROR) << "Initialized Keystore key.";
+ LOG(INFO) << "Initialized Keystore key.";
return true;
}
@@ -297,19 +299,13 @@
auto status = mSecurityLevel->createOperation(mDescriptor, opParameters, false, &opResponse);
if (!status.isOk()) {
- return Error() << "Failed to create keystore signing operation: "
- << status.serviceSpecificErrorCode();
+ return Error() << "Failed to create keystore signing operation: " << status;
}
auto operation = opResponse.iOperation;
- std::optional<std::vector<uint8_t>> out;
- status = operation->update({message.begin(), message.end()}, &out);
- if (!status.isOk()) {
- return Error() << "Failed to call keystore update operation.";
- }
-
+ std::optional<std::vector<uint8_t>> input{std::in_place, message.begin(), message.end()};
std::optional<std::vector<uint8_t>> signature;
- status = operation->finish({}, {}, &signature);
+ status = operation->finish(input, {}, &signature);
if (!status.isOk()) {
return Error() << "Failed to call keystore finish operation.";
}
diff --git a/ondevice-signing/KeystoreKey.h b/ondevice-signing/KeystoreKey.h
index 1257cbb..f2fbb70 100644
--- a/ondevice-signing/KeystoreKey.h
+++ b/ondevice-signing/KeystoreKey.h
@@ -20,7 +20,6 @@
#include <android-base/macros.h>
#include <android-base/result.h>
-#include <android-base/unique_fd.h>
#include <utils/StrongPointer.h>
diff --git a/ondevice-signing/TEST_MAPPING b/ondevice-signing/TEST_MAPPING
deleted file mode 100644
index 03b9b95..0000000
--- a/ondevice-signing/TEST_MAPPING
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "presubmit": [
- {
- "name": "odsign_e2e_tests"
- }
- ]
-}
diff --git a/ondevice-signing/VerityUtils.cpp b/ondevice-signing/VerityUtils.cpp
index 25f949c..2beb7eb 100644
--- a/ondevice-signing/VerityUtils.cpp
+++ b/ondevice-signing/VerityUtils.cpp
@@ -27,11 +27,13 @@
#include <android-base/logging.h>
#include <android-base/unique_fd.h>
+#include <asm/byteorder.h>
#include <libfsverity.h>
#include <linux/fsverity.h>
#include "CertUtils.h"
#include "SigningKey.h"
+#include "compos_signature.pb.h"
#define FS_VERITY_MAX_DIGEST_SIZE 64
@@ -40,24 +42,16 @@
using android::base::Result;
using android::base::unique_fd;
+using compos::proto::Signature;
+
static const char* kFsVerityInitPath = "/system/bin/fsverity_init";
+static const char* kSignatureExtension = ".signature";
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define cpu_to_le16(v) ((__force __le16)(uint16_t)(v))
-#define le16_to_cpu(v) ((__force uint16_t)(__le16)(v))
-#else
-#define cpu_to_le16(v) ((__force __le16)__builtin_bswap16(v))
-#define le16_to_cpu(v) (__builtin_bswap16((__force uint16_t)(v)))
-#endif
+static bool isSignatureFile(const std::filesystem::path& path) {
+ return path.extension().native() == kSignatureExtension;
+}
-struct fsverity_signed_digest {
- char magic[8]; /* must be "FSVerity" */
- __le16 digest_algorithm;
- __le16 digest_size;
- __u8 digest[];
-};
-
-static std::string toHex(std::span<uint8_t> data) {
+static std::string toHex(std::span<const uint8_t> data) {
std::stringstream ss;
for (auto it = data.begin(); it != data.end(); ++it) {
ss << std::setfill('0') << std::setw(2) << std::hex << static_cast<unsigned>(*it);
@@ -71,16 +65,11 @@
return 0;
}
-Result<std::vector<uint8_t>> createDigest(const std::string& path) {
+Result<std::vector<uint8_t>> createDigest(int fd) {
struct stat filestat;
- unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
- if (fd < 0) {
- return ErrnoError() << "Failed to open " << path;
- }
-
- int ret = stat(path.c_str(), &filestat);
+ int ret = fstat(fd, &filestat);
if (ret < 0) {
- return ErrnoError() << "Failed to stat " << path;
+ return ErrnoError() << "Failed to fstat";
}
struct libfsverity_merkle_tree_params params = {
.version = 1,
@@ -92,13 +81,26 @@
struct libfsverity_digest* digest;
ret = libfsverity_compute_digest(&fd, &read_callback, &params, &digest);
if (ret < 0) {
- return ErrnoError() << "Failed to compute fs-verity digest for " << path;
+ return ErrnoError() << "Failed to compute fs-verity digest";
}
- std::vector<uint8_t> digestVector(&digest->digest[0], &digest->digest[32]);
+ int expected_digest_size = libfsverity_get_digest_size(FS_VERITY_HASH_ALG_SHA256);
+ if (digest->digest_size != expected_digest_size) {
+ return Error() << "Digest does not have expected size: " << expected_digest_size
+ << " actual: " << digest->digest_size;
+ }
+ std::vector<uint8_t> digestVector(&digest->digest[0], &digest->digest[expected_digest_size]);
free(digest);
return digestVector;
}
+Result<std::vector<uint8_t>> createDigest(const std::string& path) {
+ unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
+ if (!fd.ok()) {
+ return ErrnoError() << "Unable to open";
+ }
+ return createDigest(fd.get());
+}
+
namespace {
template <typename T> struct DeleteAsPODArray {
void operator()(T* x) {
@@ -114,18 +116,18 @@
template <typename T>
static trailing_unique_ptr<T> makeUniqueWithTrailingData(size_t trailing_data_size) {
- uint8_t* memory = new uint8_t[sizeof(T*) + trailing_data_size];
+ uint8_t* memory = new uint8_t[sizeof(T) + trailing_data_size];
T* ptr = new (memory) T;
return trailing_unique_ptr<T>{ptr};
}
static Result<std::vector<uint8_t>> signDigest(const SigningKey& key,
const std::vector<uint8_t>& digest) {
- auto d = makeUniqueWithTrailingData<fsverity_signed_digest>(digest.size());
+ auto d = makeUniqueWithTrailingData<fsverity_formatted_digest>(digest.size());
memcpy(d->magic, "FSVerity", 8);
- d->digest_algorithm = cpu_to_le16(FS_VERITY_HASH_ALG_SHA256);
- d->digest_size = cpu_to_le16(digest.size());
+ d->digest_algorithm = __cpu_to_le16(FS_VERITY_HASH_ALG_SHA256);
+ d->digest_size = __cpu_to_le16(digest.size());
memcpy(d->digest, digest.data(), digest.size());
auto signed_digest = key.sign(std::string((char*)d.get(), sizeof(*d) + digest.size()));
@@ -136,10 +138,32 @@
return std::vector<uint8_t>(signed_digest->begin(), signed_digest->end());
}
+Result<void> enableFsVerity(int fd, std::span<uint8_t> pkcs7) {
+ struct fsverity_enable_arg arg = {.version = 1};
+
+ arg.sig_ptr = reinterpret_cast<uint64_t>(pkcs7.data());
+ arg.sig_size = pkcs7.size();
+ arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
+ arg.block_size = 4096;
+
+ int ret = ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
+
+ if (ret != 0) {
+ return ErrnoError() << "Failed to call FS_IOC_ENABLE_VERITY";
+ }
+
+ return {};
+}
+
Result<std::string> enableFsVerity(const std::string& path, const SigningKey& key) {
- auto digest = createDigest(path);
+ unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
+ if (!fd.ok()) {
+ return ErrnoError() << "Failed to open " << path;
+ }
+
+ auto digest = createDigest(fd.get());
if (!digest.ok()) {
- return digest.error();
+ return Error() << digest.error() << ": " << path;
}
auto signed_digest = signDigest(key, digest.value());
@@ -147,20 +171,14 @@
return signed_digest.error();
}
- auto pkcs7_data = createPkcs7(signed_digest.value());
+ auto pkcs7_data = createPkcs7(signed_digest.value(), kRootSubject);
+ if (!pkcs7_data.ok()) {
+ return pkcs7_data.error();
+ }
- struct fsverity_enable_arg arg = {.version = 1};
-
- arg.sig_ptr = (uint64_t)pkcs7_data->data();
- arg.sig_size = pkcs7_data->size();
- arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
- arg.block_size = 4096;
-
- unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
- int ret = ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
-
- if (ret != 0) {
- return ErrnoError() << "Failed to call FS_IOC_ENABLE_VERITY on " << path;
+ auto enabled = enableFsVerity(fd.get(), pkcs7_data.value());
+ if (!enabled.ok()) {
+ return Error() << enabled.error() << ": " << path;
}
// Return the root hash as a hex string
@@ -170,12 +188,10 @@
Result<std::map<std::string, std::string>> addFilesToVerityRecursive(const std::string& path,
const SigningKey& key) {
std::map<std::string, std::string> digests;
+
std::error_code ec;
-
auto it = std::filesystem::recursive_directory_iterator(path, ec);
- auto end = std::filesystem::recursive_directory_iterator();
-
- while (!ec && it != end) {
+ for (auto end = std::filesystem::recursive_directory_iterator(); it != end; it.increment(ec)) {
if (it->is_regular_file()) {
LOG(INFO) << "Adding " << it->path() << " to fs-verity...";
auto result = enableFsVerity(it->path(), key);
@@ -184,38 +200,40 @@
}
digests[it->path()] = *result;
}
- ++it;
}
if (ec) {
- return Error() << "Failed to iterate " << path << ": " << ec;
+ return Error() << "Failed to iterate " << path << ": " << ec.message();
}
return digests;
}
-Result<std::string> isFileInVerity(const std::string& path) {
- unsigned int flags;
+Result<std::string> isFileInVerity(int fd) {
+ auto d = makeUniqueWithTrailingData<fsverity_digest>(FS_VERITY_MAX_DIGEST_SIZE);
+ d->digest_size = FS_VERITY_MAX_DIGEST_SIZE;
+ auto ret = ioctl(fd, FS_IOC_MEASURE_VERITY, d.get());
+ if (ret < 0) {
+ if (errno == ENODATA) {
+ return Error() << "File is not in fs-verity";
+ } else {
+ return ErrnoError() << "Failed to FS_IOC_MEASURE_VERITY";
+ }
+ }
+ return toHex({&d->digest[0], &d->digest[d->digest_size]});
+}
+Result<std::string> isFileInVerity(const std::string& path) {
unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
- if (fd < 0) {
+ if (!fd.ok()) {
return ErrnoError() << "Failed to open " << path;
}
- int ret = ioctl(fd, FS_IOC_GETFLAGS, &flags);
- if (ret < 0) {
- return ErrnoError() << "Failed to FS_IOC_GETFLAGS for " << path;
- }
- if (!(flags & FS_VERITY_FL)) {
- return Error() << "File is not in fs-verity: " << path;
+ auto digest = isFileInVerity(fd);
+ if (!digest.ok()) {
+ return Error() << digest.error() << ": " << path;
}
- auto d = makeUniqueWithTrailingData<fsverity_digest>(FS_VERITY_MAX_DIGEST_SIZE);
- d->digest_size = FS_VERITY_MAX_DIGEST_SIZE;
- ret = ioctl(fd, FS_IOC_MEASURE_VERITY, d.get());
- if (ret < 0) {
- return ErrnoError() << "Failed to FS_IOC_MEASURE_VERITY for " << path;
- }
- return toHex({&d->digest[0], &d->digest[d->digest_size]});
+ return digest;
}
Result<std::map<std::string, std::string>> verifyAllFilesInVerity(const std::string& path) {
@@ -227,13 +245,19 @@
while (!ec && it != end) {
if (it->is_regular_file()) {
- // Verify
+ // Verify the file is in fs-verity
auto result = isFileInVerity(it->path());
if (!result.ok()) {
return result.error();
}
digests[it->path()] = *result;
- } // TODO reject other types besides dirs?
+ } else if (it->is_directory()) {
+ // These are fine to ignore
+ } else if (it->is_symlink()) {
+ return Error() << "Rejecting artifacts, symlink at " << it->path();
+ } else {
+ return Error() << "Rejecting artifacts, unexpected file type for " << it->path();
+ }
++it;
}
if (ec) {
@@ -243,10 +267,131 @@
return digests;
}
+Result<Signature> readSignature(const std::filesystem::path& signature_path) {
+ unique_fd fd(TEMP_FAILURE_RETRY(open(signature_path.c_str(), O_RDONLY | O_CLOEXEC)));
+ if (fd == -1) {
+ return ErrnoError();
+ }
+ Signature signature;
+ if (!signature.ParseFromFileDescriptor(fd.get())) {
+ return Error() << "Failed to parse";
+ }
+ return signature;
+}
+
+Result<std::map<std::string, std::string>>
+verifyAllFilesUsingCompOs(const std::string& directory_path,
+ const std::vector<uint8_t>& compos_key) {
+ std::map<std::string, std::string> new_digests;
+ std::vector<std::filesystem::path> signature_files;
+
+ std::error_code ec;
+ auto it = std::filesystem::recursive_directory_iterator(directory_path, ec);
+ for (auto end = std::filesystem::recursive_directory_iterator(); it != end; it.increment(ec)) {
+ auto& path = it->path();
+ if (it->is_regular_file()) {
+ if (isSignatureFile(path)) {
+ continue;
+ }
+
+ unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
+ if (!fd.ok()) {
+ return ErrnoError() << "Can't open " << path;
+ }
+
+ auto signature_path = path;
+ signature_path += kSignatureExtension;
+ auto signature = readSignature(signature_path);
+ if (!signature.ok()) {
+ return Error() << "Invalid signature " << signature_path << ": "
+ << signature.error();
+ }
+ signature_files.push_back(signature_path);
+
+ // Note that these values are not yet trusted.
+ auto& raw_digest = signature->digest();
+ auto& raw_signature = signature->signature();
+
+ // Re-construct the fsverity_formatted_digest that was signed, so we
+ // can verify the signature.
+ std::vector<uint8_t> buffer(sizeof(fsverity_formatted_digest) + raw_digest.size());
+ auto signed_data = new (buffer.data()) fsverity_formatted_digest;
+ memcpy(signed_data->magic, "FSVerity", sizeof signed_data->magic);
+ signed_data->digest_algorithm = __cpu_to_le16(FS_VERITY_HASH_ALG_SHA256);
+ signed_data->digest_size = __cpu_to_le16(raw_digest.size());
+ memcpy(signed_data->digest, raw_digest.data(), raw_digest.size());
+
+ // Make sure the signature matches the CompOs public key, and not some other
+ // fs-verity trusted key.
+ std::string to_verify(reinterpret_cast<char*>(buffer.data()), buffer.size());
+
+ auto verified = verifyRsaPublicKeySignature(to_verify, raw_signature, compos_key);
+ if (!verified.ok()) {
+ return Error() << verified.error() << ": " << path;
+ }
+
+ std::span<const uint8_t> digest_bytes(
+ reinterpret_cast<const uint8_t*>(raw_digest.data()), raw_digest.size());
+ std::string compos_digest = toHex(digest_bytes);
+
+ auto verity_digest = isFileInVerity(fd);
+ if (verity_digest.ok()) {
+ // The file is already in fs-verity. We need to make sure it was signed
+ // by CompOs, so we just check that it has the digest we expect.
+ if (verity_digest.value() != compos_digest) {
+ return Error() << "fs-verity digest does not match signature file: " << path;
+ }
+ } else {
+ // Not in fs-verity yet. But we have a valid signature of some
+ // digest. If it's not the correct digest for the file then
+ // enabling fs-verity will fail, so we don't need to check it
+ // explicitly ourselves. Otherwise we should be good.
+ std::vector<uint8_t> signature_bytes(raw_signature.begin(), raw_signature.end());
+ auto pkcs7 = createPkcs7(signature_bytes, kCompOsSubject);
+ if (!pkcs7.ok()) {
+ return Error() << pkcs7.error() << ": " << path;
+ }
+
+ LOG(INFO) << "Adding " << path << " to fs-verity...";
+ auto enabled = enableFsVerity(fd, pkcs7.value());
+ if (!enabled.ok()) {
+ return Error() << enabled.error() << ": " << path;
+ }
+ }
+
+ new_digests[path] = compos_digest;
+ } else if (it->is_directory()) {
+ // These are fine to ignore
+ } else if (it->is_symlink()) {
+ return Error() << "Rejecting artifacts, symlink at " << path;
+ } else {
+ return Error() << "Rejecting artifacts, unexpected file type for " << path;
+ }
+ }
+ if (ec) {
+ return Error() << "Failed to iterate " << directory_path << ": " << ec.message();
+ }
+
+ // Delete the signature files now that they have served their purpose. (ART
+ // has no use for them, and their presence could cause verification to fail
+ // on subsequent boots.)
+ for (auto& signature_path : signature_files) {
+ std::filesystem::remove(signature_path, ec);
+ if (ec) {
+ return Error() << "Failed to delete " << signature_path << ": " << ec.message();
+ }
+ }
+
+ return new_digests;
+}
+
Result<void> addCertToFsVerityKeyring(const std::string& path, const char* keyName) {
const char* const argv[] = {kFsVerityInitPath, "--load-extra-key", keyName};
int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
+ if (fd == -1) {
+ return ErrnoError() << "Failed to open " << path;
+ }
pid_t pid = fork();
if (pid == 0) {
dup2(fd, STDIN_FILENO);
@@ -271,10 +416,8 @@
if (!WIFEXITED(status)) {
return Error() << kFsVerityInitPath << ": abnormal process exit";
}
- if (WEXITSTATUS(status)) {
- if (status != 0) {
- return Error() << kFsVerityInitPath << " exited with " << status;
- }
+ if (WEXITSTATUS(status) != 0) {
+ return Error() << kFsVerityInitPath << " exited with " << WEXITSTATUS(status);
}
return {};
diff --git a/ondevice-signing/VerityUtils.h b/ondevice-signing/VerityUtils.h
index dca3184..8d8e62c 100644
--- a/ondevice-signing/VerityUtils.h
+++ b/ondevice-signing/VerityUtils.h
@@ -24,5 +24,9 @@
android::base::Result<std::vector<uint8_t>> createDigest(const std::string& path);
android::base::Result<std::map<std::string, std::string>>
verifyAllFilesInVerity(const std::string& path);
+
android::base::Result<std::map<std::string, std::string>>
addFilesToVerityRecursive(const std::string& path, const SigningKey& key);
+
+android::base::Result<std::map<std::string, std::string>>
+verifyAllFilesUsingCompOs(const std::string& path, const std::vector<uint8_t>& compos_key);
diff --git a/ondevice-signing/odsign.rc b/ondevice-signing/odsign.rc
index 044bae7..de09fc0 100644
--- a/ondevice-signing/odsign.rc
+++ b/ondevice-signing/odsign.rc
@@ -2,5 +2,8 @@
class core
user root
group system
- oneshot
disabled # does not start with the core class
+
+# Note that odsign is not oneshot, but stopped manually when it exits. This
+# ensures that if odsign crashes during a module update, apexd will detect
+# those crashes and roll back the update.
diff --git a/ondevice-signing/odsign_main.cpp b/ondevice-signing/odsign_main.cpp
index 135c4a0..bba39b8 100644
--- a/ondevice-signing/odsign_main.cpp
+++ b/ondevice-signing/odsign_main.cpp
@@ -32,6 +32,7 @@
#include <odrefresh/odrefresh.h>
#include "CertUtils.h"
+#include "FakeCompOs.h"
#include "KeystoreKey.h"
#include "VerityUtils.h"
@@ -50,25 +51,96 @@
const std::string kArtArtifactsDir = "/data/misc/apexdata/com.android.art/dalvik-cache";
-static const char* kOdrefreshPath = "/apex/com.android.art/bin/odrefresh";
+constexpr const char* kOdrefreshPath = "/apex/com.android.art/bin/odrefresh";
-static const char* kFsVerityProcPath = "/proc/sys/fs/verity";
+constexpr const char* kFsVerityProcPath = "/proc/sys/fs/verity";
-static const bool kForceCompilation = false;
-static const bool kUseCompOs = false; // STOPSHIP if true
+constexpr bool kForceCompilation = false;
+constexpr bool kUseCompOs = false; // STOPSHIP if true
-static const char* kVirtApexPath = "/apex/com.android.virt";
+constexpr const char* kCompOsApexPath = "/apex/com.android.compos";
const std::string kCompOsCert = "/data/misc/odsign/compos_key.cert";
+const std::string kCompOsPublicKey = "/data/misc/apexdata/com.android.compos/compos_key.pubkey";
+const std::string kCompOsKeyBlob = "/data/misc/apexdata/com.android.compos/compos_key.blob";
+const std::string kCompOsInstance = "/data/misc/apexdata/com.android.compos/compos_instance.img";
-static const char* kOdsignVerificationDoneProp = "odsign.verification.done";
-static const char* kOdsignKeyDoneProp = "odsign.key.done";
+const std::string kCompOsPendingPublicKey =
+ "/data/misc/apexdata/com.android.compos/compos_pending_key.pubkey";
+const std::string kCompOsPendingKeyBlob =
+ "/data/misc/apexdata/com.android.compos/compos_pending_key.blob";
+const std::string kCompOsPendingInstance =
+ "/data/misc/apexdata/com.android.compos/compos_pending_instance.img";
+const std::string kCompOsPendingArtifactsDir = "/data/misc/apexdata/com.android.art/compos-pending";
-static const char* kOdsignVerificationStatusProp = "odsign.verification.success";
-static const char* kOdsignVerificationStatusValid = "1";
-static const char* kOdsignVerificationStatusError = "0";
+constexpr const char* kOdsignVerificationDoneProp = "odsign.verification.done";
+constexpr const char* kOdsignKeyDoneProp = "odsign.key.done";
+
+constexpr const char* kOdsignVerificationStatusProp = "odsign.verification.success";
+constexpr const char* kOdsignVerificationStatusValid = "1";
+constexpr const char* kOdsignVerificationStatusError = "0";
+
+constexpr const char* kStopServiceProp = "ctl.stop";
+
+static std::vector<uint8_t> readBytesFromFile(const std::string& path) {
+ std::string str;
+ android::base::ReadFileToString(path, &str);
+ return std::vector<uint8_t>(str.begin(), str.end());
+}
+
+static bool rename(const std::string& from, const std::string& to) {
+ std::error_code ec;
+ std::filesystem::rename(from, to, ec);
+ if (ec) {
+ LOG(ERROR) << "Can't rename " << from << " to " << to << ": " << ec.message();
+ return false;
+ }
+ return true;
+}
+
+static int removeDirectory(const std::string& directory) {
+ std::error_code ec;
+ auto num_removed = std::filesystem::remove_all(directory, ec);
+ if (ec) {
+ LOG(ERROR) << "Can't remove " << directory << ": " << ec.message();
+ return 0;
+ } else {
+ if (num_removed > 0) {
+ LOG(INFO) << "Removed " << num_removed << " entries from " << directory;
+ }
+ return num_removed;
+ }
+}
+
+static bool directoryHasContent(const std::string& directory) {
+ std::error_code ec;
+ return std::filesystem::is_directory(directory, ec) &&
+ !std::filesystem::is_empty(directory, ec);
+}
+
+art::odrefresh::ExitCode compileArtifacts(bool force) {
+ const char* const argv[] = {kOdrefreshPath, force ? "--force-compile" : "--compile"};
+ const int exit_code =
+ logwrap_fork_execvp(arraysize(argv), argv, nullptr, false, LOG_ALOG, false, nullptr);
+ return static_cast<art::odrefresh::ExitCode>(exit_code);
+}
+
+art::odrefresh::ExitCode checkArtifacts() {
+ const char* const argv[] = {kOdrefreshPath, "--check"};
+ const int exit_code =
+ logwrap_fork_execvp(arraysize(argv), argv, nullptr, false, LOG_ALOG, false, nullptr);
+ return static_cast<art::odrefresh::ExitCode>(exit_code);
+}
+
+static std::string toHex(const std::vector<uint8_t>& digest) {
+ std::stringstream ss;
+ for (auto it = digest.begin(); it != digest.end(); ++it) {
+ ss << std::setfill('0') << std::setw(2) << std::hex << static_cast<unsigned>(*it);
+ }
+ return ss.str();
+}
bool compOsPresent() {
- return access(kVirtApexPath, F_OK) == 0;
+ return access(kCompOsApexPath, F_OK) == 0;
}
Result<void> verifyExistingRootCert(const SigningKey& key) {
@@ -102,15 +174,14 @@
}
auto keySignFunction = [&](const std::string& to_be_signed) { return key.sign(to_be_signed); };
- createSelfSignedCertificate(*publicKey, keySignFunction, outPath);
- return {};
+ return createSelfSignedCertificate(*publicKey, keySignFunction, outPath);
}
-Result<std::vector<uint8_t>> extractPublicKeyFromLeafCert(const SigningKey& key,
- const std::string& certPath,
- const std::string& expectedCn) {
+Result<std::vector<uint8_t>> extractRsaPublicKeyFromLeafCert(const SigningKey& key,
+ const std::string& certPath,
+ const std::string& expectedCn) {
if (access(certPath.c_str(), F_OK) < 0) {
- return ErrnoError() << "Certificate not found: " << kCompOsCert;
+ return ErrnoError() << "Certificate not found: " << certPath;
}
auto trustedPublicKey = key.getPublicKey();
if (!trustedPublicKey.ok()) {
@@ -129,22 +200,112 @@
<< ", should be " << expectedCn;
}
- return existingCertInfo.value().subjectKey;
+ return existingCertInfo.value().subjectRsaPublicKey;
}
-art::odrefresh::ExitCode compileArtifacts(bool force) {
- const char* const argv[] = {kOdrefreshPath, force ? "--force-compile" : "--compile"};
- const int exit_code =
- logwrap_fork_execvp(arraysize(argv), argv, nullptr, false, LOG_ALOG, false, nullptr);
- return static_cast<art::odrefresh::ExitCode>(exit_code);
-}
-
-static std::string toHex(const std::vector<uint8_t>& digest) {
- std::stringstream ss;
- for (auto it = digest.begin(); it != digest.end(); ++it) {
- ss << std::setfill('0') << std::setw(2) << std::hex << static_cast<unsigned>(*it);
+// Attempt to start a CompOS VM from the given instance image and then get it to
+// verify the public key & key blob. Returns the RsaPublicKey bytes if
+// successful, an empty vector if any of the files are not present, or an error
+// otherwise.
+Result<std::vector<uint8_t>> loadAndVerifyCompOsKey(const std::string& instanceFile,
+ const std::string& publicKeyFile,
+ const std::string& keyBlobFile) {
+ if (access(instanceFile.c_str(), F_OK) != 0 || access(publicKeyFile.c_str(), F_OK) != 0 ||
+ access(keyBlobFile.c_str(), F_OK) != 0) {
+ return {};
}
- return ss.str();
+
+ auto compOsStatus = FakeCompOs::startInstance(instanceFile);
+ if (!compOsStatus.ok()) {
+ return Error() << "Failed to start CompOs instance " << instanceFile << ": "
+ << compOsStatus.error();
+ }
+ auto& compOs = compOsStatus.value();
+
+ auto publicKey = readBytesFromFile(publicKeyFile);
+ auto keyBlob = readBytesFromFile(keyBlobFile);
+ auto response = compOs->loadAndVerifyKey(keyBlob, publicKey);
+ if (!response.ok()) {
+ return response.error();
+ }
+
+ return publicKey;
+}
+
+Result<std::vector<uint8_t>> verifyCompOsKey(const SigningKey& signingKey) {
+ std::vector<uint8_t> publicKey;
+
+ // If a pending key has been generated we don't know if it is the correct
+ // one for the pending CompOS VM, so we need to start it and ask it.
+ auto pendingPublicKey = loadAndVerifyCompOsKey(kCompOsPendingInstance, kCompOsPendingPublicKey,
+ kCompOsPendingKeyBlob);
+ if (pendingPublicKey.ok()) {
+ if (!pendingPublicKey->empty()) {
+ LOG(INFO) << "Verified pending CompOs key";
+
+ if (rename(kCompOsPendingInstance, kCompOsInstance) &&
+ rename(kCompOsPendingPublicKey, kCompOsPublicKey) &&
+ rename(kCompOsPendingKeyBlob, kCompOsKeyBlob)) {
+ publicKey = std::move(*pendingPublicKey);
+ }
+ }
+ } else {
+ LOG(WARNING) << "Failed to verify pending CompOs key: " << pendingPublicKey.error();
+ // And fall through to dealing with any current key.
+ }
+ // Whether good or bad, we've finished with these files.
+ unlink(kCompOsPendingInstance.c_str());
+ unlink(kCompOsPendingKeyBlob.c_str());
+ unlink(kCompOsPendingPublicKey.c_str());
+
+ if (publicKey.empty()) {
+ // Alternatively if we signed a cert for the key on a previous boot, then we
+ // can use that straight away.
+ auto existing_key =
+ extractRsaPublicKeyFromLeafCert(signingKey, kCompOsCert, kCompOsSubject.commonName);
+ if (existing_key.ok()) {
+ LOG(INFO) << "Found and verified existing CompOs public key certificate: "
+ << kCompOsCert;
+ return existing_key.value();
+ }
+ }
+
+ // Otherwise, if there is an existing key that we haven't signed yet, then we can sign it
+ // now if CompOS confirms it's OK.
+ if (publicKey.empty()) {
+ auto currentPublicKey =
+ loadAndVerifyCompOsKey(kCompOsInstance, kCompOsPublicKey, kCompOsKeyBlob);
+ if (currentPublicKey.ok()) {
+ if (!currentPublicKey->empty()) {
+ LOG(INFO) << "Verified existing CompOs key";
+ publicKey = std::move(*currentPublicKey);
+ }
+ } else {
+ LOG(WARNING) << "Failed to verify existing CompOs key: " << currentPublicKey.error();
+ // Delete so we won't try again on next boot.
+ unlink(kCompOsInstance.c_str());
+ unlink(kCompOsKeyBlob.c_str());
+ unlink(kCompOsPublicKey.c_str());
+ }
+ }
+
+ if (publicKey.empty()) {
+ return Error() << "No valid CompOs key present.";
+ }
+
+ // One way or another we now have a valid key pair. Persist a certificate so
+ // we can simplify the checks on subsequent boots.
+
+ auto signFunction = [&](const std::string& to_be_signed) {
+ return signingKey.sign(to_be_signed);
+ };
+ auto certStatus = createLeafCertificate(kCompOsSubject, publicKey, signFunction,
+ kSigningKeyCert, kCompOsCert);
+ if (!certStatus.ok()) {
+ return Error() << "Failed to create CompOs cert: " << certStatus.error();
+ }
+
+ return publicKey;
}
Result<std::map<std::string, std::string>> computeDigests(const std::string& path) {
@@ -158,7 +319,8 @@
if (it->is_regular_file()) {
auto digest = createDigest(it->path());
if (!digest.ok()) {
- return Error() << "Failed to compute digest for " << it->path();
+ return Error() << "Failed to compute digest for " << it->path() << ": "
+ << digest.error();
}
digests[it->path()] = toHex(*digest);
}
@@ -270,20 +432,6 @@
return {};
}
-static int removeArtifacts() {
- std::error_code ec;
- auto num_removed = std::filesystem::remove_all(kArtArtifactsDir, ec);
- if (ec) {
- LOG(ERROR) << "Can't remove " << kArtArtifactsDir << ": " << ec.message();
- return 0;
- } else {
- if (num_removed > 0) {
- LOG(INFO) << "Removed " << num_removed << " entries from " << kArtArtifactsDir;
- }
- return num_removed;
- }
-}
-
static Result<void> verifyArtifacts(const SigningKey& key, bool supportsFsVerity) {
auto signInfo = getOdsignInfo(key);
// Tell init we're done with the key; this is a boot time optimization
@@ -315,15 +463,99 @@
return {};
}
+Result<std::vector<uint8_t>> addCompOsCertToFsVerityKeyring(const SigningKey& signingKey) {
+ auto publicKey = verifyCompOsKey(signingKey);
+ if (!publicKey.ok()) {
+ return publicKey.error();
+ }
+
+ auto cert_add_result = addCertToFsVerityKeyring(kCompOsCert, "fsv_compos");
+ if (!cert_add_result.ok()) {
+ // Best efforts only - nothing we can do if deletion fails.
+ unlink(kCompOsCert.c_str());
+ return Error() << "Failed to add CompOs certificate to fs-verity keyring: "
+ << cert_add_result.error();
+ }
+
+ return publicKey;
+}
+
+art::odrefresh::ExitCode checkCompOsPendingArtifacts(const std::vector<uint8_t>& compos_key,
+ const SigningKey& signingKey,
+ bool* digests_verified) {
+ if (!directoryHasContent(kCompOsPendingArtifactsDir)) {
+ return art::odrefresh::ExitCode::kCompilationRequired;
+ }
+
+ // CompOs has generated some artifacts that may, or may not, match the
+ // current state. But if there are already valid artifacts present the
+ // CompOs ones are redundant.
+ art::odrefresh::ExitCode odrefresh_status = checkArtifacts();
+ if (odrefresh_status != art::odrefresh::ExitCode::kCompilationRequired) {
+ if (odrefresh_status == art::odrefresh::ExitCode::kOkay) {
+ LOG(INFO) << "Current artifacts are OK, deleting pending artifacts";
+ removeDirectory(kCompOsPendingArtifactsDir);
+ }
+ return odrefresh_status;
+ }
+
+ // No useful current artifacts, let's see if the CompOs ones are OK
+ LOG(INFO) << "Current artifacts are out of date, switching to pending artifacts";
+ removeDirectory(kArtArtifactsDir);
+ if (!rename(kCompOsPendingArtifactsDir, kArtArtifactsDir)) {
+ removeDirectory(kCompOsPendingArtifactsDir);
+ return art::odrefresh::ExitCode::kCompilationRequired;
+ }
+
+ // TODO: Make sure that we check here that the contents of the artifacts
+ // correspond to their filenames (and extensions) - the CompOs signatures
+ // can't guarantee that.
+ odrefresh_status = checkArtifacts();
+ if (odrefresh_status != art::odrefresh::ExitCode::kOkay) {
+ LOG(WARNING) << "Pending artifacts are not OK";
+ return odrefresh_status;
+ }
+
+ // The artifacts appear to be up to date - but we haven't
+ // verified that they are genuine yet.
+ Result<std::map<std::string, std::string>> digests =
+ verifyAllFilesUsingCompOs(kArtArtifactsDir, compos_key);
+
+ if (digests.ok()) {
+ auto persisted = persistDigests(digests.value(), signingKey);
+
+ // Having signed the digests (or failed to), we're done with the signing key.
+ SetProperty(kOdsignKeyDoneProp, "1");
+
+ if (persisted.ok()) {
+ *digests_verified = true;
+ LOG(INFO) << "Pending artifacts successfully verified.";
+ return art::odrefresh::ExitCode::kOkay;
+ } else {
+ LOG(WARNING) << persisted.error();
+ }
+ } else {
+ LOG(WARNING) << "Pending artifact verification failed: " << digests.error();
+ }
+
+ // We can't use the existing artifacts, so we will need to generate new
+ // ones.
+ removeDirectory(kArtArtifactsDir);
+ return art::odrefresh::ExitCode::kCompilationRequired;
+}
+
int main(int /* argc */, char** /* argv */) {
auto errorScopeGuard = []() {
// In case we hit any error, remove the artifacts and tell Zygote not to use anything
- removeArtifacts();
+ removeDirectory(kArtArtifactsDir);
+ removeDirectory(kCompOsPendingArtifactsDir);
// Tell init we don't need to use our key anymore
SetProperty(kOdsignKeyDoneProp, "1");
// Tell init we're done with verification, and that it was an error
- SetProperty(kOdsignVerificationDoneProp, "1");
SetProperty(kOdsignVerificationStatusProp, kOdsignVerificationStatusError);
+ SetProperty(kOdsignVerificationDoneProp, "1");
+ // Tell init it shouldn't try to restart us - see odsign.rc
+ SetProperty(kStopServiceProp, "odsign");
};
auto scope_guard = android::base::make_scope_guard(errorScopeGuard);
@@ -334,7 +566,7 @@
auto keystoreResult = KeystoreKey::getInstance();
if (!keystoreResult.ok()) {
- LOG(ERROR) << "Could not create keystore key: " << keystoreResult.error().message();
+ LOG(ERROR) << "Could not create keystore key: " << keystoreResult.error();
return -1;
}
SigningKey* key = keystoreResult.value();
@@ -349,12 +581,12 @@
if (supportsFsVerity) {
auto existing_cert = verifyExistingRootCert(*key);
if (!existing_cert.ok()) {
- LOG(WARNING) << existing_cert.error().message();
+ LOG(WARNING) << existing_cert.error();
// Try to create a new cert
auto new_cert = createX509RootCert(*key, kSigningKeyCert);
if (!new_cert.ok()) {
- LOG(ERROR) << "Failed to create X509 certificate: " << new_cert.error().message();
+ LOG(ERROR) << "Failed to create X509 certificate: " << new_cert.error();
// TODO apparently the key become invalid - delete the blob / cert
return -1;
}
@@ -364,46 +596,42 @@
auto cert_add_result = addCertToFsVerityKeyring(kSigningKeyCert, "fsv_ods");
if (!cert_add_result.ok()) {
LOG(ERROR) << "Failed to add certificate to fs-verity keyring: "
- << cert_add_result.error().message();
+ << cert_add_result.error();
return -1;
}
}
+ art::odrefresh::ExitCode odrefresh_status = art::odrefresh::ExitCode::kCompilationRequired;
+ bool digests_verified = false;
+
if (supportsCompOs) {
- auto compos_key = extractPublicKeyFromLeafCert(*key, kCompOsCert, "CompOS");
- if (compos_key.ok()) {
- auto cert_add_result = addCertToFsVerityKeyring(kCompOsCert, "fsv_compos");
- if (cert_add_result.ok()) {
- LOG(INFO) << "Added CompOs key to fs-verity keyring";
- } else {
- LOG(ERROR) << "Failed to add CompOs certificate to fs-verity keyring: "
- << cert_add_result.error().message();
- // TODO - what do we do now?
- // return -1;
- }
+ auto compos_key = addCompOsCertToFsVerityKeyring(*key);
+ if (!compos_key.ok()) {
+ LOG(WARNING) << compos_key.error();
} else {
- LOG(ERROR) << "Failed to retrieve key from CompOs certificate: "
- << compos_key.error().message();
- // Best efforts only - nothing we can do if deletion fails.
- unlink(kCompOsCert.c_str());
- // TODO - what do we do now?
+ odrefresh_status =
+ checkCompOsPendingArtifacts(compos_key.value(), *key, &digests_verified);
}
}
- art::odrefresh::ExitCode odrefresh_status = compileArtifacts(kForceCompilation);
+ if (odrefresh_status == art::odrefresh::ExitCode::kCompilationRequired) {
+ odrefresh_status = compileArtifacts(kForceCompilation);
+ }
if (odrefresh_status == art::odrefresh::ExitCode::kOkay) {
LOG(INFO) << "odrefresh said artifacts are VALID";
- // A post-condition of validating artifacts is that if the ones on /system
- // are used, kArtArtifactsDir is removed. Conversely, if kArtArtifactsDir
- // exists, those are artifacts that will be used, and we should verify them.
- int err = access(kArtArtifactsDir.c_str(), F_OK);
- // If we receive any error other than ENOENT, be suspicious
- bool artifactsPresent = (err == 0) || (err < 0 && errno != ENOENT);
- if (artifactsPresent) {
- auto verificationResult = verifyArtifacts(*key, supportsFsVerity);
- if (!verificationResult.ok()) {
- LOG(ERROR) << verificationResult.error().message();
- return -1;
+ if (!digests_verified) {
+ // A post-condition of validating artifacts is that if the ones on /system
+ // are used, kArtArtifactsDir is removed. Conversely, if kArtArtifactsDir
+ // exists, those are artifacts that will be used, and we should verify them.
+ int err = access(kArtArtifactsDir.c_str(), F_OK);
+ // If we receive any error other than ENOENT, be suspicious
+ bool artifactsPresent = (err == 0) || (err < 0 && errno != ENOENT);
+ if (artifactsPresent) {
+ auto verificationResult = verifyArtifacts(*key, supportsFsVerity);
+ if (!verificationResult.ok()) {
+ LOG(ERROR) << verificationResult.error();
+ return -1;
+ }
}
}
} else if (odrefresh_status == art::odrefresh::ExitCode::kCompilationSuccess ||
@@ -420,12 +648,12 @@
digests = computeDigests(kArtArtifactsDir);
}
if (!digests.ok()) {
- LOG(ERROR) << digests.error().message();
+ LOG(ERROR) << digests.error();
return -1;
}
auto persistStatus = persistDigests(*digests, *key);
if (!persistStatus.ok()) {
- LOG(ERROR) << persistStatus.error().message();
+ LOG(ERROR) << persistStatus.error();
return -1;
}
} else if (odrefresh_status == art::odrefresh::ExitCode::kCleanupFailed) {
@@ -442,7 +670,10 @@
// At this point, we're done with the key for sure
SetProperty(kOdsignKeyDoneProp, "1");
// And we did a successful verification
- SetProperty(kOdsignVerificationDoneProp, "1");
SetProperty(kOdsignVerificationStatusProp, kOdsignVerificationStatusValid);
+ SetProperty(kOdsignVerificationDoneProp, "1");
+
+ // Tell init it shouldn't try to restart us - see odsign.rc
+ SetProperty(kStopServiceProp, "odsign");
return 0;
}
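
The main() changes above make odrefresh conditional: checkCompOsPendingArtifacts() can report the digests as already verified, and compilation now only runs while the status is still kCompilationRequired. The artifact-presence check keeps its conservative error handling, treating anything other than a clean ENOENT as "artifacts present" so they still go through verification. A minimal standalone sketch of that check (the helper name is illustrative, not from the patch):

    #include <cerrno>
    #include <unistd.h>

    // Returns true unless the directory is definitely absent; any failure other
    // than ENOENT is treated as "present" so verification still runs on it.
    static bool artifactsPresent(const char* artifactsDir) {
        int err = access(artifactsDir, F_OK);
        return (err == 0) || (err < 0 && errno != ENOENT);
    }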
diff --git a/ondevice-signing/proto/Android.bp b/ondevice-signing/proto/Android.bp
index fd48f31..c042b8e 100644
--- a/ondevice-signing/proto/Android.bp
+++ b/ondevice-signing/proto/Android.bp
@@ -23,7 +23,21 @@
host_supported: true,
proto: {
export_proto_headers: true,
- type: "full",
+ type: "lite",
},
srcs: ["odsign_info.proto"],
}
+
+cc_library_static {
+ name: "lib_compos_proto",
+ host_supported: true,
+ proto: {
+ export_proto_headers: true,
+ type: "lite",
+ },
+ srcs: ["compos_signature.proto"],
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.compos",
+ ],
+}
diff --git a/ondevice-signing/proto/compos_signature.proto b/ondevice-signing/proto/compos_signature.proto
new file mode 100644
index 0000000..2f7d09f
--- /dev/null
+++ b/ondevice-signing/proto/compos_signature.proto
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+
+package compos.proto;
+
+// Data provided by CompOS to allow validation of a file it generated.
+message Signature {
+ // The fs-verity digest (which is derived from the root hash of
+ // the Merkle tree) of the file contents.
+ bytes digest = 1;
+
+ // Signature of a fsverity_formatted_digest structure containing
+ // the digest, signed using CompOS's private key.
+ bytes signature = 2;
+}
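
The new compos_signature.proto is built with the lite runtime and exported through the lib_compos_proto static library added above. A hypothetical consumer sketch, assuming the usual protoc-generated header name (compos_signature.pb.h) and an illustrative signature-file path:

    #include <fstream>
    #include <iostream>
    #include <iterator>
    #include <string>

    #include "compos_signature.pb.h"  // assumed protoc output for compos_signature.proto

    int main() {
        // Illustrative path only; the real location is chosen by the consumer.
        std::ifstream in("/data/misc/compos/example.signature", std::ios::binary);
        std::string raw((std::istreambuf_iterator<char>(in)),
                        std::istreambuf_iterator<char>());

        compos::proto::Signature signature;
        if (!signature.ParseFromString(raw)) {
            std::cerr << "Malformed Signature message" << std::endl;
            return 1;
        }
        // digest() holds the fs-verity digest of the generated file; signature()
        // holds the signature over a fsverity_formatted_digest wrapping that digest.
        std::cout << "digest is " << signature.digest().size() << " bytes" << std::endl;
        return 0;
    }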
diff --git a/provisioner/Android.bp b/provisioner/Android.bp
index 12a21d1..aac4878 100644
--- a/provisioner/Android.bp
+++ b/provisioner/Android.bp
@@ -43,27 +43,23 @@
},
}
-java_binary {
- name: "provisioner_cli",
- wrapper: "provisioner_cli",
- srcs: ["src/com/android/commands/provisioner/**/*.java"],
- static_libs: [
- "android.security.provisioner-java",
- ],
-}
-
cc_binary {
name: "rkp_factory_extraction_tool",
+ vendor: true,
srcs: ["rkp_factory_extraction_tool.cpp"],
shared_libs: [
- "android.hardware.security.keymint-V1-ndk_platform",
+ "android.hardware.security.keymint-V1-ndk",
"libbinder",
"libbinder_ndk",
- "libcppbor_external",
- "libcppcose_rkp",
"libcrypto",
"liblog",
- "libvintf",
],
- //export_include_dirs: ["include"],
+ static_libs: [
+ "libbase",
+ "libcppbor_external",
+ "libcppcose_rkp",
+ "libgflags",
+ "libjsoncpp",
+ "libkeymint_remote_prov_support",
+ ],
}
diff --git a/provisioner/provisioner_cli b/provisioner/provisioner_cli
deleted file mode 100755
index 7b53d6e..0000000
--- a/provisioner/provisioner_cli
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/system/bin/sh
-#
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Script to start "provisioner_cli" on the device.
-#
-base=/system
-export CLASSPATH=$base/framework/provisioner_cli.jar
-exec app_process $base/bin com.android.commands.provisioner.Cli "$@"
diff --git a/provisioner/rkp_factory_extraction_tool.cpp b/provisioner/rkp_factory_extraction_tool.cpp
index d4842b1..2e59dbd 100644
--- a/provisioner/rkp_factory_extraction_tool.cpp
+++ b/provisioner/rkp_factory_extraction_tool.cpp
@@ -20,147 +20,171 @@
#include <aidl/android/hardware/security/keymint/IRemotelyProvisionedComponent.h>
#include <android/binder_manager.h>
#include <cppbor.h>
+#include <gflags/gflags.h>
#include <keymaster/cppcose/cppcose.h>
-#include <log/log.h>
-#include <vintf/VintfObject.h>
-
-using std::set;
-using std::string;
-using std::vector;
+#include <openssl/base64.h>
+#include <remote_prov/remote_prov_utils.h>
+#include <sys/random.h>
using aidl::android::hardware::security::keymint::DeviceInfo;
using aidl::android::hardware::security::keymint::IRemotelyProvisionedComponent;
using aidl::android::hardware::security::keymint::MacedPublicKey;
using aidl::android::hardware::security::keymint::ProtectedData;
-
-using android::vintf::HalManifest;
-using android::vintf::VintfObject;
+using aidl::android::hardware::security::keymint::remote_prov::generateEekChain;
+using aidl::android::hardware::security::keymint::remote_prov::getProdEekChain;
+using aidl::android::hardware::security::keymint::remote_prov::jsonEncodeCsrWithBuild;
using namespace cppbor;
using namespace cppcose;
+DEFINE_bool(test_mode, false, "If enabled, a fake EEK key/cert are used.");
+
+DEFINE_string(output_format, "csr", "How to format the output. Defaults to 'csr'.");
+
namespace {
-const string kPackage = "android.hardware.security.keymint";
-const string kInterface = "IRemotelyProvisionedComponent";
-const string kFormattedName = kPackage + "." + kInterface + "/";
+// Various supported --output_format values.
+constexpr std::string_view kBinaryCsrOutput = "csr"; // Just the raw csr as binary
+constexpr std::string_view kBuildPlusCsr = "build+csr"; // Text-encoded (JSON) build
+ // fingerprint plus CSR.
-ErrMsgOr<vector<uint8_t>> generateEekChain(size_t length, const vector<uint8_t>& eekId) {
- auto eekChain = cppbor::Array();
+constexpr size_t kChallengeSize = 16;
- vector<uint8_t> prevPrivKey;
- for (size_t i = 0; i < length - 1; ++i) {
- vector<uint8_t> pubKey(ED25519_PUBLIC_KEY_LEN);
- vector<uint8_t> privKey(ED25519_PRIVATE_KEY_LEN);
-
- ED25519_keypair(pubKey.data(), privKey.data());
-
- // The first signing key is self-signed.
- if (prevPrivKey.empty()) prevPrivKey = privKey;
-
- auto coseSign1 = constructCoseSign1(prevPrivKey,
- cppbor::Map() /* payload CoseKey */
- .add(CoseKey::KEY_TYPE, OCTET_KEY_PAIR)
- .add(CoseKey::ALGORITHM, EDDSA)
- .add(CoseKey::CURVE, ED25519)
- .add(CoseKey::PUBKEY_X, pubKey)
- .canonicalize()
- .encode(),
- {} /* AAD */);
- if (!coseSign1) return coseSign1.moveMessage();
- eekChain.add(coseSign1.moveValue());
-
- prevPrivKey = privKey;
+std::string toBase64(const std::vector<uint8_t>& buffer) {
+ size_t base64Length;
+ int rc = EVP_EncodedLength(&base64Length, buffer.size());
+ if (!rc) {
+ std::cerr << "Error getting base64 length. Size overflow?" << std::endl;
+ exit(-1);
}
- vector<uint8_t> pubKey(X25519_PUBLIC_VALUE_LEN);
- vector<uint8_t> privKey(X25519_PRIVATE_KEY_LEN);
- X25519_keypair(pubKey.data(), privKey.data());
-
- auto coseSign1 = constructCoseSign1(prevPrivKey,
- cppbor::Map() /* payload CoseKey */
- .add(CoseKey::KEY_TYPE, OCTET_KEY_PAIR)
- .add(CoseKey::KEY_ID, eekId)
- .add(CoseKey::ALGORITHM, ECDH_ES_HKDF_256)
- .add(CoseKey::CURVE, cppcose::X25519)
- .add(CoseKey::PUBKEY_X, pubKey)
- .canonicalize()
- .encode(),
- {} /* AAD */);
- if (!coseSign1) return coseSign1.moveMessage();
- eekChain.add(coseSign1.moveValue());
-
- return eekChain.encode();
+ std::string base64(base64Length, ' ');
+ rc = EVP_EncodeBlock(reinterpret_cast<uint8_t*>(base64.data()), buffer.data(), buffer.size());
+ ++rc; // Account for NUL, which BoringSSL does not for some reason.
+ if (rc != base64Length) {
+ std::cerr << "Error writing base64. Expected " << base64Length
+ << " bytes to be written, but " << rc << " bytes were actually written."
+ << std::endl;
+ exit(-1);
+ }
+ return base64;
}
-std::vector<uint8_t> getChallenge() {
- return std::vector<uint8_t>(0);
+std::vector<uint8_t> generateChallenge() {
+ std::vector<uint8_t> challenge(kChallengeSize);
+
+ ssize_t bytesRemaining = static_cast<ssize_t>(challenge.size());
+ uint8_t* writePtr = challenge.data();
+ while (bytesRemaining > 0) {
+ int bytesRead = getrandom(writePtr, bytesRemaining, /*flags=*/0);
+ if (bytesRead < 0 && errno != EINTR) {
+ std::cerr << errno << ": " << strerror(errno) << std::endl;
+ exit(-1);
+ }
+ bytesRemaining -= bytesRead;
+ writePtr += bytesRead;
+ }
+
+ return challenge;
}
-std::vector<uint8_t> composeCertificateRequest(ProtectedData&& protectedData,
- DeviceInfo&& deviceInfo) {
- Array emptyMacedKeysToSign;
- emptyMacedKeysToSign
- .add(std::vector<uint8_t>(0)) // empty protected headers as bstr
- .add(Map()) // empty unprotected headers
- .add(Null()) // nil for the payload
- .add(std::vector<uint8_t>(0)); // empty tag as bstr
- Array certificateRequest;
- certificateRequest.add(EncodedItem(std::move(deviceInfo.deviceInfo)))
- .add(getChallenge()) // fake challenge
- .add(EncodedItem(std::move(protectedData.protectedData)))
- .add(std::move(emptyMacedKeysToSign));
- return certificateRequest.encode();
+Array composeCertificateRequest(const ProtectedData& protectedData,
+ const DeviceInfo& verifiedDeviceInfo,
+ const std::vector<uint8_t>& challenge,
+ const std::vector<uint8_t>& keysToSignMac) {
+ Array macedKeysToSign = Array()
+ .add(std::vector<uint8_t>(0)) // empty protected headers as bstr
+ .add(Map()) // empty unprotected headers
+ .add(Null()) // nil for the payload
+ .add(keysToSignMac); // MAC as returned from the HAL
+
+ Array deviceInfo =
+ Array().add(EncodedItem(verifiedDeviceInfo.deviceInfo)).add(Map()); // Empty device info
+
+ Array certificateRequest = Array()
+ .add(std::move(deviceInfo))
+ .add(challenge)
+ .add(EncodedItem(protectedData.protectedData))
+ .add(std::move(macedKeysToSign));
+ return certificateRequest;
}
-int32_t errorMsg(string name) {
- std::cerr << "Failed for rkp instance: " << name;
- return -1;
+std::vector<uint8_t> getEekChain() {
+ if (FLAGS_test_mode) {
+ const std::vector<uint8_t> kFakeEekId = {'f', 'a', 'k', 'e', 0};
+ auto eekOrErr = generateEekChain(3 /* chainlength */, kFakeEekId);
+ if (!eekOrErr) {
+ std::cerr << "Failed to generate test EEK somehow: " << eekOrErr.message() << std::endl;
+ exit(-1);
+ }
+ auto [eek, pubkey, privkey] = eekOrErr.moveValue();
+ std::cout << "EEK raw keypair:" << std::endl;
+ std::cout << " pub: " << toBase64(pubkey) << std::endl;
+ std::cout << " priv: " << toBase64(privkey) << std::endl;
+ return eek;
+ }
+
+ return getProdEekChain();
+}
+
+void writeOutput(const Array& csr) {
+ if (FLAGS_output_format == kBinaryCsrOutput) {
+ auto bytes = csr.encode();
+ std::copy(bytes.begin(), bytes.end(), std::ostream_iterator<char>(std::cout));
+ } else if (FLAGS_output_format == kBuildPlusCsr) {
+ auto [json, error] = jsonEncodeCsrWithBuild(csr);
+ if (!error.empty()) {
+ std::cerr << "Error JSON encoding the output: " << error;
+ exit(1);
+ }
+ std::cout << json << std::endl;
+ } else {
+ std::cerr << "Unexpected output_format '" << FLAGS_output_format << "'" << std::endl;
+ std::cerr << "Valid formats:" << std::endl;
+ std::cerr << " " << kBinaryCsrOutput << std::endl;
+ std::cerr << " " << kBuildPlusCsr << std::endl;
+ exit(1);
+ }
+}
+
+// Callback for AServiceManager_forEachDeclaredInstance that writes out a CSR
+// for every IRemotelyProvisionedComponent.
+void getCsrForInstance(const char* name, void* /*context*/) {
+ const std::vector<uint8_t> challenge = generateChallenge();
+
+ auto fullName = std::string(IRemotelyProvisionedComponent::descriptor) + "/" + name;
+ AIBinder* rkpAiBinder = AServiceManager_getService(fullName.c_str());
+ ::ndk::SpAIBinder rkp_binder(rkpAiBinder);
+ auto rkp_service = IRemotelyProvisionedComponent::fromBinder(rkp_binder);
+ if (!rkp_service) {
+ std::cerr << "Unable to get binder object for '" << fullName << "', skipping.";
+ return;
+ }
+
+ std::vector<uint8_t> keysToSignMac;
+ std::vector<MacedPublicKey> emptyKeys;
+ DeviceInfo verifiedDeviceInfo;
+ ProtectedData protectedData;
+ ::ndk::ScopedAStatus status = rkp_service->generateCertificateRequest(
+ FLAGS_test_mode, emptyKeys, getEekChain(), challenge, &verifiedDeviceInfo, &protectedData,
+ &keysToSignMac);
+ if (!status.isOk()) {
+ std::cerr << "Bundle extraction failed for '" << fullName
+ << "'. Error code: " << status.getServiceSpecificError() << "." << std::endl;
+ exit(-1);
+ }
+ auto request =
+ composeCertificateRequest(protectedData, verifiedDeviceInfo, challenge, keysToSignMac);
+ writeOutput(request);
}
} // namespace
-int main() {
- std::shared_ptr<const HalManifest> manifest = VintfObject::GetDeviceHalManifest();
- set<string> rkpNames = manifest->getAidlInstances(kPackage, kInterface);
- for (auto name : rkpNames) {
- string fullName = kFormattedName + name;
- if (!AServiceManager_isDeclared(fullName.c_str())) {
- ALOGE("Could not find the following instance declared in the manifest: %s\n",
- fullName.c_str());
- return errorMsg(name);
- }
- AIBinder* rkpAiBinder = AServiceManager_getService(fullName.c_str());
- ::ndk::SpAIBinder rkp_binder(rkpAiBinder);
- auto rkp_service = IRemotelyProvisionedComponent::fromBinder(rkp_binder);
- std::vector<uint8_t> keysToSignMac;
- std::vector<MacedPublicKey> emptyKeys;
+int main(int argc, char** argv) {
+ gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
- // Replace this eek chain generation with the actual production GEEK
- std::vector<uint8_t> eekId(10); // replace with real KID later (EEK fingerprint)
- auto eekOrErr = generateEekChain(3 /* chainlength */, eekId);
- if (!eekOrErr) {
- ALOGE("Failed to generate test EEK somehow: %s", eekOrErr.message().c_str());
- return errorMsg(name);
- }
+ AServiceManager_forEachDeclaredInstance(IRemotelyProvisionedComponent::descriptor,
+ /*context=*/nullptr, getCsrForInstance);
- std::vector<uint8_t> eek = eekOrErr.moveValue();
- DeviceInfo deviceInfo;
- ProtectedData protectedData;
- if (rkp_service) {
- ALOGE("extracting bundle");
- ::ndk::ScopedAStatus status = rkp_service->generateCertificateRequest(
- true /* testMode */, emptyKeys, eek, getChallenge(), &deviceInfo, &protectedData,
- &keysToSignMac);
- if (!status.isOk()) {
- ALOGE("Bundle extraction failed. Error code: %d", status.getServiceSpecificError());
- return errorMsg(name);
- }
- std::cout << "\n";
- std::vector<uint8_t> certificateRequest =
- composeCertificateRequest(std::move(protectedData), std::move(deviceInfo));
- std::copy(certificateRequest.begin(), certificateRequest.end(),
- std::ostream_iterator<char>(std::cout));
- }
- }
+ return 0;
}
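
The rewritten tool now walks every declared IRemotelyProvisionedComponent instance and emits one certificate request per instance, with behaviour controlled by the --test_mode and --output_format flags declared above (for example, --output_format=build+csr selects the JSON form). A hypothetical post-processing sketch for the default binary "csr" output, using the same libcppbor the tool links against, which checks the four-entry layout produced by composeCertificateRequest():

    #include <iostream>
    #include <iterator>
    #include <vector>

    #include <cppbor.h>
    #include <cppbor_parse.h>

    int main() {
        // Read the raw CBOR bytes (e.g. the tool's stdout redirected into this program).
        std::vector<uint8_t> bytes((std::istreambuf_iterator<char>(std::cin)),
                                   std::istreambuf_iterator<char>());
        auto [item, unusedPos, errMsg] = cppbor::parse(bytes);
        if (!item || !errMsg.empty()) {
            std::cerr << "Not valid CBOR: " << errMsg << std::endl;
            return 1;
        }
        const cppbor::Array* csr = item->asArray();
        if (!csr || csr->size() != 4) {
            std::cerr << "Unexpected CSR layout" << std::endl;
            return 1;
        }
        // Entries: [DeviceInfo, challenge, ProtectedData, MacedKeysToSign].
        std::cout << "CSR parsed with " << csr->size() << " entries" << std::endl;
        return 0;
    }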
diff --git a/provisioner/src/com/android/commands/provisioner/Cli.java b/provisioner/src/com/android/commands/provisioner/Cli.java
deleted file mode 100644
index 62afdac..0000000
--- a/provisioner/src/com/android/commands/provisioner/Cli.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.commands.provisioner;
-
-import android.os.IBinder;
-import android.os.RemoteException;
-import android.os.ServiceManager;
-import android.security.provisioner.IProvisionerService;
-
-import com.android.internal.os.BaseCommand;
-
-import java.io.ByteArrayOutputStream;
-import java.io.InputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.lang.IllegalArgumentException;
-
-/**
- * Contains the implementation of the remote provisioning command-line interface.
- */
-public class Cli extends BaseCommand {
- /**
- * Creates an instance of the command-line interface and runs it. This is the entry point of
- * the tool.
- */
- public static void main(String[] args) {
- new Cli().run(args);
- }
-
- /**
- * Runs the command requested by the invoker. It parses the very first required argument, which
- * is the command, and calls the appropriate handler.
- */
- @Override
- public void onRun() throws Exception {
- String cmd = nextArgRequired();
- switch (cmd) {
- case "get-req":
- getRequest();
- break;
-
- case "help":
- onShowUsage(System.out);
- break;
-
- default:
- throw new IllegalArgumentException("unknown command: " + cmd);
- }
- }
-
- /**
- * Retrieves a 'certificate request' from the provisioning service. The COSE-encoded
- * 'certificate chain' describing the endpoint encryption key (EEK) to use for encryption is
- * read from the standard input. The retrieved request is written to the standard output.
- */
- private void getRequest() throws Exception {
- // Process options.
- boolean test = false;
- byte[] challenge = null;
- int count = 0;
- String arg;
- while ((arg = nextArg()) != null) {
- switch (arg) {
- case "--test":
- test = true;
- break;
-
- case "--challenge":
- // TODO: We may need a different encoding of the challenge.
- challenge = nextArgRequired().getBytes();
- break;
-
- case "--count":
- count = Integer.parseInt(nextArgRequired());
- if (count < 0) {
- throw new IllegalArgumentException(
- "--count must be followed by non-negative number");
- }
- break;
-
- default:
- throw new IllegalArgumentException("unknown argument: " + arg);
- }
- }
-
- // Send the request over to the provisioning service and write the result to stdout.
- byte[] res = getService().getCertificateRequest(test, count, readAll(System.in), challenge);
- if (res != null) {
- System.out.write(res);
- }
- }
-
- /**
- * Retrieves an implementation of the IProvisionerService interface. It allows the caller to
- * call into the service via binder.
- */
- private static IProvisionerService getService() throws RemoteException {
- IBinder binder = ServiceManager.getService("remote-provisioner");
- if (binder == null) {
- throw new RemoteException("Provisioning service is inaccessible");
- }
- return IProvisionerService.Stub.asInterface(binder);
- }
-
- /** Reads all data from the provided input stream and returns it as a byte array. */
- private static byte[] readAll(InputStream in) throws IOException {
- ByteArrayOutputStream out = new ByteArrayOutputStream();
- byte[] buf = new byte[1024];
- int read;
- while ((read = in.read(buf)) != -1) {
- out.write(buf, 0, read);
- }
- return out.toByteArray();
- }
-
- /**
- * Writes the usage information to the given stream. This is displayed to users of the tool when
- * they ask for help or when they pass incorrect arguments to the tool.
- */
- @Override
- public void onShowUsage(PrintStream out) {
- out.println(
- "Usage: provisioner_cli <command> [options]\n" +
- "Commands: help\n" +
- " get-req [--count <n>] [--test] [--challenge <v>]");
- }
-}