Merge "Implement a back-level KeyMint compatibility wrapper"
diff --git a/identity/Android.bp b/identity/Android.bp
index 7b0503a..c69ead1 100644
--- a/identity/Android.bp
+++ b/identity/Android.bp
@@ -27,6 +27,7 @@
     defaults: [
         "identity_defaults",
         "keymint_use_latest_hal_aidl_ndk_shared",
+        "keymint_use_latest_hal_aidl_cpp_static",
     ],
 
     srcs: [
@@ -53,6 +54,7 @@
         "libkeymaster4support",
         "libkeystore-attestation-application-id",
         "android.security.authorization-ndk",
+        "android.security.remoteprovisioning-cpp",
         "libutilscallstack",
     ],
     static_libs: [
diff --git a/identity/CredentialStore.cpp b/identity/CredentialStore.cpp
index 61a9125..c5c429b 100644
--- a/identity/CredentialStore.cpp
+++ b/identity/CredentialStore.cpp
@@ -17,10 +17,15 @@
 #define LOG_TAG "credstore"
 
 #include <algorithm>
+#include <optional>
 
 #include <android-base/logging.h>
-
+#include <android/hardware/security/keymint/IRemotelyProvisionedComponent.h>
+#include <android/hardware/security/keymint/RpcHardwareInfo.h>
+#include <android/security/remoteprovisioning/IRemotelyProvisionedKeyPool.h>
+#include <android/security/remoteprovisioning/RemotelyProvisionedKey.h>
 #include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
 
 #include "Credential.h"
 #include "CredentialData.h"
@@ -32,6 +37,46 @@
 namespace android {
 namespace security {
 namespace identity {
+namespace {
+
+using ::android::hardware::security::keymint::IRemotelyProvisionedComponent;
+using ::android::hardware::security::keymint::RpcHardwareInfo;
+using ::android::security::remoteprovisioning::IRemotelyProvisionedKeyPool;
+using ::android::security::remoteprovisioning::RemotelyProvisionedKey;
+
+std::optional<std::string>
+getRemotelyProvisionedComponentId(const sp<IIdentityCredentialStore>& hal) {
+    auto init = [](const sp<IIdentityCredentialStore>& hal) -> std::optional<std::string> {
+        sp<IRemotelyProvisionedComponent> remotelyProvisionedComponent;
+        Status status = hal->getRemotelyProvisionedComponent(&remotelyProvisionedComponent);
+        if (!status.isOk()) {
+            LOG(ERROR) << "Error getting remotely provisioned component: " << status;
+            return std::nullopt;
+        }
+
+        RpcHardwareInfo rpcHwInfo;
+        status = remotelyProvisionedComponent->getHardwareInfo(&rpcHwInfo);
+        if (!status.isOk()) {
+            LOG(ERROR) << "Error getting remotely provisioned component hardware info: " << status;
+            return std::nullopt;
+        }
+
+        if (!rpcHwInfo.uniqueId) {
+            LOG(ERROR) << "Remotely provisioned component is missing a unique id, which is "
+                       << "required to fetch remotely provisioned attestation keys for credential keys. "
+                       << "This is a bug in the vendor implementation.";
+            return std::nullopt;
+        }
+
+        // This id is required to later fetch remotely provisioned attestation keys.
+        return *rpcHwInfo.uniqueId;
+    };
+
+    static std::optional<std::string> id = init(hal);
+    return id;
+}
+
+}  // namespace
 
 CredentialStore::CredentialStore(const std::string& dataPath, sp<IIdentityCredentialStore> hal)
     : dataPath_(dataPath), hal_(hal) {}
@@ -44,6 +89,16 @@
     }
     halApiVersion_ = hal_->getInterfaceVersion();
 
+    if (hwInfo_.isRemoteKeyProvisioningSupported) {
+        keyPool_ = android::waitForService<IRemotelyProvisionedKeyPool>(
+            IRemotelyProvisionedKeyPool::descriptor);
+        if (keyPool_.get() == nullptr) {
+            LOG(ERROR) << "Error getting IRemotelyProvisionedKeyPool service with name '"
+                       << IRemotelyProvisionedKeyPool::descriptor << "'";
+            return false;
+        }
+    }
+
     LOG(INFO) << "Connected to Identity Credential HAL with API version " << halApiVersion_
               << " and name '" << hwInfo_.credentialStoreName << "' authored by '"
               << hwInfo_.credentialStoreAuthorName << "' with chunk size " << hwInfo_.dataChunkSize
@@ -90,6 +145,13 @@
         return halStatusToGenericError(status);
     }
 
+    if (hwInfo_.isRemoteKeyProvisioningSupported) {
+        status = setRemotelyProvisionedAttestationKey(halWritableCredential.get());
+        if (!status.isOk()) {
+            return halStatusToGenericError(status);
+        }
+    }
+
     sp<IWritableCredential> writableCredential = new WritableCredential(
         dataPath_, credentialName, docType, false, hwInfo_, halWritableCredential);
     *_aidl_return = writableCredential;
@@ -145,6 +207,33 @@
     return Status::ok();
 }
 
+Status CredentialStore::setRemotelyProvisionedAttestationKey(
+    IWritableIdentityCredential* halWritableCredential) {
+    std::optional<std::string> rpcId = getRemotelyProvisionedComponentId(hal_);
+    if (!rpcId) {
+        return Status::fromServiceSpecificError(ERROR_GENERIC,
+                                                "Error getting remotely provisioned component id");
+    }
+
+    uid_t callingUid = android::IPCThreadState::self()->getCallingUid();
+    RemotelyProvisionedKey key;
+    Status status = keyPool_->getAttestationKey(callingUid, *rpcId, &key);
+    if (!status.isOk()) {
+        LOG(WARNING) << "Unable to fetch remotely provisioned attestation key, falling back "
+                     << "to the factory-provisioned attestation key.";
+        return Status::ok();
+    }
+
+    status = halWritableCredential->setRemotelyProvisionedAttestationKey(key.keyBlob,
+                                                                         key.encodedCertChain);
+    if (!status.isOk()) {
+        LOG(ERROR) << "Error setting remotely provisioned attestation key on credential";
+        return status;
+    }
+
+    return Status::ok();
+}
+
 }  // namespace identity
 }  // namespace security
 }  // namespace android
diff --git a/identity/CredentialStore.h b/identity/CredentialStore.h
index f2aa506..df7928e 100644
--- a/identity/CredentialStore.h
+++ b/identity/CredentialStore.h
@@ -21,8 +21,8 @@
 #include <vector>
 
 #include <android/hardware/identity/IIdentityCredentialStore.h>
-
 #include <android/security/identity/BnCredentialStore.h>
+#include <android/security/remoteprovisioning/IRemotelyProvisionedKeyPool.h>
 
 namespace android {
 namespace security {
@@ -38,6 +38,8 @@
 using ::android::hardware::identity::HardwareInformation;
 using ::android::hardware::identity::IIdentityCredentialStore;
 using ::android::hardware::identity::IPresentationSession;
+using ::android::hardware::identity::IWritableIdentityCredential;
+using ::android::security::remoteprovisioning::IRemotelyProvisionedKeyPool;
 
 class CredentialStore : public BnCredentialStore {
   public:
@@ -64,11 +66,15 @@
     Status createPresentationSession(int32_t cipherSuite, sp<ISession>* _aidl_return) override;
 
   private:
+    Status setRemotelyProvisionedAttestationKey(IWritableIdentityCredential* halWritableCredential);
+
     string dataPath_;
 
     sp<IIdentityCredentialStore> hal_;
     int halApiVersion_;
 
+    sp<IRemotelyProvisionedKeyPool> keyPool_;
+
     HardwareInformation hwInfo_;
 };
 
diff --git a/keystore2/Android.bp b/keystore2/Android.bp
index 520237a..2027af4 100644
--- a/keystore2/Android.bp
+++ b/keystore2/Android.bp
@@ -76,15 +76,6 @@
 }
 
 rust_library {
-    name: "libkeystore2_noicu",
-    defaults: ["libkeystore2_defaults"],
-    rustlibs: [
-        "liblibsqlite3_sys_noicu",
-        "librusqlite_noicu",
-    ],
-}
-
-rust_library {
     name: "libkeystore2_test_utils",
     crate_name: "keystore2_test_utils",
     srcs: ["test_utils/lib.rs"],
@@ -167,18 +158,3 @@
         "librusqlite",
     ],
 }
-
-// Variant of keystore2 for use in microdroid. It doesn't depend on the ICU-enabled sqlite.
-// This can be used also in Android, but we choose not to because it will bring two
-// variants of sqlite to the system causing more RAM usage and CPU cycles when loading.
-rust_binary {
-    name: "keystore2_microdroid",
-    stem: "keystore2",
-    defaults: ["keystore2_defaults"],
-    rustlibs: [
-        "libkeystore2_noicu",
-        "liblegacykeystore-rust_noicu",
-        "librusqlite_noicu",
-    ],
-    installable: false, // don't install this to Android
-}
diff --git a/keystore2/aidl/Android.bp b/keystore2/aidl/Android.bp
index 7eb2b83..ae08567 100644
--- a/keystore2/aidl/Android.bp
+++ b/keystore2/aidl/Android.bp
@@ -29,7 +29,6 @@
     backend: {
         java: {
             platform_apis: true,
-            srcs_available: true,
         },
         rust: {
             enabled: true,
@@ -52,7 +51,6 @@
     backend: {
         java: {
             platform_apis: true,
-            srcs_available: true,
         },
         rust: {
             enabled: true,
@@ -71,7 +69,6 @@
     backend: {
         java: {
             enabled: true,
-            srcs_available: true,
         },
         rust: {
             enabled: true,
@@ -94,7 +91,6 @@
     backend: {
         java: {
             platform_apis: true,
-            srcs_available: true,
         },
         rust: {
             enabled: true,
@@ -116,7 +112,6 @@
     backend: {
         java: {
             platform_apis: true,
-            srcs_available: true,
         },
         ndk: {
             enabled: true,
@@ -138,7 +133,6 @@
     backend: {
         java: {
             platform_apis: true,
-            srcs_available: true,
         },
         rust: {
             enabled: true,
@@ -157,7 +151,6 @@
     backend: {
         java: {
             platform_apis: true,
-            srcs_available: true,
         },
         rust: {
             enabled: true,
@@ -179,7 +172,6 @@
     backend: {
         java: {
             platform_apis: true,
-            srcs_available: true,
         },
         rust: {
             enabled: true,
diff --git a/keystore2/legacykeystore/Android.bp b/keystore2/legacykeystore/Android.bp
index d407569..505b165 100644
--- a/keystore2/legacykeystore/Android.bp
+++ b/keystore2/legacykeystore/Android.bp
@@ -47,15 +47,6 @@
     ],
 }
 
-rust_library {
-    name: "liblegacykeystore-rust_noicu",
-    defaults: ["liblegacykeystore-rust_defaults"],
-    rustlibs: [
-        "libkeystore2_noicu",
-        "librusqlite_noicu",
-    ],
-}
-
 rust_test {
     name: "legacykeystore_test",
     crate_name: "legacykeystore",
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index 64b498f..8265dd0 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -16,7 +16,7 @@
 
 use crate::error::Error as KeystoreError;
 use crate::error::anyhow_error_to_cstring;
-use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_MIGRATOR};
+use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_IMPORTER};
 use crate::permission::KeystorePerm;
 use crate::super_key::UserState;
 use crate::utils::{check_keystore_permission, watchdog as wd};
@@ -154,8 +154,10 @@
                     .context("In on_lock_screen_event: Unlock with password.")?;
                 ENFORCEMENTS.set_device_locked(user_id, false);
 
+                let mut skm = SUPER_KEY.write().unwrap();
+
                 DB.with(|db| {
-                    SUPER_KEY.unlock_screen_lock_bound_key(
+                    skm.unlock_screen_lock_bound_key(
                         &mut db.borrow_mut(),
                         user_id as u32,
                         &password,
@@ -166,10 +168,9 @@
                 // Unlock super key.
                 if let UserState::Uninitialized = DB
                     .with(|db| {
-                        UserState::get_with_password_unlock(
+                        skm.unlock_and_get_user_state(
                             &mut db.borrow_mut(),
-                            &LEGACY_MIGRATOR,
-                            &SUPER_KEY,
+                            &LEGACY_IMPORTER,
                             user_id as u32,
                             &password,
                         )
@@ -187,8 +188,9 @@
                 check_keystore_permission(KeystorePerm::Unlock)
                     .context("In on_lock_screen_event: Unlock.")?;
                 ENFORCEMENTS.set_device_locked(user_id, false);
+                let mut skm = SUPER_KEY.write().unwrap();
                 DB.with(|db| {
-                    SUPER_KEY.try_unlock_user_with_biometric(&mut db.borrow_mut(), user_id as u32)
+                    skm.try_unlock_user_with_biometric(&mut db.borrow_mut(), user_id as u32)
                 })
                 .context("In on_lock_screen_event: try_unlock_user_with_biometric failed")?;
                 Ok(())
@@ -197,8 +199,9 @@
                 check_keystore_permission(KeystorePerm::Lock)
                     .context("In on_lock_screen_event: Lock")?;
                 ENFORCEMENTS.set_device_locked(user_id, true);
+                let mut skm = SUPER_KEY.write().unwrap();
                 DB.with(|db| {
-                    SUPER_KEY.lock_screen_lock_bound_key(
+                    skm.lock_screen_lock_bound_key(
                         &mut db.borrow_mut(),
                         user_id as u32,
                         unlocking_sids.unwrap_or(&[]),
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index 133a926..e68b0fd 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -3231,7 +3231,7 @@
     use std::collections::BTreeMap;
     use std::fmt::Write;
     use std::sync::atomic::{AtomicU8, Ordering};
-    use std::sync::Arc;
+    use std::sync::{Arc, RwLock};
     use std::thread;
     use std::time::{Duration, SystemTime};
     #[cfg(disabled)]
@@ -3251,7 +3251,7 @@
     where
         F: Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static,
     {
-        let super_key: Arc<SuperKeyManager> = Default::default();
+        let super_key: Arc<RwLock<SuperKeyManager>> = Default::default();
 
         let gc_db = KeystoreDB::new(path, None).expect("Failed to open test gc db_connection.");
         let gc = Gc::new_init_with(Default::default(), move || (Box::new(cb), gc_db, super_key));
diff --git a/keystore2/src/enforcements.rs b/keystore2/src/enforcements.rs
index 2407525..cb6a266 100644
--- a/keystore2/src/enforcements.rs
+++ b/keystore2/src/enforcements.rs
@@ -450,7 +450,7 @@
                         KeyParameterValue::Algorithm(Algorithm::RSA)
                         | KeyParameterValue::Algorithm(Algorithm::EC) => {
                             return Err(Error::Km(Ec::UNSUPPORTED_PURPOSE)).context(
-                                "In authorize_create: public operations on asymmetric keys are not
+                                "In authorize_create: public operations on asymmetric keys are not \
                                  supported.",
                             );
                         }
@@ -566,8 +566,7 @@
         // if both NO_AUTH_REQUIRED and USER_SECURE_ID tags are present, return error
         if !user_secure_ids.is_empty() && no_auth_required {
             return Err(Error::Km(Ec::INVALID_KEY_BLOB)).context(
-                "In authorize_create: key has both NO_AUTH_REQUIRED
-                and USER_SECURE_ID tags.",
+                "In authorize_create: key has both NO_AUTH_REQUIRED and USER_SECURE_ID tags.",
             );
         }
 
@@ -576,8 +575,8 @@
             || (user_auth_type.is_none() && !user_secure_ids.is_empty())
         {
             return Err(Error::Km(Ec::KEY_USER_NOT_AUTHENTICATED)).context(
-                "In authorize_create: Auth required, but either auth type or secure ids
-                are not present.",
+                "In authorize_create: Auth required, but either auth type or secure ids \
+                 are not present.",
             );
         }
 
@@ -587,8 +586,7 @@
             && op_params.iter().any(|kp| kp.tag == Tag::NONCE)
         {
             return Err(Error::Km(Ec::CALLER_NONCE_PROHIBITED)).context(
-                "In authorize_create, NONCE is present,
-                    although CALLER_NONCE is not present",
+                "In authorize_create, NONCE is present, although CALLER_NONCE is not present",
             );
         }
 
@@ -602,7 +600,7 @@
         }
 
         if let Some(level) = max_boot_level {
-            if !SUPER_KEY.level_accessible(level) {
+            if !SUPER_KEY.read().unwrap().level_accessible(level) {
                 return Err(Error::Km(Ec::BOOT_LEVEL_EXCEEDED))
                     .context("In authorize_create: boot level is too late.");
             }
diff --git a/keystore2/src/gc.rs b/keystore2/src/gc.rs
index 25f08c8..341aa0a 100644
--- a/keystore2/src/gc.rs
+++ b/keystore2/src/gc.rs
@@ -27,7 +27,7 @@
 use async_task::AsyncTask;
 use std::sync::{
     atomic::{AtomicU8, Ordering},
-    Arc,
+    Arc, RwLock,
 };
 
 pub struct Gc {
@@ -47,7 +47,7 @@
         F: FnOnce() -> (
                 Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
                 KeystoreDB,
-                Arc<SuperKeyManager>,
+                Arc<RwLock<SuperKeyManager>>,
             ) + Send
             + 'static,
     {
@@ -87,7 +87,7 @@
     invalidate_key: Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
     db: KeystoreDB,
     async_task: std::sync::Weak<AsyncTask>,
-    super_key: Arc<SuperKeyManager>,
+    super_key: Arc<RwLock<SuperKeyManager>>,
     notified: Arc<AtomicU8>,
 }
 
@@ -121,6 +121,8 @@
             if let Some(uuid) = blob_metadata.km_uuid() {
                 let blob = self
                     .super_key
+                    .read()
+                    .unwrap()
                     .unwrap_key_if_required(&blob_metadata, &blob)
                     .context("In process_one_key: Trying to unwrap to-be-deleted blob.")?;
                 (self.invalidate_key)(uuid, &*blob)
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index e8f3ff9..14b3601 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -18,7 +18,7 @@
 
 use crate::gc::Gc;
 use crate::legacy_blob::LegacyBlobLoader;
-use crate::legacy_migrator::LegacyMigrator;
+use crate::legacy_importer::LegacyImporter;
 use crate::super_key::SuperKeyManager;
 use crate::utils::watchdog as wd;
 use crate::{async_task::AsyncTask, database::MonotonicRawTime};
@@ -157,7 +157,7 @@
     pub static ref DB_PATH: RwLock<PathBuf> = RwLock::new(
         Path::new("/data/misc/keystore").to_path_buf());
     /// Runtime database of unwrapped super keys.
-    pub static ref SUPER_KEY: Arc<SuperKeyManager> = Default::default();
+    pub static ref SUPER_KEY: Arc<RwLock<SuperKeyManager>> = Default::default();
     /// Map of KeyMint devices.
     static ref KEY_MINT_DEVICES: Mutex<DevicesMap<dyn IKeyMintDevice>> = Default::default();
     /// Timestamp service.
@@ -176,8 +176,8 @@
     pub static ref LEGACY_BLOB_LOADER: Arc<LegacyBlobLoader> = Arc::new(LegacyBlobLoader::new(
         &DB_PATH.read().expect("Could not get the database path for legacy blob loader.")));
     /// Legacy migrator. Atomically migrates legacy blobs to the database.
-    pub static ref LEGACY_MIGRATOR: Arc<LegacyMigrator> =
-        Arc::new(LegacyMigrator::new(Arc::new(Default::default())));
+    pub static ref LEGACY_IMPORTER: Arc<LegacyImporter> =
+        Arc::new(LegacyImporter::new(Arc::new(Default::default())));
     /// Background thread which handles logging via statsd and logd
     pub static ref LOGS_HANDLER: Arc<AsyncTask> = Default::default();
 
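
Note on the SUPER_KEY change above: the global moves from an Arc<SuperKeyManager> with internal locking to an Arc<RwLock<SuperKeyManager>>, so every access site now takes an explicit read or write lock, as seen in enforcements.rs, authorization.rs, and maintenance.rs in this change. Below is a minimal sketch of the resulting pattern with a stand-in SuperKeyManager; the method names are simplified placeholders, not the real keystore2 API.

    use std::sync::{Arc, RwLock};

    #[derive(Default)]
    struct SuperKeyManager {
        // Stand-in for the per-user super keys and boot-level cache held by the real type.
        unlocked_users: Vec<u32>,
    }

    impl SuperKeyManager {
        fn is_unlocked(&self, user: u32) -> bool {
            // Read-only query, comparable to level_accessible() in enforcements.rs.
            self.unlocked_users.contains(&user)
        }
        fn mark_unlocked(&mut self, user: u32) {
            // Mutation, comparable to the unlock/reset paths in maintenance.rs.
            self.unlocked_users.push(user);
        }
    }

    fn main() {
        // Mirrors `pub static ref SUPER_KEY: Arc<RwLock<SuperKeyManager>> = Default::default();`
        let super_key: Arc<RwLock<SuperKeyManager>> = Default::default();

        // Writers take the lock once and reuse the guard, as the updated call sites do.
        let mut skm = super_key.write().unwrap();
        skm.mark_unlocked(0);
        drop(skm);

        // Readers only need a shared lock.
        assert!(super_key.read().unwrap().is_unlocked(0));
    }
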
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index abab4b6..bea5f08 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -19,7 +19,9 @@
 use keystore2::maintenance::Maintenance;
 use keystore2::metrics::Metrics;
 use keystore2::metrics_store;
-use keystore2::remote_provisioning::RemoteProvisioningService;
+use keystore2::remote_provisioning::{
+    RemoteProvisioningService, RemotelyProvisionedKeyPoolService,
+};
 use keystore2::service::KeystoreService;
 use keystore2::{apc::ApcManager, shared_secret_negotiation};
 use keystore2::{authorization::AuthorizationManager, id_rotation::IdRotationState};
@@ -33,6 +35,8 @@
 static AUTHORIZATION_SERVICE_NAME: &str = "android.security.authorization";
 static METRICS_SERVICE_NAME: &str = "android.security.metrics";
 static REMOTE_PROVISIONING_SERVICE_NAME: &str = "android.security.remoteprovisioning";
+static REMOTELY_PROVISIONED_KEY_POOL_SERVICE_NAME: &str =
+    "android.security.remoteprovisioning.IRemotelyProvisionedKeyPool";
 static USER_MANAGER_SERVICE_NAME: &str = "android.security.maintenance";
 static LEGACY_KEYSTORE_SERVICE_NAME: &str = "android.security.legacykeystore";
 
@@ -145,6 +149,22 @@
         });
     }
 
+    // Even if the IRemotelyProvisionedComponent HAL is implemented, that does not mean keys can be
+    // fetched via the key pool. The HAL must be a new enough version to export a unique id. If none
+    // of the HALs support this, then the key pool service is not published.
+    if let Ok(key_pool_service) = RemotelyProvisionedKeyPoolService::new_native_binder() {
+        binder::add_service(
+            REMOTELY_PROVISIONED_KEY_POOL_SERVICE_NAME,
+            key_pool_service.as_binder(),
+        )
+        .unwrap_or_else(|e| {
+            panic!(
+                "Failed to register service {} because of {:?}.",
+                REMOTELY_PROVISIONED_KEY_POOL_SERVICE_NAME, e
+            );
+        });
+    }
+
     binder::add_service(LEGACY_KEYSTORE_SERVICE_NAME, legacykeystore.as_binder()).unwrap_or_else(
         |e| {
             panic!(
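
The key pool service above is registered only when RemotelyProvisionedKeyPoolService::new_native_binder() succeeds, i.e. when at least one IRemotelyProvisionedComponent reports a unique id (see remote_provisioning.rs below). For context, here is a rough sketch of how a privileged Rust client could look the service up by the instance name registered here; the wait_for_interface call, error handling, and generated binding path are assumptions for illustration, not code from this change.

    // Hypothetical client-side lookup; assumes the AIDL-generated Rust binding below is
    // available in the caller's crate and the caller may use the key pool service.
    use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
        IRemotelyProvisionedKeyPool::IRemotelyProvisionedKeyPool,
        RemotelyProvisionedKey::RemotelyProvisionedKey,
    };
    use anyhow::{anyhow, Result};
    use binder::Strong;

    fn fetch_attestation_key(client_uid: i32, irpc_id: &str) -> Result<RemotelyProvisionedKey> {
        // Same instance name as REMOTELY_PROVISIONED_KEY_POOL_SERVICE_NAME above.
        let pool: Strong<dyn IRemotelyProvisionedKeyPool> = binder::wait_for_interface(
            "android.security.remoteprovisioning.IRemotelyProvisionedKeyPool",
        )
        .map_err(|e| anyhow!("waiting for key pool service failed: {:?}", e))?;

        // Assumed method shape, mirroring the C++ call
        // keyPool_->getAttestationKey(callingUid, *rpcId, &key) in identity/CredentialStore.cpp.
        pool.getAttestationKey(client_uid, irpc_id)
            .map_err(|e| anyhow!("getAttestationKey failed (e.g. OUT_OF_KEYS): {:?}", e))
    }
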
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 7454cca..b801ed3 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -1340,7 +1340,7 @@
             CACERT_NON_AUTHBOUND,
         )?;
 
-        let key_manager: SuperKeyManager = Default::default();
+        let mut key_manager: SuperKeyManager = Default::default();
         let mut db = crate::database::KeystoreDB::new(temp_dir.path(), None)?;
         let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
 
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_importer.rs
similarity index 79%
rename from keystore2/src/legacy_migrator.rs
rename to keystore2/src/legacy_importer.rs
index 65f4b0b..3f37b14 100644
--- a/keystore2/src/legacy_migrator.rs
+++ b/keystore2/src/legacy_importer.rs
@@ -38,8 +38,8 @@
 use std::sync::mpsc::channel;
 use std::sync::{Arc, Mutex};
 
-/// Represents LegacyMigrator.
-pub struct LegacyMigrator {
+/// Represents LegacyImporter.
+pub struct LegacyImporter {
     async_task: Arc<AsyncTask>,
     initializer: Mutex<
         Option<
@@ -51,19 +51,19 @@
         >,
     >,
     /// This atomic is used for cheap interior mutability. It is intended to prevent
-    /// expensive calls into the legacy migrator when the legacy database is empty.
+    /// expensive calls into the legacy importer when the legacy database is empty.
     /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
     /// of time. This is tolerable in favor of the common case.
     state: AtomicU8,
 }
 
 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-struct RecentMigration {
+struct RecentImport {
     uid: u32,
     alias: String,
 }
 
-impl RecentMigration {
+impl RecentImport {
     fn new(uid: u32, alias: String) -> Self {
         Self { uid, alias }
     }
@@ -74,15 +74,15 @@
     User(u32),
 }
 
-struct LegacyMigratorState {
-    recently_migrated: HashSet<RecentMigration>,
-    recently_migrated_super_key: HashSet<u32>,
+struct LegacyImporterState {
+    recently_imported: HashSet<RecentImport>,
+    recently_imported_super_key: HashSet<u32>,
     legacy_loader: Arc<LegacyBlobLoader>,
     sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
     db: KeystoreDB,
 }
 
-impl LegacyMigrator {
+impl LegacyImporter {
     const WIFI_NAMESPACE: i64 = 102;
     const AID_WIFI: u32 = 1010;
 
@@ -90,7 +90,7 @@
     const STATE_READY: u8 = 1;
     const STATE_EMPTY: u8 = 2;
 
-    /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
+    /// Constructs a new LegacyImporter using the given AsyncTask object as import
     /// worker.
     pub fn new(async_task: Arc<AsyncTask>) -> Self {
         Self {
@@ -100,7 +100,7 @@
         }
     }
 
-    /// The legacy migrator must be initialized deferred, because keystore starts very early.
+    /// The legacy importer's initialization must be deferred, because keystore starts very early.
     /// At this time the data partition may not be mounted. So we cannot open database connections
     /// until we get actual key load requests. This sets the function that the legacy loader
     /// uses to connect to the database.
@@ -125,11 +125,11 @@
         Ok(())
     }
 
-    /// This function is called by the migration requestor to check if it is worth
-    /// making a migration request. It also transitions the state from UNINITIALIZED
+    /// This function is called by the import requestor to check if it is worth
+    /// making an import request. It also transitions the state from UNINITIALIZED
     /// to READY or EMPTY on first use. The deferred initialization is necessary, because
     /// Keystore 2.0 runs early during boot, where data may not yet be mounted.
-    /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
+    /// Returns Ok(STATE_READY) if an import request is worth undertaking and
     /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
     /// was not initialized and cannot be initialized.
     fn check_state(&self) -> Result<u8> {
@@ -157,9 +157,9 @@
                         }
 
                         self.async_task.queue_hi(move |shelf| {
-                            shelf.get_or_put_with(|| LegacyMigratorState {
-                                recently_migrated: Default::default(),
-                                recently_migrated_super_key: Default::default(),
+                            shelf.get_or_put_with(|| LegacyImporterState {
+                                recently_imported: Default::default(),
+                                recently_imported_super_key: Default::default(),
                                 legacy_loader,
                                 sec_level_to_km_uuid,
                                 db,
@@ -189,14 +189,14 @@
                     );
                 }
                 (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
-                (s, _) => panic!("Unknown legacy migrator state. {} ", s),
+                (s, _) => panic!("Unknown legacy importer state. {} ", s),
             }
         }
     }
 
     /// List all aliases for uid in the legacy database.
     pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
-        let _wp = wd::watch_millis("LegacyMigrator::list_uid", 500);
+        let _wp = wd::watch_millis("LegacyImporter::list_uid", 500);
 
         let uid = match (domain, namespace) {
             (Domain::APP, namespace) => namespace as u32,
@@ -217,44 +217,44 @@
         )
     }
 
-    /// Sends the given closure to the migrator thread for execution after calling check_state.
+    /// Sends the given closure to the importer thread for execution after calling check_state.
     /// Returns None if the database was empty and the request was not executed.
-    /// Otherwise returns Some with the result produced by the migration request.
+    /// Otherwise returns Some with the result produced by the import request.
     /// The loader state may transition to STATE_EMPTY during the execution of this function.
     fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
     where
-        F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
+        F: FnOnce(&mut LegacyImporterState) -> Result<T> + Send + 'static,
     {
         // Short circuit if the database is empty or not initialized (error case).
         match self.check_state().context("In do_serialized: Checking state.") {
-            Ok(LegacyMigrator::STATE_EMPTY) => return None,
-            Ok(LegacyMigrator::STATE_READY) => {}
+            Ok(LegacyImporter::STATE_EMPTY) => return None,
+            Ok(LegacyImporter::STATE_READY) => {}
             Err(e) => return Some(Err(e)),
-            Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
+            Ok(s) => panic!("Unknown legacy importer state. {} ", s),
         }
 
         // We have established that there may be a key in the legacy database.
-        // Now we schedule a migration request.
+        // Now we schedule an import request.
         let (sender, receiver) = channel();
         self.async_task.queue_hi(move |shelf| {
-            // Get the migrator state from the shelf.
-            // There may not be a state. This can happen if this migration request was scheduled
+            // Get the importer state from the shelf.
+            // There may not be a state. This can happen if this import request was scheduled
             // before a previous request established that the legacy database was empty
             // and removed the state from the shelf. Since we know now that the database
             // is empty, we can return None here.
-            let (new_state, result) = if let Some(legacy_migrator_state) =
-                shelf.get_downcast_mut::<LegacyMigratorState>()
+            let (new_state, result) = if let Some(legacy_importer_state) =
+                shelf.get_downcast_mut::<LegacyImporterState>()
             {
-                let result = f(legacy_migrator_state);
-                (legacy_migrator_state.check_empty(), Some(result))
+                let result = f(legacy_importer_state);
+                (legacy_importer_state.check_empty(), Some(result))
             } else {
                 (Self::STATE_EMPTY, None)
             };
 
-            // If the migration request determined that the database is now empty, we discard
+            // If the import request determined that the database is now empty, we discard
             // the state from the shelf to free up the resources we won't need any longer.
             if result.is_some() && new_state == Self::STATE_EMPTY {
-                shelf.remove_downcast_ref::<LegacyMigratorState>();
+                shelf.remove_downcast_ref::<LegacyImporterState>();
             }
 
             // Send the result to the requester.
@@ -271,7 +271,7 @@
         };
 
         // We can only transition to EMPTY but never back.
-        // The migrator never creates any legacy blobs.
+        // The importer never creates any legacy blobs.
         if new_state == Self::STATE_EMPTY {
             self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
         }
@@ -280,10 +280,10 @@
     }
 
     /// Runs the key_accessor function and returns its result. If it returns an error and the
-    /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
+    /// root cause was KEY_NOT_FOUND, tries to import a key with the given parameters from
     /// the legacy database to the new database and runs the key_accessor function again if
-    /// the migration request was successful.
-    pub fn with_try_migrate<F, T>(
+    /// the import request was successful.
+    pub fn with_try_import<F, T>(
         &self,
         key: &KeyDescriptor,
         caller_uid: u32,
@@ -292,7 +292,7 @@
     where
         F: Fn() -> Result<T>,
     {
-        let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate", 500);
+        let _wp = wd::watch_millis("LegacyImporter::with_try_import", 500);
 
         // Access the key and return on success.
         match key_accessor() {
@@ -304,7 +304,7 @@
         }
 
         // Filter inputs. We can only load legacy app domain keys and some special rules due
-        // to which we migrate keys transparently to an SELINUX domain.
+        // to which we import keys transparently to an SELINUX domain.
         let uid = match key {
             KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
             KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
@@ -324,11 +324,11 @@
 
         let key_clone = key.clone();
         let result = self
-            .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));
+            .do_serialized(move |importer_state| importer_state.check_and_import(uid, key_clone));
 
         if let Some(result) = result {
             result?;
-            // After successful migration try again.
+            // After successful import try again.
             key_accessor()
         } else {
             Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
@@ -336,8 +336,8 @@
     }
 
     /// Calls key_accessor and returns the result on success. In the case of a KEY_NOT_FOUND error
-    /// this function makes a migration request and on success retries the key_accessor.
-    pub fn with_try_migrate_super_key<F, T>(
+    /// this function makes an import request and on success retries the key_accessor.
+    pub fn with_try_import_super_key<F, T>(
         &self,
         user_id: u32,
         pw: &Password,
@@ -346,31 +346,31 @@
     where
         F: FnMut() -> Result<Option<T>>,
     {
-        let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate_super_key", 500);
+        let _wp = wd::watch_millis("LegacyImporter::with_try_import_super_key", 500);
 
         match key_accessor() {
             Ok(Some(result)) => return Ok(Some(result)),
             Ok(None) => {}
             Err(e) => return Err(e),
         }
-        let pw = pw.try_clone().context("In with_try_migrate_super_key: Cloning password.")?;
-        let result = self.do_serialized(move |migrator_state| {
-            migrator_state.check_and_migrate_super_key(user_id, &pw)
+        let pw = pw.try_clone().context("In with_try_import_super_key: Cloning password.")?;
+        let result = self.do_serialized(move |importer_state| {
+            importer_state.check_and_import_super_key(user_id, &pw)
         });
 
         if let Some(result) = result {
             result?;
-            // After successful migration try again.
+            // After successful import try again.
             key_accessor()
         } else {
             Ok(None)
         }
     }
 
-    /// Deletes all keys belonging to the given namespace, migrating them into the database
+    /// Deletes all keys belonging to the given namespace, importing them into the database
     /// for subsequent garbage collection if necessary.
     pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
-        let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_uid", 500);
+        let _wp = wd::watch_millis("LegacyImporter::bulk_delete_uid", 500);
 
         let uid = match (domain, nspace) {
             (Domain::APP, nspace) => nspace as u32,
@@ -379,24 +379,24 @@
             _ => return Ok(()),
         };
 
-        let result = self.do_serialized(move |migrator_state| {
-            migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
+        let result = self.do_serialized(move |importer_state| {
+            importer_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
         });
 
         result.unwrap_or(Ok(()))
     }
 
-    /// Deletes all keys belonging to the given android user, migrating them into the database
+    /// Deletes all keys belonging to the given Android user, importing them into the database
     /// for subsequent garbage collection if necessary.
     pub fn bulk_delete_user(
         &self,
         user_id: u32,
         keep_non_super_encrypted_keys: bool,
     ) -> Result<()> {
-        let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_user", 500);
+        let _wp = wd::watch_millis("LegacyImporter::bulk_delete_user", 500);
 
-        let result = self.do_serialized(move |migrator_state| {
-            migrator_state
+        let result = self.do_serialized(move |importer_state| {
+            importer_state
                 .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
         });
 
@@ -406,12 +406,12 @@
     /// Queries the legacy database for the presence of a super key for the given user.
     pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
         let result =
-            self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
+            self.do_serialized(move |importer_state| importer_state.has_super_key(user_id));
         result.unwrap_or(Ok(false))
     }
 }
 
-impl LegacyMigratorState {
+impl LegacyImporterState {
     fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
         let sec_level = if is_strongbox {
             SecurityLevel::STRONGBOX
@@ -430,17 +430,17 @@
             .context("In list_uid: Trying to list legacy entries.")
     }
 
-    /// This is a key migration request that must run in the migrator thread. This must
+    /// This is a key import request that must run in the importer thread. This must
     /// be passed to do_serialized.
-    fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
+    fn check_and_import(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
         let alias = key.alias.clone().ok_or_else(|| {
-            anyhow::anyhow!(Error::sys()).context(concat!(
-                "In check_and_migrate: Must be Some because ",
-                "our caller must not have called us otherwise."
-            ))
+            anyhow::anyhow!(Error::sys()).context(
+                "In check_and_import: Must be Some because \
+                 our caller must not have called us otherwise.",
+            )
         })?;
 
-        if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
+        if self.recently_imported.contains(&RecentImport::new(uid, alias.clone())) {
             return Ok(());
         }
 
@@ -452,7 +452,7 @@
         let (km_blob_params, user_cert, ca_cert) = self
             .legacy_loader
             .load_by_uid_alias(uid, &alias, None)
-            .context("In check_and_migrate: Trying to load legacy blob.")?;
+            .context("In check_and_import: Trying to load legacy blob.")?;
         let result = match km_blob_params {
             Some((km_blob, params)) => {
                 let is_strongbox = km_blob.is_strongbox();
@@ -464,33 +464,33 @@
                         let super_key_id = match self
                             .db
                             .load_super_key(&USER_SUPER_KEY, user_id)
-                            .context("In check_and_migrate: Failed to load super key")?
+                            .context("In check_and_import: Failed to load super key")?
                         {
                             Some((_, entry)) => entry.id(),
                             None => {
                                 // This might be the first time we access the super key,
-                                // and it may not have been migrated. We cannot import
+                                // and it may not have been imported. We cannot import
                                 // the legacy super_key key now, because we need to reencrypt
                                 // it which we cannot do if we are not unlocked, which we are
-                                // not because otherwise the key would have been migrated.
+                                // not because otherwise the key would have been imported.
                                 // We can check though if the key exists. If it does,
                                 // we can return Locked. Otherwise, we can delete the
                                 // key and return NotFound, because the key will never
                                 // be unlocked again.
                                 if self.legacy_loader.has_super_key(user_id) {
                                     return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
-                                        "In check_and_migrate: Cannot migrate super key of this ",
+                                        "In check_and_import: Cannot import super key of this ",
                                         "key while user is locked."
                                     ));
                                 } else {
                                     self.legacy_loader.remove_keystore_entry(uid, &alias).context(
                                         concat!(
-                                            "In check_and_migrate: ",
+                                            "In check_and_import: ",
                                             "Trying to remove obsolete key."
                                         ),
                                     )?;
                                     return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
-                                        .context("In check_and_migrate: Obsolete key.");
+                                        .context("In check_and_import: Obsolete key.");
                                 }
                             }
                         };
@@ -505,18 +505,18 @@
                     BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
                     _ => {
                         return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
-                            .context("In check_and_migrate: Legacy key has unexpected type.")
+                            .context("In check_and_import: Legacy key has unexpected type.")
                     }
                 };
 
                 let km_uuid = self
                     .get_km_uuid(is_strongbox)
-                    .context("In check_and_migrate: Trying to get KM UUID")?;
+                    .context("In check_and_import: Trying to get KM UUID")?;
                 blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
 
                 let mut metadata = KeyMetaData::new();
                 let creation_date = DateTime::now()
-                    .context("In check_and_migrate: Trying to make creation time.")?;
+                    .context("In check_and_import: Trying to make creation time.")?;
                 metadata.add(KeyMetaEntry::CreationDate(creation_date));
 
                 // Store legacy key in the database.
@@ -530,49 +530,49 @@
                         &metadata,
                         &km_uuid,
                     )
-                    .context("In check_and_migrate.")?;
+                    .context("In check_and_import.")?;
                 Ok(())
             }
             None => {
                 if let Some(ca_cert) = ca_cert {
                     self.db
                         .store_new_certificate(&key, KeyType::Client, &ca_cert, &KEYSTORE_UUID)
-                        .context("In check_and_migrate: Failed to insert new certificate.")?;
+                        .context("In check_and_import: Failed to insert new certificate.")?;
                     Ok(())
                 } else {
                     Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
-                        .context("In check_and_migrate: Legacy key not found.")
+                        .context("In check_and_import: Legacy key not found.")
                 }
             }
         };
 
         match result {
             Ok(()) => {
-                // Add the key to the migrated_keys list.
-                self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
+                // Add the key to the imported_keys list.
+                self.recently_imported.insert(RecentImport::new(uid, alias.clone()));
                 // Delete legacy key from the file system
                 self.legacy_loader
                     .remove_keystore_entry(uid, &alias)
-                    .context("In check_and_migrate: Trying to remove migrated key.")?;
+                    .context("In check_and_import: Trying to remove imported key.")?;
                 Ok(())
             }
             Err(e) => Err(e),
         }
     }
 
-    fn check_and_migrate_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
-        if self.recently_migrated_super_key.contains(&user_id) {
+    fn check_and_import_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
+        if self.recently_imported_super_key.contains(&user_id) {
             return Ok(());
         }
 
         if let Some(super_key) = self
             .legacy_loader
             .load_super_key(user_id, pw)
-            .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
+            .context("In check_and_import_super_key: Trying to load legacy super key.")?
         {
             let (blob, blob_metadata) =
                 crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
-                    .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;
+                    .context("In check_and_import_super_key: Trying to encrypt super key.")?;
 
             self.db
                 .store_super_key(
@@ -583,20 +583,20 @@
                     &KeyMetaData::new(),
                 )
                 .context(concat!(
-                    "In check_and_migrate_super_key: ",
+                    "In check_and_import_super_key: ",
                     "Trying to insert legacy super_key into the database."
                 ))?;
             self.legacy_loader.remove_super_key(user_id);
-            self.recently_migrated_super_key.insert(user_id);
+            self.recently_imported_super_key.insert(user_id);
             Ok(())
         } else {
             Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
-                .context("In check_and_migrate_super_key: No key found do migrate.")
+                .context("In check_and_import_super_key: No key found to import.")
         }
     }
 
-    /// Key migrator request to be run by do_serialized.
-    /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
+    /// Key importer request to be run by do_serialized.
+    /// See LegacyImporter::bulk_delete_uid and LegacyImporter::bulk_delete_user.
     fn bulk_delete(
         &mut self,
         bulk_delete_request: BulkDeleteRequest,
@@ -695,21 +695,21 @@
 
             self.legacy_loader
                 .remove_keystore_entry(uid, &alias)
-                .context("In bulk_delete: Trying to remove migrated key.")?;
+                .context("In bulk_delete: Trying to remove imported key.")?;
         }
         Ok(())
     }
 
     fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
-        Ok(self.recently_migrated_super_key.contains(&user_id)
+        Ok(self.recently_imported_super_key.contains(&user_id)
             || self.legacy_loader.has_super_key(user_id))
     }
 
     fn check_empty(&self) -> u8 {
         if self.legacy_loader.is_empty().unwrap_or(false) {
-            LegacyMigrator::STATE_EMPTY
+            LegacyImporter::STATE_EMPTY
         } else {
-            LegacyMigrator::STATE_READY
+            LegacyImporter::STATE_READY
         }
     }
 }
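
The renamed with_try_import / with_try_import_super_key helpers keep the same shape as before: run the key accessor, and only on a miss schedule a serialized import from the legacy store, then retry the accessor once. Below is a condensed, self-contained sketch of that contract, modeled on the Option-based flavor of with_try_import_super_key with stand-in closures; the real functions also short-circuit when the legacy database is known to be empty.

    use anyhow::Result;

    // Stand-in mirroring with_try_import_super_key's control flow: try the accessor,
    // import the legacy blob only on a miss, then run the accessor again.
    fn with_try_import<T>(
        key_accessor: impl Fn() -> Result<Option<T>>,
        import_legacy_blob: impl FnOnce() -> Result<()>,
    ) -> Result<Option<T>> {
        if let Some(found) = key_accessor()? {
            return Ok(Some(found)); // already present in the new database
        }
        import_legacy_blob()?; // serialized on the importer thread in the real code
        key_accessor() // after a successful import, try again
    }

    fn main() -> Result<()> {
        let imported = std::cell::Cell::new(false);
        let result = with_try_import(
            || Ok(if imported.get() { Some(42u32) } else { None }),
            || {
                imported.set(true);
                Ok(())
            },
        )?;
        assert_eq!(result, Some(42));
        Ok(())
    }
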
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index 6e87a5e..4a23843 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -29,7 +29,7 @@
 /// Internal Representation of Key Parameter and convenience functions.
 pub mod key_parameter;
 pub mod legacy_blob;
-pub mod legacy_migrator;
+pub mod legacy_importer;
 pub mod maintenance;
 pub mod metrics;
 pub mod metrics_store;
@@ -40,7 +40,6 @@
 pub mod security_level;
 pub mod service;
 pub mod shared_secret_negotiation;
-pub mod try_insert;
 pub mod utils;
 
 mod attestation_key_utils;
diff --git a/keystore2/src/maintenance.rs b/keystore2/src/maintenance.rs
index d5feee1..71f43d6 100644
--- a/keystore2/src/maintenance.rs
+++ b/keystore2/src/maintenance.rs
@@ -19,9 +19,9 @@
 use crate::error::map_or_log_err;
 use crate::error::Error;
 use crate::globals::get_keymint_device;
-use crate::globals::{DB, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::globals::{DB, LEGACY_IMPORTER, SUPER_KEY};
 use crate::permission::{KeyPerm, KeystorePerm};
-use crate::super_key::UserState;
+use crate::super_key::{SuperKeyManager, UserState};
 use crate::utils::{
     check_key_permission, check_keystore_permission, list_key_entries, watchdog as wd,
 };
@@ -70,24 +70,25 @@
     }
 
     fn on_user_password_changed(user_id: i32, password: Option<Password>) -> Result<()> {
-        //Check permission. Function should return if this failed. Therefore having '?' at the end
-        //is very important.
+        // Check permission. Function should return if this failed. Therefore having '?' at the end
+        // is very important.
         check_keystore_permission(KeystorePerm::ChangePassword)
             .context("In on_user_password_changed.")?;
 
+        let mut skm = SUPER_KEY.write().unwrap();
+
         if let Some(pw) = password.as_ref() {
             DB.with(|db| {
-                SUPER_KEY.unlock_screen_lock_bound_key(&mut db.borrow_mut(), user_id as u32, pw)
+                skm.unlock_screen_lock_bound_key(&mut db.borrow_mut(), user_id as u32, pw)
             })
             .context("In on_user_password_changed: unlock_screen_lock_bound_key failed")?;
         }
 
         match DB
             .with(|db| {
-                UserState::get_with_password_changed(
+                skm.reset_or_init_user_and_get_user_state(
                     &mut db.borrow_mut(),
-                    &LEGACY_MIGRATOR,
-                    &SUPER_KEY,
+                    &LEGACY_IMPORTER,
                     user_id as u32,
                     password.as_ref(),
                 )
@@ -110,11 +111,11 @@
         // Check permission. Function should return if this failed. Therefore having '?' at the end
         // is very important.
         check_keystore_permission(KeystorePerm::ChangeUser).context("In add_or_remove_user.")?;
+
         DB.with(|db| {
-            UserState::reset_user(
+            SUPER_KEY.write().unwrap().reset_user(
                 &mut db.borrow_mut(),
-                &SUPER_KEY,
-                &LEGACY_MIGRATOR,
+                &LEGACY_IMPORTER,
                 user_id as u32,
                 false,
             )
@@ -129,7 +130,7 @@
         // Permission check. Must return on error. Do not touch the '?'.
         check_keystore_permission(KeystorePerm::ClearUID).context("In clear_namespace.")?;
 
-        LEGACY_MIGRATOR
+        LEGACY_IMPORTER
             .bulk_delete_uid(domain, nspace)
             .context("In clear_namespace: Trying to delete legacy keys.")?;
         DB.with(|db| db.borrow_mut().unbind_keys_for_namespace(domain, nspace))
@@ -145,7 +146,11 @@
         check_keystore_permission(KeystorePerm::GetState).context("In get_state.")?;
         let state = DB
             .with(|db| {
-                UserState::get(&mut db.borrow_mut(), &LEGACY_MIGRATOR, &SUPER_KEY, user_id as u32)
+                SUPER_KEY.read().unwrap().get_user_state(
+                    &mut db.borrow_mut(),
+                    &LEGACY_IMPORTER,
+                    user_id as u32,
+                )
             })
             .context("In get_state. Trying to get UserState.")?;
 
@@ -202,7 +207,9 @@
             .context("In early_boot_ended. Checking permission")?;
         log::info!("In early_boot_ended.");
 
-        if let Err(e) = DB.with(|db| SUPER_KEY.set_up_boot_level_cache(&mut db.borrow_mut())) {
+        if let Err(e) =
+            DB.with(|db| SuperKeyManager::set_up_boot_level_cache(&SUPER_KEY, &mut db.borrow_mut()))
+        {
             log::error!("SUPER_KEY.set_up_boot_level_cache failed:\n{:?}\n:(", e);
         }
         Maintenance::call_on_all_security_levels("earlyBootEnded", |dev| dev.earlyBootEnded())
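
set_up_boot_level_cache changes here from a method on SUPER_KEY to an associated function that receives the shared Arc<RwLock<SuperKeyManager>> handle. A plausible reading is that the function wants to control lock scope itself (or hand a clone of the handle to deferred work) rather than run under a caller-held guard; the sketch below only illustrates that calling pattern with placeholder fields and is not the real keystore2 logic.

    use std::sync::{Arc, RwLock};

    #[derive(Default)]
    struct SuperKeyManager {
        boot_level_key_cache: Option<Vec<u8>>, // placeholder for the real cache
    }

    impl SuperKeyManager {
        // Associated function: takes the shared handle so it can decide when to lock,
        // instead of requiring the caller to already hold a guard.
        fn set_up_boot_level_cache(skm: &Arc<RwLock<Self>>, root_key: Vec<u8>) -> anyhow::Result<()> {
            let mut guard = skm.write().unwrap();
            if guard.boot_level_key_cache.is_none() {
                guard.boot_level_key_cache = Some(root_key);
            }
            Ok(())
        }
    }

    fn early_boot_ended(skm: &Arc<RwLock<SuperKeyManager>>) -> anyhow::Result<()> {
        SuperKeyManager::set_up_boot_level_cache(skm, vec![0u8; 32])
    }
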
@@ -228,7 +235,7 @@
             }
             _ => {
                 return Err(Error::Rc(ResponseCode::INVALID_ARGUMENT)).context(
-                    "In migrate_key_namespace:
+                    "In migrate_key_namespace: \
                      Source domain must be one of APP, SELINUX, or KEY_ID.",
                 )
             }
@@ -242,15 +249,15 @@
             }
             _ => {
                 return Err(Error::Rc(ResponseCode::INVALID_ARGUMENT)).context(
-                    "In migrate_key_namespace:
+                    "In migrate_key_namespace: \
                      Destination domain must be one of APP or SELINUX.",
                 )
             }
         };
 
         DB.with(|db| {
-            let (key_id_guard, _) = LEGACY_MIGRATOR
-                .with_try_migrate(source, src_uid, || {
+            let (key_id_guard, _) = LEGACY_IMPORTER
+                .with_try_import(source, src_uid, || {
                     db.borrow_mut().load_key_entry(
                         source,
                         KeyType::Client,
diff --git a/keystore2/src/metrics_store.rs b/keystore2/src/metrics_store.rs
index b18d84c..b6f1343 100644
--- a/keystore2/src/metrics_store.rs
+++ b/keystore2/src/metrics_store.rs
@@ -649,6 +649,7 @@
 pub fn read_keystore_crash_count() -> Result<i32> {
     rustutils::system_properties::read("keystore.crash_count")
         .context("In read_keystore_crash_count: Failed read property.")?
+        .context("In read_keystore_crash_count: Property not set.")?
         .parse::<i32>()
         .map_err(std::convert::Into::into)
 }
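
The second .context(...) call accounts for the property read yielding an optional value: the first context is attached to the read error itself, while the second uses anyhow's Context impl for Option to turn a missing (unset) property into an error. A self-contained sketch of that chaining follows, using a stand-in reader; read_prop is not the rustutils API.

    use anyhow::{Context, Result};

    // Stand-in for a system-property read that can fail outright (Err) or find the
    // property unset (Ok(None)).
    fn read_prop(_name: &str) -> Result<Option<String>> {
        Ok(Some("3".to_string()))
    }

    fn read_crash_count() -> Result<i32> {
        read_prop("keystore.crash_count")
            .context("Failed to read property.")? // error on the read itself
            .context("Property not set.")? // Option -> Result via anyhow::Context
            .parse::<i32>()
            .map_err(std::convert::Into::into)
    }

    fn main() -> Result<()> {
        assert_eq!(read_crash_count()?, 3);
        Ok(())
    }
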
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index fadd252..639fe1e 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -31,6 +31,7 @@
 use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
     AttestationPoolStatus::AttestationPoolStatus, IRemoteProvisioning::BnRemoteProvisioning,
     IRemoteProvisioning::IRemoteProvisioning,
+    IRemotelyProvisionedKeyPool::BnRemotelyProvisionedKeyPool,
     IRemotelyProvisionedKeyPool::IRemotelyProvisionedKeyPool, ImplInfo::ImplInfo,
     RemotelyProvisionedKey::RemotelyProvisionedKey,
 };
@@ -183,22 +184,6 @@
         }
     }
 
-    fn get_dev_by_unique_id(
-        &self,
-        unique_id: &str,
-    ) -> Result<(SecurityLevel, &dyn IRemotelyProvisionedComponent)> {
-        for (sec_level, dev) in &self.device_by_sec_level {
-            if dev.getHardwareInfo()?.uniqueId == Some(unique_id.to_string()) {
-                return Ok((*sec_level, dev.as_ref()));
-            }
-        }
-
-        Err(error::Error::sys()).context(format!(
-            "In get_dev_by_unique_id: Instance for requested unique id '{}' not found",
-            unique_id
-        ))
-    }
-
     /// Creates a new instance of the remote provisioning service
     pub fn new_native_binder() -> Result<Strong<dyn IRemoteProvisioning>> {
         let mut result: Self = Default::default();
@@ -421,35 +406,6 @@
             db.delete_all_attestation_keys()
         })
     }
-
-    /// Fetches a remotely provisioned certificate chain and key for the given client uid that
-    /// was provisioned using the IRemotelyProvisionedComponent with the given id. The same key
-    /// will be returned for a given caller_uid on every request. If there are no attestation keys
-    /// available, `OUT_OF_KEYS` is returned.
-    fn get_attestation_key(
-        &self,
-        db: &mut KeystoreDB,
-        caller_uid: i32,
-        irpc_id: &str,
-    ) -> Result<RemotelyProvisionedKey> {
-        log::info!("get_attestation_key(self, {}, {}", caller_uid, irpc_id);
-
-        let (sec_level, _) = self.get_dev_by_unique_id(irpc_id)?;
-        let (_, _, km_uuid) = get_keymint_device(&sec_level)?;
-
-        let cert_chain = get_rem_prov_attest_key(Domain::APP, caller_uid as u32, db, &km_uuid)
-            .context("In get_attestation_key")?;
-        match cert_chain {
-            Some(chain) => Ok(RemotelyProvisionedKey {
-                keyBlob: chain.private_key.to_vec(),
-                encodedCertChain: chain.cert_chain,
-            }),
-            // It should be impossible to get `None`, but handle it just in case as a
-            // precaution against future behavioral changes in `get_rem_prov_attest_key`.
-            None => Err(error::Error::Rc(ResponseCode::OUT_OF_KEYS))
-                .context("In get_attestation_key: No available attestation keys"),
-        }
-    }
 }
 
 /// Populates the AttestationPoolStatus parcelable with information about how many
@@ -616,9 +572,86 @@
     }
 }
 
+/// Implementation of the IRemotelyProvisionedKeyPool service.
+#[derive(Default)]
+pub struct RemotelyProvisionedKeyPoolService {
+    unique_id_to_sec_level: HashMap<String, SecurityLevel>,
+}
+
+impl RemotelyProvisionedKeyPoolService {
+    /// Fetches a remotely provisioned certificate chain and key for the given client uid that
+    /// was provisioned using the IRemotelyProvisionedComponent with the given id. The same key
+    /// will be returned for a given caller_uid on every request. If there are no attestation keys
+    /// available, `OUT_OF_KEYS` is returned.
+    fn get_attestation_key(
+        &self,
+        db: &mut KeystoreDB,
+        caller_uid: i32,
+        irpc_id: &str,
+    ) -> Result<RemotelyProvisionedKey> {
+        log::info!("get_attestation_key(self, {}, {}", caller_uid, irpc_id);
+
+        let sec_level = self
+            .unique_id_to_sec_level
+            .get(irpc_id)
+            .ok_or(Error::Rc(ResponseCode::INVALID_ARGUMENT))
+            .context(format!("In get_attestation_key: unknown irpc id '{}'", irpc_id))?;
+        let (_, _, km_uuid) = get_keymint_device(sec_level)?;
+
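+        // The km_uuid identifies the KeyMint instance backing this security level; remotely
+        // provisioned keys are stored in the database under the same identifier.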
+        let cert_chain = get_rem_prov_attest_key(Domain::APP, caller_uid as u32, db, &km_uuid)
+            .context("In get_attestation_key")?;
+        match cert_chain {
+            Some(chain) => Ok(RemotelyProvisionedKey {
+                keyBlob: chain.private_key.to_vec(),
+                encodedCertChain: chain.cert_chain,
+            }),
+            // It should be impossible to get `None`, but handle it just in case as a
+            // precaution against future behavioral changes in `get_rem_prov_attest_key`.
+            None => Err(error::Error::Rc(ResponseCode::OUT_OF_KEYS))
+                .context("In get_attestation_key: No available attestation keys"),
+        }
+    }
+
+    /// Creates a new instance of the remotely provisioned key pool service, used for fetching
+    /// remotely provisioned attestation keys.
+    pub fn new_native_binder() -> Result<Strong<dyn IRemotelyProvisionedKeyPool>> {
+        let mut result: Self = Default::default();
+
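+        // A TEE remotely provisioned component must always be present; the StrongBox
+        // component checked below is optional.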
+        let dev = get_remotely_provisioned_component(&SecurityLevel::TRUSTED_ENVIRONMENT)
+            .context("In new_native_binder: Failed to get TEE Remote Provisioner instance.")?;
+        if let Some(id) = dev.getHardwareInfo()?.uniqueId {
+            result.unique_id_to_sec_level.insert(id, SecurityLevel::TRUSTED_ENVIRONMENT);
+        }
+
+        if let Ok(dev) = get_remotely_provisioned_component(&SecurityLevel::STRONGBOX) {
+            if let Some(id) = dev.getHardwareInfo()?.uniqueId {
+                if result.unique_id_to_sec_level.contains_key(&id) {
+                    anyhow::bail!("In new_native_binder: duplicate irpc id found: '{}'", id)
+                }
+                result.unique_id_to_sec_level.insert(id, SecurityLevel::STRONGBOX);
+            }
+        }
+
+        // If none of the remotely provisioned components have unique ids, then we shouldn't
+        // bother publishing the service, as it's impossible to match keys with their backends.
+        if result.unique_id_to_sec_level.is_empty() {
+            anyhow::bail!(
+                "In new_native_binder: No remotely provisioned components have unique ids"
+            )
+        }
+
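+        // set_requesting_sid makes the caller's SELinux context available so that incoming
+        // calls can be permission-checked.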
+        Ok(BnRemotelyProvisionedKeyPool::new_binder(
+            result,
+            BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+        ))
+    }
+}
+
+impl binder::Interface for RemotelyProvisionedKeyPoolService {}
+
 // Implementation of IRemotelyProvisionedKeyPool. See AIDL spec at
 // :aidl/android/security/remoteprovisioning/IRemotelyProvisionedKeyPool.aidl
-impl IRemotelyProvisionedKeyPool for RemoteProvisioningService {
+impl IRemotelyProvisionedKeyPool for RemotelyProvisionedKeyPoolService {
     fn getAttestationKey(
         &self,
         caller_uid: i32,
@@ -842,10 +875,10 @@
         let mock_rpc = Box::<MockRemotelyProvisionedComponent>::default();
         mock_rpc.0.lock().unwrap().hw_info.uniqueId = Some(String::from("mallory"));
 
-        let mut service: RemoteProvisioningService = Default::default();
+        let mut service: RemotelyProvisionedKeyPoolService = Default::default();
         service
-            .device_by_sec_level
-            .insert(SecurityLevel::TRUSTED_ENVIRONMENT, Strong::new(mock_rpc));
+            .unique_id_to_sec_level
+            .insert(String::from("mallory"), SecurityLevel::TRUSTED_ENVIRONMENT);
 
         assert_eq!(
             service
@@ -867,13 +900,15 @@
 
         let mock_rpc = Box::<MockRemotelyProvisionedComponent>::default();
         let mock_values = mock_rpc.0.clone();
-        let mut service: RemoteProvisioningService = Default::default();
-        service.device_by_sec_level.insert(sec_level, Strong::new(mock_rpc));
+        let mut remote_provisioning: RemoteProvisioningService = Default::default();
+        remote_provisioning.device_by_sec_level.insert(sec_level, Strong::new(mock_rpc));
+        let mut key_pool: RemotelyProvisionedKeyPoolService = Default::default();
+        key_pool.unique_id_to_sec_level.insert(String::from(irpc_id), sec_level);
 
         mock_values.lock().unwrap().hw_info.uniqueId = Some(String::from(irpc_id));
         mock_values.lock().unwrap().private_key = vec![8, 6, 7, 5, 3, 0, 9];
         mock_values.lock().unwrap().maced_public_key = generate_maced_pubkey(0x11);
-        service.generate_key_pair(&mut db, true, sec_level).unwrap();
+        remote_provisioning.generate_key_pair(&mut db, true, sec_level).unwrap();
 
         let public_key = RemoteProvisioningService::parse_cose_mac0_for_coords(
             mock_values.lock().unwrap().maced_public_key.as_slice(),
@@ -881,7 +916,7 @@
         .unwrap();
         let batch_cert = get_fake_cert();
         let certs = &[5, 6, 7, 8];
-        assert!(service
+        assert!(remote_provisioning
             .provision_cert_chain(
                 &mut db,
                 public_key.as_slice(),
@@ -893,7 +928,7 @@
             .is_ok());
 
         // ensure we got the key we expected
-        let first_key = service
+        let first_key = key_pool
             .get_attestation_key(&mut db, caller_uid, irpc_id)
             .context("get first key")
             .unwrap();
@@ -903,7 +938,7 @@
         // ensure that multiple calls get the same key
         assert_eq!(
             first_key,
-            service
+            key_pool
                 .get_attestation_key(&mut db, caller_uid, irpc_id)
                 .context("get second key")
                 .unwrap()
@@ -911,7 +946,7 @@
 
         // no more keys for new clients
         assert_eq!(
-            service
+            key_pool
                 .get_attestation_key(&mut db, caller_uid + 1, irpc_id)
                 .unwrap_err()
                 .downcast::<error::Error>()
@@ -931,19 +966,21 @@
 
         let mock_rpc = Box::<MockRemotelyProvisionedComponent>::default();
         let mock_values = mock_rpc.0.clone();
-        let mut service: RemoteProvisioningService = Default::default();
-        service.device_by_sec_level.insert(sec_level, Strong::new(mock_rpc));
+        let mut remote_provisioning: RemoteProvisioningService = Default::default();
+        remote_provisioning.device_by_sec_level.insert(sec_level, Strong::new(mock_rpc));
+        let mut key_pool: RemotelyProvisionedKeyPoolService = Default::default();
+        key_pool.unique_id_to_sec_level.insert(String::from(irpc_id), sec_level);
 
         // generate two distinct keys and provision them with certs
         mock_values.lock().unwrap().hw_info.uniqueId = Some(String::from(irpc_id));
         mock_values.lock().unwrap().private_key = vec![3, 1, 4, 1, 5];
         mock_values.lock().unwrap().maced_public_key = generate_maced_pubkey(0x11);
-        assert!(service.generate_key_pair(&mut db, true, sec_level).is_ok());
+        assert!(remote_provisioning.generate_key_pair(&mut db, true, sec_level).is_ok());
         let public_key = RemoteProvisioningService::parse_cose_mac0_for_coords(
             mock_values.lock().unwrap().maced_public_key.as_slice(),
         )
         .unwrap();
-        assert!(service
+        assert!(remote_provisioning
             .provision_cert_chain(
                 &mut db,
                 public_key.as_slice(),
@@ -957,12 +994,12 @@
         mock_values.lock().unwrap().hw_info.uniqueId = Some(String::from(irpc_id));
         mock_values.lock().unwrap().private_key = vec![9, 0, 2, 1, 0];
         mock_values.lock().unwrap().maced_public_key = generate_maced_pubkey(0x22);
-        assert!(service.generate_key_pair(&mut db, true, sec_level).is_ok());
+        assert!(remote_provisioning.generate_key_pair(&mut db, true, sec_level).is_ok());
         let public_key = RemoteProvisioningService::parse_cose_mac0_for_coords(
             mock_values.lock().unwrap().maced_public_key.as_slice(),
         )
         .unwrap();
-        assert!(service
+        assert!(remote_provisioning
             .provision_cert_chain(
                 &mut db,
                 public_key.as_slice(),
@@ -975,11 +1012,11 @@
 
         // make sure each caller gets a distinct key
         assert_ne!(
-            service
+            key_pool
                 .get_attestation_key(&mut db, first_caller, irpc_id)
                 .context("get first key")
                 .unwrap(),
-            service
+            key_pool
                 .get_attestation_key(&mut db, second_caller, irpc_id)
                 .context("get second key")
                 .unwrap()
@@ -987,22 +1024,22 @@
 
         // repeated calls should return the same key for a given caller
         assert_eq!(
-            service
+            key_pool
                 .get_attestation_key(&mut db, first_caller, irpc_id)
                 .context("first caller a")
                 .unwrap(),
-            service
+            key_pool
                 .get_attestation_key(&mut db, first_caller, irpc_id)
                 .context("first caller b")
                 .unwrap(),
         );
 
         assert_eq!(
-            service
+            key_pool
                 .get_attestation_key(&mut db, second_caller, irpc_id)
                 .context("second caller a")
                 .unwrap(),
-            service
+            key_pool
                 .get_attestation_key(&mut db, second_caller, irpc_id)
                 .context("second caller b")
                 .unwrap()
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index 9334930..eefbc20 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -20,7 +20,7 @@
 };
 use crate::database::{CertificateInfo, KeyIdGuard};
 use crate::error::{self, map_km_error, map_or_log_err, Error, ErrorCode};
-use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::globals::{DB, ENFORCEMENTS, LEGACY_IMPORTER, SUPER_KEY};
 use crate::key_parameter::KeyParameter as KsKeyParam;
 use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
 use crate::metrics_store::log_key_creation_event_stats;
@@ -160,9 +160,11 @@
                     let mut db = db.borrow_mut();
 
                     let (key_blob, mut blob_metadata) = SUPER_KEY
+                        .read()
+                        .unwrap()
                         .handle_super_encryption_on_key_init(
                             &mut db,
-                            &LEGACY_MIGRATOR,
+                            &LEGACY_IMPORTER,
                             &(key.domain),
                             &key_parameters,
                             flags,
@@ -243,7 +245,7 @@
             _ => {
                 let (key_id_guard, mut key_entry) = DB
                     .with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
-                        LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+                        LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                             db.borrow_mut().load_key_entry(
                                 key,
                                 KeyType::Client,
@@ -303,6 +305,8 @@
             .context("In create_operation.")?;
 
         let km_blob = SUPER_KEY
+            .read()
+            .unwrap()
             .unwrap_key_if_required(&blob_metadata, km_blob)
             .context("In create_operation. Failed to handle super encryption.")?;
 
@@ -719,7 +723,7 @@
 
         let (wrapping_key_id_guard, mut wrapping_key_entry) = DB
             .with(|db| {
-                LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+                LEGACY_IMPORTER.with_try_import(&key, caller_uid, || {
                     db.borrow_mut().load_key_entry(
                         wrapping_key,
                         KeyType::Client,
@@ -736,8 +740,11 @@
             .ok_or_else(error::Error::sys)
             .context("No km_blob after successfully loading key. This should never happen.")?;
 
-        let wrapping_key_blob =
-            SUPER_KEY.unwrap_key_if_required(&wrapping_blob_metadata, &wrapping_key_blob).context(
+        let wrapping_key_blob = SUPER_KEY
+            .read()
+            .unwrap()
+            .unwrap_key_if_required(&wrapping_blob_metadata, &wrapping_key_blob)
+            .context(
                 "In import_wrapped_key. Failed to handle super encryption for wrapping key.",
             )?;
 
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index 2725dc2..46bc8b0 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -26,7 +26,7 @@
 };
 use crate::{
     database::Uuid,
-    globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_MIGRATOR},
+    globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_IMPORTER},
 };
 use crate::{database::KEYSTORE_UUID, permission};
 use crate::{
@@ -81,7 +81,7 @@
         }
 
         let uuid_by_sec_level = result.uuid_by_sec_level.clone();
-        LEGACY_MIGRATOR
+        LEGACY_IMPORTER
             .set_init(move || {
                 (create_thread_local_db(), uuid_by_sec_level, LEGACY_BLOB_LOADER.clone())
             })
@@ -132,7 +132,7 @@
         let caller_uid = ThreadState::get_calling_uid();
         let (key_id_guard, mut key_entry) = DB
             .with(|db| {
-                LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+                LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                     db.borrow_mut().load_key_entry(
                         key,
                         KeyType::Client,
@@ -183,7 +183,7 @@
     ) -> Result<()> {
         let caller_uid = ThreadState::get_calling_uid();
         DB.with::<_, Result<()>>(|db| {
-            let entry = match LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+            let entry = match LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                 db.borrow_mut().load_key_entry(
                     key,
                     KeyType::Client,
@@ -292,7 +292,7 @@
     fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
         let caller_uid = ThreadState::get_calling_uid();
         DB.with(|db| {
-            LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+            LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                 db.borrow_mut().unbind_key(key, KeyType::Client, caller_uid, |k, av| {
                     check_key_permission(KeyPerm::Delete, k, &av).context("During delete_key.")
                 })
@@ -310,7 +310,7 @@
     ) -> Result<KeyDescriptor> {
         let caller_uid = ThreadState::get_calling_uid();
         DB.with(|db| {
-            LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+            LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                 db.borrow_mut().grant(
                     key,
                     caller_uid,
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index ca5e593..2fb4991 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -26,9 +26,8 @@
     error::ResponseCode,
     key_parameter::{KeyParameter, KeyParameterValue},
     legacy_blob::LegacyBlobLoader,
-    legacy_migrator::LegacyMigrator,
+    legacy_importer::LegacyImporter,
     raw_device::KeyMintDevice,
-    try_insert::TryInsert,
     utils::watchdog as wd,
     utils::AID_KEYSTORE,
 };
@@ -50,7 +49,7 @@
 use std::{
     collections::HashMap,
     sync::Arc,
-    sync::{Mutex, Weak},
+    sync::{Mutex, RwLock, Weak},
 };
 use std::{convert::TryFrom, ops::Deref};
 
@@ -76,7 +75,7 @@
 /// different purpose, distinguished by alias. Each is associated with a static
 /// constant of this type.
 pub struct SuperKeyType<'a> {
-    /// Alias used to look the key up in the `persistent.keyentry` table.
+    /// Alias used to look up the key in the `persistent.keyentry` table.
     pub alias: &'a str,
     /// Encryption algorithm
     pub algorithm: SuperEncryptionAlgorithm,
@@ -256,7 +255,7 @@
 struct SkmState {
     user_keys: HashMap<UserId, UserSuperKeys>,
     key_index: HashMap<i64, Weak<SuperKey>>,
-    boot_level_key_cache: Option<BootLevelKeyCache>,
+    boot_level_key_cache: Option<Mutex<BootLevelKeyCache>>,
 }
 
 impl SkmState {
@@ -275,24 +274,24 @@
 
 #[derive(Default)]
 pub struct SuperKeyManager {
-    data: Mutex<SkmState>,
+    data: SkmState,
 }
 
 impl SuperKeyManager {
-    pub fn set_up_boot_level_cache(self: &Arc<Self>, db: &mut KeystoreDB) -> Result<()> {
-        let mut data = self.data.lock().unwrap();
-        if data.boot_level_key_cache.is_some() {
+    pub fn set_up_boot_level_cache(skm: &Arc<RwLock<Self>>, db: &mut KeystoreDB) -> Result<()> {
+        let mut skm_guard = skm.write().unwrap();
+        if skm_guard.data.boot_level_key_cache.is_some() {
             log::info!("In set_up_boot_level_cache: called for a second time");
             return Ok(());
         }
         let level_zero_key = get_level_zero_key(db)
             .context("In set_up_boot_level_cache: get_level_zero_key failed")?;
-        data.boot_level_key_cache = Some(BootLevelKeyCache::new(level_zero_key));
+        skm_guard.data.boot_level_key_cache =
+            Some(Mutex::new(BootLevelKeyCache::new(level_zero_key)));
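+        // The cache gets its own Mutex so that methods taking &self (e.g. level_accessible and
+        // lookup_key) can still lock and update it now that SkmState is no longer Mutex-wrapped.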
         log::info!("Starting boot level watcher.");
-        let clone = self.clone();
+        let clone = skm.clone();
         std::thread::spawn(move || {
-            clone
-                .watch_boot_level()
+            Self::watch_boot_level(clone)
                 .unwrap_or_else(|e| log::error!("watch_boot_level failed:\n{:?}", e));
         });
         Ok(())
@@ -300,32 +299,40 @@
 
     /// Watch the `keystore.boot_level` system property, and keep boot level up to date.
     /// Blocks waiting for system property changes, so must be run in its own thread.
-    fn watch_boot_level(&self) -> Result<()> {
+    fn watch_boot_level(skm: Arc<RwLock<Self>>) -> Result<()> {
         let mut w = PropertyWatcher::new("keystore.boot_level")
             .context("In watch_boot_level: PropertyWatcher::new failed")?;
         loop {
             let level = w
                 .read(|_n, v| v.parse::<usize>().map_err(std::convert::Into::into))
                 .context("In watch_boot_level: read of property failed")?;
-            // watch_boot_level should only be called once data.boot_level_key_cache is Some,
-            // so it's safe to unwrap in the branches below.
-            if level < MAX_MAX_BOOT_LEVEL {
-                log::info!("Read keystore.boot_level value {}", level);
-                let mut data = self.data.lock().unwrap();
-                data.boot_level_key_cache
+
+            // This scope limits the skm_guard life, so we don't hold the skm_guard while
+            // waiting.
+            {
+                let mut skm_guard = skm.write().unwrap();
+                let boot_level_key_cache = skm_guard
+                    .data
+                    .boot_level_key_cache
                     .as_mut()
-                    .unwrap()
-                    .advance_boot_level(level)
-                    .context("In watch_boot_level: advance_boot_level failed")?;
-            } else {
-                log::info!(
-                    "keystore.boot_level {} hits maximum {}, finishing.",
-                    level,
-                    MAX_MAX_BOOT_LEVEL
-                );
-                let mut data = self.data.lock().unwrap();
-                data.boot_level_key_cache.as_mut().unwrap().finish();
-                break;
+                    .ok_or_else(Error::sys)
+                    .context("In watch_boot_level: Boot level cache not initialized")?
+                    .get_mut()
+                    .unwrap();
+                if level < MAX_MAX_BOOT_LEVEL {
+                    log::info!("Read keystore.boot_level value {}", level);
+                    boot_level_key_cache
+                        .advance_boot_level(level)
+                        .context("In watch_boot_level: advance_boot_level failed")?;
+                } else {
+                    log::info!(
+                        "keystore.boot_level {} hits maximum {}, finishing.",
+                        level,
+                        MAX_MAX_BOOT_LEVEL
+                    );
+                    boot_level_key_cache.finish();
+                    break;
+                }
             }
             w.wait().context("In watch_boot_level: property wait failed")?;
         }
@@ -334,34 +341,37 @@
 
     pub fn level_accessible(&self, boot_level: i32) -> bool {
         self.data
-            .lock()
-            .unwrap()
             .boot_level_key_cache
             .as_ref()
-            .map_or(false, |c| c.level_accessible(boot_level as usize))
+            .map_or(false, |c| c.lock().unwrap().level_accessible(boot_level as usize))
     }
 
-    pub fn forget_all_keys_for_user(&self, user: UserId) {
-        let mut data = self.data.lock().unwrap();
-        data.user_keys.remove(&user);
+    pub fn forget_all_keys_for_user(&mut self, user: UserId) {
+        self.data.user_keys.remove(&user);
     }
 
-    fn install_per_boot_key_for_user(&self, user: UserId, super_key: Arc<SuperKey>) -> Result<()> {
-        let mut data = self.data.lock().unwrap();
-        data.add_key_to_key_index(&super_key)
+    fn install_per_boot_key_for_user(
+        &mut self,
+        user: UserId,
+        super_key: Arc<SuperKey>,
+    ) -> Result<()> {
+        self.data
+            .add_key_to_key_index(&super_key)
             .context("In install_per_boot_key_for_user: add_key_to_key_index failed")?;
-        data.user_keys.entry(user).or_default().per_boot = Some(super_key);
+        self.data.user_keys.entry(user).or_default().per_boot = Some(super_key);
         Ok(())
     }
 
     fn lookup_key(&self, key_id: &SuperKeyIdentifier) -> Result<Option<Arc<SuperKey>>> {
-        let mut data = self.data.lock().unwrap();
         Ok(match key_id {
-            SuperKeyIdentifier::DatabaseId(id) => data.key_index.get(id).and_then(|k| k.upgrade()),
-            SuperKeyIdentifier::BootLevel(level) => data
+            SuperKeyIdentifier::DatabaseId(id) => {
+                self.data.key_index.get(id).and_then(|k| k.upgrade())
+            }
+            SuperKeyIdentifier::BootLevel(level) => self
+                .data
                 .boot_level_key_cache
-                .as_mut()
-                .map(|b| b.aes_key(*level as usize))
+                .as_ref()
+                .map(|b| b.lock().unwrap().aes_key(*level as usize))
                 .transpose()
                 .context("In lookup_key: aes_key failed")?
                 .flatten()
@@ -377,8 +387,7 @@
     }
 
     pub fn get_per_boot_key_by_user_id(&self, user_id: UserId) -> Option<Arc<SuperKey>> {
-        let data = self.data.lock().unwrap();
-        data.user_keys.get(&user_id).and_then(|e| e.per_boot.as_ref().cloned())
+        self.data.user_keys.get(&user_id).and_then(|e| e.per_boot.as_ref().cloned())
     }
 
     /// This function unlocks the super keys for a given user.
@@ -386,7 +395,7 @@
     /// super key cache. If there is no such key a new key is created, encrypted with
     /// a key derived from the given password and stored in the database.
     pub fn unlock_user_key(
-        &self,
+        &mut self,
         db: &mut KeystoreDB,
         user: UserId,
         pw: &Password,
@@ -493,9 +502,12 @@
     }
 
     /// Checks if user has setup LSKF, even when super key cache is empty for the user.
-    pub fn super_key_exists_in_db_for_user(
+    /// The reference to self is unused, but it is required to prevent this function from being
+    /// called concurrently with changes to the SuperKeyManager state in the database.
+    fn super_key_exists_in_db_for_user(
+        &self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
     ) -> Result<bool> {
         let key_in_db = db
@@ -505,7 +517,7 @@
         if key_in_db {
             Ok(key_in_db)
         } else {
-            legacy_migrator
+            legacy_importer
                 .has_super_key(user_id)
                 .context("In super_key_exists_in_db_for_user: Trying to query legacy db.")
         }
@@ -515,15 +527,15 @@
     /// legacy database). If not, return Uninitialized state.
     /// Otherwise, decrypt the super key from the password and return LskfUnlocked state.
     pub fn check_and_unlock_super_key(
-        &self,
+        &mut self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
         pw: &Password,
     ) -> Result<UserState> {
         let alias = &USER_SUPER_KEY;
-        let result = legacy_migrator
-            .with_try_migrate_super_key(user_id, pw, || db.load_super_key(alias, user_id))
+        let result = legacy_importer
+            .with_try_import_super_key(user_id, pw, || db.load_super_key(alias, user_id))
             .context("In check_and_unlock_super_key. Failed to load super key")?;
 
         match result {
@@ -544,24 +556,23 @@
     /// and return LskfUnlocked state.
     /// If the password is not provided, return Uninitialized state.
     pub fn check_and_initialize_super_key(
-        &self,
+        &mut self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
         pw: Option<&Password>,
     ) -> Result<UserState> {
-        let super_key_exists_in_db =
-            Self::super_key_exists_in_db_for_user(db, legacy_migrator, user_id).context(
-                "In check_and_initialize_super_key. Failed to check if super key exists.",
-            )?;
+        let super_key_exists_in_db = self
+            .super_key_exists_in_db_for_user(db, legacy_importer, user_id)
+            .context("In check_and_initialize_super_key. Failed to check if super key exists.")?;
         if super_key_exists_in_db {
             Ok(UserState::LskfLocked)
         } else if let Some(pw) = pw {
-            //generate a new super key.
+            // Generate a new super key.
             let super_key = generate_aes256_key()
                 .context("In check_and_initialize_super_key: Failed to generate AES 256 key.")?;
-            //derive an AES256 key from the password and re-encrypt the super key
-            //before we insert it in the database.
+            // Derive an AES256 key from the password and re-encrypt the super key
+            // before we insert it in the database.
             let (encrypted_super_key, blob_metadata) = Self::encrypt_with_password(&super_key, pw)
                 .context("In check_and_initialize_super_key.")?;
 
@@ -589,9 +600,9 @@
         }
     }
 
-    //helper function to populate super key cache from the super key blob loaded from the database
+    // Helper function to populate super key cache from the super key blob loaded from the database.
     fn populate_cache_from_super_key_blob(
-        &self,
+        &mut self,
         user_id: UserId,
         algorithm: SuperEncryptionAlgorithm,
         entry: KeyEntry,
@@ -605,7 +616,7 @@
         Ok(super_key)
     }
 
-    /// Extracts super key from the entry loaded from the database
+    /// Extracts super key from the entry loaded from the database.
     pub fn extract_super_key_from_key_entry(
         algorithm: SuperEncryptionAlgorithm,
         entry: KeyEntry,
@@ -620,7 +631,7 @@
                 metadata.aead_tag(),
             ) {
                 (Some(&EncryptedBy::Password), Some(salt), Some(iv), Some(tag)) => {
-                    // Note that password encryption is AES no matter the value of algorithm
+                    // Note that password encryption is AES no matter the value of algorithm.
                     let key = pw.derive_key(Some(salt), AES_256_KEY_LENGTH).context(
                         "In extract_super_key_from_key_entry: Failed to generate key from password.",
                     )?;
@@ -680,11 +691,12 @@
     fn super_encrypt_on_key_init(
         &self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
         key_blob: &[u8],
     ) -> Result<(Vec<u8>, BlobMetaData)> {
-        match UserState::get(db, legacy_migrator, self, user_id)
+        match self
+            .get_user_state(db, legacy_importer, user_id)
             .context("In super_encrypt. Failed to get user state.")?
         {
             UserState::LskfUnlocked(super_key) => {
@@ -699,9 +711,9 @@
         }
     }
 
-    //Helper function to encrypt a key with the given super key. Callers should select which super
-    //key to be used. This is called when a key is super encrypted at its creation as well as at its
-    //upgrade.
+    // Helper function to encrypt a key with the given super key. Callers should select which
+    // super key is to be used. This is called when a key is super encrypted at its creation as
+    // well as at its upgrade.
     fn encrypt_with_aes_super_key(
         key_blob: &[u8],
         super_key: &SuperKey,
@@ -725,7 +737,7 @@
     pub fn handle_super_encryption_on_key_init(
         &self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         domain: &Domain,
         key_parameters: &[KeyParameter],
         flags: Option<i32>,
@@ -735,15 +747,19 @@
         match Enforcements::super_encryption_required(domain, key_parameters, flags) {
             SuperEncryptionType::None => Ok((key_blob.to_vec(), BlobMetaData::new())),
             SuperEncryptionType::LskfBound => self
-                .super_encrypt_on_key_init(db, legacy_migrator, user_id, key_blob)
+                .super_encrypt_on_key_init(db, legacy_importer, user_id, key_blob)
                 .context(concat!(
                     "In handle_super_encryption_on_key_init. ",
                     "Failed to super encrypt with LskfBound key."
                 )),
             SuperEncryptionType::ScreenLockBound => {
-                let mut data = self.data.lock().unwrap();
-                let entry = data.user_keys.entry(user_id).or_default();
-                if let Some(super_key) = entry.screen_lock_bound.as_ref() {
+                let entry = self
+                    .data
+                    .user_keys
+                    .get(&user_id)
+                    .and_then(|e| e.screen_lock_bound.as_ref());
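+                // With a shared (&self) receiver the per-user entry can no longer be created
+                // here on demand; it is populated when the screen-lock bound keys are unlocked.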
+                if let Some(super_key) = entry {
                     Self::encrypt_with_aes_super_key(key_blob, super_key).context(concat!(
                         "In handle_super_encryption_on_key_init. ",
                         "Failed to encrypt with ScreenLockBound key."
@@ -813,6 +829,7 @@
     /// When this is called, the caller must hold the lock on the SuperKeyManager.
     /// So it's OK that the check and creation are different DB transactions.
     fn get_or_create_super_key(
+        &mut self,
         db: &mut KeystoreDB,
         user_id: UserId,
         key_type: &SuperKeyType,
@@ -847,8 +864,8 @@
                     )
                 }
             };
-            //derive an AES256 key from the password and re-encrypt the super key
-            //before we insert it in the database.
+            // Derive an AES256 key from the password and re-encrypt the super key
+            // before we insert it in the database.
             let (encrypted_super_key, blob_metadata) =
                 Self::encrypt_with_password(&super_key, password)
                     .context("In get_or_create_super_key.")?;
@@ -876,52 +893,64 @@
 
     /// Decrypt the screen-lock bound keys for this user using the password and store in memory.
     pub fn unlock_screen_lock_bound_key(
-        &self,
+        &mut self,
         db: &mut KeystoreDB,
         user_id: UserId,
         password: &Password,
     ) -> Result<()> {
-        let mut data = self.data.lock().unwrap();
-        let entry = data.user_keys.entry(user_id).or_default();
-        let aes = entry
-            .screen_lock_bound
-            .get_or_try_to_insert_with(|| {
-                Self::get_or_create_super_key(
-                    db,
-                    user_id,
-                    &USER_SCREEN_LOCK_BOUND_KEY,
-                    password,
-                    None,
-                )
-            })?
-            .clone();
-        let ecdh = entry
-            .screen_lock_bound_private
-            .get_or_try_to_insert_with(|| {
-                Self::get_or_create_super_key(
-                    db,
-                    user_id,
-                    &USER_SCREEN_LOCK_BOUND_P521_KEY,
-                    password,
-                    Some(aes.clone()),
-                )
-            })?
-            .clone();
-        data.add_key_to_key_index(&aes)?;
-        data.add_key_to_key_index(&ecdh)?;
+        let (screen_lock_bound, screen_lock_bound_private) = self
+            .data
+            .user_keys
+            .get(&user_id)
+            .map(|e| (e.screen_lock_bound.clone(), e.screen_lock_bound_private.clone()))
+            .unwrap_or((None, None));
+
+        if screen_lock_bound.is_some() && screen_lock_bound_private.is_some() {
+            // Already unlocked.
+            return Ok(());
+        }
+
+        let aes = if let Some(screen_lock_bound) = screen_lock_bound {
+            // This is weird. If this point is reached, only one of the screen-lock bound keys
+            // was initialized. This should never happen.
+            screen_lock_bound
+        } else {
+            self.get_or_create_super_key(db, user_id, &USER_SCREEN_LOCK_BOUND_KEY, password, None)
+                .context("In unlock_screen_lock_bound_key: Trying to get or create symmetric key.")?
+        };
+
+        let ecdh = if let Some(screen_lock_bound_private) = screen_lock_bound_private {
+            // This is weird. If this point is reached, only one of the screen-lock bound keys
+            // was initialized. This should never happen.
+            screen_lock_bound_private
+        } else {
+            self.get_or_create_super_key(
+                db,
+                user_id,
+                &USER_SCREEN_LOCK_BOUND_P521_KEY,
+                password,
+                Some(aes.clone()),
+            )
+            .context("In unlock_screen_lock_bound_key: Trying to get or create asymmetric key.")?
+        };
+
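+        // Publish the keys both in the key index (looked up by database id) and in the
+        // per-user entry, so that later operations can find them.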
+        self.data.add_key_to_key_index(&aes)?;
+        self.data.add_key_to_key_index(&ecdh)?;
+        let entry = self.data.user_keys.entry(user_id).or_default();
+        entry.screen_lock_bound = Some(aes);
+        entry.screen_lock_bound_private = Some(ecdh);
         Ok(())
     }
 
     /// Wipe the screen-lock bound keys for this user from memory.
     pub fn lock_screen_lock_bound_key(
-        &self,
+        &mut self,
         db: &mut KeystoreDB,
         user_id: UserId,
         unlocking_sids: &[i64],
     ) {
         log::info!("Locking screen bound for user {} sids {:?}", user_id, unlocking_sids);
-        let mut data = self.data.lock().unwrap();
-        let mut entry = data.user_keys.entry(user_id).or_default();
+        let mut entry = self.data.user_keys.entry(user_id).or_default();
         if !unlocking_sids.is_empty() {
             if let (Some(aes), Some(ecdh)) = (
                 entry.screen_lock_bound.as_ref().cloned(),
@@ -993,12 +1022,11 @@
     /// User has unlocked, not using a password. See if any of our stored auth tokens can be used
     /// to unlock the keys protecting UNLOCKED_DEVICE_REQUIRED keys.
     pub fn try_unlock_user_with_biometric(
-        &self,
+        &mut self,
         db: &mut KeystoreDB,
         user_id: UserId,
     ) -> Result<()> {
-        let mut data = self.data.lock().unwrap();
-        let mut entry = data.user_keys.entry(user_id).or_default();
+        let mut entry = self.data.user_keys.entry(user_id).or_default();
         if let Some(biometric) = entry.biometric_unlock.as_ref() {
             let (key_id_guard, key_entry) = db
                 .load_key_entry(
@@ -1038,8 +1066,8 @@
                         Ok((slb, slbp)) => {
                             entry.screen_lock_bound = Some(slb.clone());
                             entry.screen_lock_bound_private = Some(slbp.clone());
-                            data.add_key_to_key_index(&slb)?;
-                            data.add_key_to_key_index(&slbp)?;
+                            self.data.add_key_to_key_index(&slb)?;
+                            self.data.add_key_to_key_index(&slbp)?;
                             log::info!(concat!(
                                 "In try_unlock_user_with_biometric: ",
                                 "Successfully unlocked with biometric"
@@ -1055,6 +1083,122 @@
         }
         Ok(())
     }
+
+    /// Returns the keystore locked state of the given user. It requires the thread local
+    /// keystore database and a reference to the legacy importer because it may need to
+    /// import the super key from the legacy blob database to the keystore database.
+    pub fn get_user_state(
+        &self,
+        db: &mut KeystoreDB,
+        legacy_importer: &LegacyImporter,
+        user_id: UserId,
+    ) -> Result<UserState> {
+        match self.get_per_boot_key_by_user_id(user_id) {
+            Some(super_key) => Ok(UserState::LskfUnlocked(super_key)),
+            None => {
+                // Check if a super key exists in the database or legacy database.
+                // If so, return locked user state.
+                if self
+                    .super_key_exists_in_db_for_user(db, legacy_importer, user_id)
+                    .context("In get_user_state.")?
+                {
+                    Ok(UserState::LskfLocked)
+                } else {
+                    Ok(UserState::Uninitialized)
+                }
+            }
+        }
+    }
+
+    /// If the given user is unlocked:
+    /// * and `password` is None, the user is reset, all authentication bound keys are deleted and
+    ///   `Ok(UserState::Uninitialized)` is returned.
+    /// * and `password` is Some, `Ok(UserState::LskfUnlocked)` is returned.
+    /// If the given user is locked:
+    /// * and the user was initialized before, `Ok(UserState::LskfLocked)` is returned.
+    /// * and the user was not initialized before:
+    ///   * and `password` is None, `Ok(Uninitialized)` is returned.
+    ///   * and `password` is Some, super keys are generated and `Ok(UserState::LskfUnlocked)` is
+    ///     returned.
+    pub fn reset_or_init_user_and_get_user_state(
+        &mut self,
+        db: &mut KeystoreDB,
+        legacy_importer: &LegacyImporter,
+        user_id: UserId,
+        password: Option<&Password>,
+    ) -> Result<UserState> {
+        match self.get_per_boot_key_by_user_id(user_id) {
+            Some(_) if password.is_none() => {
+                // Transitioning to swiping, delete only the super key in database and cache,
+                // and super-encrypted keys in database (and in KM).
+                self.reset_user(db, legacy_importer, user_id, true).context(
+                    "In reset_or_init_user_and_get_user_state: Trying to delete keys from the db.",
+                )?;
+                // Lskf is now removed in Keystore.
+                Ok(UserState::Uninitialized)
+            }
+            Some(super_key) => {
+                // Keystore won't be notified when changing to a new password when LSKF is
+                // already setup. Therefore, ideally this path wouldn't be reached.
+                Ok(UserState::LskfUnlocked(super_key))
+            }
+            None => {
+                // Check if a super key exists in the database or legacy database.
+                // If so, return LskfLocked state.
+                // Otherwise, i) if the password is provided, initialize the super key and return
+                // LskfUnlocked state ii) if password is not provided, return Uninitialized state.
+                self.check_and_initialize_super_key(db, legacy_importer, user_id, password)
+            }
+        }
+    }
+
+    /// Unlocks the given user with the given password. If the key was already unlocked or unlocking
+    /// was successful, `Ok(UserState::LskfUnlocked)` is returned.
+    /// If the user was never initialized `Ok(UserState::Uninitialized)` is returned.
+    pub fn unlock_and_get_user_state(
+        &mut self,
+        db: &mut KeystoreDB,
+        legacy_importer: &LegacyImporter,
+        user_id: UserId,
+        password: &Password,
+    ) -> Result<UserState> {
+        match self.get_per_boot_key_by_user_id(user_id) {
+            Some(super_key) => {
+                log::info!("In unlock_and_get_user_state. Trying to unlock when already unlocked.");
+                Ok(UserState::LskfUnlocked(super_key))
+            }
+            None => {
+                // Check if a super key exists in the database or legacy database.
+                // If not, return Uninitialized state.
+                // Otherwise, try to unlock the super key and if successful,
+                // return LskfUnlocked.
+                self.check_and_unlock_super_key(db, legacy_importer, user_id, password)
+                    .context("In unlock_and_get_user_state. Failed to unlock super key.")
+            }
+        }
+    }
+
+    /// Delete all the keys created on behalf of the user.
+    /// If 'keep_non_super_encrypted_keys' is set to true, delete only the super key and super
+    /// encrypted keys.
+    pub fn reset_user(
+        &mut self,
+        db: &mut KeystoreDB,
+        legacy_importer: &LegacyImporter,
+        user_id: UserId,
+        keep_non_super_encrypted_keys: bool,
+    ) -> Result<()> {
+        // Mark keys created on behalf of the user as unreferenced.
+        legacy_importer
+            .bulk_delete_user(user_id, keep_non_super_encrypted_keys)
+            .context("In reset_user: Trying to delete legacy keys.")?;
+        db.unbind_keys_for_user(user_id, keep_non_super_encrypted_keys)
+            .context("In reset user. Error in unbinding keys.")?;
+
+        // Delete super key in cache, if exists.
+        self.forget_all_keys_for_user(user_id);
+        Ok(())
+    }
 }
 
 /// This enum represents different states of the user's life cycle in the device.
@@ -1072,110 +1216,6 @@
     Uninitialized,
 }
 
-impl UserState {
-    pub fn get(
-        db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
-        skm: &SuperKeyManager,
-        user_id: UserId,
-    ) -> Result<UserState> {
-        match skm.get_per_boot_key_by_user_id(user_id) {
-            Some(super_key) => Ok(UserState::LskfUnlocked(super_key)),
-            None => {
-                //Check if a super key exists in the database or legacy database.
-                //If so, return locked user state.
-                if SuperKeyManager::super_key_exists_in_db_for_user(db, legacy_migrator, user_id)
-                    .context("In get.")?
-                {
-                    Ok(UserState::LskfLocked)
-                } else {
-                    Ok(UserState::Uninitialized)
-                }
-            }
-        }
-    }
-
-    /// Queries user state when serving password change requests.
-    pub fn get_with_password_changed(
-        db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
-        skm: &SuperKeyManager,
-        user_id: UserId,
-        password: Option<&Password>,
-    ) -> Result<UserState> {
-        match skm.get_per_boot_key_by_user_id(user_id) {
-            Some(super_key) => {
-                if password.is_none() {
-                    //transitioning to swiping, delete only the super key in database and cache, and
-                    //super-encrypted keys in database (and in KM)
-                    Self::reset_user(db, skm, legacy_migrator, user_id, true).context(
-                        "In get_with_password_changed: Trying to delete keys from the db.",
-                    )?;
-                    //Lskf is now removed in Keystore
-                    Ok(UserState::Uninitialized)
-                } else {
-                    //Keystore won't be notified when changing to a new password when LSKF is
-                    //already setup. Therefore, ideally this path wouldn't be reached.
-                    Ok(UserState::LskfUnlocked(super_key))
-                }
-            }
-            None => {
-                //Check if a super key exists in the database or legacy database.
-                //If so, return LskfLocked state.
-                //Otherwise, i) if the password is provided, initialize the super key and return
-                //LskfUnlocked state ii) if password is not provided, return Uninitialized state.
-                skm.check_and_initialize_super_key(db, legacy_migrator, user_id, password)
-            }
-        }
-    }
-
-    /// Queries user state when serving password unlock requests.
-    pub fn get_with_password_unlock(
-        db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
-        skm: &SuperKeyManager,
-        user_id: UserId,
-        password: &Password,
-    ) -> Result<UserState> {
-        match skm.get_per_boot_key_by_user_id(user_id) {
-            Some(super_key) => {
-                log::info!("In get_with_password_unlock. Trying to unlock when already unlocked.");
-                Ok(UserState::LskfUnlocked(super_key))
-            }
-            None => {
-                //Check if a super key exists in the database or legacy database.
-                //If not, return Uninitialized state.
-                //Otherwise, try to unlock the super key and if successful,
-                //return LskfUnlocked state
-                skm.check_and_unlock_super_key(db, legacy_migrator, user_id, password)
-                    .context("In get_with_password_unlock. Failed to unlock super key.")
-            }
-        }
-    }
-
-    /// Delete all the keys created on behalf of the user.
-    /// If 'keep_non_super_encrypted_keys' is set to true, delete only the super key and super
-    /// encrypted keys.
-    pub fn reset_user(
-        db: &mut KeystoreDB,
-        skm: &SuperKeyManager,
-        legacy_migrator: &LegacyMigrator,
-        user_id: UserId,
-        keep_non_super_encrypted_keys: bool,
-    ) -> Result<()> {
-        // mark keys created on behalf of the user as unreferenced.
-        legacy_migrator
-            .bulk_delete_user(user_id, keep_non_super_encrypted_keys)
-            .context("In reset_user: Trying to delete legacy keys.")?;
-        db.unbind_keys_for_user(user_id, keep_non_super_encrypted_keys)
-            .context("In reset user. Error in unbinding keys.")?;
-
-        //delete super key in cache, if exists
-        skm.forget_all_keys_for_user(user_id);
-        Ok(())
-    }
-}
-
 /// This enum represents three states a KeyMint Blob can be in, w.r.t super encryption.
 /// `Sensitive` holds the non encrypted key and a reference to its super key.
 /// `NonSensitive` holds a non encrypted key that is never supposed to be encrypted.
diff --git a/keystore2/src/try_insert.rs b/keystore2/src/try_insert.rs
deleted file mode 100644
index 6dd3962..0000000
--- a/keystore2/src/try_insert.rs
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! The TryInsert trait adds to Option<T> the method
-//! get_or_try_to_insert_with, which is analogous to
-//! get_or_insert_with, but allows the called function to fail and propagates the failure.
-
-/// The TryInsert trait adds to Option<T> the method
-/// get_or_try_to_insert_with, which is analogous to
-/// get_or_insert_with, but allows the called function to fail and propagates the failure.
-pub trait TryInsert {
-    /// Type of the Ok branch of the Result
-    type Item;
-    /// Inserts a value computed from `f` into the option if it is [`None`],
-    /// then returns a mutable reference to the contained value. If `f`
-    /// returns Err, the Option is unchanged.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// let mut x = None;
-    /// assert_eq!(x.get_or_try_to_insert_with(Err("oops".to_string())), Err("oops".to_string()))
-    /// {
-    ///     let y: &mut u32 = x.get_or_try_to_insert_with(|| Ok(5))?;
-    ///     assert_eq!(y, &5);
-    ///
-    ///     *y = 7;
-    /// }
-    ///
-    /// assert_eq!(x, Some(7));
-    /// ```
-    fn get_or_try_to_insert_with<E, F: FnOnce() -> Result<Self::Item, E>>(
-        &mut self,
-        f: F,
-    ) -> Result<&mut Self::Item, E>;
-}
-
-impl<T> TryInsert for Option<T> {
-    type Item = T;
-    fn get_or_try_to_insert_with<E, F: FnOnce() -> Result<Self::Item, E>>(
-        &mut self,
-        f: F,
-    ) -> Result<&mut Self::Item, E> {
-        if self.is_none() {
-            *self = Some(f()?);
-        }
-
-        match self {
-            Some(v) => Ok(v),
-            // SAFETY: a `None` variant for `self` would have been replaced by a `Some`
-            // variant in the code above.
-            None => unsafe { std::hint::unreachable_unchecked() },
-        }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    fn fails() -> Result<i32, String> {
-        Err("fail".to_string())
-    }
-
-    fn succeeds() -> Result<i32, String> {
-        Ok(99)
-    }
-
-    #[test]
-    fn test() {
-        let mut x = None;
-        assert_eq!(x.get_or_try_to_insert_with(fails), Err("fail".to_string()));
-        assert_eq!(x, None);
-        assert_eq!(*x.get_or_try_to_insert_with(succeeds).unwrap(), 99);
-        assert_eq!(x, Some(99));
-        x = Some(42);
-        assert_eq!(*x.get_or_try_to_insert_with(fails).unwrap(), 42);
-        assert_eq!(x, Some(42));
-        assert_eq!(*x.get_or_try_to_insert_with(succeeds).unwrap(), 42);
-        assert_eq!(x, Some(42));
-        *x.get_or_try_to_insert_with(fails).unwrap() = 2;
-        assert_eq!(x, Some(2));
-        *x.get_or_try_to_insert_with(succeeds).unwrap() = 3;
-        assert_eq!(x, Some(3));
-        x = None;
-        *x.get_or_try_to_insert_with(succeeds).unwrap() = 5;
-        assert_eq!(x, Some(5));
-    }
-}
diff --git a/keystore2/src/utils.rs b/keystore2/src/utils.rs
index 82e6700..c924bef 100644
--- a/keystore2/src/utils.rs
+++ b/keystore2/src/utils.rs
@@ -20,7 +20,7 @@
 use crate::permission::{KeyPerm, KeyPermSet, KeystorePerm};
 use crate::{
     database::{KeyType, KeystoreDB},
-    globals::LEGACY_MIGRATOR,
+    globals::LEGACY_IMPORTER,
 };
 use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
     KeyCharacteristics::KeyCharacteristics, Tag::Tag,
@@ -211,7 +211,7 @@
 ) -> Result<Vec<KeyDescriptor>> {
     let mut result = Vec::new();
     result.append(
-        &mut LEGACY_MIGRATOR
+        &mut LEGACY_IMPORTER
             .list_uid(domain, namespace)
             .context("In list_key_entries: Trying to list legacy keys.")?,
     );
diff --git a/keystore2/src/watchdog.rs b/keystore2/src/watchdog.rs
index 9cca171..a26b632 100644
--- a/keystore2/src/watchdog.rs
+++ b/keystore2/src/watchdog.rs
@@ -111,11 +111,44 @@
         }
         self.last_report = Instant::now();
         self.has_overdue = has_overdue;
-        log::warn!("Keystore Watchdog report:");
-        log::warn!("Overdue records:");
+        log::warn!("### Keystore Watchdog report - BEGIN ###");
+
         let now = Instant::now();
-        for (i, r) in self.records.iter() {
-            if r.deadline.saturating_duration_since(now) == Duration::new(0, 0) {
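+        // A record whose deadline saturates to a zero duration from `now` has already expired,
+        // i.e., it is overdue.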
+        let mut overdue_records: Vec<(&Index, &Record)> = self
+            .records
+            .iter()
+            .filter(|(_, r)| r.deadline.saturating_duration_since(now) == Duration::new(0, 0))
+            .collect();
+
+        log::warn!("When extracting from a bug report, please include this header");
+        log::warn!("and all {} records below.", overdue_records.len());
+
+        // Watch points can be nested, i.e., a single thread may have multiple armed
+        // watch points. The most recent one on each thread ("thread recent") is closest to the
+        // point where something is blocked. Furthermore, keystore2 has various critical sections
+        // and common backend resources, such as KeyMint, that can only be entered serially. So
+        // if one thread hangs, the others will soon follow suit. Thus the oldest "thread recent"
+        // watch point most likely points toward the culprit.
+        // Therefore, sort by start time first.
+        overdue_records.sort_unstable_by(|(_, r1), (_, r2)| r1.started.cmp(&r2.started));
+        // Then group all of the watch points by thread, preserving the order within each
+        // group.
+        let groups = overdue_records.iter().fold(
+            HashMap::<thread::ThreadId, Vec<(&Index, &Record)>>::new(),
+            |mut acc, (i, r)| {
+                acc.entry(i.tid).or_default().push((i, r));
+                acc
+            },
+        );
+        // Put the groups back into a vector.
+        let mut groups: Vec<Vec<(&Index, &Record)>> = groups.into_iter().map(|(_, v)| v).collect();
+        // Sort the groups by the start time of the most recent (.last()) record in each group.
+        // It is panic safe to use unwrap() here because we never add empty vectors to
+        // the map.
+        groups.sort_by(|v1, v2| v1.last().unwrap().1.started.cmp(&v2.last().unwrap().1.started));
+
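+        // Report the overdue watch points thread by thread, oldest "thread recent" first.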
+        for g in groups.iter() {
+            for (i, r) in g.iter() {
                 match &r.callback {
                     Some(cb) => {
                         log::warn!(
@@ -139,6 +172,7 @@
                 }
             }
         }
+        log::warn!("### Keystore Watchdog report - END ###");
         true
     }
 
diff --git a/ondevice-signing/odsign_main.cpp b/ondevice-signing/odsign_main.cpp
index a324857..5c541ae 100644
--- a/ondevice-signing/odsign_main.cpp
+++ b/ondevice-signing/odsign_main.cpp
@@ -53,7 +53,6 @@
 constexpr const char* kOdrefreshPath = "/apex/com.android.art/bin/odrefresh";
 constexpr const char* kCompOsVerifyPath = "/apex/com.android.compos/bin/compos_verify_key";
 constexpr const char* kFsVerityProcPath = "/proc/sys/fs/verity";
-constexpr const char* kKvmDevicePath = "/dev/kvm";
 
 constexpr bool kForceCompilation = false;
 constexpr bool kUseCompOs = true;
@@ -145,7 +144,8 @@
 }
 
 bool compOsPresent() {
-    return access(kCompOsVerifyPath, X_OK) == 0 && access(kKvmDevicePath, F_OK) == 0;
+    // We must have the CompOS APEX
+    return access(kCompOsVerifyPath, X_OK) == 0;
 }
 
 Result<void> verifyExistingRootCert(const SigningKey& key) {