Merge changes I5fd4b49c,Ibf7e5259

* changes:
  Mark open() as deliberately not O_CLOEXEC.
  odsign: Don't initialize if we can't find a Keymaster.
diff --git a/keystore2/Android.bp b/keystore2/Android.bp
index 05bf699..812d5e6 100644
--- a/keystore2/Android.bp
+++ b/keystore2/Android.bp
@@ -33,6 +33,7 @@
         "android.security.authorization-rust",
         "android.security.compat-rust",
         "android.security.remoteprovisioning-rust",
+        "android.security.usermanager-rust",
         "android.system.keystore2-V1-rust",
         "libanyhow",
         "libbinder_rs",
@@ -75,6 +76,7 @@
         "android.security.authorization-rust",
         "android.security.compat-rust",
         "android.security.remoteprovisioning-rust",
+        "android.security.usermanager-rust",
         "android.system.keystore2-V1-rust",
         "libandroid_logger",
         "libanyhow",
diff --git a/keystore2/TEST_MAPPING b/keystore2/TEST_MAPPING
index 33d157e..d4e20de 100644
--- a/keystore2/TEST_MAPPING
+++ b/keystore2/TEST_MAPPING
@@ -4,9 +4,6 @@
       "name": "keystore2_certificate_test"
     },
     {
-      "name": "keystore2_km_compat_test"
-    },
-    {
       "name": "keystore2_test"
     }
   ]
diff --git a/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
index d045345..0d4c30f 100644
--- a/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
+++ b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
@@ -16,6 +16,7 @@
 
 package android.security.remoteprovisioning;
 
+import android.hardware.security.keymint.ProtectedData;
 import android.hardware.security.keymint.SecurityLevel;
 import android.security.remoteprovisioning.AttestationPoolStatus;
 
@@ -83,10 +84,13 @@
      * @param secLevel The security level to specify which KM instance from which to generate a
      *                         CSR.
      *
-     * @return A CBOR blob composed of various encrypted/signed elements from the TA in a byte[]
+     * @param protectedData The encrypted CBOR blob generated by the remote provisioner
+     *
+     * @return A CBOR blob composed of various elements required by the server to verify the
+     *                         request.
      */
     byte[] generateCsr(in boolean testMode, in int numCsr, in byte[] eek, in byte[] challenge,
-        in SecurityLevel secLevel);
+        in SecurityLevel secLevel, out ProtectedData protectedData);
 
     /**
      * This method provides a way for the returned attestation certificate chains to be provisioned
@@ -95,7 +99,10 @@
      *
      * @param publicKey The raw public key encoded in the leaf certificate.
      *
-     * @param cert An X.509, DER encoded certificate chain.
+     * @param batchCert The batch certificate corresponding to the attestation key. Separated for
+     *                          the purpose of making Subject lookup for KM attestation easier.
+     *
+     * @param certs An X.509, DER encoded certificate chain for the attestation key.
      *
      * @param expirationDate The expiration date on the certificate chain, provided by the caller
      *                          for convenience.
@@ -103,8 +110,8 @@
      * @param secLevel The security level representing the KM instance containing the key that this
      *                          chain corresponds to.
      */
-    void provisionCertChain(in byte[] publicKey, in byte[] certs, in long expirationDate,
-        in SecurityLevel secLevel);
+    void provisionCertChain(in byte[] publicKey, in byte[] batchCert, in byte[] certs,
+        in long expirationDate, in SecurityLevel secLevel);
 
     /**
      * This method allows the caller to instruct KeyStore to generate and store a key pair to be
@@ -117,4 +124,13 @@
      * @param secLevel The security level to specify which KM instance should generate a key pair.
      */
     void generateKeyPair(in boolean is_test_mode, in SecurityLevel secLevel);
+
+    /**
+     * This method returns the SecurityLevels of whichever instances of
+     * IRemotelyProvisionedComponent are running on the device. The RemoteProvisioner app needs to
+     * know which KM instances it should be generating and managing attestation keys for.
+     *
+     * @return The array of security levels.
+     */
+    SecurityLevel[] getSecurityLevels();
 }
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index e446e78..02b19c4 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -12,18 +12,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! This module implements IKeyAuthorization AIDL interface.
+//! This module implements IKeystoreAuthorization AIDL interface.
 
 use crate::error::Error as KeystoreError;
 use crate::error::map_or_log_err;
-use crate::globals::{DB, ENFORCEMENTS, LEGACY_BLOB_LOADER, SUPER_KEY};
+use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_MIGRATOR};
 use crate::permission::KeystorePerm;
+use crate::super_key::UserState;
 use crate::utils::check_keystore_permission;
 use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
-    HardwareAuthToken::HardwareAuthToken, HardwareAuthenticatorType::HardwareAuthenticatorType,
-};
-use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
-    Timestamp::Timestamp,
+    HardwareAuthToken::HardwareAuthToken,
 };
 use android_security_authorization::binder::{Interface, Result as BinderResult, Strong};
 use android_security_authorization::aidl::android::security::authorization::IKeystoreAuthorization::{
@@ -50,16 +48,7 @@
         //check keystore permission
         check_keystore_permission(KeystorePerm::add_auth()).context("In add_auth_token.")?;
 
-        //TODO: Keymint's HardwareAuthToken aidl needs to implement Copy/Clone
-        let auth_token_copy = HardwareAuthToken {
-            challenge: auth_token.challenge,
-            userId: auth_token.userId,
-            authenticatorId: auth_token.authenticatorId,
-            authenticatorType: HardwareAuthenticatorType(auth_token.authenticatorType.0),
-            timestamp: Timestamp { milliSeconds: auth_token.timestamp.milliSeconds },
-            mac: auth_token.mac.clone(),
-        };
-        ENFORCEMENTS.add_auth_token(auth_token_copy)?;
+        ENFORCEMENTS.add_auth_token(auth_token.clone())?;
         Ok(())
     }
 
@@ -77,22 +66,22 @@
                     .context("In on_lock_screen_event: Unlock with password.")?;
                 ENFORCEMENTS.set_device_locked(user_id, false);
                 // Unlock super key.
-                DB.with::<_, Result<()>>(|db| {
-                    let mut db = db.borrow_mut();
-                    //TODO - b/176123105 - Once the user management API is implemented, unlock is
-                    //allowed only if the user is added. Then the two tasks handled by the
-                    //unlock_user_key will be split into two methods. For now, unlock_user_key
-                    //method is used as it is, which created a super key for the user if one does
-                    //not exists, in addition to unlocking the existing super key of the user/
-                    SUPER_KEY.unlock_user_key(
-                        user_id as u32,
-                        user_password,
-                        &mut db,
-                        &LEGACY_BLOB_LOADER,
-                    )?;
-                    Ok(())
-                })
-                .context("In on_lock_screen_event.")?;
+                if let UserState::Uninitialized = DB
+                    .with(|db| {
+                        UserState::get_with_password_unlock(
+                            &mut db.borrow_mut(),
+                            &LEGACY_MIGRATOR,
+                            &SUPER_KEY,
+                            user_id as u32,
+                            user_password,
+                        )
+                    })
+                    .context("In on_lock_screen_event: Unlock with password.")?
+                {
+                    log::info!(
+                        "In on_lock_screen_event. Trying to unlock when LSKF is uninitialized."
+                    );
+                }
 
                 Ok(())
             }
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index dc6d7a0..db06bff 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -45,7 +45,7 @@
 use crate::impl_metadata; // This is in db_utils.rs
 use crate::key_parameter::{KeyParameter, Tag};
 use crate::permission::KeyPermSet;
-use crate::utils::get_current_time_in_seconds;
+use crate::utils::{get_current_time_in_seconds, AID_USER_OFFSET};
 use crate::{
     db_utils::{self, SqlField},
     gc::Gc,
@@ -144,7 +144,7 @@
     fn store_in_db(&self, key_id: i64, tx: &Transaction) -> Result<()> {
         let mut stmt = tx
             .prepare(
-                "INSERT into persistent.keymetadata (keyentryid, tag, data)
+                "INSERT or REPLACE INTO persistent.keymetadata (keyentryid, tag, data)
                     VALUES (?, ?, ?);",
             )
             .context("In KeyMetaData::store_in_db: Failed to prepare statement.")?;
@@ -584,6 +584,7 @@
 #[allow(dead_code)]
 pub struct CertificateChain {
     private_key: ZVec,
+    batch_cert: ZVec,
     cert_chain: ZVec,
 }
 
@@ -653,6 +654,10 @@
     pub fn pure_cert(&self) -> bool {
         self.pure_cert
     }
+    /// Consumes this key entry and extracts the keyparameters and metadata from it.
+    pub fn into_key_parameters_and_metadata(self) -> (Vec<KeyParameter>, KeyMetaData) {
+        (self.parameters, self.metadata)
+    }
 }
 
 /// Indicates the sub component of a key entry for persistent storage.
@@ -790,8 +795,12 @@
 pub struct PerBootDbKeepAlive(Connection);
 
 impl KeystoreDB {
+    const UNASSIGNED_KEY_ID: i64 = -1i64;
     const PERBOOT_DB_FILE_NAME: &'static str = &"file:perboot.sqlite?mode=memory&cache=shared";
 
+    /// The alias of the user super key.
+    pub const USER_SUPER_KEY_ALIAS: &'static str = &"USER_SUPER_KEY";
+
     /// This creates a PerBootDbKeepAlive object to keep the per boot database alive.
     pub fn keep_perboot_db_alive() -> Result<PerBootDbKeepAlive> {
         let conn = Connection::open_in_memory()
@@ -912,7 +921,8 @@
             "CREATE TABLE IF NOT EXISTS persistent.keymetadata (
                      keyentryid INTEGER,
                      tag INTEGER,
-                     data ANY);",
+                     data ANY,
+                     UNIQUE (keyentryid, tag));",
             NO_PARAMS,
         )
         .context("Failed to initialize \"keymetadata\" table.")?;
@@ -1093,6 +1103,98 @@
         .context("In cleanup_leftovers.")
     }
 
+    /// Checks whether a key exists with the given key type and key descriptor properties.
+    pub fn key_exists(
+        &mut self,
+        domain: Domain,
+        nspace: i64,
+        alias: &str,
+        key_type: KeyType,
+    ) -> Result<bool> {
+        self.with_transaction(TransactionBehavior::Immediate, |tx| {
+            let key_descriptor =
+                KeyDescriptor { domain, nspace, alias: Some(alias.to_string()), blob: None };
+            let result = Self::load_key_entry_id(&tx, &key_descriptor, key_type);
+            match result {
+                Ok(_) => Ok(true),
+                Err(error) => match error.root_cause().downcast_ref::<KsError>() {
+                    Some(KsError::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(false),
+                    _ => Err(error).context("In key_exists: Failed to find if the key exists."),
+                },
+            }
+            .no_gc()
+        })
+        .context("In key_exists.")
+    }
+
+    /// Stores a super key in the database.
+    pub fn store_super_key(
+        &mut self,
+        user_id: u32,
+        blob_info: &(&[u8], &BlobMetaData),
+    ) -> Result<KeyEntry> {
+        self.with_transaction(TransactionBehavior::Immediate, |tx| {
+            let key_id = Self::insert_with_retry(|id| {
+                tx.execute(
+                    "INSERT into persistent.keyentry
+                            (id, key_type, domain, namespace, alias, state, km_uuid)
+                            VALUES(?, ?, ?, ?, ?, ?, ?);",
+                    params![
+                        id,
+                        KeyType::Super,
+                        Domain::APP.0,
+                        user_id as i64,
+                        Self::USER_SUPER_KEY_ALIAS,
+                        KeyLifeCycle::Live,
+                        &KEYSTORE_UUID,
+                    ],
+                )
+            })
+            .context("Failed to insert into keyentry table.")?;
+
+            let (blob, blob_metadata) = *blob_info;
+            Self::set_blob_internal(
+                &tx,
+                key_id,
+                SubComponentType::KEY_BLOB,
+                Some(blob),
+                Some(blob_metadata),
+            )
+            .context("Failed to store key blob.")?;
+
+            Self::load_key_components(tx, KeyEntryLoadBits::KM, key_id)
+                .context("Trying to load key components.")
+                .no_gc()
+        })
+        .context("In store_super_key.")
+    }
+
+    /// Loads the super key of the given user, if it exists.
+    pub fn load_super_key(&mut self, user_id: u32) -> Result<Option<(KeyIdGuard, KeyEntry)>> {
+        self.with_transaction(TransactionBehavior::Immediate, |tx| {
+            let key_descriptor = KeyDescriptor {
+                domain: Domain::APP,
+                nspace: user_id as i64,
+                alias: Some(String::from("USER_SUPER_KEY")),
+                blob: None,
+            };
+            let id = Self::load_key_entry_id(&tx, &key_descriptor, KeyType::Super);
+            match id {
+                Ok(id) => {
+                    let key_entry = Self::load_key_components(&tx, KeyEntryLoadBits::KM, id)
+                        .context("In load_super_key. Failed to load key entry.")?;
+                    Ok(Some((KEY_ID_LOCK.get(id), key_entry)))
+                }
+                Err(error) => match error.root_cause().downcast_ref::<KsError>() {
+                    Some(KsError::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(None),
+                    _ => Err(error).context("In load_super_key."),
+                },
+            }
+            .no_gc()
+        })
+        .context("In load_super_key.")
+    }
+
     /// Atomically loads a key entry and associated metadata or creates it using the
     /// callback create_new_key callback. The callback is called during a database
     /// transaction. This means that implementers should be mindful about using
@@ -1359,6 +1461,24 @@
         .context("In set_blob.")
     }
 
+    /// Why would we insert a deleted blob? This weird function is for the purpose of legacy
+    /// key migration in the case where we bulk delete all the keys of an app or even a user.
+    /// We use this to insert key blobs into the database which can then be garbage collected
+    /// lazily by the key garbage collector.
+    pub fn set_deleted_blob(&mut self, blob: &[u8], blob_metadata: &BlobMetaData) -> Result<()> {
+        self.with_transaction(TransactionBehavior::Immediate, |tx| {
+            Self::set_blob_internal(
+                &tx,
+                Self::UNASSIGNED_KEY_ID,
+                SubComponentType::KEY_BLOB,
+                Some(blob),
+                Some(blob_metadata),
+            )
+            .need_gc()
+        })
+        .context("In set_deleted_blob.")
+    }
+
     fn set_blob_internal(
         tx: &Transaction,
         key_id: i64,
@@ -1451,6 +1571,7 @@
     pub fn store_signed_attestation_certificate_chain(
         &mut self,
         raw_public_key: &[u8],
+        batch_cert: &[u8],
         cert_chain: &[u8],
         expiration_date: i64,
         km_uuid: &Uuid,
@@ -1509,6 +1630,8 @@
                 None,
             )
             .context("Failed to insert cert chain")?;
+            Self::set_blob_internal(&tx, key_id, SubComponentType::CERT, Some(batch_cert), None)
+                .context("Failed to insert cert")?;
             Ok(()).no_gc()
         })
         .context("In store_signed_attestation_certificate_chain: ")
@@ -1759,19 +1882,21 @@
                     |row| Ok((row.get(0)?, row.get(1)?)),
                 )?
                 .collect::<rusqlite::Result<Vec<(SubComponentType, Vec<u8>)>>>()
-                .context("In retrieve_attestation_key_and_cert_chain: query failed.")?;
+                .context("query failed.")?;
             if rows.is_empty() {
                 return Ok(None).no_gc();
-            } else if rows.len() != 2 {
+            } else if rows.len() != 3 {
                 return Err(KsError::sys()).context(format!(
                     concat!(
-                "In retrieve_attestation_key_and_cert_chain: Expected to get a single attestation",
-                "key chain but instead got {}."),
+                        "Expected to get a single attestation",
+                        "key, cert, and cert chain for a total of 3 entries, but instead got {}."
+                    ),
                     rows.len()
                 ));
             }
             let mut km_blob: Vec<u8> = Vec::new();
             let mut cert_chain_blob: Vec<u8> = Vec::new();
+            let mut batch_cert_blob: Vec<u8> = Vec::new();
             for row in rows {
                 let sub_type: SubComponentType = row.0;
                 match sub_type {
@@ -1781,15 +1906,20 @@
                     SubComponentType::CERT_CHAIN => {
                         cert_chain_blob = row.1;
                     }
+                    SubComponentType::CERT => {
+                        batch_cert_blob = row.1;
+                    }
                     _ => Err(KsError::sys()).context("Unknown or incorrect subcomponent type.")?,
                 }
             }
             Ok(Some(CertificateChain {
                 private_key: ZVec::try_from(km_blob)?,
+                batch_cert: ZVec::try_from(batch_cert_blob)?,
                 cert_chain: ZVec::try_from(cert_chain_blob)?,
             }))
             .no_gc()
         })
+        .context("In retrieve_attestation_key_and_cert_chain:")
     }
 
     /// Updates the alias column of the given key id `newid` with the given alias,
@@ -1965,7 +2095,7 @@
             .prepare(
                 "SELECT id FROM persistent.keyentry
                     WHERE
-                    key_type =  ?
+                    key_type = ?
                     AND domain = ?
                     AND namespace = ?
                     AND alias = ?
@@ -2392,6 +2522,82 @@
         .context("In get_key_km_uuid.")
     }
 
+    /// Deletes the keys created on behalf of the user, denoted by the user id.
+    /// Deletes all the keys unless 'keep_non_super_encrypted_keys' is set to true,
+    /// in which case only super-encrypted keys are deleted and all other keys are kept.
+    /// The garbage collector is notified if any keys were marked unreferenced.
+    pub fn unbind_keys_for_user(
+        &mut self,
+        user_id: u32,
+        keep_non_super_encrypted_keys: bool,
+    ) -> Result<()> {
+        self.with_transaction(TransactionBehavior::Immediate, |tx| {
+            let mut stmt = tx
+                .prepare(&format!(
+                    "SELECT id from persistent.keyentry
+                     WHERE (
+                         key_type = ?
+                         AND domain = ?
+                         AND cast ( (namespace/{aid_user_offset}) as int) = ?
+                         AND state = ?
+                     ) OR (
+                         key_type = ?
+                         AND namespace = ?
+                         AND alias = ?
+                         AND state = ?
+                     );",
+                    aid_user_offset = AID_USER_OFFSET
+                ))
+                .context(concat!(
+                    "In unbind_keys_for_user. ",
+                    "Failed to prepare the query to find the keys created by apps."
+                ))?;
+
+            let mut rows = stmt
+                .query(params![
+                    // WHERE client key:
+                    KeyType::Client,
+                    Domain::APP.0 as u32,
+                    user_id,
+                    KeyLifeCycle::Live,
+                    // OR super key:
+                    KeyType::Super,
+                    user_id,
+                    Self::USER_SUPER_KEY_ALIAS,
+                    KeyLifeCycle::Live
+                ])
+                .context("In unbind_keys_for_user. Failed to query the keys created by apps.")?;
+
+            let mut key_ids: Vec<i64> = Vec::new();
+            db_utils::with_rows_extract_all(&mut rows, |row| {
+                key_ids
+                    .push(row.get(0).context("Failed to read key id of a key created by an app.")?);
+                Ok(())
+            })
+            .context("In unbind_keys_for_user.")?;
+
+            let mut notify_gc = false;
+            for key_id in key_ids {
+                if keep_non_super_encrypted_keys {
+                    // Load metadata and filter out non-super-encrypted keys.
+                    if let (_, Some((_, blob_metadata)), _, _) =
+                        Self::load_blob_components(key_id, KeyEntryLoadBits::KM, tx)
+                            .context("In unbind_keys_for_user: Trying to load blob info.")?
+                    {
+                        if blob_metadata.encrypted_by().is_none() {
+                            continue;
+                        }
+                    }
+                }
+                notify_gc = Self::mark_unreferenced(&tx, key_id)
+                    .context("In unbind_keys_for_user.")?
+                    || notify_gc;
+            }
+            Ok(()).do_gc(notify_gc)
+        })
+        .context("In unbind_keys_for_user.")
+    }
+
     fn load_key_components(
         tx: &Transaction,
         load_bits: KeyEntryLoadBits,
@@ -2559,7 +2765,10 @@
     // otherwise return the id.
     fn insert_with_retry(inserter: impl Fn(i64) -> rusqlite::Result<usize>) -> Result<i64> {
         loop {
-            let newid: i64 = random();
+            let newid: i64 = match random() {
+                Self::UNASSIGNED_KEY_ID => continue, // UNASSIGNED_KEY_ID cannot be assigned.
+                i => i,
+            };
             match inserter(newid) {
                 // If the id already existed, try again.
                 Err(rusqlite::Error::SqliteFailure(
@@ -2684,6 +2893,7 @@
     };
     use crate::key_perm_set;
     use crate::permission::{KeyPerm, KeyPermSet};
+    use crate::super_key::SuperKeyManager;
     use keystore2_test_utils::TempDir;
     use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
         HardwareAuthToken::HardwareAuthToken,
@@ -2716,8 +2926,10 @@
     where
         F: Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static,
     {
+        let super_key = Arc::new(SuperKeyManager::new());
+
         let gc_db = KeystoreDB::new(path, None).expect("Failed to open test gc db_connection.");
-        let gc = Gc::new_init_with(Default::default(), move || (Box::new(cb), gc_db));
+        let gc = Gc::new_init_with(Default::default(), move || (Box::new(cb), gc_db, super_key));
 
         KeystoreDB::new(path, Some(gc))
     }
@@ -2954,8 +3166,9 @@
             db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
         assert_eq!(true, chain.is_some());
         let cert_chain = chain.unwrap();
-        assert_eq!(cert_chain.private_key.to_vec(), loaded_values[2]);
-        assert_eq!(cert_chain.cert_chain.to_vec(), loaded_values[1]);
+        assert_eq!(cert_chain.private_key.to_vec(), loaded_values.priv_key);
+        assert_eq!(cert_chain.batch_cert.to_vec(), loaded_values.batch_cert);
+        assert_eq!(cert_chain.cert_chain.to_vec(), loaded_values.cert_chain);
         Ok(())
     }
 
@@ -2990,6 +3203,7 @@
         let private_key: Vec<u8> = vec![0x04, 0x05, 0x06];
         let raw_public_key: Vec<u8> = vec![0x07, 0x08, 0x09];
         let cert_chain: Vec<u8> = vec![0x0a, 0x0b, 0x0c];
+        let batch_cert: Vec<u8> = vec![0x0d, 0x0e, 0x0f];
         db.create_attestation_key_entry(
             &public_key,
             &raw_public_key,
@@ -3002,6 +3216,7 @@
         assert_eq!(status.total, 4);
         db.store_signed_attestation_certificate_chain(
             &raw_public_key,
+            &batch_cert,
             &cert_chain,
             20,
             &KEYSTORE_UUID,
@@ -3036,9 +3251,9 @@
             .conn
             .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
             .expect("Failed to get blob entry row count.");
-        // We expect 6 rows here because there are two blobs per attestation key, i.e.,
-        // One key and one certificate.
-        assert_eq!(blob_entry_row_count, 6);
+        // We expect 9 rows here because there are three blobs per attestation key, i.e.,
+        // one key, one certificate chain, and one certificate.
+        assert_eq!(blob_entry_row_count, 9);
 
         assert_eq!(db.delete_expired_attestation_keys()?, 2);
 
@@ -3046,8 +3261,9 @@
             db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
         assert!(cert_chain.is_some());
         let value = cert_chain.unwrap();
-        assert_eq!(entry_values[1], value.cert_chain.to_vec());
-        assert_eq!(entry_values[2], value.private_key.to_vec());
+        assert_eq!(entry_values.batch_cert, value.batch_cert.to_vec());
+        assert_eq!(entry_values.cert_chain, value.cert_chain.to_vec());
+        assert_eq!(entry_values.priv_key, value.private_key.to_vec());
 
         cert_chain = db.retrieve_attestation_key_and_cert_chain(
             Domain::APP,
@@ -3069,9 +3285,9 @@
             .conn
             .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
             .expect("Failed to get blob entry row count.");
-        // There shound be 2 blob entries left, because we deleted two of the attestation
-        // key entries with two blobs each.
-        assert_eq!(blob_entry_row_count, 2);
+        // There should be 3 blob entries left, because we deleted two of the attestation
+        // key entries with three blobs each.
+        assert_eq!(blob_entry_row_count, 3);
 
         Ok(())
     }
@@ -4144,30 +4360,33 @@
             .collect::<Result<Vec<_>>>()
     }
 
+    struct RemoteProvValues {
+        cert_chain: Vec<u8>,
+        priv_key: Vec<u8>,
+        batch_cert: Vec<u8>,
+    }
+
     fn load_attestation_key_pool(
         db: &mut KeystoreDB,
         expiration_date: i64,
         namespace: i64,
         base_byte: u8,
-    ) -> Result<Vec<Vec<u8>>> {
-        let mut chain: Vec<Vec<u8>> = Vec::new();
+    ) -> Result<RemoteProvValues> {
         let public_key: Vec<u8> = vec![base_byte, 0x02 * base_byte];
         let cert_chain: Vec<u8> = vec![0x03 * base_byte, 0x04 * base_byte];
         let priv_key: Vec<u8> = vec![0x05 * base_byte, 0x06 * base_byte];
         let raw_public_key: Vec<u8> = vec![0x0b * base_byte, 0x0c * base_byte];
+        let batch_cert: Vec<u8> = vec![base_byte * 0x0d, base_byte * 0x0e];
         db.create_attestation_key_entry(&public_key, &raw_public_key, &priv_key, &KEYSTORE_UUID)?;
         db.store_signed_attestation_certificate_chain(
             &raw_public_key,
+            &batch_cert,
             &cert_chain,
             expiration_date,
             &KEYSTORE_UUID,
         )?;
         db.assign_attestation_key(Domain::APP, namespace, &KEYSTORE_UUID)?;
-        chain.push(public_key);
-        chain.push(cert_chain);
-        chain.push(priv_key);
-        chain.push(raw_public_key);
-        Ok(chain)
+        Ok(RemoteProvValues { cert_chain, priv_key, batch_cert })
     }
 
     // Note: The parameters and SecurityLevel associations are nonsensical. This
@@ -4541,4 +4760,53 @@
         assert!(last_off_body_1.seconds() < last_off_body_2.seconds());
         Ok(())
     }
+
+    #[test]
+    fn test_unbind_keys_for_user() -> Result<()> {
+        let mut db = new_test_db()?;
+        db.unbind_keys_for_user(1, false)?;
+
+        make_test_key_entry(&mut db, Domain::APP, 210000, TEST_ALIAS, None)?;
+        make_test_key_entry(&mut db, Domain::APP, 110000, TEST_ALIAS, None)?;
+        db.unbind_keys_for_user(2, false)?;
+
+        assert_eq!(1, db.list(Domain::APP, 110000)?.len());
+        assert_eq!(0, db.list(Domain::APP, 210000)?.len());
+
+        db.unbind_keys_for_user(1, true)?;
+        assert_eq!(0, db.list(Domain::APP, 110000)?.len());
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_store_super_key() -> Result<()> {
+        let mut db = new_test_db()?;
+        let pw = "xyzabc".as_bytes();
+        let super_key = keystore2_crypto::generate_aes256_key()?;
+        let secret = String::from("keystore2 is great.");
+        let secret_bytes = secret.into_bytes();
+        let (encrypted_secret, iv, tag) =
+            keystore2_crypto::aes_gcm_encrypt(&secret_bytes, &super_key)?;
+
+        let (encrypted_super_key, metadata) =
+            SuperKeyManager::encrypt_with_password(&super_key, &pw)?;
+        db.store_super_key(1, &(&encrypted_super_key, &metadata))?;
+
+        //check if super key exists
+        assert!(db.key_exists(Domain::APP, 1, "USER_SUPER_KEY", KeyType::Super)?);
+
+        let (_, key_entry) = db.load_super_key(1)?.unwrap();
+        let loaded_super_key = SuperKeyManager::extract_super_key_from_key_entry(key_entry, &pw)?;
+
+        let decrypted_secret_bytes = keystore2_crypto::aes_gcm_decrypt(
+            &encrypted_secret,
+            &iv,
+            &tag,
+            &loaded_super_key.get_key(),
+        )?;
+        let decrypted_secret = String::from_utf8((&decrypted_secret_bytes).to_vec())?;
+        assert_eq!(String::from("keystore2 is great."), decrypted_secret);
+        Ok(())
+    }
 }
diff --git a/keystore2/src/enforcements.rs b/keystore2/src/enforcements.rs
index 9c3bc89..cc59c32 100644
--- a/keystore2/src/enforcements.rs
+++ b/keystore2/src/enforcements.rs
@@ -26,7 +26,10 @@
 use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
     ISecureClock::ISecureClock, TimeStampToken::TimeStampToken,
 };
-use android_system_keystore2::aidl::android::system::keystore2::OperationChallenge::OperationChallenge;
+use android_system_keystore2::aidl::android::system::keystore2::{
+    IKeystoreSecurityLevel::KEY_FLAG_AUTH_BOUND_WITHOUT_CRYPTOGRAPHIC_LSKF_BINDING,
+    OperationChallenge::OperationChallenge,
+};
 use android_system_keystore2::binder::Strong;
 use anyhow::{Context, Result};
 use std::sync::{
@@ -744,6 +747,19 @@
     fn register_op_auth_receiver(&self, challenge: i64, recv: TokenReceiver) {
         self.op_auth_map.add_receiver(challenge, recv);
     }
+
+    /// Given the set of key parameters and flags, check if super encryption is required.
+    pub fn super_encryption_required(key_parameters: &[KeyParameter], flags: Option<i32>) -> bool {
+        let auth_bound = key_parameters.iter().any(|kp| kp.get_tag() == Tag::USER_SECURE_ID);
+
+        let skip_lskf_binding = if let Some(flags) = flags {
+            (flags & KEY_FLAG_AUTH_BOUND_WITHOUT_CRYPTOGRAPHIC_LSKF_BINDING) != 0
+        } else {
+            false
+        };
+
+        auth_bound && !skip_lskf_binding
+    }
 }
 
 impl Default for Enforcements {
diff --git a/keystore2/src/error.rs b/keystore2/src/error.rs
index 7227f62..d67f5f4 100644
--- a/keystore2/src/error.rs
+++ b/keystore2/src/error.rs
@@ -57,6 +57,10 @@
     /// Wraps a Binder status code.
     #[error("Binder transaction error {0:?}")]
     BinderTransaction(StatusCode),
+    /// Wraps a Remote Provisioning ErrorCode as defined by the IRemotelyProvisionedComponent
+    /// AIDL interface spec.
+    #[error("Error::Rp({0:?})")]
+    Rp(ErrorCode),
 }
 
 impl Error {
@@ -101,6 +105,16 @@
     })
 }
 
+/// Helper function to map the binder status we get from calls into a RemotelyProvisionedComponent
+/// to a Keystore Error. We don't create an anyhow error here to make
+/// it easier to evaluate service specific errors.
+pub fn map_rem_prov_error<T>(r: BinderResult<T>) -> Result<T, Error> {
+    r.map_err(|s| match s.exception_code() {
+        ExceptionCode::SERVICE_SPECIFIC => Error::Rp(ErrorCode(s.service_specific_error())),
+        e_code => Error::Binder(e_code, 0),
+    })
+}
+
 /// This function is similar to map_km_error only that we don't expect
 /// any KeyMint error codes, we simply preserve the exception code and optional
 /// service specific exception.
@@ -164,6 +178,7 @@
             let rc = match root_cause.downcast_ref::<Error>() {
                 Some(Error::Rc(rcode)) => rcode.0,
                 Some(Error::Km(ec)) => ec.0,
+                Some(Error::Rp(_)) => ResponseCode::SYSTEM_ERROR.0,
                 // If an Error::Binder reaches this stage we report a system error.
                 // The exception code and possible service specific error will be
                 // printed in the error log above.
diff --git a/keystore2/src/gc.rs b/keystore2/src/gc.rs
index b5b1c6c..6cc0f27 100644
--- a/keystore2/src/gc.rs
+++ b/keystore2/src/gc.rs
@@ -21,6 +21,7 @@
 use crate::{
     async_task,
     database::{KeystoreDB, Uuid},
+    super_key::SuperKeyManager,
 };
 use anyhow::{Context, Result};
 use async_task::AsyncTask;
@@ -37,19 +38,23 @@
     /// time a garbage collector was initialized with the given AsyncTask instance.
     pub fn new_init_with<F>(async_task: Arc<AsyncTask>, init: F) -> Self
     where
-        F: FnOnce() -> (Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>, KeystoreDB)
-            + Send
+        F: FnOnce() -> (
+                Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
+                KeystoreDB,
+                Arc<SuperKeyManager>,
+            ) + Send
             + 'static,
     {
         let weak_at = Arc::downgrade(&async_task);
         // Initialize the task's shelf.
         async_task.queue_hi(move |shelf| {
-            let (invalidate_key, db) = init();
+            let (invalidate_key, db, super_key) = init();
             shelf.get_or_put_with(|| GcInternal {
                 blob_id_to_delete: None,
                 invalidate_key,
                 db,
                 async_task: weak_at,
+                super_key,
             });
         });
         Self { async_task }
@@ -68,6 +73,7 @@
     invalidate_key: Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
     db: KeystoreDB,
     async_task: std::sync::Weak<AsyncTask>,
+    super_key: Arc<SuperKeyManager>,
 }
 
 impl GcInternal {
@@ -91,6 +97,10 @@
             // (At this time keys may get deleted without having the super encryption
             // key in this case we can only delete the key from the database.)
             if let Some(uuid) = blob_metadata.km_uuid() {
+                let blob = self
+                    .super_key
+                    .unwrap_key_if_required(&blob_metadata, &blob)
+                    .context("In process_one_key: Trying to unwrap to-be-deleted blob.")?;
                 (self.invalidate_key)(&uuid, &*blob)
                     .context("In process_one_key: Trying to invalidate key.")?;
             }
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 3037a03..9668ee3 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -18,6 +18,7 @@
 
 use crate::gc::Gc;
 use crate::legacy_blob::LegacyBlobLoader;
+use crate::legacy_migrator::LegacyMigrator;
 use crate::super_key::SuperKeyManager;
 use crate::utils::Asp;
 use crate::{async_task::AsyncTask, database::MonotonicRawTime};
@@ -28,8 +29,8 @@
 };
 use crate::{enforcements::Enforcements, error::map_km_error};
 use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
-    IKeyMintDevice::IKeyMintDevice, KeyMintHardwareInfo::KeyMintHardwareInfo,
-    SecurityLevel::SecurityLevel,
+    IKeyMintDevice::IKeyMintDevice, IRemotelyProvisionedComponent::IRemotelyProvisionedComponent,
+    KeyMintHardwareInfo::KeyMintHardwareInfo, SecurityLevel::SecurityLevel,
 };
 use android_hardware_security_keymint::binder::{StatusCode, Strong};
 use android_security_compat::aidl::android::security::compat::IKeystoreCompatService::IKeystoreCompatService;
@@ -49,7 +50,7 @@
 /// a gc. Although one GC is created for each thread local database connection, this closure
 /// is run only once, as long as the ASYNC_TASK instance is the same. So only one additional
 /// database connection is created for the garbage collector worker.
-fn create_thread_local_db() -> KeystoreDB {
+pub fn create_thread_local_db() -> KeystoreDB {
     let gc = Gc::new_init_with(ASYNC_TASK.clone(), || {
         (
             Box::new(|uuid, blob| {
@@ -60,6 +61,7 @@
             }),
             KeystoreDB::new(&DB_PATH.lock().expect("Could not get the database directory."), None)
                 .expect("Failed to open database."),
+            SUPER_KEY.clone(),
         )
     });
 
@@ -126,6 +128,21 @@
     }
 }
 
+#[derive(Default)]
+struct RemotelyProvisionedDevicesMap {
+    devices_by_sec_level: HashMap<SecurityLevel, Asp>,
+}
+
+impl RemotelyProvisionedDevicesMap {
+    fn dev_by_sec_level(&self, sec_level: &SecurityLevel) -> Option<Asp> {
+        self.devices_by_sec_level.get(sec_level).map(|dev| (*dev).clone())
+    }
+
+    fn insert(&mut self, sec_level: SecurityLevel, dev: Asp) {
+        self.devices_by_sec_level.insert(sec_level, dev);
+    }
+}
+
 lazy_static! {
     /// The path where keystore stores all its keys.
     pub static ref DB_PATH: Mutex<PathBuf> = Mutex::new(
@@ -136,6 +153,8 @@
     static ref KEY_MINT_DEVICES: Mutex<DevicesMap> = Default::default();
     /// Timestamp service.
     static ref TIME_STAMP_DEVICE: Mutex<Option<Asp>> = Default::default();
+    /// RemotelyProvisionedComponent HAL devices.
+    static ref REMOTELY_PROVISIONED_COMPONENT_DEVICES: Mutex<RemotelyProvisionedDevicesMap> = Default::default();
     /// A single on-demand worker thread that handles deferred tasks with two different
     /// priorities.
     pub static ref ASYNC_TASK: Arc<AsyncTask> = Default::default();
@@ -143,8 +162,11 @@
     pub static ref ENFORCEMENTS: Enforcements = Enforcements::new();
     /// LegacyBlobLoader is initialized and exists globally.
     /// The same directory used by the database is used by the LegacyBlobLoader as well.
-    pub static ref LEGACY_BLOB_LOADER: LegacyBlobLoader = LegacyBlobLoader::new(
-        &DB_PATH.lock().expect("Could not get the database path for legacy blob loader."));
+    pub static ref LEGACY_BLOB_LOADER: Arc<LegacyBlobLoader> = Arc::new(LegacyBlobLoader::new(
+        &DB_PATH.lock().expect("Could not get the database path for legacy blob loader.")));
+    /// Legacy migrator. Atomically migrates legacy blobs to the database.
+    pub static ref LEGACY_MIGRATOR: Arc<LegacyMigrator> =
+        Arc::new(LegacyMigrator::new(ASYNC_TASK.clone()));
 }
 
 static KEYMINT_SERVICE_NAME: &str = "android.hardware.security.keymint.IKeyMintDevice";
@@ -271,3 +293,45 @@
         Ok(dev)
     }
 }
+
+static REMOTE_PROVISIONING_HAL_SERVICE_NAME: &str =
+    "android.hardware.security.keymint.IRemotelyProvisionedComponent";
+
+fn connect_remotely_provisioned_component(security_level: &SecurityLevel) -> Result<Asp> {
+    let service_name = match *security_level {
+        SecurityLevel::TRUSTED_ENVIRONMENT => {
+            format!("{}/default", REMOTE_PROVISIONING_HAL_SERVICE_NAME)
+        }
+        SecurityLevel::STRONGBOX => format!("{}/strongbox", REMOTE_PROVISIONING_HAL_SERVICE_NAME),
+        _ => {
+            // Given the integration of IRemotelyProvisionedComponent with KeyMint, it is reasonable
+            // to return HARDWARE_TYPE_UNAVAILABLE as a Km error if it cannot be found.
+            return Err(Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE))
+                .context("In connect_remotely_provisioned_component.");
+        }
+    };
+
+    let rem_prov_hal: Strong<dyn IRemotelyProvisionedComponent> =
+        map_binder_status_code(binder::get_interface(&service_name))
+            .context(concat!(
+                "In connect_remotely_provisioned_component: Trying to connect to",
+                " RemotelyProvisionedComponent service."
+            ))
+            .map_err(|e| e)?;
+    Ok(Asp::new(rem_prov_hal.as_binder()))
+}
+
+/// Get a remote provisioning component device for the given security level either from the cache or
+/// by making a new connection. Returns the device.
+pub fn get_remotely_provisioned_component(security_level: &SecurityLevel) -> Result<Asp> {
+    let mut devices_map = REMOTELY_PROVISIONED_COMPONENT_DEVICES.lock().unwrap();
+    if let Some(dev) = devices_map.dev_by_sec_level(&security_level) {
+        Ok(dev)
+    } else {
+        let dev = connect_remotely_provisioned_component(security_level)
+            .context("In get_remotely_provisioned_component.")?;
+        devices_map.insert(*security_level, dev);
+        // Unwrap must succeed because we just inserted it.
+        Ok(devices_map.dev_by_sec_level(security_level).unwrap())
+    }
+}
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index 75475e1..9dc59a2 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -17,13 +17,17 @@
 use keystore2::apc::ApcManager;
 use keystore2::authorization::AuthorizationManager;
 use keystore2::globals::ENFORCEMENTS;
+use keystore2::remote_provisioning::RemoteProvisioningService;
 use keystore2::service::KeystoreService;
+use keystore2::user_manager::UserManager;
 use log::{error, info};
 use std::{panic, path::Path, sync::mpsc::channel};
 
 static KS2_SERVICE_NAME: &str = "android.system.keystore2";
 static APC_SERVICE_NAME: &str = "android.security.apc";
 static AUTHORIZATION_SERVICE_NAME: &str = "android.security.authorization";
+static REMOTE_PROVISIONING_SERVICE_NAME: &str = "android.security.remoteprovisioning";
+static USER_MANAGER_SERVICE_NAME: &str = "android.security.usermanager";
 
 /// Keystore 2.0 takes one argument which is a path indicating its designated working directory.
 fn main() {
@@ -87,6 +91,29 @@
             panic!("Failed to register service {} because of {:?}.", AUTHORIZATION_SERVICE_NAME, e);
         });
 
+    let usermanager_service = UserManager::new_native_binder().unwrap_or_else(|e| {
+        panic!("Failed to create service {} because of {:?}.", USER_MANAGER_SERVICE_NAME, e);
+    });
+    binder::add_service(USER_MANAGER_SERVICE_NAME, usermanager_service.as_binder()).unwrap_or_else(
+        |e| {
+            panic!("Failed to register service {} because of {:?}.", USER_MANAGER_SERVICE_NAME, e);
+        },
+    );
+
+    // Devices with KS2 and KM 1.0 may not have any IRemotelyProvisionedComponent HALs at all. Do
+    // not panic if new_native_binder returns failure because it could not find the TEE HAL.
+    if let Ok(remote_provisioning_service) = RemoteProvisioningService::new_native_binder() {
+        binder::add_service(
+            REMOTE_PROVISIONING_SERVICE_NAME,
+            remote_provisioning_service.as_binder(),
+        )
+        .unwrap_or_else(|e| {
+            panic!(
+                "Failed to register service {} because of {:?}.",
+                REMOTE_PROVISIONING_SERVICE_NAME, e
+            );
+        });
+    }
     info!("Successfully registered Keystore 2.0 service.");
 
     info!("Joining thread pool now.");
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index ebd063c..b51f644 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -17,7 +17,6 @@
 //! This module implements methods to load legacy keystore key blob files.
 
 use crate::{
-    database::KeyMetaData,
     error::{Error as KsError, ResponseCode},
     key_parameter::{KeyParameter, KeyParameterValue},
     super_key::SuperKeyManager,
@@ -28,8 +27,12 @@
 };
 use anyhow::{Context, Result};
 use keystore2_crypto::{aes_gcm_decrypt, derive_key_from_password, ZVec};
-use std::io::{ErrorKind, Read};
+use std::collections::{HashMap, HashSet};
 use std::{convert::TryInto, fs::File, path::Path, path::PathBuf};
+use std::{
+    fs,
+    io::{ErrorKind, Read, Result as IoResult},
+};
 
 const SUPPORTED_LEGACY_BLOB_VERSION: u8 = 3;
 
@@ -231,6 +234,7 @@
     pub fn new(path: &Path) -> Self {
         Self { path: path.to_owned() }
     }
+
     /// Encodes an alias string as ascii character sequence in the range
     /// ['+' .. '.'] and ['0' .. '~'].
     /// Bytes with values in the range ['0' .. '~'] are represented as they are.
@@ -587,7 +591,7 @@
         let sw_list = Self::read_key_parameters(&mut stream)
             .context("In read_characteristics_file.")?
             .into_iter()
-            .map(|value| KeyParameter::new(value, SecurityLevel::SOFTWARE));
+            .map(|value| KeyParameter::new(value, SecurityLevel::KEYSTORE));
 
         Ok(hw_list.into_iter().flatten().chain(sw_list).collect())
     }
@@ -600,7 +604,7 @@
     //            used this for user installed certificates without private key material.
 
     fn read_km_blob_file(&self, uid: u32, alias: &str) -> Result<Option<(Blob, String)>> {
-        let mut iter = ["USRPKEY", "USERSKEY"].iter();
+        let mut iter = ["USRPKEY", "USRSKEY"].iter();
 
         let (blob, prefix) = loop {
             if let Some(prefix) = iter.next() {
@@ -619,7 +623,7 @@
     }
 
     fn read_generic_blob(path: &Path) -> Result<Option<Blob>> {
-        let mut file = match File::open(path) {
+        let mut file = match Self::with_retry_interrupted(|| File::open(path)) {
             Ok(file) => file,
             Err(e) => match e.kind() {
                 ErrorKind::NotFound => return Ok(None),
@@ -633,47 +637,240 @@
     /// This function constructs the blob file name which has the form:
     /// user_<android user id>/<uid>_<alias>.
     fn make_blob_filename(&self, uid: u32, alias: &str, prefix: &str) -> PathBuf {
-        let mut path = self.path.clone();
         let user_id = uid_to_android_user(uid);
         let encoded_alias = Self::encode_alias(&format!("{}_{}", prefix, alias));
-        path.push(format!("user_{}", user_id));
+        let mut path = self.make_user_path_name(user_id);
         path.push(format!("{}_{}", uid, encoded_alias));
         path
     }
 
     /// This function constructs the characteristics file name which has the form:
-    /// user_<android user id>/.<uid>_chr_<alias>.
+    /// user_<android user id>/.<uid>_chr_<prefix>_<alias>.
     fn make_chr_filename(&self, uid: u32, alias: &str, prefix: &str) -> PathBuf {
-        let mut path = self.path.clone();
         let user_id = uid_to_android_user(uid);
         let encoded_alias = Self::encode_alias(&format!("{}_{}", prefix, alias));
-        path.push(format!("user_{}", user_id));
+        let mut path = self.make_user_path_name(user_id);
         path.push(format!(".{}_chr_{}", uid, encoded_alias));
         path
     }
 
-    fn load_by_uid_alias(
+    fn make_super_key_filename(&self, user_id: u32) -> PathBuf {
+        let mut path = self.make_user_path_name(user_id);
+        path.push(".masterkey");
+        path
+    }
+
+    fn make_user_path_name(&self, user_id: u32) -> PathBuf {
+        let mut path = self.path.clone();
+        path.push(&format!("user_{}", user_id));
+        path
+    }
+
+    /// Returns whether the legacy blob database is empty, i.e., there are no entries matching "user_*"
+    /// in the database dir.
+    pub fn is_empty(&self) -> Result<bool> {
+        let dir = Self::with_retry_interrupted(|| fs::read_dir(self.path.as_path()))
+            .context("In is_empty: Failed to open legacy blob database.")?;
+        for entry in dir {
+            if (*entry.context("In is_empty: Trying to access dir entry")?.file_name())
+                .to_str()
+                .map_or(false, |f| f.starts_with("user_"))
+            {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    }
+
+    /// Returns whether the legacy blob database is empty for the given user, i.e., the user's
+    /// "user_<user_id>" directory does not exist or contains no entries.
+    pub fn is_empty_user(&self, user_id: u32) -> Result<bool> {
+        let mut user_path = self.path.clone();
+        user_path.push(format!("user_{}", user_id));
+        if !user_path.as_path().is_dir() {
+            return Ok(true);
+        }
+        Ok(Self::with_retry_interrupted(|| user_path.read_dir())
+            .context("In is_empty_user: Failed to open legacy user dir.")?
+            .next()
+            .is_none())
+    }
+
+    fn extract_alias(encoded_alias: &str) -> Option<String> {
+        // We can check the encoded alias because the prefixes we are interested
+        // in are all in the printable range that don't get mangled.
+        for prefix in &["USRPKEY_", "USRSKEY_", "USRCERT_", "CACERT_"] {
+            if let Some(alias) = encoded_alias.strip_prefix(prefix) {
+                return Self::decode_alias(&alias).ok();
+            }
+        }
+        None
+    }
+
+    /// List all entries for a given user. The strings are unchanged file names, i.e.,
+    /// encoded with UID prefix.
+    fn list_user(&self, user_id: u32) -> Result<Vec<String>> {
+        let path = self.make_user_path_name(user_id);
+        let dir =
+            Self::with_retry_interrupted(|| fs::read_dir(path.as_path())).with_context(|| {
+                format!("In list_user: Failed to open legacy blob database. {:?}", path)
+            })?;
+        let mut result: Vec<String> = Vec::new();
+        for entry in dir {
+            let file_name = entry.context("In list_user: Trying to access dir entry")?.file_name();
+            if let Some(f) = file_name.to_str() {
+                result.push(f.to_string())
+            }
+        }
+        Ok(result)
+    }
+
+    /// List all keystore entries belonging to the given user. Returns a map of UIDs
+    /// to sets of decoded aliases.
+    pub fn list_keystore_entries_for_user(
+        &self,
+        user_id: u32,
+    ) -> Result<HashMap<u32, HashSet<String>>> {
+        let user_entries = self
+            .list_user(user_id)
+            .context("In list_keystore_entries_for_user: Trying to list user.")?;
+
+        let result =
+            user_entries.into_iter().fold(HashMap::<u32, HashSet<String>>::new(), |mut acc, v| {
+                if let Some(sep_pos) = v.find('_') {
+                    if let Ok(uid) = v[0..sep_pos].parse::<u32>() {
+                        if let Some(alias) = Self::extract_alias(&v[sep_pos + 1..]) {
+                            let entry = acc.entry(uid).or_default();
+                            entry.insert(alias);
+                        }
+                    }
+                }
+                acc
+            });
+        Ok(result)
+    }
+
+    /// List all keystore entries belonging to the given uid.
+    pub fn list_keystore_entries_for_uid(&self, uid: u32) -> Result<Vec<String>> {
+        let user_id = uid_to_android_user(uid);
+
+        let user_entries = self
+            .list_user(user_id)
+            .context("In list_keystore_entries_for_uid: Trying to list user.")?;
+
+        let uid_str = format!("{}_", uid);
+
+        let mut result: Vec<String> = user_entries
+            .into_iter()
+            .filter_map(|v| {
+                if !v.starts_with(&uid_str) {
+                    return None;
+                }
+                let encoded_alias = &v[uid_str.len()..];
+                Self::extract_alias(encoded_alias)
+            })
+            .collect();
+
+        result.sort_unstable();
+        result.dedup();
+        Ok(result)
+    }
+
+    fn with_retry_interrupted<F, T>(f: F) -> IoResult<T>
+    where
+        F: Fn() -> IoResult<T>,
+    {
+        loop {
+            match f() {
+                Ok(v) => return Ok(v),
+                Err(e) => match e.kind() {
+                    ErrorKind::Interrupted => continue,
+                    _ => return Err(e),
+                },
+            }
+        }
+    }
+
+    /// Deletes a keystore entry. If this removes the last entry from the user's
+    /// directory, the user_<user_id> directory is removed as well.
+    pub fn remove_keystore_entry(&self, uid: u32, alias: &str) -> Result<bool> {
+        let mut something_was_deleted = false;
+        let prefixes = ["USRPKEY", "USRSKEY"];
+        for prefix in &prefixes {
+            let path = self.make_blob_filename(uid, alias, prefix);
+            if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+                match e.kind() {
+                    // Only one of USRPKEY and USRSKEY is expected to be present.
+                    ErrorKind::NotFound => continue,
+                    // Log error but ignore.
+                    _ => log::error!("Error while deleting key blob entries. {:?}", e),
+                }
+            }
+            let path = self.make_chr_filename(uid, alias, prefix);
+            if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+                match e.kind() {
+                    ErrorKind::NotFound => {
+                        log::info!("No characteristics file found for legacy key blob.")
+                    }
+                    // Log error but ignore.
+                    _ => log::error!("Error while deleting key blob entries. {:?}", e),
+                }
+            }
+            something_was_deleted = true;
+            // Only one of USRPKEY and USRSKEY can be present. So we can end the loop
+            // if we reach this point.
+            break;
+        }
+
+        let prefixes = ["USRCERT", "CACERT"];
+        for prefix in &prefixes {
+            let path = self.make_blob_filename(uid, alias, prefix);
+            if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+                match e.kind() {
+                    // USRCERT and CACERT are optional; either, both, or neither may be present.
+                    ErrorKind::NotFound => continue,
+                    // Log error but ignore.
+                    _ => log::error!("Error while deleting key blob entries. {:?}", e),
+                }
+            }
+            something_was_deleted = true;
+        }
+
+        if something_was_deleted {
+            let user_id = uid_to_android_user(uid);
+            if self
+                .is_empty_user(user_id)
+                .context("In remove_keystore_entry: Trying to check for empty user dir.")?
+            {
+                let user_path = self.make_user_path_name(user_id);
+                Self::with_retry_interrupted(|| fs::remove_dir(user_path.as_path())).ok();
+            }
+        }
+
+        Ok(something_was_deleted)
+    }
+
+    /// Load a legacy key blob entry by uid and alias.
+    pub fn load_by_uid_alias(
         &self,
         uid: u32,
         alias: &str,
-        key_manager: &SuperKeyManager,
-    ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<Vec<u8>>, Option<Vec<u8>>, KeyMetaData)>
-    {
-        let metadata = KeyMetaData::new();
-
+        key_manager: Option<&SuperKeyManager>,
+    ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<Vec<u8>>, Option<Vec<u8>>)> {
         let km_blob = self.read_km_blob_file(uid, alias).context("In load_by_uid_alias.")?;
 
         let km_blob = match km_blob {
             Some((km_blob, prefix)) => {
-                let km_blob =
-                    match km_blob {
-                        Blob { flags: _, value: BlobValue::Decrypted(_) } => km_blob,
-                        // Unwrap the key blob if required.
-                        Blob { flags, value: BlobValue::Encrypted { iv, tag, data } } => {
+                let km_blob = match km_blob {
+                    Blob { flags: _, value: BlobValue::Decrypted(_) } => km_blob,
+                    // Unwrap the key blob if required and if we have key_manager.
+                    Blob { flags, value: BlobValue::Encrypted { ref iv, ref tag, ref data } } => {
+                        if let Some(key_manager) = key_manager {
                             let decrypted = match key_manager
                                 .get_per_boot_key_by_user_id(uid_to_android_user(uid))
                             {
-                                Some(key) => aes_gcm_decrypt(&data, &iv, &tag, &key).context(
+                                Some(key) => aes_gcm_decrypt(data, iv, tag, &(key.get_key()))
+                                    .context(
                                     "In load_by_uid_alias: while trying to decrypt legacy blob.",
                                 )?,
                                 None => {
@@ -687,11 +884,16 @@
                                 }
                             };
                             Blob { flags, value: BlobValue::Decrypted(decrypted) }
+                        } else {
+                            km_blob
                         }
-                        _ => return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
+                    }
+                    _ => {
+                        return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
                             "In load_by_uid_alias: Found wrong blob type in legacy key blob file.",
-                        ),
-                    };
+                        )
+                    }
+                };
 
                 let hw_sec_level = match km_blob.is_strongbox() {
                     true => SecurityLevel::STRONGBOX,
@@ -729,14 +931,17 @@
             }
         };
 
-        Ok((km_blob, user_cert, ca_cert, metadata))
+        Ok((km_blob, user_cert, ca_cert))
+    }
+
+    /// Returns true if the given user has a super key.
+    pub fn has_super_key(&self, user_id: u32) -> bool {
+        self.make_super_key_filename(user_id).is_file()
     }
 
     /// Load and decrypt legacy super key blob.
     pub fn load_super_key(&self, user_id: u32, pw: &[u8]) -> Result<Option<ZVec>> {
-        let mut path = self.path.clone();
-        path.push(&format!("user_{}", user_id));
-        path.push(".masterkey");
+        let path = self.make_super_key_filename(user_id);
         let blob = Self::read_generic_blob(&path)
             .context("In load_super_key: While loading super key.")?;
 
@@ -763,6 +968,18 @@
 
         Ok(blob)
     }
+
+    /// Removes the super key for the given user from the legacy database.
+    /// If this was the last entry in the user's directory, this function removes
+    /// the user_<user_id> directory as well.
+    pub fn remove_super_key(&self, user_id: u32) {
+        let path = self.make_super_key_filename(user_id);
+        Self::with_retry_interrupted(|| fs::remove_file(path.as_path())).ok();
+        if self.is_empty_user(user_id).ok().unwrap_or(false) {
+            let path = self.make_user_path_name(user_id);
+            Self::with_retry_interrupted(|| fs::remove_dir(path.as_path())).ok();
+        }
+    }
 }
 
 #[cfg(test)]
@@ -897,6 +1114,37 @@
     }
 
     #[test]
+    fn test_is_empty() {
+        let temp_dir = TempDir::new("test_is_empty").expect("Failed to create temp dir.");
+        let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+        assert!(legacy_blob_loader.is_empty().expect("Should succeed and be empty."));
+
+        let _db = crate::database::KeystoreDB::new(temp_dir.path(), None)
+            .expect("Failed to open database.");
+
+        assert!(legacy_blob_loader.is_empty().expect("Should succeed and still be empty."));
+
+        std::fs::create_dir(&*temp_dir.build().push("user_0")).expect("Failed to create user_0.");
+
+        assert!(!legacy_blob_loader.is_empty().expect("Should succeed but not be empty."));
+
+        std::fs::create_dir(&*temp_dir.build().push("user_10")).expect("Failed to create user_10.");
+
+        assert!(!legacy_blob_loader.is_empty().expect("Should succeed but still not be empty."));
+
+        std::fs::remove_dir_all(&*temp_dir.build().push("user_0"))
+            .expect("Failed to remove user_0.");
+
+        assert!(!legacy_blob_loader.is_empty().expect("Should succeed but still not be empty."));
+
+        std::fs::remove_dir_all(&*temp_dir.build().push("user_10"))
+            .expect("Failed to remove user_10.");
+
+        assert!(legacy_blob_loader.is_empty().expect("Should succeed and be empty again."));
+    }
+
+    #[test]
     fn test_legacy_blobs() -> anyhow::Result<()> {
         let temp_dir = TempDir::new("legacy_blob_test")?;
         std::fs::create_dir(&*temp_dir.build().push("user_0"))?;
@@ -943,27 +1191,27 @@
 
         assert_eq!(
             legacy_blob_loader
-                .load_by_uid_alias(10223, "authbound", &key_manager)
+                .load_by_uid_alias(10223, "authbound", Some(&key_manager))
                 .unwrap_err()
                 .root_cause()
                 .downcast_ref::<error::Error>(),
             Some(&error::Error::Rc(ResponseCode::LOCKED))
         );
 
-        key_manager.unlock_user_key(0, PASSWORD, &mut db, &legacy_blob_loader)?;
+        key_manager.unlock_user_key(&mut db, 0, PASSWORD, &legacy_blob_loader)?;
 
-        if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain), _kp) =
-            legacy_blob_loader.load_by_uid_alias(10223, "authbound", &key_manager)?
+        if let (Some((Blob { flags, value: _ }, _params)), Some(cert), Some(chain)) =
+            legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
         {
             assert_eq!(flags, 4);
-            assert_eq!(value, BlobValue::Decrypted(DECRYPTED_USRPKEY_AUTHBOUND.try_into()?));
+            //assert_eq!(value, BlobValue::Encrypted(..));
             assert_eq!(&cert[..], LOADED_CERT_AUTHBOUND);
             assert_eq!(&chain[..], LOADED_CACERT_AUTHBOUND);
         } else {
             panic!("");
         }
-        if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain), _kp) =
-            legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", &key_manager)?
+        if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain)) =
+            legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
         {
             assert_eq!(flags, 0);
             assert_eq!(value, BlobValue::Decrypted(LOADED_USRPKEY_NON_AUTHBOUND.try_into()?));
@@ -973,6 +1221,33 @@
             panic!("");
         }
 
+        legacy_blob_loader.remove_keystore_entry(10223, "authbound").expect("This should succeed.");
+        legacy_blob_loader
+            .remove_keystore_entry(10223, "non_authbound")
+            .expect("This should succeed.");
+
+        assert_eq!(
+            (None, None, None),
+            legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
+        );
+        assert_eq!(
+            (None, None, None),
+            legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
+        );
+
+        // The database should not be empty due to the super key.
+        assert!(!legacy_blob_loader.is_empty()?);
+        assert!(!legacy_blob_loader.is_empty_user(0)?);
+
+        // The database should be considered empty for user 1.
+        assert!(legacy_blob_loader.is_empty_user(1)?);
+
+        legacy_blob_loader.remove_super_key(0);
+
+        // Now it should be empty.
+        assert!(legacy_blob_loader.is_empty_user(0)?);
+        assert!(legacy_blob_loader.is_empty()?);
+
         Ok(())
     }
 }
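
The reworked load_by_uid_alias above now takes an Option<&SuperKeyManager>; with None the
loader returns encrypted blobs unchanged, which is what the new migrator relies on. A minimal
sketch of that calling pattern, assuming the keystore2 crate's public module paths; the path,
uid, alias, and the helper name peek_and_remove are illustrative only:

    use std::path::Path;
    use anyhow::Result;
    use keystore2::legacy_blob::LegacyBlobLoader;

    /// Hypothetical helper: load a legacy entry without decrypting it, then delete it.
    fn peek_and_remove(uid: u32, alias: &str) -> Result<bool> {
        let loader = LegacyBlobLoader::new(Path::new("/data/misc/keystore"));
        // With key_manager == None, encrypted key blobs are returned as-is.
        let (km_blob_params, _user_cert, _ca_cert) = loader.load_by_uid_alias(uid, alias, None)?;
        if km_blob_params.is_some() {
            // remove_keystore_entry returns true if any file was actually deleted.
            return loader.remove_keystore_entry(uid, alias);
        }
        Ok(false)
    }
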
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_migrator.rs
new file mode 100644
index 0000000..9ffe86c
--- /dev/null
+++ b/keystore2/src/legacy_migrator.rs
@@ -0,0 +1,706 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module acts as a bridge between the legacy key database and the keystore2 database.
+
+use crate::database::{
+    BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData, KeyMetaEntry,
+    KeystoreDB, Uuid, KEYSTORE_UUID,
+};
+use crate::error::Error;
+use crate::key_parameter::KeyParameterValue;
+use crate::legacy_blob::BlobValue;
+use crate::utils::uid_to_android_user;
+use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use android_system_keystore2::aidl::android::system::keystore2::{
+    Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
+};
+use anyhow::{Context, Result};
+use core::ops::Deref;
+use keystore2_crypto::ZVec;
+use std::collections::{HashMap, HashSet};
+use std::convert::TryInto;
+use std::sync::atomic::{AtomicU8, Ordering};
+use std::sync::mpsc::channel;
+use std::sync::{Arc, Mutex};
+
+/// Represents LegacyMigrator.
+pub struct LegacyMigrator {
+    async_task: Arc<AsyncTask>,
+    initializer: Mutex<
+        Option<
+            Box<
+                dyn FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
+                    + Send
+                    + 'static,
+            >,
+        >,
+    >,
+    /// This atomic is used for cheap interior mutability. It is intended to prevent
+    /// expensive calls into the legacy migrator when the legacy database is empty.
+    /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
+    /// of time. This is tolerable in favor of the common case.
+    state: AtomicU8,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct RecentMigration {
+    uid: u32,
+    alias: String,
+}
+
+impl RecentMigration {
+    fn new(uid: u32, alias: String) -> Self {
+        Self { uid, alias }
+    }
+}
+
+enum BulkDeleteRequest {
+    Uid(u32),
+    User(u32),
+}
+
+struct LegacyMigratorState {
+    recently_migrated: HashSet<RecentMigration>,
+    recently_migrated_super_key: HashSet<u32>,
+    legacy_loader: Arc<LegacyBlobLoader>,
+    sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
+    db: KeystoreDB,
+}
+
+impl LegacyMigrator {
+    const WIFI_NAMESPACE: i64 = 102;
+    const AID_WIFI: u32 = 1010;
+
+    const STATE_UNINITIALIZED: u8 = 0;
+    const STATE_READY: u8 = 1;
+    const STATE_EMPTY: u8 = 2;
+
+    /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
+    /// worker.
+    pub fn new(async_task: Arc<AsyncTask>) -> Self {
+        Self {
+            async_task,
+            initializer: Default::default(),
+            state: AtomicU8::new(Self::STATE_UNINITIALIZED),
+        }
+    }
+
+    /// Initialization of the legacy migrator must be deferred, because keystore starts very
+    /// early, at a time when the data partition may not yet be mounted. So we cannot open
+    /// database connections until we get actual key load requests. This sets the function
+    /// that the legacy loader uses to connect to the database.
+    pub fn set_init<F>(&self, f_init: F) -> Result<()>
+    where
+        F: FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
+            + Send
+            + 'static,
+    {
+        let mut initializer = self.initializer.lock().expect("Failed to lock initializer.");
+
+        // If we are not uninitialized we have no business setting the initializer.
+        if self.state.load(Ordering::Relaxed) != Self::STATE_UNINITIALIZED {
+            return Ok(());
+        }
+
+        // Only set the initializer if it hasn't been set before.
+        if initializer.is_none() {
+            *initializer = Some(Box::new(f_init))
+        }
+
+        Ok(())
+    }
+
+    /// This function is called by the migration requestor to check if it is worth
+    /// making a migration request. It also transitions the state from UNINITIALIZED
+    /// to READY or EMPTY on first use. The deferred initialization is necessary, because
+    /// Keystore 2.0 runs early during boot, where data may not yet be mounted.
+    /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
+    /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
+    /// was not initialized and cannot be initialized.
+    fn check_state(&self) -> Result<u8> {
+        let mut first_try = true;
+        loop {
+            match (self.state.load(Ordering::Relaxed), first_try) {
+                (Self::STATE_EMPTY, _) => {
+                    return Ok(Self::STATE_EMPTY);
+                }
+                (Self::STATE_UNINITIALIZED, true) => {
+                    // If we find the legacy loader uninitialized, we grab the initializer lock,
+                    // check if the legacy database is empty, and if not, schedule an initialization
+                    // request. Coming out of the initializer lock, the state is either EMPTY or
+                    // READY.
+                    let mut initializer = self.initializer.lock().unwrap();
+
+                    if let Some(initializer) = initializer.take() {
+                        let (db, sec_level_to_km_uuid, legacy_loader) = (initializer)();
+
+                        if legacy_loader.is_empty().context(
+                            "In check_state: Trying to check if the legacy database is empty.",
+                        )? {
+                            self.state.store(Self::STATE_EMPTY, Ordering::Relaxed);
+                            return Ok(Self::STATE_EMPTY);
+                        }
+
+                        self.async_task.queue_hi(move |shelf| {
+                            shelf.get_or_put_with(|| LegacyMigratorState {
+                                recently_migrated: Default::default(),
+                                recently_migrated_super_key: Default::default(),
+                                legacy_loader,
+                                sec_level_to_km_uuid,
+                                db,
+                            });
+                        });
+
+                        // It is safe to set this here even though the async task may not yet have
+                        // run because any thread observing this will not be able to schedule a
+                        // task that can run before the initialization.
+                        // Also we can only transition out of this state while having the
+                        // initializer lock and having found an initializer.
+                        self.state.store(Self::STATE_READY, Ordering::Relaxed);
+                        return Ok(Self::STATE_READY);
+                    } else {
+                        // There is a chance that we just lost the race from state.load() to
+                        // grabbing the initializer mutex. If that is the case the state must
+                        // be EMPTY or READY after coming out of the lock. So we can give it
+                        // one more try.
+                        first_try = false;
+                        continue;
+                    }
+                }
+                (Self::STATE_UNINITIALIZED, false) => {
+                    // Okay, tough luck. The legacy loader was really completely uninitialized.
+                    return Err(Error::sys()).context(
+                        "In check_state: Legacy loader should not be called uninitialized.",
+                    );
+                }
+                (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
+                (s, _) => panic!("Unknown legacy migrator state. {} ", s),
+            }
+        }
+    }
+
+    /// List all aliases for uid in the legacy database.
+    pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
+        let uid = match (domain, namespace) {
+            (Domain::APP, namespace) => namespace as u32,
+            (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
+            _ => return Ok(Vec::new()),
+        };
+        self.do_serialized(move |state| state.list_uid(uid)).unwrap_or_else(|| Ok(Vec::new())).map(
+            |v| {
+                v.into_iter()
+                    .map(|alias| KeyDescriptor {
+                        domain,
+                        nspace: namespace,
+                        alias: Some(alias),
+                        blob: None,
+                    })
+                    .collect()
+            },
+        )
+    }
+
+    /// Sends the given closure to the migrator thread for execution after calling check_state.
+    /// Returns None if the database was empty and the request was not executed.
+    /// Otherwise returns Some with the result produced by the migration request.
+    /// The loader state may transition to STATE_EMPTY during the execution of this function.
+    fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
+    where
+        F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
+    {
+        // Short circuit if the database is empty or not initialized (error case).
+        match self.check_state().context("In do_serialized: Checking state.") {
+            Ok(LegacyMigrator::STATE_EMPTY) => return None,
+            Ok(LegacyMigrator::STATE_READY) => {}
+            Err(e) => return Some(Err(e)),
+            Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
+        }
+
+        // We have established that there may be a key in the legacy database.
+        // Now we schedule a migration request.
+        let (sender, receiver) = channel();
+        self.async_task.queue_hi(move |shelf| {
+            // Get the migrator state from the shelf.
+            // There may not be a state. This can happen if this migration request was scheduled
+            // before a previous request established that the legacy database was empty
+            // and removed the state from the shelf. Since we know now that the database
+            // is empty, we can return None here.
+            let (new_state, result) = if let Some(legacy_migrator_state) =
+                shelf.get_downcast_mut::<LegacyMigratorState>()
+            {
+                let result = f(legacy_migrator_state);
+                (legacy_migrator_state.check_empty(), Some(result))
+            } else {
+                (Self::STATE_EMPTY, None)
+            };
+
+            // If the migration request determined that the database is now empty, we discard
+            // the state from the shelf to free up the resources we won't need any longer.
+            if result.is_some() && new_state == Self::STATE_EMPTY {
+                shelf.remove_downcast_ref::<LegacyMigratorState>();
+            }
+
+            // Send the result to the requester.
+            if let Err(e) = sender.send((new_state, result)) {
+                log::error!("In do_serialized. Error in sending the result. {:?}", e);
+            }
+        });
+
+        let (new_state, result) = match receiver.recv() {
+            Err(e) => {
+                return Some(Err(e).context("In do_serialized. Failed to receive from the sender."))
+            }
+            Ok(r) => r,
+        };
+
+        // We can only transition to EMPTY but never back.
+        // The migrator never creates any legacy blobs.
+        if new_state == Self::STATE_EMPTY {
+            self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
+        }
+
+        result
+    }
+
+    /// Runs the key_accessor function and returns its result. If it returns an error and the
+    /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
+    /// the legacy database to the new database and runs the key_accessor function again if
+    /// the migration request was successful.
+    pub fn with_try_migrate<F, T>(
+        &self,
+        key: &KeyDescriptor,
+        caller_uid: u32,
+        key_accessor: F,
+    ) -> Result<T>
+    where
+        F: Fn() -> Result<T>,
+    {
+        // Access the key and return on success.
+        match key_accessor() {
+            Ok(result) => return Ok(result),
+            Err(e) => match e.root_cause().downcast_ref::<Error>() {
+                Some(&Error::Rc(ResponseCode::KEY_NOT_FOUND)) => {}
+                _ => return Err(e),
+            },
+        }
+
+        // Filter inputs. We can only load legacy app domain keys, plus a few special cases
+        // that we migrate transparently into an SELINUX domain.
+        let uid = match key {
+            KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
+            KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
+                match *nspace {
+                    Self::WIFI_NAMESPACE => Self::AID_WIFI,
+                    _ => {
+                        return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+                            .context(format!("No legacy keys for namespace {}", nspace))
+                    }
+                }
+            }
+            _ => {
+                return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+                    .context("No legacy keys for key descriptor.")
+            }
+        };
+
+        let key_clone = key.clone();
+        let result = self
+            .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));
+
+        if let Some(result) = result {
+            result?;
+            // After successful migration try again.
+            key_accessor()
+        } else {
+            Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
+        }
+    }
+
+    /// Calls key_accessor and returns the result on success. In the case of a KEY_NOT_FOUND error
+    /// this function makes a migration request and on success retries the key_accessor.
+    pub fn with_try_migrate_super_key<F, T>(
+        &self,
+        user_id: u32,
+        pw: &[u8],
+        mut key_accessor: F,
+    ) -> Result<Option<T>>
+    where
+        F: FnMut() -> Result<Option<T>>,
+    {
+        match key_accessor() {
+            Ok(Some(result)) => return Ok(Some(result)),
+            Ok(None) => {}
+            Err(e) => return Err(e),
+        }
+
+        let pw: ZVec = pw
+            .try_into()
+            .context("In with_try_migrate_super_key: copying the password into a zvec.")?;
+        let result = self.do_serialized(move |migrator_state| {
+            migrator_state.check_and_migrate_super_key(user_id, pw)
+        });
+
+        if let Some(result) = result {
+            result?;
+            // After successful migration try again.
+            key_accessor()
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Deletes all keys belonging to the given uid, migrating them into the database
+    /// for subsequent garbage collection if necessary.
+    pub fn bulk_delete_uid(&self, uid: u32, keep_non_super_encrypted_keys: bool) -> Result<()> {
+        let result = self.do_serialized(move |migrator_state| {
+            migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), keep_non_super_encrypted_keys)
+        });
+
+        result.unwrap_or(Ok(()))
+    }
+
+    /// Deletes all keys belonging to the given android user, migrating them into the database
+    /// for subsequent garbage collection if necessary.
+    pub fn bulk_delete_user(
+        &self,
+        user_id: u32,
+        keep_non_super_encrypted_keys: bool,
+    ) -> Result<()> {
+        let result = self.do_serialized(move |migrator_state| {
+            migrator_state
+                .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
+        });
+
+        result.unwrap_or(Ok(()))
+    }
+
+    /// Queries the legacy database for the presence of a super key for the given user.
+    pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
+        let result =
+            self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
+        result.unwrap_or(Ok(false))
+    }
+}
+
+impl LegacyMigratorState {
+    fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
+        let sec_level = if is_strongbox {
+            SecurityLevel::STRONGBOX
+        } else {
+            SecurityLevel::TRUSTED_ENVIRONMENT
+        };
+
+        self.sec_level_to_km_uuid.get(&sec_level).copied().ok_or_else(|| {
+            anyhow::anyhow!(Error::sys()).context("In get_km_uuid: No KM instance for blob.")
+        })
+    }
+
+    fn list_uid(&mut self, uid: u32) -> Result<Vec<String>> {
+        self.legacy_loader
+            .list_keystore_entries_for_uid(uid)
+            .context("In list_uid: Trying to list legacy entries.")
+    }
+
+    /// This is a key migration request that can run in the migrator thread. This should
+    /// be passed to do_serialized.
+    fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
+        let alias = key.alias.clone().ok_or_else(|| {
+            anyhow::anyhow!(Error::sys()).context(concat!(
+                "In check_and_migrate: Must be Some because ",
+                "our caller must not have called us otherwise."
+            ))
+        })?;
+
+        if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
+            return Ok(());
+        }
+
+        if key.domain == Domain::APP {
+            key.nspace = uid as i64;
+        }
+
+        // If the key is not found in the cache, try to load from the legacy database.
+        let (km_blob_params, user_cert, ca_cert) = self
+            .legacy_loader
+            .load_by_uid_alias(uid, &alias, None)
+            .context("In check_and_migrate: Trying to load legacy blob.")?;
+        let result = match km_blob_params {
+            Some((km_blob, params)) => {
+                let is_strongbox = km_blob.is_strongbox();
+                let (blob, mut blob_metadata) = match km_blob.take_value() {
+                    BlobValue::Encrypted { iv, tag, data } => {
+                        // Get super key id for user id.
+                        let user_id = uid_to_android_user(uid as u32);
+
+                        let super_key_id = match self
+                            .db
+                            .load_super_key(user_id)
+                            .context("In check_and_migrate: Failed to load super key")?
+                        {
+                            Some((_, entry)) => entry.id(),
+                            None => {
+                                // This might be the first time we access the super key,
+                                // and it may not have been migrated yet. We cannot import
+                                // the legacy super key now, because that requires
+                                // re-encrypting it, which is only possible while the user
+                                // is unlocked; if the user were unlocked, the super key
+                                // would already have been migrated.
+                                // We can check, though, whether the key exists. If it does,
+                                // we return Locked. Otherwise, we delete the key and return
+                                // NotFound, because the key can never be unlocked again.
+                                if self.legacy_loader.has_super_key(user_id) {
+                                    return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
+                                        "In check_and_migrate: Cannot migrate super key of this ",
+                                        "key while user is locked."
+                                    ));
+                                } else {
+                                    self.legacy_loader.remove_keystore_entry(uid, &alias).context(
+                                        concat!(
+                                            "In check_and_migrate: ",
+                                            "Trying to remove obsolete key."
+                                        ),
+                                    )?;
+                                    return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+                                        .context("In check_and_migrate: Obsolete key.");
+                                }
+                            }
+                        };
+
+                        let mut blob_metadata = BlobMetaData::new();
+                        blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+                        blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+                        blob_metadata
+                            .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+                        (LegacyBlob::Vec(data), blob_metadata)
+                    }
+                    BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
+                    _ => {
+                        return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+                            .context("In check_and_migrate: Legacy key has unexpected type.")
+                    }
+                };
+
+                let km_uuid = self
+                    .get_km_uuid(is_strongbox)
+                    .context("In check_and_migrate: Trying to get KM UUID")?;
+                blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+
+                let mut metadata = KeyMetaData::new();
+                let creation_date = DateTime::now()
+                    .context("In check_and_migrate: Trying to make creation time.")?;
+                metadata.add(KeyMetaEntry::CreationDate(creation_date));
+
+                // Store legacy key in the database.
+                self.db
+                    .store_new_key(
+                        &key,
+                        &params,
+                        &(&blob, &blob_metadata),
+                        &CertificateInfo::new(user_cert, ca_cert),
+                        &metadata,
+                        &km_uuid,
+                    )
+                    .context("In check_and_migrate.")?;
+                Ok(())
+            }
+            None => {
+                if let Some(ca_cert) = ca_cert {
+                    self.db
+                        .store_new_certificate(&key, &ca_cert, &KEYSTORE_UUID)
+                        .context("In check_and_migrate: Failed to insert new certificate.")?;
+                    Ok(())
+                } else {
+                    Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+                        .context("In check_and_migrate: Legacy key not found.")
+                }
+            }
+        };
+
+        match result {
+            Ok(()) => {
+                // Add the key to the migrated_keys list.
+                self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
+                // Delete legacy key from the file system
+                self.legacy_loader
+                    .remove_keystore_entry(uid, &alias)
+                    .context("In check_and_migrate: Trying to remove migrated key.")?;
+                Ok(())
+            }
+            Err(e) => Err(e),
+        }
+    }
+
+    fn check_and_migrate_super_key(&mut self, user_id: u32, pw: ZVec) -> Result<()> {
+        if self.recently_migrated_super_key.contains(&user_id) {
+            return Ok(());
+        }
+
+        if let Some(super_key) = self
+            .legacy_loader
+            .load_super_key(user_id, &pw)
+            .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
+        {
+            let (blob, blob_metadata) =
+                crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, &pw)
+                    .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;
+
+            self.db.store_super_key(user_id, &(&blob, &blob_metadata)).context(concat!(
+                "In check_and_migrate_super_key: ",
+                "Trying to insert legacy super_key into the database."
+            ))?;
+            self.legacy_loader.remove_super_key(user_id);
+            self.recently_migrated_super_key.insert(user_id);
+            Ok(())
+        } else {
+            Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+                .context("In check_and_migrate_super_key: No key found to migrate.")
+        }
+    }
+
+    /// Key migrator request to be run by do_serialized.
+    /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
+    fn bulk_delete(
+        &mut self,
+        bulk_delete_request: BulkDeleteRequest,
+        keep_non_super_encrypted_keys: bool,
+    ) -> Result<()> {
+        let (aliases, user_id) = match bulk_delete_request {
+            BulkDeleteRequest::Uid(uid) => (
+                self.legacy_loader
+                    .list_keystore_entries_for_uid(uid)
+                    .context("In bulk_delete: Trying to get aliases for uid.")
+                    .map(|aliases| {
+                        let mut h = HashMap::<u32, HashSet<String>>::new();
+                        h.insert(uid, aliases.into_iter().collect());
+                        h
+                    })?,
+                uid_to_android_user(uid),
+            ),
+            BulkDeleteRequest::User(user_id) => (
+                self.legacy_loader
+                    .list_keystore_entries_for_user(user_id)
+                    .context("In bulk_delete: Trying to get aliases for user_id.")?,
+                user_id,
+            ),
+        };
+
+        let super_key_id = self
+            .db
+            .load_super_key(user_id)
+            .context("In bulk_delete: Failed to load super key")?
+            .map(|(_, entry)| entry.id());
+
+        for (uid, alias) in aliases
+            .into_iter()
+            .map(|(uid, aliases)| aliases.into_iter().map(move |alias| (uid, alias)))
+            .flatten()
+        {
+            let (km_blob_params, _, _) = self
+                .legacy_loader
+                .load_by_uid_alias(uid, &alias, None)
+                .context("In bulk_delete: Trying to load legacy blob.")?;
+
+            // Determine if the key needs special handling to be deleted.
+            let (need_gc, is_super_encrypted) = km_blob_params
+                .as_ref()
+                .map(|(blob, params)| {
+                    (
+                        params.iter().any(|kp| {
+                            KeyParameterValue::RollbackResistance == *kp.key_parameter_value()
+                        }),
+                        blob.is_encrypted(),
+                    )
+                })
+                .unwrap_or((false, false));
+
+            if keep_non_super_encrypted_keys && !is_super_encrypted {
+                continue;
+            }
+
+            if need_gc {
+                let mark_deleted = match km_blob_params
+                    .map(|(blob, _)| (blob.is_strongbox(), blob.take_value()))
+                {
+                    Some((is_strongbox, BlobValue::Encrypted { iv, tag, data })) => {
+                        let mut blob_metadata = BlobMetaData::new();
+                        if let (Ok(km_uuid), Some(super_key_id)) =
+                            (self.get_km_uuid(is_strongbox), super_key_id)
+                        {
+                            blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+                            blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+                            blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+                            blob_metadata
+                                .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+                            Some((LegacyBlob::Vec(data), blob_metadata))
+                        } else {
+                            // Oh well - we tried our best, but if we cannot determine which
+                            // KeyMint instance we have to send this blob to, we cannot
+                            // do more than delete the key from the file system.
+                            // And if we don't know which key wraps this key we cannot
+                            // unwrap it for KeyMint either.
+                            None
+                        }
+                    }
+                    Some((_, BlobValue::Decrypted(data))) => {
+                        Some((LegacyBlob::ZVec(data), BlobMetaData::new()))
+                    }
+                    _ => None,
+                };
+
+                if let Some((blob, blob_metadata)) = mark_deleted {
+                    self.db.set_deleted_blob(&blob, &blob_metadata).context(concat!(
+                        "In bulk_delete: Trying to insert deleted ",
+                        "blob into the database for garbage collection."
+                    ))?;
+                }
+            }
+
+            self.legacy_loader
+                .remove_keystore_entry(uid, &alias)
+                .context("In bulk_delete: Trying to remove migrated key.")?;
+        }
+        Ok(())
+    }
+
+    fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
+        Ok(self.recently_migrated_super_key.contains(&user_id)
+            || self.legacy_loader.has_super_key(user_id))
+    }
+
+    fn check_empty(&self) -> u8 {
+        if self.legacy_loader.is_empty().unwrap_or(false) {
+            LegacyMigrator::STATE_EMPTY
+        } else {
+            LegacyMigrator::STATE_READY
+        }
+    }
+}
+
+enum LegacyBlob {
+    Vec(Vec<u8>),
+    ZVec(ZVec),
+}
+
+impl Deref for LegacyBlob {
+    type Target = [u8];
+
+    fn deref(&self) -> &Self::Target {
+        match self {
+            Self::Vec(v) => &v,
+            Self::ZVec(v) => &v,
+        }
+    }
+}
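
The new legacy_migrator.rs above serializes all migration work through AsyncTask and uses an
atomic state to short-circuit requests once the legacy store is known to be empty. Below is a
standalone, std-only sketch of that do_serialized pattern; Migrator, the STATE_* constants, and
the worker thread are stand-ins, not the keystore2 types:

    use std::sync::atomic::{AtomicU8, Ordering};
    use std::sync::mpsc::channel;
    use std::thread;

    const STATE_READY: u8 = 1;
    const STATE_EMPTY: u8 = 2;

    struct Migrator {
        state: AtomicU8,
    }

    impl Migrator {
        /// Run `f` on a worker thread and hand its result back, unless the store is
        /// already known to be empty, in which case no request is made at all.
        fn do_serialized<T, F>(&self, f: F) -> Option<T>
        where
            T: Send + 'static,
            F: FnOnce() -> (u8, T) + Send + 'static,
        {
            if self.state.load(Ordering::Relaxed) == STATE_EMPTY {
                return None; // Cheap short circuit, like the real migrator.
            }
            let (sender, receiver) = channel();
            // Stand-in for AsyncTask::queue_hi: execute the request off-thread.
            thread::spawn(move || {
                let _ = sender.send(f());
            });
            let (new_state, result) = receiver.recv().ok()?;
            // The state only ever moves towards EMPTY, never back.
            if new_state == STATE_EMPTY {
                self.state.store(STATE_EMPTY, Ordering::Relaxed);
            }
            Some(result)
        }
    }

    fn main() {
        let m = Migrator { state: AtomicU8::new(STATE_READY) };
        assert_eq!(m.do_serialized(|| (STATE_EMPTY, 42)), Some(42));
        // Subsequent requests are skipped because the state is now EMPTY.
        assert_eq!(m.do_serialized(|| (STATE_READY, 43)), None);
    }
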
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index f9554ea..358fce8 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -24,11 +24,13 @@
 /// Internal Representation of Key Parameter and convenience functions.
 pub mod key_parameter;
 pub mod legacy_blob;
+pub mod legacy_migrator;
 pub mod operation;
 pub mod permission;
 pub mod remote_provisioning;
 pub mod security_level;
 pub mod service;
+pub mod user_manager;
 pub mod utils;
 
 mod async_task;
diff --git a/keystore2/src/permission.rs b/keystore2/src/permission.rs
index 0f0ca04..576ac3f 100644
--- a/keystore2/src/permission.rs
+++ b/keystore2/src/permission.rs
@@ -299,9 +299,15 @@
         /// Checked when Keystore 2.0 gets locked.
         Lock = 0x10,       selinux name: lock;
         /// Checked when Keystore 2.0 shall be reset.
-        Reset = 0x20,   selinux name: reset;
+        Reset = 0x20,    selinux name: reset;
         /// Checked when Keystore 2.0 shall be unlocked.
-        Unlock = 0x40,  selinux name: unlock;
+        Unlock = 0x40,    selinux name: unlock;
+        /// Checked when user is added or removed.
+        ChangeUser = 0x80,    selinux name: change_user;
+        /// Checked when password of the user is changed.
+        ChangePassword = 0x100,    selinux name: change_password;
+        /// Checked when a UID is cleared.
+        ClearUID = 0x200,    selinux name: clear_uid;
     }
 );
 
@@ -659,6 +665,11 @@
         assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::lock()).is_ok());
         assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::reset()).is_ok());
         assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::unlock()).is_ok());
+        assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::change_user()).is_ok());
+        assert!(
+            check_keystore_permission(&system_server_ctx, KeystorePerm::change_password()).is_ok()
+        );
+        assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::clear_uid()).is_ok());
         let shell_ctx = Context::new("u:r:shell:s0")?;
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::add_auth()));
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::clear_ns()));
@@ -667,6 +678,9 @@
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::lock()));
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::reset()));
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::unlock()));
+        assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::change_user()));
+        assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::change_password()));
+        assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::clear_uid()));
         Ok(())
     }
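
The three new permissions map to the selinux names change_user, change_password, and clear_uid.
A hedged sketch of how a caller-facing entry point might gate on one of them, modeled on the
test above; the module paths and the wrapper name enforce_change_user are assumptions for
illustration, not verified against the tree:

    use anyhow::Result;
    use keystore2::permission::{check_keystore_permission, KeystorePerm};
    use selinux::Context;

    /// Hypothetical guard: fail unless the caller's context holds keystore2:change_user.
    fn enforce_change_user(caller_ctx: &Context) -> Result<()> {
        check_keystore_permission(caller_ctx, KeystorePerm::change_user())?;
        Ok(())
    }
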
 
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index fe38504..d606b6a 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -19,28 +19,54 @@
 //! certificate chains signed by some root authority and stored in a keystore SQLite
 //! DB.
 
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use std::collections::HashMap;
 
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+    IRemotelyProvisionedComponent::IRemotelyProvisionedComponent, MacedPublicKey::MacedPublicKey,
+    ProtectedData::ProtectedData, SecurityLevel::SecurityLevel,
+};
 use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
     AttestationPoolStatus::AttestationPoolStatus, IRemoteProvisioning::BnRemoteProvisioning,
     IRemoteProvisioning::IRemoteProvisioning,
 };
 use android_security_remoteprovisioning::binder::Strong;
-use anyhow::Result;
+use anyhow::{Context, Result};
 
-use crate::error::map_or_log_err;
-use crate::globals::{get_keymint_device, DB};
+use crate::error::{self, map_or_log_err, map_rem_prov_error};
+use crate::globals::{get_keymint_device, get_remotely_provisioned_component, DB};
+use crate::utils::Asp;
 
 /// Implementation of the IRemoteProvisioning service.
+#[derive(Default)]
 pub struct RemoteProvisioningService {
-    // TODO(b/179222809): Add the remote provisioner hal aidl interface when available
+    device_by_sec_level: HashMap<SecurityLevel, Asp>,
 }
 
 impl RemoteProvisioningService {
+    fn get_dev_by_sec_level(
+        &self,
+        sec_level: &SecurityLevel,
+    ) -> Result<Strong<dyn IRemotelyProvisionedComponent>> {
+        if let Some(dev) = self.device_by_sec_level.get(sec_level) {
+            dev.get_interface().context("In get_dev_by_sec_level.")
+        } else {
+            Err(error::Error::sys()).context(concat!(
+                "In get_dev_by_sec_level: Remote instance for requested security level",
+                " not found."
+            ))
+        }
+    }
+
     /// Creates a new instance of the remote provisioning service
     pub fn new_native_binder() -> Result<Strong<dyn IRemoteProvisioning>> {
-        let result = BnRemoteProvisioning::new_binder(Self {});
-        Ok(result)
+        let mut result: Self = Default::default();
+        let dev = get_remotely_provisioned_component(&SecurityLevel::TRUSTED_ENVIRONMENT)
+            .context("In new_native_binder: Failed to get TEE Remote Provisioner instance.")?;
+        result.device_by_sec_level.insert(SecurityLevel::TRUSTED_ENVIRONMENT, dev);
+        if let Ok(dev) = get_remotely_provisioned_component(&SecurityLevel::STRONGBOX) {
+            result.device_by_sec_level.insert(SecurityLevel::STRONGBOX, dev);
+        }
+        Ok(BnRemoteProvisioning::new_binder(result))
     }
 
     /// Populates the AttestationPoolStatus parcelable with information about how many
@@ -54,6 +80,11 @@
         let (_, _, uuid) = get_keymint_device(&sec_level)?;
         DB.with::<_, Result<AttestationPoolStatus>>(|db| {
             let mut db = db.borrow_mut();
+            // delete_expired_attestation_keys is always safe to call, and will remove anything
+            // older than the date at the time of calling. No work should be done on the
+            // attestation keys unless the pool status is checked first, so this call should be
+            // enough to routinely clean out expired keys.
+            db.delete_expired_attestation_keys()?;
             Ok(db.get_attestation_pool_status(expired_by, &uuid)?)
         })
     }
@@ -68,15 +99,34 @@
     /// baked in root of trust in the underlying IRemotelyProvisionedComponent instance.
     pub fn generate_csr(
         &self,
-        _test_mode: bool,
-        _num_csr: i32,
-        _eek: &[u8],
-        _challenge: &[u8],
-        _sec_level: SecurityLevel,
+        test_mode: bool,
+        num_csr: i32,
+        eek: &[u8],
+        challenge: &[u8],
+        sec_level: SecurityLevel,
+        protected_data: &mut ProtectedData,
     ) -> Result<Vec<u8>> {
-        // TODO(b/179222809): implement with actual remote provisioner AIDL when available. For now
-        //       it isnice to have some junk values
-        Ok(vec![0, 1, 3, 3])
+        let dev = self.get_dev_by_sec_level(&sec_level)?;
+        let (_, _, uuid) = get_keymint_device(&sec_level)?;
+        let keys_to_sign = DB.with::<_, Result<Vec<MacedPublicKey>>>(|db| {
+            let mut db = db.borrow_mut();
+            Ok(db
+                .fetch_unsigned_attestation_keys(num_csr, &uuid)?
+                .iter()
+                .map(|key| MacedPublicKey { macedKey: key.to_vec() })
+                .collect())
+        })?;
+        let mut mac = Vec::<u8>::with_capacity(32);
+        map_rem_prov_error(dev.generateCertificateRequest(
+            test_mode,
+            &keys_to_sign,
+            eek,
+            challenge,
+            &mut mac,
+            protected_data,
+        ))
+        .context("In generate_csr: Failed to generate csr")?;
+        Ok(mac)
     }
 
     /// Provisions a certificate chain for a key whose CSR was included in generate_csr. The
@@ -87,6 +137,7 @@
     pub fn provision_cert_chain(
         &self,
         public_key: &[u8],
+        batch_cert: &[u8],
         certs: &[u8],
         expiration_date: i64,
         sec_level: SecurityLevel,
@@ -96,6 +147,7 @@
             let (_, _, uuid) = get_keymint_device(&sec_level)?;
             Ok(db.store_signed_attestation_certificate_chain(
                 public_key,
+                batch_cert,
                 certs, /* DER encoded certificate chain */
                 expiration_date,
                 &uuid,
@@ -107,8 +159,35 @@
     /// `is_test_mode` indicates whether or not the returned public key should be marked as being
     /// for testing in order to differentiate them from private keys. If the call is successful,
     /// the key pair is then added to the database.
-    pub fn generate_key_pair(&self, _is_test_mode: bool, _sec_level: SecurityLevel) -> Result<()> {
-        Ok(())
+    pub fn generate_key_pair(&self, is_test_mode: bool, sec_level: SecurityLevel) -> Result<()> {
+        let (_, _, uuid) = get_keymint_device(&sec_level)?;
+        let dev = self.get_dev_by_sec_level(&sec_level)?;
+        let mut maced_key = MacedPublicKey { macedKey: Vec::new() };
+        let priv_key =
+            map_rem_prov_error(dev.generateEcdsaP256KeyPair(is_test_mode, &mut maced_key))
+                .context("In generate_key_pair: Failed to generate ECDSA keypair.")?;
+        // TODO(b/180392379): This is a brittle hack that relies on the consistent formatting of
+        //                    the returned CBOR blob in order to extract the public key.
+        let data = &maced_key.macedKey;
+        if data.len() < 85 {
+            return Err(error::Error::sys()).context(concat!(
+                "In generate_key_pair: CBOR blob returned from ",
+                "RemotelyProvisionedComponent is definitely malformed or empty."
+            ));
+        }
+        let mut raw_key: Vec<u8> = vec![0; 64];
+        raw_key[0..32].clone_from_slice(&data[18..18 + 32]);
+        raw_key[32..64].clone_from_slice(&data[53..53 + 32]);
+        DB.with::<_, Result<()>>(|db| {
+            let mut db = db.borrow_mut();
+            Ok(db.create_attestation_key_entry(&maced_key.macedKey, &raw_key, &priv_key, &uuid)?)
+        })
+    }
+
+    /// Checks the security level of each available IRemotelyProvisionedComponent hal and returns
+    /// all levels in an array to the caller.
+    pub fn get_security_levels(&self) -> Result<Vec<SecurityLevel>> {
+        Ok(self.device_by_sec_level.keys().cloned().collect())
     }
 }
 
@@ -132,18 +211,26 @@
         eek: &[u8],
         challenge: &[u8],
         sec_level: SecurityLevel,
+        protected_data: &mut ProtectedData,
     ) -> binder::public_api::Result<Vec<u8>> {
-        map_or_log_err(self.generate_csr(test_mode, num_csr, eek, challenge, sec_level), Ok)
+        map_or_log_err(
+            self.generate_csr(test_mode, num_csr, eek, challenge, sec_level, protected_data),
+            Ok,
+        )
     }
 
     fn provisionCertChain(
         &self,
         public_key: &[u8],
+        batch_cert: &[u8],
         certs: &[u8],
         expiration_date: i64,
         sec_level: SecurityLevel,
     ) -> binder::public_api::Result<()> {
-        map_or_log_err(self.provision_cert_chain(public_key, certs, expiration_date, sec_level), Ok)
+        map_or_log_err(
+            self.provision_cert_chain(public_key, batch_cert, certs, expiration_date, sec_level),
+            Ok,
+        )
     }
 
     fn generateKeyPair(
@@ -153,4 +240,8 @@
     ) -> binder::public_api::Result<()> {
         map_or_log_err(self.generate_key_pair(is_test_mode, sec_level), Ok)
     }
+
+    fn getSecurityLevels(&self) -> binder::public_api::Result<Vec<SecurityLevel>> {
+        map_or_log_err(self.get_security_levels(), Ok)
+    }
 }
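
generate_key_pair above pulls the raw 64-byte public key out of the MACed CBOR blob at fixed
offsets, which is exactly the brittleness TODO(b/180392379) refers to. The same extraction,
restated as a standalone function for illustration only; the offsets assume the consistent
CBOR layout described in that TODO:

    /// Extracts the raw 64-byte P-256 public key from a MACed key blob, assuming the
    /// fixed CBOR layout noted in the TODO above.
    fn extract_raw_p256_key(maced_key: &[u8]) -> Option<[u8; 64]> {
        if maced_key.len() < 85 {
            return None; // Too short to contain both 32-byte halves of the key.
        }
        let mut raw_key = [0u8; 64];
        raw_key[0..32].copy_from_slice(&maced_key[18..18 + 32]); // first 32 bytes
        raw_key[32..64].copy_from_slice(&maced_key[53..53 + 32]); // second 32 bytes
        Some(raw_key)
    }
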
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index 37e43c6..5e1ce84 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -32,9 +32,11 @@
     KeyMetadata::KeyMetadata, KeyParameters::KeyParameters,
 };
 
-use crate::globals::ENFORCEMENTS;
+use crate::database::{CertificateInfo, KeyIdGuard};
+use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
 use crate::key_parameter::KeyParameter as KsKeyParam;
 use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
+use crate::super_key::{KeyBlob, SuperKeyManager};
 use crate::utils::{check_key_permission, uid_to_android_user, Asp};
 use crate::{
     database::{
@@ -46,10 +48,6 @@
     permission::KeyPerm,
 };
 use crate::{
-    database::{CertificateInfo, KeyIdGuard},
-    globals::DB,
-};
-use crate::{
     error::{self, map_km_error, map_or_log_err, Error, ErrorCode},
     utils::key_characteristics_to_internal,
 };
@@ -100,6 +98,7 @@
         key: KeyDescriptor,
         creation_result: KeyCreationResult,
         user_id: u32,
+        flags: Option<i32>,
     ) -> Result<KeyMetadata> {
         let KeyCreationResult {
             keyBlob: key_blob,
@@ -132,6 +131,20 @@
             SecurityLevel::SOFTWARE,
         ));
 
+        let (key_blob, mut blob_metadata) = DB
+            .with(|db| {
+                SUPER_KEY.handle_super_encryption_on_key_init(
+                    &mut db.borrow_mut(),
+                    &LEGACY_MIGRATOR,
+                    &(key.domain),
+                    &key_parameters,
+                    flags,
+                    user_id,
+                    &key_blob,
+                )
+            })
+            .context("In store_new_key. Failed to handle super encryption.")?;
+
         let creation_date = DateTime::now().context("Trying to make creation time.")?;
 
         let key = match key.domain {
@@ -142,7 +155,6 @@
                 .with::<_, Result<KeyDescriptor>>(|db| {
                     let mut key_metadata = KeyMetaData::new();
                     key_metadata.add(KeyMetaEntry::CreationDate(creation_date));
-                    let mut blob_metadata = BlobMetaData::new();
                     blob_metadata.add(BlobMetaEntry::KmUuid(self.km_uuid));
 
                     let mut db = db.borrow_mut();
@@ -202,19 +214,21 @@
                     },
                     None,
                     None,
-                    None,
+                    BlobMetaData::new(),
                 )
             }
             _ => {
                 let (key_id_guard, mut key_entry) = DB
                     .with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
-                        db.borrow_mut().load_key_entry(
-                            &key,
-                            KeyType::Client,
-                            KeyEntryLoadBits::KM,
-                            caller_uid,
-                            |k, av| check_key_permission(KeyPerm::use_(), k, &av),
-                        )
+                        LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+                            db.borrow_mut().load_key_entry(
+                                &key,
+                                KeyType::Client,
+                                KeyEntryLoadBits::KM,
+                                caller_uid,
+                                |k, av| check_key_permission(KeyPerm::use_(), k, &av),
+                            )
+                        })
                     })
                     .context("In create_operation: Failed to load key blob.")?;
 
@@ -229,7 +243,7 @@
                     &scoping_blob,
                     Some((key_id_guard.id(), key_entry.into_key_parameters())),
                     Some(key_id_guard),
-                    Some(blob_metadata),
+                    blob_metadata,
                 )
             }
         };
@@ -258,6 +272,12 @@
 
         let immediate_hat = immediate_hat.unwrap_or_default();
 
+        let user_id = uid_to_android_user(caller_uid);
+
+        let km_blob = SUPER_KEY
+            .unwrap_key_if_required(&blob_metadata, km_blob)
+            .context("In create_operation. Failed to handle super encryption.")?;
+
         let km_dev: Strong<dyn IKeyMintDevice> = self
             .keymint
             .get_interface()
@@ -267,7 +287,7 @@
             .upgrade_keyblob_if_required_with(
                 &*km_dev,
                 key_id_guard,
-                &(km_blob, blob_metadata.as_ref()),
+                &(&km_blob, &blob_metadata),
                 &operation_parameters,
                 |blob| loop {
                     match map_km_error(km_dev.begin(
@@ -403,7 +423,7 @@
             .context("In generate_key: While generating Key")?;
 
         let user_id = uid_to_android_user(caller_uid);
-        self.store_new_key(key, creation_result, user_id).context("In generate_key.")
+        self.store_new_key(key, creation_result, user_id, Some(flags)).context("In generate_key.")
     }
 
     fn get_attest_key(&self, key: &KeyDescriptor, caller_uid: u32) -> Result<AttestationKey> {
@@ -510,7 +530,7 @@
                 .context("In import_key: Trying to call importKey")?;
 
         let user_id = uid_to_android_user(caller_uid);
-        self.store_new_key(key, creation_result, user_id).context("In import_key.")
+        self.store_new_key(key, creation_result, user_id, Some(flags)).context("In import_key.")
     }
 
     fn import_wrapped_key(
@@ -544,6 +564,8 @@
         }
 
         let caller_uid = ThreadState::get_calling_uid();
+        let user_id = uid_to_android_user(caller_uid);
+
         let key = match key.domain {
             Domain::APP => KeyDescriptor {
                 domain: key.domain,
@@ -563,26 +585,29 @@
         // Import_wrapped_key requires the rebind permission for the new key.
         check_key_permission(KeyPerm::rebind(), &key, &None).context("In import_wrapped_key.")?;
 
-        let (wrapping_key_id_guard, wrapping_key_entry) = DB
+        let (wrapping_key_id_guard, mut wrapping_key_entry) = DB
             .with(|db| {
-                db.borrow_mut().load_key_entry(
-                    &wrapping_key,
-                    KeyType::Client,
-                    KeyEntryLoadBits::KM,
-                    caller_uid,
-                    |k, av| check_key_permission(KeyPerm::use_(), k, &av),
-                )
+                LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+                    db.borrow_mut().load_key_entry(
+                        &wrapping_key,
+                        KeyType::Client,
+                        KeyEntryLoadBits::KM,
+                        caller_uid,
+                        |k, av| check_key_permission(KeyPerm::use_(), k, &av),
+                    )
+                })
             })
             .context("Failed to load wrapping key.")?;
-        let (wrapping_key_blob, wrapping_blob_metadata) = match wrapping_key_entry.key_blob_info() {
-            Some((blob, metadata)) => (blob, metadata),
-            None => {
-                return Err(error::Error::sys()).context(concat!(
-                    "No km_blob after successfully loading key.",
-                    " This should never happen."
-                ))
-            }
-        };
+
+        let (wrapping_key_blob, wrapping_blob_metadata) = wrapping_key_entry
+            .take_key_blob_info()
+            .ok_or_else(error::Error::sys)
+            .context("No km_blob after successfully loading key. This should never happen.")?;
+
+        let wrapping_key_blob =
+            SUPER_KEY.unwrap_key_if_required(&wrapping_blob_metadata, &wrapping_key_blob).context(
+                "In import_wrapped_key. Failed to handle super encryption for wrapping key.",
+            )?;
 
         // km_dev.importWrappedKey does not return a certificate chain.
         // TODO Do we assume that all wrapped keys are symmetric?
@@ -611,7 +636,7 @@
             .upgrade_keyblob_if_required_with(
                 &*km_dev,
                 Some(wrapping_key_id_guard),
-                &(&wrapping_key_blob, Some(&wrapping_blob_metadata)),
+                &(&wrapping_key_blob, &wrapping_blob_metadata),
                 &[],
                 |wrapping_blob| {
                     let creation_result = map_km_error(km_dev.importWrappedKey(
@@ -627,8 +652,7 @@
             )
             .context("In import_wrapped_key.")?;
 
-        let user_id = uid_to_android_user(caller_uid);
-        self.store_new_key(key, creation_result, user_id)
+        self.store_new_key(key, creation_result, user_id, None)
             .context("In import_wrapped_key: Trying to store the new key.")
     }
 
@@ -636,7 +660,7 @@
         &self,
         km_dev: &dyn IKeyMintDevice,
         key_id_guard: Option<KeyIdGuard>,
-        blob_info: &(&[u8], Option<&BlobMetaData>),
+        blob_info: &(&KeyBlob, &BlobMetaData),
         params: &[KeyParameter],
         f: F,
     ) -> Result<(T, Option<Vec<u8>>)>
@@ -647,13 +671,26 @@
             Err(Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
                 let upgraded_blob = map_km_error(km_dev.upgradeKey(blob_info.0, params))
                     .context("In upgrade_keyblob_if_required_with: Upgrade failed.")?;
+
+                let (upgraded_blob_to_be_stored, blob_metadata) =
+                    SuperKeyManager::reencrypt_on_upgrade_if_required(blob_info.0, &upgraded_blob)
+                        .context(
+                        "In upgrade_keyblob_if_required_with: Failed to handle super encryption.",
+                    )?;
+
+                let mut blob_metadata = blob_metadata.unwrap_or_else(BlobMetaData::new);
+                if let Some(uuid) = blob_info.1.km_uuid() {
+                    blob_metadata.add(BlobMetaEntry::KmUuid(*uuid));
+                }
+
                 key_id_guard.map_or(Ok(()), |key_id_guard| {
                     DB.with(|db| {
-                        db.borrow_mut().set_blob(
+                        let mut db = db.borrow_mut();
+                        db.set_blob(
                             &key_id_guard,
                             SubComponentType::KEY_BLOB,
-                            Some(&upgraded_blob),
-                            blob_info.1,
+                            Some(&upgraded_blob_to_be_stored),
+                            Some(&blob_metadata),
                         )
                     })
                     .context(concat!(
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index efd62e3..3a4bf82 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -27,7 +27,10 @@
     check_grant_permission, check_key_permission, check_keystore_permission,
     key_parameters_to_authorizations, Asp,
 };
-use crate::{database::Uuid, globals::DB};
+use crate::{
+    database::Uuid,
+    globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_MIGRATOR},
+};
 use crate::{database::KEYSTORE_UUID, permission};
 use crate::{
     database::{KeyEntryLoadBits, KeyType, SubComponentType},
@@ -73,6 +76,15 @@
             result.uuid_by_sec_level.insert(SecurityLevel::STRONGBOX, uuid);
         }
 
+        let uuid_by_sec_level = result.uuid_by_sec_level.clone();
+        LEGACY_MIGRATOR
+            .set_init(move || {
+                (create_thread_local_db(), uuid_by_sec_level, LEGACY_BLOB_LOADER.clone())
+            })
+            .context(
+                "In KeystoreService::new_native_binder: Trying to initialize the legacy migrator.",
+            )?;
+
         let result = BnKeystoreService::new_binder(result);
         result.as_binder().set_requesting_sid(true);
         Ok(result)
@@ -112,15 +124,18 @@
     }
 
     fn get_key_entry(&self, key: &KeyDescriptor) -> Result<KeyEntryResponse> {
+        let caller_uid = ThreadState::get_calling_uid();
         let (key_id_guard, mut key_entry) = DB
             .with(|db| {
-                db.borrow_mut().load_key_entry(
-                    &key,
-                    KeyType::Client,
-                    KeyEntryLoadBits::PUBLIC,
-                    ThreadState::get_calling_uid(),
-                    |k, av| check_key_permission(KeyPerm::get_info(), k, &av),
-                )
+                LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+                    db.borrow_mut().load_key_entry(
+                        &key,
+                        KeyType::Client,
+                        KeyEntryLoadBits::PUBLIC,
+                        caller_uid,
+                        |k, av| check_key_permission(KeyPerm::get_info(), k, &av),
+                    )
+                })
             })
             .context("In get_key_entry, while trying to load key info.")?;
 
@@ -161,18 +176,20 @@
         public_cert: Option<&[u8]>,
         certificate_chain: Option<&[u8]>,
     ) -> Result<()> {
+        let caller_uid = ThreadState::get_calling_uid();
         DB.with::<_, Result<()>>(|db| {
-            let mut db = db.borrow_mut();
-            let entry = match db.load_key_entry(
-                &key,
-                KeyType::Client,
-                KeyEntryLoadBits::NONE,
-                ThreadState::get_calling_uid(),
-                |k, av| {
-                    check_key_permission(KeyPerm::update(), k, &av)
-                        .context("In update_subcomponent.")
-                },
-            ) {
+            let entry = match LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+                db.borrow_mut().load_key_entry(
+                    &key,
+                    KeyType::Client,
+                    KeyEntryLoadBits::NONE,
+                    caller_uid,
+                    |k, av| {
+                        check_key_permission(KeyPerm::update(), k, &av)
+                            .context("In update_subcomponent.")
+                    },
+                )
+            }) {
                 Err(e) => match e.root_cause().downcast_ref::<Error>() {
                     Some(Error::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(None),
                     _ => Err(e),
@@ -181,6 +198,7 @@
             }
             .context("Failed to load key entry.")?;
 
+            let mut db = db.borrow_mut();
             if let Some((key_id_guard, key_entry)) = entry {
                 db.set_blob(&key_id_guard, SubComponentType::CERT, public_cert, None)
                     .context("Failed to update cert subcomponent.")?;
@@ -258,17 +276,31 @@
             Ok(()) => {}
         };
 
-        DB.with(|db| {
-            let mut db = db.borrow_mut();
-            db.list(k.domain, k.nspace)
-        })
+        let mut result = LEGACY_MIGRATOR
+            .list_uid(k.domain, k.nspace)
+            .context("In list_entries: Trying to list legacy keys.")?;
+
+        result.append(
+            &mut DB
+                .with(|db| {
+                    let mut db = db.borrow_mut();
+                    db.list(k.domain, k.nspace)
+                })
+                .context("In list_entries: Trying to list keystore database.")?,
+        );
+
+        result.sort_unstable();
+        result.dedup();
+        Ok(result)
     }
 
     fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
         let caller_uid = ThreadState::get_calling_uid();
         DB.with(|db| {
-            db.borrow_mut().unbind_key(&key, KeyType::Client, caller_uid, |k, av| {
-                check_key_permission(KeyPerm::delete(), k, &av).context("During delete_key.")
+            LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+                db.borrow_mut().unbind_key(&key, KeyType::Client, caller_uid, |k, av| {
+                    check_key_permission(KeyPerm::delete(), k, &av).context("During delete_key.")
+                })
             })
         })
         .context("In delete_key: Trying to unbind the key.")?;
@@ -281,14 +313,17 @@
         grantee_uid: i32,
         access_vector: permission::KeyPermSet,
     ) -> Result<KeyDescriptor> {
+        let caller_uid = ThreadState::get_calling_uid();
         DB.with(|db| {
-            db.borrow_mut().grant(
-                &key,
-                ThreadState::get_calling_uid(),
-                grantee_uid as u32,
-                access_vector,
-                |k, av| check_grant_permission(*av, k).context("During grant."),
-            )
+            LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+                db.borrow_mut().grant(
+                    &key,
+                    caller_uid,
+                    grantee_uid as u32,
+                    access_vector,
+                    |k, av| check_grant_permission(*av, k).context("During grant."),
+                )
+            })
         })
         .context("In KeystoreService::grant.")
     }
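
The list_entries flow above appends the database listing to the legacy listing and then deduplicates, since an already-migrated key can appear in both sources. A minimal standalone sketch of that merge step, using plain String aliases in place of the actual descriptor type (names here are illustrative only, not the keystore2 API):

    fn merged_listing(mut legacy: Vec<String>, mut db: Vec<String>) -> Vec<String> {
        // Keys that were already migrated can show up in both sources, so the combined
        // listing is sorted and deduplicated before it is returned to the caller.
        legacy.append(&mut db);
        legacy.sort_unstable();
        legacy.dedup();
        legacy
    }

    fn main() {
        let legacy = vec!["alias_b".to_string(), "alias_a".to_string()];
        let db = vec!["alias_a".to_string(), "alias_c".to_string()];
        assert_eq!(merged_listing(legacy, db), ["alias_a", "alias_b", "alias_c"]);
    }
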
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 79f53bf..5ee685a 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -15,15 +15,18 @@
 #![allow(dead_code)]
 
 use crate::{
-    database::BlobMetaData, database::BlobMetaEntry, database::EncryptedBy, database::KeystoreDB,
-    error::Error, error::ResponseCode, legacy_blob::LegacyBlobLoader,
+    database::BlobMetaData, database::BlobMetaEntry, database::EncryptedBy, database::KeyEntry,
+    database::KeyType, database::KeystoreDB, enforcements::Enforcements, error::Error,
+    error::ResponseCode, key_parameter::KeyParameter, legacy_blob::LegacyBlobLoader,
+    legacy_migrator::LegacyMigrator,
 };
 use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
 use anyhow::{Context, Result};
 use keystore2_crypto::{
-    aes_gcm_decrypt, aes_gcm_encrypt, derive_key_from_password, generate_salt, ZVec,
-    AES_256_KEY_LENGTH,
+    aes_gcm_decrypt, aes_gcm_encrypt, derive_key_from_password, generate_aes256_key, generate_salt,
+    ZVec, AES_256_KEY_LENGTH,
 };
+use std::ops::Deref;
 use std::{
     collections::HashMap,
     sync::Arc,
@@ -39,13 +42,30 @@
     /// secret, that is itself derived from the user's lock screen knowledge factor (LSKF).
     /// When the user unlocks the device for the first time, this key is unlocked, i.e., decrypted,
     /// and stays memory resident until the device reboots.
-    per_boot: Option<Arc<ZVec>>,
+    per_boot: Option<SuperKey>,
     /// The screen lock key works like the per boot key with the distinction that it is cleared
     /// from memory when the screen lock is engaged.
     /// TODO the life cycle is not fully implemented at this time.
     screen_lock: Option<Arc<ZVec>>,
 }
 
+#[derive(Default, Clone)]
+pub struct SuperKey {
+    key: Arc<ZVec>,
+    // id of the super key in the database.
+    id: i64,
+}
+
+impl SuperKey {
+    pub fn get_key(&self) -> &Arc<ZVec> {
+        &self.key
+    }
+
+    pub fn get_id(&self) -> i64 {
+        self.id
+    }
+}
+
 #[derive(Default)]
 struct SkmState {
     user_keys: HashMap<UserId, UserSuperKeys>,
@@ -87,18 +107,17 @@
         data.key_index.clear();
     }
 
-    fn install_per_boot_key_for_user(&self, user: UserId, key_id: i64, key: ZVec) {
+    fn install_per_boot_key_for_user(&self, user: UserId, super_key: SuperKey) {
         let mut data = self.data.lock().unwrap();
-        let key = Arc::new(key);
-        data.key_index.insert(key_id, Arc::downgrade(&key));
-        data.user_keys.entry(user).or_default().per_boot = Some(key);
+        data.key_index.insert(super_key.id, Arc::downgrade(&(super_key.key)));
+        data.user_keys.entry(user).or_default().per_boot = Some(super_key);
     }
 
     fn get_key(&self, key_id: &i64) -> Option<Arc<ZVec>> {
         self.data.lock().unwrap().key_index.get(key_id).and_then(|k| k.upgrade())
     }
 
-    pub fn get_per_boot_key_by_user_id(&self, user_id: u32) -> Option<Arc<ZVec>> {
+    pub fn get_per_boot_key_by_user_id(&self, user_id: u32) -> Option<SuperKey> {
         let data = self.data.lock().unwrap();
         data.user_keys.get(&user_id).map(|e| e.per_boot.clone()).flatten()
     }
@@ -109,16 +128,16 @@
     /// a key derived from the given password and stored in the database.
     pub fn unlock_user_key(
         &self,
+        db: &mut KeystoreDB,
         user: UserId,
         pw: &[u8],
-        db: &mut KeystoreDB,
         legacy_blob_loader: &LegacyBlobLoader,
     ) -> Result<()> {
         let (_, entry) = db
             .get_or_create_key_with(
                 Domain::APP,
                 user as u64 as i64,
-                &"USER_SUPER_KEY",
+                KeystoreDB::USER_SUPER_KEY_ALIAS,
                 crate::database::KEYSTORE_UUID,
                 || {
                     // For backward compatibility we need to check if there is a super key present.
@@ -128,64 +147,22 @@
                     let super_key = match super_key {
                         None => {
                             // No legacy file was found. So we generate a new key.
-                            keystore2_crypto::generate_aes256_key()
+                            generate_aes256_key()
                                 .context("In create_new_key: Failed to generate AES 256 key.")?
                         }
                         Some(key) => key,
                     };
-                    // Regardless of whether we loaded an old AES128 key or a new AES256 key,
-                    // we derive a AES256 key and re-encrypt the key before we insert it in the
-                    // database. The length of the key is preserved by the encryption so we don't
-                    // need any extra flags to inform us which algorithm to use it with.
-                    let salt =
-                        generate_salt().context("In create_new_key: Failed to generate salt.")?;
-                    let derived_key = derive_key_from_password(pw, Some(&salt), AES_256_KEY_LENGTH)
-                        .context("In create_new_key: Failed to derive password.")?;
-                    let mut metadata = BlobMetaData::new();
-                    metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
-                    metadata.add(BlobMetaEntry::Salt(salt));
-                    let (encrypted_key, iv, tag) = aes_gcm_encrypt(&super_key, &derived_key)
-                        .context("In create_new_key: Failed to encrypt new super key.")?;
-                    metadata.add(BlobMetaEntry::Iv(iv));
-                    metadata.add(BlobMetaEntry::AeadTag(tag));
-                    Ok((encrypted_key, metadata))
+                    // Regardless of whether we loaded an old AES128 key or generated a new AES256
+                    // key as the super key, we derive an AES256 key from the password and re-encrypt
+                    // the super key before we insert it in the database. The length of the key is
+                    // preserved by the encryption so we don't need any extra flags to inform us
+                    // which algorithm to use it with.
+                    Self::encrypt_with_password(&super_key, pw).context("In create_new_key.")
                 },
             )
             .context("In unlock_user_key: Failed to get key id.")?;
 
-        if let Some((ref blob, ref metadata)) = entry.key_blob_info() {
-            let super_key = match (
-                metadata.encrypted_by(),
-                metadata.salt(),
-                metadata.iv(),
-                metadata.aead_tag(),
-            ) {
-                (Some(&EncryptedBy::Password), Some(salt), Some(iv), Some(tag)) => {
-                    let key = derive_key_from_password(pw, Some(salt), AES_256_KEY_LENGTH)
-                        .context("In unlock_user_key: Failed to generate key from password.")?;
-
-                    aes_gcm_decrypt(blob, iv, tag, &key)
-                        .context("In unlock_user_key: Failed to decrypt key blob.")?
-                }
-                (enc_by, salt, iv, tag) => {
-                    return Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
-                        concat!(
-                            "In unlock_user_key: Super key has incomplete metadata.",
-                            "Present: encrypted_by: {}, salt: {}, iv: {}, aead_tag: {}."
-                        ),
-                        enc_by.is_some(),
-                        salt.is_some(),
-                        iv.is_some(),
-                        tag.is_some(),
-                    ));
-                }
-            };
-            self.install_per_boot_key_for_user(user, entry.id(), super_key);
-        } else {
-            return Err(Error::Rc(ResponseCode::VALUE_CORRUPTED))
-                .context("In unlock_user_key: Key entry has no key blob.");
-        }
-
+        self.populate_cache_from_super_key_blob(user, entry, pw).context("In unlock_user_key.")?;
         Ok(())
     }
 
@@ -193,12 +170,13 @@
     /// The function queries `metadata.encrypted_by()` to determine the encryption key.
     /// It then check if the required key is memory resident, and if so decrypts the
     /// blob.
-    pub fn unwrap_key(&self, blob: &[u8], metadata: &BlobMetaData) -> Result<ZVec> {
+    pub fn unwrap_key<'a>(&self, blob: &'a [u8], metadata: &BlobMetaData) -> Result<KeyBlob<'a>> {
         match metadata.encrypted_by() {
             Some(EncryptedBy::KeyId(key_id)) => match self.get_key(key_id) {
-                Some(key) => {
-                    Self::unwrap_key_with_key(blob, metadata, &key).context("In unwrap_key.")
-                }
+                Some(key) => Ok(KeyBlob::Sensitive(
+                    Self::unwrap_key_with_key(blob, metadata, &key).context("In unwrap_key.")?,
+                    SuperKey { key: key.clone(), id: *key_id },
+                )),
                 None => Err(Error::Rc(ResponseCode::LOCKED))
                     .context("In unwrap_key: Key is not usable until the user entered their LSKF."),
             },
@@ -222,4 +200,415 @@
             )),
         }
     }
+
+    /// Checks if the user has set up LSKF, even when the super key cache is empty for the user.
+    pub fn super_key_exists_in_db_for_user(
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        user_id: u32,
+    ) -> Result<bool> {
+        let key_in_db = db
+            .key_exists(
+                Domain::APP,
+                user_id as u64 as i64,
+                KeystoreDB::USER_SUPER_KEY_ALIAS,
+                KeyType::Super,
+            )
+            .context("In super_key_exists_in_db_for_user.")?;
+
+        if key_in_db {
+            Ok(key_in_db)
+        } else {
+            legacy_migrator
+                .has_super_key(user_id)
+                .context("In super_key_exists_in_db_for_user: Trying to query legacy db.")
+        }
+    }
+
+    /// Checks if the user has already set up LSKF (i.e. a super key is persisted in the database
+    /// or the legacy database). If not, returns the Uninitialized state.
+    /// Otherwise, decrypts the super key using the password and returns the LskfUnlocked state.
+    pub fn check_and_unlock_super_key(
+        &self,
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        user_id: u32,
+        pw: &[u8],
+    ) -> Result<UserState> {
+        let result = legacy_migrator
+            .with_try_migrate_super_key(user_id, pw, || db.load_super_key(user_id))
+            .context("In check_and_unlock_super_key. Failed to load super key")?;
+
+        match result {
+            Some((_, entry)) => {
+                let super_key = self
+                    .populate_cache_from_super_key_blob(user_id, entry, pw)
+                    .context("In check_and_unlock_super_key.")?;
+                Ok(UserState::LskfUnlocked(super_key))
+            }
+            None => Ok(UserState::Uninitialized),
+        }
+    }
+
+    /// Checks if the user has already set up LSKF (i.e. a super key is persisted in the database
+    /// or the legacy database). If so, returns the LskfLocked state.
+    /// If a password is provided, generates a new super key, encrypts it with the password,
+    /// stores it in the database, populates the super key cache for the new user,
+    /// and returns the LskfUnlocked state.
+    /// If no password is provided, returns the Uninitialized state.
+    pub fn check_and_initialize_super_key(
+        &self,
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        user_id: u32,
+        pw: Option<&[u8]>,
+    ) -> Result<UserState> {
+        let super_key_exists_in_db =
+            Self::super_key_exists_in_db_for_user(db, legacy_migrator, user_id).context(
+                "In check_and_initialize_super_key. Failed to check if super key exists.",
+            )?;
+        if super_key_exists_in_db {
+            Ok(UserState::LskfLocked)
+        } else if let Some(pw) = pw {
+            // Generate a new super key.
+            let super_key = generate_aes256_key()
+                .context("In check_and_initialize_super_key: Failed to generate AES 256 key.")?;
+            // Derive an AES256 key from the password and re-encrypt the super key
+            // before we insert it in the database.
+            let (encrypted_super_key, blob_metadata) = Self::encrypt_with_password(&super_key, pw)
+                .context("In check_and_initialize_super_key.")?;
+
+            let key_entry = db
+                .store_super_key(user_id, &(&encrypted_super_key, &blob_metadata))
+                .context("In check_and_initialize_super_key. Failed to store super key.")?;
+
+            let super_key = self
+                .populate_cache_from_super_key_blob(user_id, key_entry, pw)
+                .context("In check_and_initialize_super_key.")?;
+            Ok(UserState::LskfUnlocked(super_key))
+        } else {
+            Ok(UserState::Uninitialized)
+        }
+    }
+
+    // Helper function to populate the super key cache from the super key blob loaded from the database.
+    fn populate_cache_from_super_key_blob(
+        &self,
+        user_id: u32,
+        entry: KeyEntry,
+        pw: &[u8],
+    ) -> Result<SuperKey> {
+        let super_key = Self::extract_super_key_from_key_entry(entry, pw).context(
+            "In populate_cache_from_super_key_blob. Failed to extract super key from key entry",
+        )?;
+        self.install_per_boot_key_for_user(user_id, super_key.clone());
+        Ok(super_key)
+    }
+
+    /// Extracts super key from the entry loaded from the database
+    pub fn extract_super_key_from_key_entry(entry: KeyEntry, pw: &[u8]) -> Result<SuperKey> {
+        if let Some((blob, metadata)) = entry.key_blob_info() {
+            let key = match (
+                metadata.encrypted_by(),
+                metadata.salt(),
+                metadata.iv(),
+                metadata.aead_tag(),
+            ) {
+                (Some(&EncryptedBy::Password), Some(salt), Some(iv), Some(tag)) => {
+                    let key = derive_key_from_password(pw, Some(salt), AES_256_KEY_LENGTH).context(
+                    "In extract_super_key_from_key_entry: Failed to generate key from password.",
+                )?;
+
+                    aes_gcm_decrypt(blob, iv, tag, &key).context(
+                        "In extract_super_key_from_key_entry: Failed to decrypt key blob.",
+                    )?
+                }
+                (enc_by, salt, iv, tag) => {
+                    return Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
+                        concat!(
+                        "In extract_super_key_from_key_entry: Super key has incomplete metadata.",
+                        "Present: encrypted_by: {}, salt: {}, iv: {}, aead_tag: {}."
+                    ),
+                        enc_by.is_some(),
+                        salt.is_some(),
+                        iv.is_some(),
+                        tag.is_some()
+                    ));
+                }
+            };
+            Ok(SuperKey { key: Arc::new(key), id: entry.id() })
+        } else {
+            Err(Error::Rc(ResponseCode::VALUE_CORRUPTED))
+                .context("In extract_super_key_from_key_entry: No key blob info.")
+        }
+    }
+
+    /// Encrypts the super key from a key derived from the password, before storing in the database.
+    pub fn encrypt_with_password(super_key: &[u8], pw: &[u8]) -> Result<(Vec<u8>, BlobMetaData)> {
+        let salt = generate_salt().context("In encrypt_with_password: Failed to generate salt.")?;
+        let derived_key = derive_key_from_password(pw, Some(&salt), AES_256_KEY_LENGTH)
+            .context("In encrypt_with_password: Failed to derive password.")?;
+        let mut metadata = BlobMetaData::new();
+        metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
+        metadata.add(BlobMetaEntry::Salt(salt));
+        let (encrypted_key, iv, tag) = aes_gcm_encrypt(super_key, &derived_key)
+            .context("In encrypt_with_password: Failed to encrypt new super key.")?;
+        metadata.add(BlobMetaEntry::Iv(iv));
+        metadata.add(BlobMetaEntry::AeadTag(tag));
+        Ok((encrypted_key, metadata))
+    }
+
+    // Encrypt the given key blob with the user's super key, if the super key exists and the device
+    // is unlocked. If the super key exists and the device is locked, or if LSKF is not set up,
+    // return an error. Note that it is out of the scope of this function to check whether super
+    // encryption is required; such a check should be performed before calling this function.
+    fn super_encrypt_on_key_init(
+        &self,
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        user_id: u32,
+        key_blob: &[u8],
+    ) -> Result<(Vec<u8>, BlobMetaData)> {
+        match UserState::get(db, legacy_migrator, self, user_id)
+            .context("In super_encrypt. Failed to get user state.")?
+        {
+            UserState::LskfUnlocked(super_key) => {
+                Self::encrypt_with_super_key(key_blob, &super_key)
+                    .context("In super_encrypt_on_key_init. Failed to encrypt the key.")
+            }
+            UserState::LskfLocked => {
+                Err(Error::Rc(ResponseCode::LOCKED))
+                    .context("In super_encrypt_on_key_init. Device is locked.")
+            }
+            UserState::Uninitialized => Err(Error::Rc(ResponseCode::UNINITIALIZED))
+                .context("In super_encrypt_on_key_init. LSKF is not set up for the user."),
+        }
+    }
+
+    // Helper function to encrypt a key with the given super key. Callers should select which super
+    // key to use. This is called when a key is super-encrypted at its creation as well as at its
+    // upgrade.
+    fn encrypt_with_super_key(
+        key_blob: &[u8],
+        super_key: &SuperKey,
+    ) -> Result<(Vec<u8>, BlobMetaData)> {
+        let mut metadata = BlobMetaData::new();
+        let (encrypted_key, iv, tag) = aes_gcm_encrypt(key_blob, &(super_key.key))
+            .context("In encrypt_with_super_key: Failed to encrypt new super key.")?;
+        metadata.add(BlobMetaEntry::Iv(iv));
+        metadata.add(BlobMetaEntry::AeadTag(tag));
+        metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key.id)));
+        Ok((encrypted_key, metadata))
+    }
+
+    /// Check if super encryption is required and if so, super-encrypt the key to be stored in
+    /// the database.
+    #[allow(clippy::too_many_arguments)]
+    pub fn handle_super_encryption_on_key_init(
+        &self,
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        domain: &Domain,
+        key_parameters: &[KeyParameter],
+        flags: Option<i32>,
+        user_id: u32,
+        key_blob: &[u8],
+    ) -> Result<(Vec<u8>, BlobMetaData)> {
+        match (*domain, Enforcements::super_encryption_required(key_parameters, flags)) {
+            (Domain::APP, true) => {
+                self.super_encrypt_on_key_init(db, legacy_migrator, user_id, key_blob).context(
+                    "In handle_super_encryption_on_key_init. Failed to super encrypt the key.",
+                )
+            }
+            _ => Ok((key_blob.to_vec(), BlobMetaData::new())),
+        }
+    }
+
+    /// Check if a given key is super-encrypted, from its metadata. If so, unwrap the key using
+    /// the relevant super key.
+    pub fn unwrap_key_if_required<'a>(
+        &self,
+        metadata: &BlobMetaData,
+        key_blob: &'a [u8],
+    ) -> Result<KeyBlob<'a>> {
+        if Self::key_super_encrypted(metadata) {
+            let unwrapped_key = self
+                .unwrap_key(key_blob, metadata)
+                .context("In unwrap_key_if_required. Error in unwrapping the key.")?;
+            Ok(unwrapped_key)
+        } else {
+            Ok(KeyBlob::Ref(key_blob))
+        }
+    }
+
+    /// Check if a given key needs re-super-encryption, from its KeyBlob type.
+    /// If so, re-super-encrypt the key and return a new set of metadata,
+    /// containing the new super encryption information.
+    pub fn reencrypt_on_upgrade_if_required<'a>(
+        key_blob_before_upgrade: &KeyBlob,
+        key_after_upgrade: &'a [u8],
+    ) -> Result<(KeyBlob<'a>, Option<BlobMetaData>)> {
+        match key_blob_before_upgrade {
+            KeyBlob::Sensitive(_, super_key) => {
+                let (key, metadata) = Self::encrypt_with_super_key(key_after_upgrade, super_key)
+                    .context(concat!(
+                        "In reencrypt_on_upgrade_if_required. ",
+                        "Failed to re-super-encrypt key on key upgrade."
+                    ))?;
+                Ok((KeyBlob::NonSensitive(key), Some(metadata)))
+            }
+            _ => Ok((KeyBlob::Ref(key_after_upgrade), None)),
+        }
+    }
+
+    // Helper function to decide if a key is super encrypted, given metadata.
+    fn key_super_encrypted(metadata: &BlobMetaData) -> bool {
+        if let Some(&EncryptedBy::KeyId(_)) = metadata.encrypted_by() {
+            return true;
+        }
+        false
+    }
+}
+
+/// This enum represents different states of the user's life cycle in the device.
+/// For now, only three states are defined. More states may be added later.
+pub enum UserState {
+    // The user has registered LSKF and has unlocked the device by entering their PIN/password,
+    // and hence the per-boot super key is available in the cache.
+    LskfUnlocked(SuperKey),
+    // The user has registered LSKF, but has not unlocked the device with their password since the
+    // last reboot. Hence the per-boot super key(s) are not available in the cache.
+    // However, the encrypted super key is available in the database.
+    LskfLocked,
+    // There is no user on the device for the given user id, or the user with that user id has not
+    // set up LSKF.
+    Uninitialized,
+}
+
+impl UserState {
+    pub fn get(
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        skm: &SuperKeyManager,
+        user_id: u32,
+    ) -> Result<UserState> {
+        match skm.get_per_boot_key_by_user_id(user_id) {
+            Some(super_key) => Ok(UserState::LskfUnlocked(super_key)),
+            None => {
+                // Check if a super key exists in the database or the legacy database.
+                // If so, return the locked user state.
+                if SuperKeyManager::super_key_exists_in_db_for_user(db, legacy_migrator, user_id)
+                    .context("In get.")?
+                {
+                    Ok(UserState::LskfLocked)
+                } else {
+                    Ok(UserState::Uninitialized)
+                }
+            }
+        }
+    }
+
+    /// Queries user state when serving password change requests.
+    pub fn get_with_password_changed(
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        skm: &SuperKeyManager,
+        user_id: u32,
+        password: Option<&[u8]>,
+    ) -> Result<UserState> {
+        match skm.get_per_boot_key_by_user_id(user_id) {
+            Some(super_key) => {
+                if password.is_none() {
+                    // Transitioning to swipe/none: delete only the super key in the database and
+                    // cache, and super-encrypted keys in the database (and in KM).
+                    Self::reset_user(db, skm, legacy_migrator, user_id, true).context(
+                        "In get_with_password_changed: Trying to delete keys from the db.",
+                    )?;
+                    // LSKF is now removed in Keystore.
+                    Ok(UserState::Uninitialized)
+                } else {
+                    // Keystore won't be notified when the password is changed while LSKF is
+                    // already set up. Therefore, ideally, this path wouldn't be reached.
+                    Ok(UserState::LskfUnlocked(super_key))
+                }
+            }
+            None => {
+                // Check if a super key exists in the database or the legacy database.
+                // If so, return the LskfLocked state.
+                // Otherwise: i) if a password is provided, initialize the super key and return the
+                // LskfUnlocked state; ii) if no password is provided, return the Uninitialized state.
+                skm.check_and_initialize_super_key(db, legacy_migrator, user_id, password)
+            }
+        }
+    }
+
+    /// Queries user state when serving password unlock requests.
+    pub fn get_with_password_unlock(
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        skm: &SuperKeyManager,
+        user_id: u32,
+        password: &[u8],
+    ) -> Result<UserState> {
+        match skm.get_per_boot_key_by_user_id(user_id) {
+            Some(super_key) => {
+                log::info!("In get_with_password_unlock. Trying to unlock when already unlocked.");
+                Ok(UserState::LskfUnlocked(super_key))
+            }
+            None => {
+                // Check if a super key exists in the database or the legacy database.
+                // If not, return the Uninitialized state.
+                // Otherwise, try to unlock the super key and, if successful,
+                // return the LskfUnlocked state.
+                skm.check_and_unlock_super_key(db, legacy_migrator, user_id, password)
+                    .context("In get_with_password_unlock. Failed to unlock super key.")
+            }
+        }
+    }
+
+    /// Delete all the keys created on behalf of the user.
+    /// If 'keep_non_super_encrypted_keys' is set to true, delete only the super key and super
+    /// encrypted keys.
+    pub fn reset_user(
+        db: &mut KeystoreDB,
+        skm: &SuperKeyManager,
+        legacy_migrator: &LegacyMigrator,
+        user_id: u32,
+        keep_non_super_encrypted_keys: bool,
+    ) -> Result<()> {
+        // Mark keys created on behalf of the user as unreferenced.
+        legacy_migrator
+            .bulk_delete_user(user_id, keep_non_super_encrypted_keys)
+            .context("In reset_user: Trying to delete legacy keys.")?;
+        db.unbind_keys_for_user(user_id, keep_non_super_encrypted_keys)
+            .context("In reset_user. Error in unbinding keys.")?;
+
+        // Delete the super key from the cache, if it exists.
+        skm.forget_all_keys_for_user(user_id);
+        Ok(())
+    }
+}
+
+/// This enum represents the three states a KeyMint blob can be in, w.r.t. super encryption.
+/// `Sensitive` holds the decrypted key material together with its super key.
+/// `NonSensitive` holds a non-encrypted key that is never supposed to be encrypted.
+/// `Ref` holds a borrowed reference to a key blob that does not need to be modified, provided
+/// its lifetime allows it.
+pub enum KeyBlob<'a> {
+    Sensitive(ZVec, SuperKey),
+    NonSensitive(Vec<u8>),
+    Ref(&'a [u8]),
+}
+
+/// Deref returns a reference to the key material in any variant.
+impl<'a> Deref for KeyBlob<'a> {
+    type Target = [u8];
+
+    fn deref(&self) -> &Self::Target {
+        match self {
+            Self::Sensitive(key, _) => &key,
+            Self::NonSensitive(key) => &key,
+            Self::Ref(key) => key,
+        }
+    }
 }
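
The KeyBlob/Deref pattern above lets the rest of the code hand any variant to functions that only expect a plain byte slice. A simplified, self-contained sketch of the same idea (Vec<u8> stands in for ZVec and the SuperKey payload is dropped; these are not the keystore2 types themselves):

    use std::ops::Deref;

    enum Blob<'a> {
        Sensitive(Vec<u8>),    // decrypted key material (ZVec in the real code)
        NonSensitive(Vec<u8>), // key material that was never super encrypted
        Ref(&'a [u8]),         // borrowed blob that needs no modification
    }

    impl<'a> Deref for Blob<'a> {
        type Target = [u8];
        fn deref(&self) -> &Self::Target {
            match self {
                Blob::Sensitive(k) | Blob::NonSensitive(k) => k,
                Blob::Ref(k) => k,
            }
        }
    }

    // Any function that only needs the raw bytes can take &[u8]; deref coercion
    // converts &Blob into &[u8] regardless of the variant.
    fn blob_len(blob: &[u8]) -> usize {
        blob.len()
    }

    fn main() {
        let raw = [1u8, 2, 3];
        for b in [Blob::Sensitive(raw.to_vec()), Blob::NonSensitive(raw.to_vec()), Blob::Ref(&raw)] {
            assert_eq!(blob_len(&b), 3);
        }
    }
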
diff --git a/keystore2/src/user_manager.rs b/keystore2/src/user_manager.rs
new file mode 100644
index 0000000..8e09144
--- /dev/null
+++ b/keystore2/src/user_manager.rs
@@ -0,0 +1,104 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module implements the IKeystoreUserManager AIDL interface.
+
+use crate::error::map_or_log_err;
+use crate::error::Error as KeystoreError;
+use crate::globals::{DB, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::permission::KeystorePerm;
+use crate::super_key::UserState;
+use crate::utils::check_keystore_permission;
+use android_security_usermanager::aidl::android::security::usermanager::IKeystoreUserManager::{
+    BnKeystoreUserManager, IKeystoreUserManager,
+};
+use android_security_usermanager::binder::{Interface, Result as BinderResult};
+use android_system_keystore2::aidl::android::system::keystore2::ResponseCode::ResponseCode;
+use anyhow::{Context, Result};
+use binder::{IBinder, Strong};
+
+/// This struct is defined to implement the aforementioned AIDL interface.
+/// As of now, it is an empty struct.
+pub struct UserManager;
+
+impl UserManager {
+    /// Create a new instance of Keystore User Manager service.
+    pub fn new_native_binder() -> Result<Strong<dyn IKeystoreUserManager>> {
+        let result = BnKeystoreUserManager::new_binder(Self);
+        result.as_binder().set_requesting_sid(true);
+        Ok(result)
+    }
+
+    fn on_user_password_changed(user_id: i32, password: Option<&[u8]>) -> Result<()> {
+        // Check permission. The function should return early if this fails, hence the '?' at
+        // the end is very important.
+        check_keystore_permission(KeystorePerm::change_password())
+            .context("In on_user_password_changed.")?;
+
+        match DB
+            .with(|db| {
+                UserState::get_with_password_changed(
+                    &mut db.borrow_mut(),
+                    &LEGACY_MIGRATOR,
+                    &SUPER_KEY,
+                    user_id as u32,
+                    password,
+                )
+            })
+            .context("In on_user_password_changed.")?
+        {
+            UserState::LskfLocked => {
+                // Error: the password cannot be changed while the device is locked.
+                Err(KeystoreError::Rc(ResponseCode::LOCKED))
+                    .context("In on_user_password_changed. Device is locked.")
+            }
+            _ => {
+                // LskfLocked is the only error case for password change
+                Ok(())
+            }
+        }
+    }
+
+    fn add_or_remove_user(user_id: i32) -> Result<()> {
+        // Check permission. The function should return early if this fails, hence the '?' at
+        // the end is very important.
+        check_keystore_permission(KeystorePerm::change_user()).context("In add_or_remove_user.")?;
+        DB.with(|db| {
+            UserState::reset_user(
+                &mut db.borrow_mut(),
+                &SUPER_KEY,
+                &LEGACY_MIGRATOR,
+                user_id as u32,
+                false,
+            )
+        })
+        .context("In add_or_remove_user: Trying to delete keys from db.")
+    }
+}
+
+impl Interface for UserManager {}
+
+impl IKeystoreUserManager for UserManager {
+    fn onUserPasswordChanged(&self, user_id: i32, password: Option<&[u8]>) -> BinderResult<()> {
+        map_or_log_err(Self::on_user_password_changed(user_id, password), Ok)
+    }
+
+    fn onUserAdded(&self, user_id: i32) -> BinderResult<()> {
+        map_or_log_err(Self::add_or_remove_user(user_id), Ok)
+    }
+
+    fn onUserRemoved(&self, user_id: i32) -> BinderResult<()> {
+        map_or_log_err(Self::add_or_remove_user(user_id), Ok)
+    }
+}
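
In the password-change path above, a locked device is the only error case; every other state maps to success. A hedged sketch of that decision, using a simplified UserState and a plain error string instead of the binder ResponseCode plumbing:

    enum UserState {
        LskfUnlocked,
        LskfLocked,
        Uninitialized,
    }

    fn on_user_password_changed(state: UserState) -> Result<(), String> {
        match state {
            // The password cannot be changed while the device is locked.
            UserState::LskfLocked => Err("ResponseCode::LOCKED".to_string()),
            // LskfUnlocked (password kept or changed in the cache) and Uninitialized
            // (LSKF removed, keys already reset) both report success to the caller.
            _ => Ok(()),
        }
    }

    fn main() {
        assert!(on_user_password_changed(UserState::LskfUnlocked).is_ok());
        assert!(on_user_password_changed(UserState::Uninitialized).is_ok());
        assert!(on_user_password_changed(UserState::LskfLocked).is_err());
    }
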
diff --git a/ondevice-signing/Android.bp b/ondevice-signing/Android.bp
index d47a04f..5db19b7 100644
--- a/ondevice-signing/Android.bp
+++ b/ondevice-signing/Android.bp
@@ -71,7 +71,7 @@
   tidy_checks: tidy_errors,
   tidy_checks_as_errors: tidy_errors,
   tidy_flags: [
-    "-format-style='file'",
+    "-format-style=file",
   ],
 }
 
diff --git a/ondevice-signing/VerityUtils.cpp b/ondevice-signing/VerityUtils.cpp
index 579d3d8..b4a6a54 100644
--- a/ondevice-signing/VerityUtils.cpp
+++ b/ondevice-signing/VerityUtils.cpp
@@ -76,9 +76,10 @@
 
 static Result<std::vector<uint8_t>> signDigest(const KeymasterSigningKey& key,
                                                const std::vector<uint8_t>& digest) {
-    struct fsverity_signed_digest* d = NULL;
+    fsverity_signed_digest* d;
     size_t signed_digest_size = sizeof(*d) + digest.size();
-    d = (struct fsverity_signed_digest*)malloc(signed_digest_size);
+    std::unique_ptr<uint8_t[]> digest_buffer{new uint8_t[signed_digest_size]};
+    d = (fsverity_signed_digest*)digest_buffer.get();
 
     memcpy(d->magic, "FSVerity", 8);
     d->digest_algorithm = cpu_to_le16(FS_VERITY_HASH_ALG_SHA256);