Merge "Keystore 2.0: Rename legacy_migrator to importer."
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index 81790af..8265dd0 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -16,7 +16,7 @@
 
 use crate::error::Error as KeystoreError;
 use crate::error::anyhow_error_to_cstring;
-use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_MIGRATOR};
+use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_IMPORTER};
 use crate::permission::KeystorePerm;
 use crate::super_key::UserState;
 use crate::utils::{check_keystore_permission, watchdog as wd};
@@ -170,7 +170,7 @@
                     .with(|db| {
                         skm.unlock_and_get_user_state(
                             &mut db.borrow_mut(),
-                            &LEGACY_MIGRATOR,
+                            &LEGACY_IMPORTER,
                             user_id as u32,
                             &password,
                         )
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 2819314..9475cf6 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -18,7 +18,7 @@
 
 use crate::gc::Gc;
 use crate::legacy_blob::LegacyBlobLoader;
-use crate::legacy_migrator::LegacyMigrator;
+use crate::legacy_importer::LegacyImporter;
 use crate::super_key::SuperKeyManager;
 use crate::utils::watchdog as wd;
 use crate::{async_task::AsyncTask, database::MonotonicRawTime};
@@ -175,8 +175,8 @@
     pub static ref LEGACY_BLOB_LOADER: Arc<LegacyBlobLoader> = Arc::new(LegacyBlobLoader::new(
         &DB_PATH.read().expect("Could not get the database path for legacy blob loader.")));
     /// Legacy migrator. Atomically migrates legacy blobs to the database.
-    pub static ref LEGACY_MIGRATOR: Arc<LegacyMigrator> =
-        Arc::new(LegacyMigrator::new(Arc::new(Default::default())));
+    pub static ref LEGACY_IMPORTER: Arc<LegacyImporter> =
+        Arc::new(LegacyImporter::new(Arc::new(Default::default())));
     /// Background thread which handles logging via statsd and logd
     pub static ref LOGS_HANDLER: Arc<AsyncTask> = Default::default();
 
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_importer.rs
similarity index 79%
rename from keystore2/src/legacy_migrator.rs
rename to keystore2/src/legacy_importer.rs
index 65f4b0b..3f37b14 100644
--- a/keystore2/src/legacy_migrator.rs
+++ b/keystore2/src/legacy_importer.rs
@@ -38,8 +38,8 @@
 use std::sync::mpsc::channel;
 use std::sync::{Arc, Mutex};
 
-/// Represents LegacyMigrator.
-pub struct LegacyMigrator {
+/// Represents LegacyImporter.
+pub struct LegacyImporter {
     async_task: Arc<AsyncTask>,
     initializer: Mutex<
         Option<
@@ -51,19 +51,19 @@
         >,
     >,
     /// This atomic is used for cheap interior mutability. It is intended to prevent
-    /// expensive calls into the legacy migrator when the legacy database is empty.
+    /// expensive calls into the legacy importer when the legacy database is empty.
     /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
     /// of time. This is tolerable in favor of the common case.
     state: AtomicU8,
 }
 
 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-struct RecentMigration {
+struct RecentImport {
     uid: u32,
     alias: String,
 }
 
-impl RecentMigration {
+impl RecentImport {
     fn new(uid: u32, alias: String) -> Self {
         Self { uid, alias }
     }
@@ -74,15 +74,15 @@
     User(u32),
 }
 
-struct LegacyMigratorState {
-    recently_migrated: HashSet<RecentMigration>,
-    recently_migrated_super_key: HashSet<u32>,
+struct LegacyImporterState {
+    recently_imported: HashSet<RecentImport>,
+    recently_imported_super_key: HashSet<u32>,
     legacy_loader: Arc<LegacyBlobLoader>,
     sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
     db: KeystoreDB,
 }
 
-impl LegacyMigrator {
+impl LegacyImporter {
     const WIFI_NAMESPACE: i64 = 102;
     const AID_WIFI: u32 = 1010;
 
@@ -90,7 +90,7 @@
     const STATE_READY: u8 = 1;
     const STATE_EMPTY: u8 = 2;
 
-    /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
+    /// Constructs a new LegacyImporter using the given AsyncTask object as import
     /// worker.
     pub fn new(async_task: Arc<AsyncTask>) -> Self {
         Self {
@@ -100,7 +100,7 @@
         }
     }
 
-    /// The legacy migrator must be initialized deferred, because keystore starts very early.
+    /// The legacy importer's initialization must be deferred, because keystore starts very early.
     /// At this time the data partition may not be mounted. So we cannot open database connections
     /// until we get actual key load requests. This sets the function that the legacy loader
     /// uses to connect to the database.
@@ -125,11 +125,11 @@
         Ok(())
     }
 
-    /// This function is called by the migration requestor to check if it is worth
-    /// making a migration request. It also transitions the state from UNINITIALIZED
+    /// This function is called by the import requestor to check if it is worth
+    /// making an import request. It also transitions the state from UNINITIALIZED
     /// to READY or EMPTY on first use. The deferred initialization is necessary, because
     /// Keystore 2.0 runs early during boot, where data may not yet be mounted.
-    /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
+    /// Returns Ok(STATE_READY) if an import request is worth undertaking and
     /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
     /// was not initialized and cannot be initialized.
     fn check_state(&self) -> Result<u8> {
@@ -157,9 +157,9 @@
                         }
 
                         self.async_task.queue_hi(move |shelf| {
-                            shelf.get_or_put_with(|| LegacyMigratorState {
-                                recently_migrated: Default::default(),
-                                recently_migrated_super_key: Default::default(),
+                            shelf.get_or_put_with(|| LegacyImporterState {
+                                recently_imported: Default::default(),
+                                recently_imported_super_key: Default::default(),
                                 legacy_loader,
                                 sec_level_to_km_uuid,
                                 db,
@@ -189,14 +189,14 @@
                     );
                 }
                 (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
-                (s, _) => panic!("Unknown legacy migrator state. {} ", s),
+                (s, _) => panic!("Unknown legacy importer state. {} ", s),
             }
         }
     }
 
     /// List all aliases for uid in the legacy database.
     pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
-        let _wp = wd::watch_millis("LegacyMigrator::list_uid", 500);
+        let _wp = wd::watch_millis("LegacyImporter::list_uid", 500);
 
         let uid = match (domain, namespace) {
             (Domain::APP, namespace) => namespace as u32,
@@ -217,44 +217,44 @@
         )
     }
 
-    /// Sends the given closure to the migrator thread for execution after calling check_state.
+    /// Sends the given closure to the importer thread for execution after calling check_state.
     /// Returns None if the database was empty and the request was not executed.
-    /// Otherwise returns Some with the result produced by the migration request.
+    /// Otherwise returns Some with the result produced by the import request.
     /// The loader state may transition to STATE_EMPTY during the execution of this function.
     fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
     where
-        F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
+        F: FnOnce(&mut LegacyImporterState) -> Result<T> + Send + 'static,
     {
         // Short circuit if the database is empty or not initialized (error case).
         match self.check_state().context("In do_serialized: Checking state.") {
-            Ok(LegacyMigrator::STATE_EMPTY) => return None,
-            Ok(LegacyMigrator::STATE_READY) => {}
+            Ok(LegacyImporter::STATE_EMPTY) => return None,
+            Ok(LegacyImporter::STATE_READY) => {}
             Err(e) => return Some(Err(e)),
-            Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
+            Ok(s) => panic!("Unknown legacy importer state. {} ", s),
         }
 
         // We have established that there may be a key in the legacy database.
-        // Now we schedule a migration request.
+        // Now we schedule an import request.
         let (sender, receiver) = channel();
         self.async_task.queue_hi(move |shelf| {
-            // Get the migrator state from the shelf.
-            // There may not be a state. This can happen if this migration request was scheduled
+            // Get the importer state from the shelf.
+            // There may not be a state. This can happen if this import request was scheduled
             // before a previous request established that the legacy database was empty
             // and removed the state from the shelf. Since we know now that the database
             // is empty, we can return None here.
-            let (new_state, result) = if let Some(legacy_migrator_state) =
-                shelf.get_downcast_mut::<LegacyMigratorState>()
+            let (new_state, result) = if let Some(legacy_importer_state) =
+                shelf.get_downcast_mut::<LegacyImporterState>()
             {
-                let result = f(legacy_migrator_state);
-                (legacy_migrator_state.check_empty(), Some(result))
+                let result = f(legacy_importer_state);
+                (legacy_importer_state.check_empty(), Some(result))
             } else {
                 (Self::STATE_EMPTY, None)
             };
 
-            // If the migration request determined that the database is now empty, we discard
+            // If the import request determined that the database is now empty, we discard
             // the state from the shelf to free up the resources we won't need any longer.
             if result.is_some() && new_state == Self::STATE_EMPTY {
-                shelf.remove_downcast_ref::<LegacyMigratorState>();
+                shelf.remove_downcast_ref::<LegacyImporterState>();
             }
 
             // Send the result to the requester.
@@ -271,7 +271,7 @@
         };
 
         // We can only transition to EMPTY but never back.
-        // The migrator never creates any legacy blobs.
+        // The importer never creates any legacy blobs.
         if new_state == Self::STATE_EMPTY {
             self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
         }
@@ -280,10 +280,10 @@
     }
 
     /// Runs the key_accessor function and returns its result. If it returns an error and the
-    /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
+    /// root cause was KEY_NOT_FOUND, tries to import a key with the given parameters from
     /// the legacy database to the new database and runs the key_accessor function again if
-    /// the migration request was successful.
-    pub fn with_try_migrate<F, T>(
+    /// the import request was successful.
+    pub fn with_try_import<F, T>(
         &self,
         key: &KeyDescriptor,
         caller_uid: u32,
@@ -292,7 +292,7 @@
     where
         F: Fn() -> Result<T>,
     {
-        let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate", 500);
+        let _wp = wd::watch_millis("LegacyImporter::with_try_import", 500);
 
         // Access the key and return on success.
         match key_accessor() {
@@ -304,7 +304,7 @@
         }
 
         // Filter inputs. We can only load legacy app domain keys and some special rules due
-        // to which we migrate keys transparently to an SELINUX domain.
+        // to which we import keys transparently to an SELINUX domain.
         let uid = match key {
             KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
             KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
@@ -324,11 +324,11 @@
 
         let key_clone = key.clone();
         let result = self
-            .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));
+            .do_serialized(move |importer_state| importer_state.check_and_import(uid, key_clone));
 
         if let Some(result) = result {
             result?;
-            // After successful migration try again.
+            // After successful import try again.
             key_accessor()
         } else {
             Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
@@ -336,8 +336,8 @@
     }
 
     /// Calls key_accessor and returns the result on success. In the case of a KEY_NOT_FOUND error
-    /// this function makes a migration request and on success retries the key_accessor.
-    pub fn with_try_migrate_super_key<F, T>(
+    /// this function makes an import request and on success retries the key_accessor.
+    pub fn with_try_import_super_key<F, T>(
         &self,
         user_id: u32,
         pw: &Password,
@@ -346,31 +346,31 @@
     where
         F: FnMut() -> Result<Option<T>>,
     {
-        let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate_super_key", 500);
+        let _wp = wd::watch_millis("LegacyImporter::with_try_import_super_key", 500);
 
         match key_accessor() {
             Ok(Some(result)) => return Ok(Some(result)),
             Ok(None) => {}
             Err(e) => return Err(e),
         }
-        let pw = pw.try_clone().context("In with_try_migrate_super_key: Cloning password.")?;
-        let result = self.do_serialized(move |migrator_state| {
-            migrator_state.check_and_migrate_super_key(user_id, &pw)
+        let pw = pw.try_clone().context("In with_try_import_super_key: Cloning password.")?;
+        let result = self.do_serialized(move |importer_state| {
+            importer_state.check_and_import_super_key(user_id, &pw)
         });
 
         if let Some(result) = result {
             result?;
-            // After successful migration try again.
+            // After successful import try again.
             key_accessor()
         } else {
             Ok(None)
         }
     }
 
-    /// Deletes all keys belonging to the given namespace, migrating them into the database
+    /// Deletes all keys belonging to the given namespace, importing them into the database
     /// for subsequent garbage collection if necessary.
     pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
-        let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_uid", 500);
+        let _wp = wd::watch_millis("LegacyImporter::bulk_delete_uid", 500);
 
         let uid = match (domain, nspace) {
             (Domain::APP, nspace) => nspace as u32,
@@ -379,24 +379,24 @@
             _ => return Ok(()),
         };
 
-        let result = self.do_serialized(move |migrator_state| {
-            migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
+        let result = self.do_serialized(move |importer_state| {
+            importer_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
         });
 
         result.unwrap_or(Ok(()))
     }
 
-    /// Deletes all keys belonging to the given android user, migrating them into the database
+    /// Deletes all keys belonging to the given android user, importing them into the database
     /// for subsequent garbage collection if necessary.
     pub fn bulk_delete_user(
         &self,
         user_id: u32,
         keep_non_super_encrypted_keys: bool,
     ) -> Result<()> {
-        let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_user", 500);
+        let _wp = wd::watch_millis("LegacyImporter::bulk_delete_user", 500);
 
-        let result = self.do_serialized(move |migrator_state| {
-            migrator_state
+        let result = self.do_serialized(move |importer_state| {
+            importer_state
                 .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
         });
 
@@ -406,12 +406,12 @@
     /// Queries the legacy database for the presence of a super key for the given user.
     pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
         let result =
-            self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
+            self.do_serialized(move |importer_state| importer_state.has_super_key(user_id));
         result.unwrap_or(Ok(false))
     }
 }
 
-impl LegacyMigratorState {
+impl LegacyImporterState {
     fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
         let sec_level = if is_strongbox {
             SecurityLevel::STRONGBOX
@@ -430,17 +430,17 @@
             .context("In list_uid: Trying to list legacy entries.")
     }
 
-    /// This is a key migration request that must run in the migrator thread. This must
+    /// This is a key import request that must run in the importer thread. This must
     /// be passed to do_serialized.
-    fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
+    fn check_and_import(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
         let alias = key.alias.clone().ok_or_else(|| {
-            anyhow::anyhow!(Error::sys()).context(concat!(
-                "In check_and_migrate: Must be Some because ",
-                "our caller must not have called us otherwise."
-            ))
+            anyhow::anyhow!(Error::sys()).context(
+                "In check_and_import: Must be Some because \
+                 our caller must not have called us otherwise.",
+            )
         })?;
 
-        if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
+        if self.recently_imported.contains(&RecentImport::new(uid, alias.clone())) {
             return Ok(());
         }
 
@@ -452,7 +452,7 @@
         let (km_blob_params, user_cert, ca_cert) = self
             .legacy_loader
             .load_by_uid_alias(uid, &alias, None)
-            .context("In check_and_migrate: Trying to load legacy blob.")?;
+            .context("In check_and_import: Trying to load legacy blob.")?;
         let result = match km_blob_params {
             Some((km_blob, params)) => {
                 let is_strongbox = km_blob.is_strongbox();
@@ -464,33 +464,33 @@
                         let super_key_id = match self
                             .db
                             .load_super_key(&USER_SUPER_KEY, user_id)
-                            .context("In check_and_migrate: Failed to load super key")?
+                            .context("In check_and_import: Failed to load super key")?
                         {
                             Some((_, entry)) => entry.id(),
                             None => {
                                 // This might be the first time we access the super key,
-                                // and it may not have been migrated. We cannot import
+                                // and it may not have been imported. We cannot import
                                 // the legacy super_key key now, because we need to reencrypt
                                 // it which we cannot do if we are not unlocked, which we are
-                                // not because otherwise the key would have been migrated.
+                                // not because otherwise the key would have been imported.
                                 // We can check though if the key exists. If it does,
                                 // we can return Locked. Otherwise, we can delete the
                                 // key and return NotFound, because the key will never
                                 // be unlocked again.
                                 if self.legacy_loader.has_super_key(user_id) {
                                     return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
-                                        "In check_and_migrate: Cannot migrate super key of this ",
+                                        "In check_and_import: Cannot import super key of this ",
                                         "key while user is locked."
                                     ));
                                 } else {
                                     self.legacy_loader.remove_keystore_entry(uid, &alias).context(
                                         concat!(
-                                            "In check_and_migrate: ",
+                                            "In check_and_import: ",
                                             "Trying to remove obsolete key."
                                         ),
                                     )?;
                                     return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
-                                        .context("In check_and_migrate: Obsolete key.");
+                                        .context("In check_and_import: Obsolete key.");
                                 }
                             }
                         };
@@ -505,18 +505,18 @@
                     BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
                     _ => {
                         return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
-                            .context("In check_and_migrate: Legacy key has unexpected type.")
+                            .context("In check_and_import: Legacy key has unexpected type.")
                     }
                 };
 
                 let km_uuid = self
                     .get_km_uuid(is_strongbox)
-                    .context("In check_and_migrate: Trying to get KM UUID")?;
+                    .context("In check_and_import: Trying to get KM UUID")?;
                 blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
 
                 let mut metadata = KeyMetaData::new();
                 let creation_date = DateTime::now()
-                    .context("In check_and_migrate: Trying to make creation time.")?;
+                    .context("In check_and_import: Trying to make creation time.")?;
                 metadata.add(KeyMetaEntry::CreationDate(creation_date));
 
                 // Store legacy key in the database.
@@ -530,49 +530,49 @@
                         &metadata,
                         &km_uuid,
                     )
-                    .context("In check_and_migrate.")?;
+                    .context("In check_and_import.")?;
                 Ok(())
             }
             None => {
                 if let Some(ca_cert) = ca_cert {
                     self.db
                         .store_new_certificate(&key, KeyType::Client, &ca_cert, &KEYSTORE_UUID)
-                        .context("In check_and_migrate: Failed to insert new certificate.")?;
+                        .context("In check_and_import: Failed to insert new certificate.")?;
                     Ok(())
                 } else {
                     Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
-                        .context("In check_and_migrate: Legacy key not found.")
+                        .context("In check_and_import: Legacy key not found.")
                 }
             }
         };
 
         match result {
             Ok(()) => {
-                // Add the key to the migrated_keys list.
-                self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
+                // Add the key to the imported_keys list.
+                self.recently_imported.insert(RecentImport::new(uid, alias.clone()));
                 // Delete legacy key from the file system
                 self.legacy_loader
                     .remove_keystore_entry(uid, &alias)
-                    .context("In check_and_migrate: Trying to remove migrated key.")?;
+                    .context("In check_and_import: Trying to remove imported key.")?;
                 Ok(())
             }
             Err(e) => Err(e),
         }
     }
 
-    fn check_and_migrate_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
-        if self.recently_migrated_super_key.contains(&user_id) {
+    fn check_and_import_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
+        if self.recently_imported_super_key.contains(&user_id) {
             return Ok(());
         }
 
         if let Some(super_key) = self
             .legacy_loader
             .load_super_key(user_id, pw)
-            .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
+            .context("In check_and_import_super_key: Trying to load legacy super key.")?
         {
             let (blob, blob_metadata) =
                 crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
-                    .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;
+                    .context("In check_and_import_super_key: Trying to encrypt super key.")?;
 
             self.db
                 .store_super_key(
@@ -583,20 +583,20 @@
                     &KeyMetaData::new(),
                 )
                 .context(concat!(
-                    "In check_and_migrate_super_key: ",
+                    "In check_and_import_super_key: ",
                     "Trying to insert legacy super_key into the database."
                 ))?;
             self.legacy_loader.remove_super_key(user_id);
-            self.recently_migrated_super_key.insert(user_id);
+            self.recently_imported_super_key.insert(user_id);
             Ok(())
         } else {
             Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
-                .context("In check_and_migrate_super_key: No key found do migrate.")
+                .context("In check_and_import_super_key: No key found do import.")
         }
     }
 
-    /// Key migrator request to be run by do_serialized.
-    /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
+    /// Key importer request to be run by do_serialized.
+    /// See LegacyImporter::bulk_delete_uid and LegacyImporter::bulk_delete_user.
     fn bulk_delete(
         &mut self,
         bulk_delete_request: BulkDeleteRequest,
@@ -695,21 +695,21 @@
 
             self.legacy_loader
                 .remove_keystore_entry(uid, &alias)
-                .context("In bulk_delete: Trying to remove migrated key.")?;
+                .context("In bulk_delete: Trying to remove imported key.")?;
         }
         Ok(())
     }
 
     fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
-        Ok(self.recently_migrated_super_key.contains(&user_id)
+        Ok(self.recently_imported_super_key.contains(&user_id)
             || self.legacy_loader.has_super_key(user_id))
     }
 
     fn check_empty(&self) -> u8 {
         if self.legacy_loader.is_empty().unwrap_or(false) {
-            LegacyMigrator::STATE_EMPTY
+            LegacyImporter::STATE_EMPTY
         } else {
-            LegacyMigrator::STATE_READY
+            LegacyImporter::STATE_READY
         }
     }
 }
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index 134f707..0a80763 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -29,7 +29,7 @@
 /// Internal Representation of Key Parameter and convenience functions.
 pub mod key_parameter;
 pub mod legacy_blob;
-pub mod legacy_migrator;
+pub mod legacy_importer;
 pub mod maintenance;
 pub mod metrics;
 pub mod metrics_store;
diff --git a/keystore2/src/maintenance.rs b/keystore2/src/maintenance.rs
index c0f1e1e..71f43d6 100644
--- a/keystore2/src/maintenance.rs
+++ b/keystore2/src/maintenance.rs
@@ -19,7 +19,7 @@
 use crate::error::map_or_log_err;
 use crate::error::Error;
 use crate::globals::get_keymint_device;
-use crate::globals::{DB, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::globals::{DB, LEGACY_IMPORTER, SUPER_KEY};
 use crate::permission::{KeyPerm, KeystorePerm};
 use crate::super_key::{SuperKeyManager, UserState};
 use crate::utils::{
@@ -88,7 +88,7 @@
             .with(|db| {
                 skm.reset_or_init_user_and_get_user_state(
                     &mut db.borrow_mut(),
-                    &LEGACY_MIGRATOR,
+                    &LEGACY_IMPORTER,
                     user_id as u32,
                     password.as_ref(),
                 )
@@ -115,7 +115,7 @@
         DB.with(|db| {
             SUPER_KEY.write().unwrap().reset_user(
                 &mut db.borrow_mut(),
-                &LEGACY_MIGRATOR,
+                &LEGACY_IMPORTER,
                 user_id as u32,
                 false,
             )
@@ -130,7 +130,7 @@
         // Permission check. Must return on error. Do not touch the '?'.
         check_keystore_permission(KeystorePerm::ClearUID).context("In clear_namespace.")?;
 
-        LEGACY_MIGRATOR
+        LEGACY_IMPORTER
             .bulk_delete_uid(domain, nspace)
             .context("In clear_namespace: Trying to delete legacy keys.")?;
         DB.with(|db| db.borrow_mut().unbind_keys_for_namespace(domain, nspace))
@@ -148,7 +148,7 @@
             .with(|db| {
                 SUPER_KEY.read().unwrap().get_user_state(
                     &mut db.borrow_mut(),
-                    &LEGACY_MIGRATOR,
+                    &LEGACY_IMPORTER,
                     user_id as u32,
                 )
             })
@@ -256,8 +256,8 @@
         };
 
         DB.with(|db| {
-            let (key_id_guard, _) = LEGACY_MIGRATOR
-                .with_try_migrate(source, src_uid, || {
+            let (key_id_guard, _) = LEGACY_IMPORTER
+                .with_try_import(source, src_uid, || {
                     db.borrow_mut().load_key_entry(
                         source,
                         KeyType::Client,
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index 83f0bee..eefbc20 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -20,7 +20,7 @@
 };
 use crate::database::{CertificateInfo, KeyIdGuard};
 use crate::error::{self, map_km_error, map_or_log_err, Error, ErrorCode};
-use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::globals::{DB, ENFORCEMENTS, LEGACY_IMPORTER, SUPER_KEY};
 use crate::key_parameter::KeyParameter as KsKeyParam;
 use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
 use crate::metrics_store::log_key_creation_event_stats;
@@ -164,7 +164,7 @@
                         .unwrap()
                         .handle_super_encryption_on_key_init(
                             &mut db,
-                            &LEGACY_MIGRATOR,
+                            &LEGACY_IMPORTER,
                             &(key.domain),
                             &key_parameters,
                             flags,
@@ -245,7 +245,7 @@
             _ => {
                 let (key_id_guard, mut key_entry) = DB
                     .with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
-                        LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+                        LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                             db.borrow_mut().load_key_entry(
                                 key,
                                 KeyType::Client,
@@ -723,7 +723,7 @@
 
         let (wrapping_key_id_guard, mut wrapping_key_entry) = DB
             .with(|db| {
-                LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+                LEGACY_IMPORTER.with_try_import(&key, caller_uid, || {
                     db.borrow_mut().load_key_entry(
                         wrapping_key,
                         KeyType::Client,
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index 2725dc2..46bc8b0 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -26,7 +26,7 @@
 };
 use crate::{
     database::Uuid,
-    globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_MIGRATOR},
+    globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_IMPORTER},
 };
 use crate::{database::KEYSTORE_UUID, permission};
 use crate::{
@@ -81,7 +81,7 @@
         }
 
         let uuid_by_sec_level = result.uuid_by_sec_level.clone();
-        LEGACY_MIGRATOR
+        LEGACY_IMPORTER
             .set_init(move || {
                 (create_thread_local_db(), uuid_by_sec_level, LEGACY_BLOB_LOADER.clone())
             })
@@ -132,7 +132,7 @@
         let caller_uid = ThreadState::get_calling_uid();
         let (key_id_guard, mut key_entry) = DB
             .with(|db| {
-                LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+                LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                     db.borrow_mut().load_key_entry(
                         key,
                         KeyType::Client,
@@ -183,7 +183,7 @@
     ) -> Result<()> {
         let caller_uid = ThreadState::get_calling_uid();
         DB.with::<_, Result<()>>(|db| {
-            let entry = match LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+            let entry = match LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                 db.borrow_mut().load_key_entry(
                     key,
                     KeyType::Client,
@@ -292,7 +292,7 @@
     fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
         let caller_uid = ThreadState::get_calling_uid();
         DB.with(|db| {
-            LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+            LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                 db.borrow_mut().unbind_key(key, KeyType::Client, caller_uid, |k, av| {
                     check_key_permission(KeyPerm::Delete, k, &av).context("During delete_key.")
                 })
@@ -310,7 +310,7 @@
     ) -> Result<KeyDescriptor> {
         let caller_uid = ThreadState::get_calling_uid();
         DB.with(|db| {
-            LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+            LEGACY_IMPORTER.with_try_import(key, caller_uid, || {
                 db.borrow_mut().grant(
                     key,
                     caller_uid,
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 6862011..2fb4991 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -26,7 +26,7 @@
     error::ResponseCode,
     key_parameter::{KeyParameter, KeyParameterValue},
     legacy_blob::LegacyBlobLoader,
-    legacy_migrator::LegacyMigrator,
+    legacy_importer::LegacyImporter,
     raw_device::KeyMintDevice,
     utils::watchdog as wd,
     utils::AID_KEYSTORE,
@@ -507,7 +507,7 @@
     fn super_key_exists_in_db_for_user(
         &self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
     ) -> Result<bool> {
         let key_in_db = db
@@ -517,7 +517,7 @@
         if key_in_db {
             Ok(key_in_db)
         } else {
-            legacy_migrator
+            legacy_importer
                 .has_super_key(user_id)
                 .context("In super_key_exists_in_db_for_user: Trying to query legacy db.")
         }
@@ -529,13 +529,13 @@
     pub fn check_and_unlock_super_key(
         &mut self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
         pw: &Password,
     ) -> Result<UserState> {
         let alias = &USER_SUPER_KEY;
-        let result = legacy_migrator
-            .with_try_migrate_super_key(user_id, pw, || db.load_super_key(alias, user_id))
+        let result = legacy_importer
+            .with_try_import_super_key(user_id, pw, || db.load_super_key(alias, user_id))
             .context("In check_and_unlock_super_key. Failed to load super key")?;
 
         match result {
@@ -558,12 +558,12 @@
     pub fn check_and_initialize_super_key(
         &mut self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
         pw: Option<&Password>,
     ) -> Result<UserState> {
         let super_key_exists_in_db = self
-            .super_key_exists_in_db_for_user(db, legacy_migrator, user_id)
+            .super_key_exists_in_db_for_user(db, legacy_importer, user_id)
             .context("In check_and_initialize_super_key. Failed to check if super key exists.")?;
         if super_key_exists_in_db {
             Ok(UserState::LskfLocked)
@@ -691,12 +691,12 @@
     fn super_encrypt_on_key_init(
         &self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
         key_blob: &[u8],
     ) -> Result<(Vec<u8>, BlobMetaData)> {
         match self
-            .get_user_state(db, legacy_migrator, user_id)
+            .get_user_state(db, legacy_importer, user_id)
             .context("In super_encrypt. Failed to get user state.")?
         {
             UserState::LskfUnlocked(super_key) => {
@@ -737,7 +737,7 @@
     pub fn handle_super_encryption_on_key_init(
         &self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         domain: &Domain,
         key_parameters: &[KeyParameter],
         flags: Option<i32>,
@@ -747,7 +747,7 @@
         match Enforcements::super_encryption_required(domain, key_parameters, flags) {
             SuperEncryptionType::None => Ok((key_blob.to_vec(), BlobMetaData::new())),
             SuperEncryptionType::LskfBound => self
-                .super_encrypt_on_key_init(db, legacy_migrator, user_id, key_blob)
+                .super_encrypt_on_key_init(db, legacy_importer, user_id, key_blob)
                 .context(concat!(
                     "In handle_super_encryption_on_key_init. ",
                     "Failed to super encrypt with LskfBound key."
@@ -1086,11 +1086,11 @@
 
     /// Returns the keystore locked state of the given user. It requires the thread local
     /// keystore database and a reference to the legacy migrator because it may need to
-    /// migrate the super key from the legacy blob database to the keystore database.
+    /// import the super key from the legacy blob database to the keystore database.
     pub fn get_user_state(
         &self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
     ) -> Result<UserState> {
         match self.get_per_boot_key_by_user_id(user_id) {
@@ -1099,7 +1099,7 @@
                 // Check if a super key exists in the database or legacy database.
                 // If so, return locked user state.
                 if self
-                    .super_key_exists_in_db_for_user(db, legacy_migrator, user_id)
+                    .super_key_exists_in_db_for_user(db, legacy_importer, user_id)
                     .context("In get_user_state.")?
                 {
                     Ok(UserState::LskfLocked)
@@ -1123,7 +1123,7 @@
     pub fn reset_or_init_user_and_get_user_state(
         &mut self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
         password: Option<&Password>,
     ) -> Result<UserState> {
@@ -1131,7 +1131,7 @@
             Some(_) if password.is_none() => {
                 // Transitioning to swiping, delete only the super key in database and cache,
                 // and super-encrypted keys in database (and in KM).
-                self.reset_user(db, legacy_migrator, user_id, true).context(
+                self.reset_user(db, legacy_importer, user_id, true).context(
                     "In reset_or_init_user_and_get_user_state: Trying to delete keys from the db.",
                 )?;
                 // Lskf is now removed in Keystore.
@@ -1147,7 +1147,7 @@
                 // If so, return LskfLocked state.
                 // Otherwise, i) if the password is provided, initialize the super key and return
                 // LskfUnlocked state ii) if password is not provided, return Uninitialized state.
-                self.check_and_initialize_super_key(db, legacy_migrator, user_id, password)
+                self.check_and_initialize_super_key(db, legacy_importer, user_id, password)
             }
         }
     }
@@ -1158,7 +1158,7 @@
     pub fn unlock_and_get_user_state(
         &mut self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
         password: &Password,
     ) -> Result<UserState> {
@@ -1172,7 +1172,7 @@
                 // If not, return Uninitialized state.
                 // Otherwise, try to unlock the super key and if successful,
                 // return LskfUnlocked.
-                self.check_and_unlock_super_key(db, legacy_migrator, user_id, password)
+                self.check_and_unlock_super_key(db, legacy_importer, user_id, password)
                     .context("In unlock_and_get_user_state. Failed to unlock super key.")
             }
         }
@@ -1184,12 +1184,12 @@
     pub fn reset_user(
         &mut self,
         db: &mut KeystoreDB,
-        legacy_migrator: &LegacyMigrator,
+        legacy_importer: &LegacyImporter,
         user_id: UserId,
         keep_non_super_encrypted_keys: bool,
     ) -> Result<()> {
         // Mark keys created on behalf of the user as unreferenced.
-        legacy_migrator
+        legacy_importer
             .bulk_delete_user(user_id, keep_non_super_encrypted_keys)
             .context("In reset_user: Trying to delete legacy keys.")?;
         db.unbind_keys_for_user(user_id, keep_non_super_encrypted_keys)
diff --git a/keystore2/src/utils.rs b/keystore2/src/utils.rs
index 82e6700..c924bef 100644
--- a/keystore2/src/utils.rs
+++ b/keystore2/src/utils.rs
@@ -20,7 +20,7 @@
 use crate::permission::{KeyPerm, KeyPermSet, KeystorePerm};
 use crate::{
     database::{KeyType, KeystoreDB},
-    globals::LEGACY_MIGRATOR,
+    globals::LEGACY_IMPORTER,
 };
 use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
     KeyCharacteristics::KeyCharacteristics, Tag::Tag,
@@ -211,7 +211,7 @@
 ) -> Result<Vec<KeyDescriptor>> {
     let mut result = Vec::new();
     result.append(
-        &mut LEGACY_MIGRATOR
+        &mut LEGACY_IMPORTER
             .list_uid(domain, namespace)
             .context("In list_key_entries: Trying to list legacy keys.")?,
     );