// Copyright 2021, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This module acts as a bridge between the legacy key database and the keystore2 database.

use crate::database::{
    BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData, KeyMetaEntry,
    KeystoreDB, Uuid, KEYSTORE_UUID,
};
use crate::error::Error;
use crate::key_parameter::KeyParameterValue;
use crate::legacy_blob::BlobValue;
use crate::utils::uid_to_android_user;
use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
use android_system_keystore2::aidl::android::system::keystore2::{
    Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
};
use anyhow::{Context, Result};
use core::ops::Deref;
use keystore2_crypto::{Password, ZVec};
use std::collections::{HashMap, HashSet};
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};

/// The LegacyMigrator bridges the legacy key database and the keystore2 database. It
/// migrates legacy entries into the new database on demand, when they are first accessed.
pub struct LegacyMigrator {
    async_task: Arc<AsyncTask>,
    initializer: Mutex<
        Option<
            Box<
                dyn FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
                    + Send
                    + 'static,
            >,
        >,
    >,
    /// This atomic is used for cheap interior mutability. It is intended to prevent
    /// expensive calls into the legacy migrator when the legacy database is empty.
    /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
    /// of time. This is tolerable in favor of the common case.
    state: AtomicU8,
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct RecentMigration {
    uid: u32,
    alias: String,
}

impl RecentMigration {
    fn new(uid: u32, alias: String) -> Self {
        Self { uid, alias }
    }
}

enum BulkDeleteRequest {
    Uid(u32),
    User(u32),
}

struct LegacyMigratorState {
    recently_migrated: HashSet<RecentMigration>,
    recently_migrated_super_key: HashSet<u32>,
    legacy_loader: Arc<LegacyBlobLoader>,
    sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
    db: KeystoreDB,
}

impl LegacyMigrator {
    const WIFI_NAMESPACE: i64 = 102;
    const AID_WIFI: u32 = 1010;

    const STATE_UNINITIALIZED: u8 = 0;
    const STATE_READY: u8 = 1;
    const STATE_EMPTY: u8 = 2;
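
    // State transitions (see check_state and do_serialized): the migrator starts out
    // UNINITIALIZED and moves to READY or EMPTY the first time check_state runs the
    // initializer. Once READY it can only ever transition to EMPTY, never back, because
    // the migrator never creates new legacy blobs.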

    /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
    /// worker.
    pub fn new(async_task: Arc<AsyncTask>) -> Self {
        Self {
            async_task,
            initializer: Default::default(),
            state: AtomicU8::new(Self::STATE_UNINITIALIZED),
        }
    }

    /// Initialization of the legacy migrator must be deferred, because keystore starts very
    /// early, at which point the data partition may not yet be mounted. So we cannot open
    /// database connections until we get actual key load requests. This sets the function
    /// that the legacy migrator uses to connect to the databases.
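    ///
    /// A minimal usage sketch (illustrative only; `open_db`, `km_uuid_map`, and
    /// `open_legacy_loader` are hypothetical helpers supplied by the caller):
    ///
    /// ```ignore
    /// let migrator = LegacyMigrator::new(async_task.clone());
    /// migrator.set_init(move || (open_db(), km_uuid_map(), Arc::new(open_legacy_loader())))?;
    /// ```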
    pub fn set_init<F>(&self, f_init: F) -> Result<()>
    where
        F: FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
            + Send
            + 'static,
    {
        let mut initializer = self.initializer.lock().expect("Failed to lock initializer.");

        // If we are not uninitialized we have no business setting the initializer.
        if self.state.load(Ordering::Relaxed) != Self::STATE_UNINITIALIZED {
            return Ok(());
        }

        // Only set the initializer if it hasn't been set before.
        if initializer.is_none() {
            *initializer = Some(Box::new(f_init))
        }

        Ok(())
    }

    /// This function is called by the migration requestor to check if it is worth
    /// making a migration request. It also transitions the state from UNINITIALIZED
    /// to READY or EMPTY on first use. The deferred initialization is necessary, because
    /// Keystore 2.0 runs early during boot, when the data partition may not yet be mounted.
    /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
    /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
    /// was not initialized and cannot be initialized.
    fn check_state(&self) -> Result<u8> {
        let mut first_try = true;
        loop {
            match (self.state.load(Ordering::Relaxed), first_try) {
                (Self::STATE_EMPTY, _) => {
                    return Ok(Self::STATE_EMPTY);
                }
                (Self::STATE_UNINITIALIZED, true) => {
                    // If we find the legacy loader uninitialized, we grab the initializer lock,
                    // check if the legacy database is empty, and if not, schedule an initialization
                    // request. Coming out of the initializer lock, the state is either EMPTY or
                    // READY.
                    let mut initializer = self.initializer.lock().unwrap();

                    if let Some(initializer) = initializer.take() {
                        let (db, sec_level_to_km_uuid, legacy_loader) = (initializer)();

                        if legacy_loader.is_empty().context(
                            "In check_state: Trying to check if the legacy database is empty.",
                        )? {
                            self.state.store(Self::STATE_EMPTY, Ordering::Relaxed);
                            return Ok(Self::STATE_EMPTY);
                        }

                        self.async_task.queue_hi(move |shelf| {
                            shelf.get_or_put_with(|| LegacyMigratorState {
                                recently_migrated: Default::default(),
                                recently_migrated_super_key: Default::default(),
                                legacy_loader,
                                sec_level_to_km_uuid,
                                db,
                            });
                        });

                        // It is safe to set this here even though the async task may not yet have
                        // run because any thread observing this will not be able to schedule a
                        // task that can run before the initialization.
                        // Also we can only transition out of this state while having the
                        // initializer lock and having found an initializer.
                        self.state.store(Self::STATE_READY, Ordering::Relaxed);
                        return Ok(Self::STATE_READY);
                    } else {
                        // There is a chance that we just lost the race from state.load() to
                        // grabbing the initializer mutex. If that is the case the state must
                        // be EMPTY or READY after coming out of the lock. So we can give it
                        // one more try.
                        first_try = false;
                        continue;
                    }
                }
                (Self::STATE_UNINITIALIZED, false) => {
                    // Okay, tough luck. The legacy loader was really completely uninitialized.
                    return Err(Error::sys()).context(
                        "In check_state: Legacy loader should not be called uninitialized.",
                    );
                }
                (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
                (s, _) => panic!("Unknown legacy migrator state. {} ", s),
            }
        }
    }

    /// List all aliases for uid in the legacy database.
    pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
        let uid = match (domain, namespace) {
            (Domain::APP, namespace) => namespace as u32,
            (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
            _ => return Ok(Vec::new()),
        };
        self.do_serialized(move |state| state.list_uid(uid)).unwrap_or_else(|| Ok(Vec::new())).map(
            |v| {
                v.into_iter()
                    .map(|alias| KeyDescriptor {
                        domain,
                        nspace: namespace,
                        alias: Some(alias),
                        blob: None,
                    })
                    .collect()
            },
        )
    }

    /// Sends the given closure to the migrator thread for execution after calling check_state.
    /// Returns None if the database was empty and the request was not executed.
    /// Otherwise returns Some with the result produced by the migration request.
    /// The loader state may transition to STATE_EMPTY during the execution of this function.
    fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
    where
        F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
    {
        // Short circuit if the database is empty or not initialized (error case).
        match self.check_state().context("In do_serialized: Checking state.") {
            Ok(LegacyMigrator::STATE_EMPTY) => return None,
            Ok(LegacyMigrator::STATE_READY) => {}
            Err(e) => return Some(Err(e)),
            Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
        }

        // We have established that there may be a key in the legacy database.
        // Now we schedule a migration request.
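        // Note: the requesting thread blocks on the channel below until the async task has
        // processed the request, so migration requests are effectively serialized on the
        // async task's high-priority queue.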
        let (sender, receiver) = channel();
        self.async_task.queue_hi(move |shelf| {
            // Get the migrator state from the shelf.
            // There may not be a state. This can happen if this migration request was scheduled
            // before a previous request established that the legacy database was empty
            // and removed the state from the shelf. Since we know now that the database
            // is empty, we can return None here.
            let (new_state, result) = if let Some(legacy_migrator_state) =
                shelf.get_downcast_mut::<LegacyMigratorState>()
            {
                let result = f(legacy_migrator_state);
                (legacy_migrator_state.check_empty(), Some(result))
            } else {
                (Self::STATE_EMPTY, None)
            };

            // If the migration request determined that the database is now empty, we discard
            // the state from the shelf to free up the resources we won't need any longer.
            if result.is_some() && new_state == Self::STATE_EMPTY {
                shelf.remove_downcast_ref::<LegacyMigratorState>();
            }

            // Send the result to the requester.
            if let Err(e) = sender.send((new_state, result)) {
                log::error!("In do_serialized. Error in sending the result. {:?}", e);
            }
        });

        let (new_state, result) = match receiver.recv() {
            Err(e) => {
                return Some(Err(e).context("In do_serialized. Failed to receive from the sender."))
            }
            Ok(r) => r,
        };

        // We can only transition to EMPTY but never back.
        // The migrator never creates any legacy blobs.
        if new_state == Self::STATE_EMPTY {
            self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
        }

        result
    }

    /// Runs the key_accessor function and returns its result. If it returns an error and the
    /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
    /// the legacy database to the new database and runs the key_accessor function again if
    /// the migration request was successful.
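    ///
    /// Illustrative call pattern (a sketch; `db` and `load_key_entry` stand in for the
    /// caller's actual database accessor):
    ///
    /// ```ignore
    /// let key_entry = legacy_migrator.with_try_migrate(&key, caller_uid, || {
    ///     db.load_key_entry(&key, caller_uid)
    /// })?;
    /// ```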
    pub fn with_try_migrate<F, T>(
        &self,
        key: &KeyDescriptor,
        caller_uid: u32,
        key_accessor: F,
    ) -> Result<T>
    where
        F: Fn() -> Result<T>,
    {
        // Access the key and return on success.
        match key_accessor() {
            Ok(result) => return Ok(result),
            Err(e) => match e.root_cause().downcast_ref::<Error>() {
                Some(&Error::Rc(ResponseCode::KEY_NOT_FOUND)) => {}
                _ => return Err(e),
            },
        }

        // Filter inputs. We can only load legacy keys from the APP domain, plus a few special
        // cases that we transparently migrate into an SELINUX domain namespace (currently
        // only the Wi-Fi namespace).
        let uid = match key {
            KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
            KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
                match *nspace {
                    Self::WIFI_NAMESPACE => Self::AID_WIFI,
                    _ => {
                        return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                            .context(format!("No legacy keys for namespace {}", nspace))
                    }
                }
            }
            _ => {
                return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                    .context("No legacy keys for key descriptor.")
            }
        };

        let key_clone = key.clone();
        let result = self
            .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));

        if let Some(result) = result {
            result?;
            // After successful migration try again.
            key_accessor()
        } else {
            Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
        }
    }

    /// Calls key_accessor and returns the result on success. If the key_accessor returns
    /// Ok(None), this function makes a migration request and on success retries the key_accessor.
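    ///
    /// Illustrative call pattern (a sketch; `load_super_key_from_db` stands in for the
    /// caller's actual accessor returning `Result<Option<T>>`):
    ///
    /// ```ignore
    /// let super_key = legacy_migrator.with_try_migrate_super_key(user_id, &pw, || {
    ///     load_super_key_from_db(user_id)
    /// })?;
    /// ```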
    pub fn with_try_migrate_super_key<F, T>(
        &self,
        user_id: u32,
        pw: &Password,
        mut key_accessor: F,
    ) -> Result<Option<T>>
    where
        F: FnMut() -> Result<Option<T>>,
    {
        match key_accessor() {
            Ok(Some(result)) => return Ok(Some(result)),
            Ok(None) => {}
            Err(e) => return Err(e),
        }
        let pw = pw.try_clone().context("In with_try_migrate_super_key: Cloning password.")?;
        let result = self.do_serialized(move |migrator_state| {
            migrator_state.check_and_migrate_super_key(user_id, &pw)
        });

        if let Some(result) = result {
            result?;
            // After successful migration try again.
            key_accessor()
        } else {
            Ok(None)
        }
    }

    /// Deletes all keys belonging to the given namespace, migrating them into the database
    /// for subsequent garbage collection if necessary.
    pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
        let uid = match (domain, nspace) {
            (Domain::APP, nspace) => nspace as u32,
            (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
            // Nothing to do.
            _ => return Ok(()),
        };

        let result = self.do_serialized(move |migrator_state| {
            migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
        });

        result.unwrap_or(Ok(()))
    }

    /// Deletes all keys belonging to the given android user, migrating them into the database
    /// for subsequent garbage collection if necessary.
    pub fn bulk_delete_user(
        &self,
        user_id: u32,
        keep_non_super_encrypted_keys: bool,
    ) -> Result<()> {
        let result = self.do_serialized(move |migrator_state| {
            migrator_state
                .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
        });

        result.unwrap_or(Ok(()))
    }

    /// Queries the legacy database for the presence of a super key for the given user.
    pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
        let result =
            self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
        result.unwrap_or(Ok(false))
    }
}

impl LegacyMigratorState {
    fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
        let sec_level = if is_strongbox {
            SecurityLevel::STRONGBOX
        } else {
            SecurityLevel::TRUSTED_ENVIRONMENT
        };

        self.sec_level_to_km_uuid.get(&sec_level).copied().ok_or_else(|| {
            anyhow::anyhow!(Error::sys()).context("In get_km_uuid: No KM instance for blob.")
        })
    }

    fn list_uid(&mut self, uid: u32) -> Result<Vec<String>> {
        self.legacy_loader
            .list_keystore_entries_for_uid(uid)
            .context("In list_uid: Trying to list legacy entries.")
    }

    /// This is a key migration request that can run in the migrator thread. This should
    /// be passed to do_serialized.
    fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
        let alias = key.alias.clone().ok_or_else(|| {
            anyhow::anyhow!(Error::sys()).context(concat!(
                "In check_and_migrate: Must be Some because ",
                "our caller must not have called us otherwise."
            ))
        })?;

        if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
            return Ok(());
        }

        if key.domain == Domain::APP {
            key.nspace = uid as i64;
        }

        // If the key was not recently migrated, try to load it from the legacy database.
        let (km_blob_params, user_cert, ca_cert) = self
            .legacy_loader
            .load_by_uid_alias(uid, &alias, None)
            .context("In check_and_migrate: Trying to load legacy blob.")?;
        let result = match km_blob_params {
            Some((km_blob, params)) => {
                let is_strongbox = km_blob.is_strongbox();
                let (blob, mut blob_metadata) = match km_blob.take_value() {
                    BlobValue::Encrypted { iv, tag, data } => {
                        // Get the super key id for the user id.
                        let user_id = uid_to_android_user(uid);

                        let super_key_id = match self
                            .db
                            .load_super_key(user_id)
                            .context("In check_and_migrate: Failed to load super key")?
                        {
                            Some((_, entry)) => entry.id(),
                            None => {
                                // This might be the first time we access the super key, and it
                                // may not have been migrated yet. We cannot import the legacy
                                // super key now, because we would have to reencrypt it, which
                                // we can only do while the user is unlocked; if the user were
                                // unlocked, the super key would already have been migrated.
                                // We can check whether the legacy super key exists, though. If
                                // it does, we return Locked. Otherwise, we delete the key and
                                // return NotFound, because the key can never be unlocked again.
                                if self.legacy_loader.has_super_key(user_id) {
                                    return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
                                        "In check_and_migrate: Cannot migrate super key of this ",
                                        "key while user is locked."
                                    ));
                                } else {
                                    self.legacy_loader.remove_keystore_entry(uid, &alias).context(
                                        concat!(
                                            "In check_and_migrate: ",
                                            "Trying to remove obsolete key."
                                        ),
                                    )?;
                                    return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                                        .context("In check_and_migrate: Obsolete key.");
                                }
                            }
                        };

                        let mut blob_metadata = BlobMetaData::new();
                        blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
                        blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
                        blob_metadata
                            .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
                        (LegacyBlob::Vec(data), blob_metadata)
                    }
                    BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
                    _ => {
                        return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                            .context("In check_and_migrate: Legacy key has unexpected type.")
                    }
                };

                let km_uuid = self
                    .get_km_uuid(is_strongbox)
                    .context("In check_and_migrate: Trying to get KM UUID")?;
                blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));

                let mut metadata = KeyMetaData::new();
                let creation_date = DateTime::now()
                    .context("In check_and_migrate: Trying to make creation time.")?;
                metadata.add(KeyMetaEntry::CreationDate(creation_date));

                // Store the legacy key in the database.
                self.db
                    .store_new_key(
                        &key,
                        &params,
                        &(&blob, &blob_metadata),
                        &CertificateInfo::new(user_cert, ca_cert),
                        &metadata,
                        &km_uuid,
                    )
                    .context("In check_and_migrate.")?;
                Ok(())
            }
            None => {
                if let Some(ca_cert) = ca_cert {
                    self.db
                        .store_new_certificate(&key, &ca_cert, &KEYSTORE_UUID)
                        .context("In check_and_migrate: Failed to insert new certificate.")?;
                    Ok(())
                } else {
                    Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                        .context("In check_and_migrate: Legacy key not found.")
                }
            }
        };

        match result {
            Ok(()) => {
                // Add the key to the recently migrated list.
                self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
                // Delete the legacy key from the file system.
                self.legacy_loader
                    .remove_keystore_entry(uid, &alias)
                    .context("In check_and_migrate: Trying to remove migrated key.")?;
                Ok(())
            }
            Err(e) => Err(e),
        }
    }

    fn check_and_migrate_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
        if self.recently_migrated_super_key.contains(&user_id) {
            return Ok(());
        }

        if let Some(super_key) = self
            .legacy_loader
            .load_super_key(user_id, &pw)
            .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
        {
            let (blob, blob_metadata) =
                crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
                    .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;

            self.db.store_super_key(user_id, &(&blob, &blob_metadata)).context(concat!(
                "In check_and_migrate_super_key: ",
                "Trying to insert legacy super_key into the database."
            ))?;
            self.legacy_loader.remove_super_key(user_id);
            self.recently_migrated_super_key.insert(user_id);
            Ok(())
        } else {
            Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                .context("In check_and_migrate_super_key: No key found to migrate.")
        }
    }

    /// Key migration request to be run by do_serialized.
    /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
    fn bulk_delete(
        &mut self,
        bulk_delete_request: BulkDeleteRequest,
        keep_non_super_encrypted_keys: bool,
    ) -> Result<()> {
        let (aliases, user_id) = match bulk_delete_request {
            BulkDeleteRequest::Uid(uid) => (
                self.legacy_loader
                    .list_keystore_entries_for_uid(uid)
                    .context("In bulk_delete: Trying to get aliases for uid.")
                    .map(|aliases| {
                        let mut h = HashMap::<u32, HashSet<String>>::new();
                        h.insert(uid, aliases.into_iter().collect());
                        h
                    })?,
                uid_to_android_user(uid),
            ),
            BulkDeleteRequest::User(user_id) => (
                self.legacy_loader
                    .list_keystore_entries_for_user(user_id)
                    .context("In bulk_delete: Trying to get aliases for user_id.")?,
                user_id,
            ),
        };

        let super_key_id = self
            .db
            .load_super_key(user_id)
            .context("In bulk_delete: Failed to load super key")?
            .map(|(_, entry)| entry.id());

        for (uid, alias) in aliases
            .into_iter()
            .map(|(uid, aliases)| aliases.into_iter().map(move |alias| (uid, alias)))
            .flatten()
        {
            let (km_blob_params, _, _) = self
                .legacy_loader
                .load_by_uid_alias(uid, &alias, None)
                .context("In bulk_delete: Trying to load legacy blob.")?;

            // Determine if the key needs special handling to be deleted.
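            // Keys marked rollback resistant have to be deleted through the KeyMint instance
            // that created them, so their blobs are handed to the database as deleted blobs
            // below, where the garbage collector picks them up; simply removing the file is
            // not enough for such keys.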
            let (need_gc, is_super_encrypted) = km_blob_params
                .as_ref()
                .map(|(blob, params)| {
                    (
                        params.iter().any(|kp| {
                            KeyParameterValue::RollbackResistance == *kp.key_parameter_value()
                        }),
                        blob.is_encrypted(),
                    )
                })
                .unwrap_or((false, false));

            if keep_non_super_encrypted_keys && !is_super_encrypted {
                continue;
            }

            if need_gc {
                let mark_deleted = match km_blob_params
                    .map(|(blob, _)| (blob.is_strongbox(), blob.take_value()))
                {
                    Some((is_strongbox, BlobValue::Encrypted { iv, tag, data })) => {
                        let mut blob_metadata = BlobMetaData::new();
                        if let (Ok(km_uuid), Some(super_key_id)) =
                            (self.get_km_uuid(is_strongbox), super_key_id)
                        {
                            blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
                            blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
                            blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
                            blob_metadata
                                .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
                            Some((LegacyBlob::Vec(data), blob_metadata))
                        } else {
                            // Oh well - we tried our best, but if we cannot determine which
                            // KeyMint instance we have to send this blob to, we cannot
                            // do more than delete the key from the file system.
                            // And if we don't know which key wraps this key we cannot
                            // unwrap it for KeyMint either.
                            None
                        }
                    }
                    Some((_, BlobValue::Decrypted(data))) => {
                        Some((LegacyBlob::ZVec(data), BlobMetaData::new()))
                    }
                    _ => None,
                };

                if let Some((blob, blob_metadata)) = mark_deleted {
                    self.db.set_deleted_blob(&blob, &blob_metadata).context(concat!(
                        "In bulk_delete: Trying to insert deleted ",
                        "blob into the database for garbage collection."
                    ))?;
                }
            }

            self.legacy_loader
                .remove_keystore_entry(uid, &alias)
                .context("In bulk_delete: Trying to remove migrated key.")?;
        }
        Ok(())
    }

    fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
        Ok(self.recently_migrated_super_key.contains(&user_id)
            || self.legacy_loader.has_super_key(user_id))
    }

    fn check_empty(&self) -> u8 {
        if self.legacy_loader.is_empty().unwrap_or(false) {
            LegacyMigrator::STATE_EMPTY
        } else {
            LegacyMigrator::STATE_READY
        }
    }
}

enum LegacyBlob {
    Vec(Vec<u8>),
    ZVec(ZVec),
}

impl Deref for LegacyBlob {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        match self {
            Self::Vec(v) => &v,
            Self::ZVec(v) => &v,
        }
    }
}