// Copyright 2021, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This module acts as a bridge between the legacy key database and the keystore2 database.

use crate::key_parameter::KeyParameterValue;
use crate::legacy_blob::BlobValue;
use crate::utils::{uid_to_android_user, watchdog as wd};
use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
use crate::{database::KeyType, error::Error};
use crate::{
    database::{
        BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData,
        KeyMetaEntry, KeystoreDB, Uuid, KEYSTORE_UUID,
    },
    super_key::USER_SUPER_KEY,
};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
use android_system_keystore2::aidl::android::system::keystore2::{
    Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
};
use anyhow::{Context, Result};
use core::ops::Deref;
use keystore2_crypto::{Password, ZVec};
use std::collections::{HashMap, HashSet};
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};

/// Migrates keys from the legacy key database into the Keystore 2.0 database on demand.
pub struct LegacyMigrator {
    async_task: Arc<AsyncTask>,
    initializer: Mutex<
        Option<
            Box<
                dyn FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
                    + Send
                    + 'static,
            >,
        >,
    >,
    /// This atomic is used for cheap interior mutability. It is intended to prevent
    /// expensive calls into the legacy migrator when the legacy database is empty.
    /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
    /// of time. This is tolerable because it keeps the common case cheap.
    state: AtomicU8,
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct RecentMigration {
    uid: u32,
    alias: String,
}

impl RecentMigration {
    fn new(uid: u32, alias: String) -> Self {
        Self { uid, alias }
    }
}

enum BulkDeleteRequest {
    Uid(u32),
    User(u32),
}

struct LegacyMigratorState {
    recently_migrated: HashSet<RecentMigration>,
    recently_migrated_super_key: HashSet<u32>,
    legacy_loader: Arc<LegacyBlobLoader>,
    sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
    db: KeystoreDB,
}

impl LegacyMigrator {
    const WIFI_NAMESPACE: i64 = 102;
    const AID_WIFI: u32 = 1010;

    const STATE_UNINITIALIZED: u8 = 0;
    const STATE_READY: u8 = 1;
    const STATE_EMPTY: u8 = 2;

    /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
    /// worker.
    pub fn new(async_task: Arc<AsyncTask>) -> Self {
        Self {
            async_task,
            initializer: Default::default(),
            state: AtomicU8::new(Self::STATE_UNINITIALIZED),
        }
    }

    /// The legacy migrator must be initialized lazily, because keystore starts very early,
    /// when the data partition may not yet be mounted. So we cannot open database connections
    /// until we get actual key load requests. This sets the function that the legacy migrator
    /// uses to connect to the database.
    pub fn set_init<F>(&self, f_init: F) -> Result<()>
    where
        F: FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
            + Send
            + 'static,
    {
        let mut initializer = self.initializer.lock().expect("Failed to lock initializer.");

        // If we are not uninitialized we have no business setting the initializer.
        if self.state.load(Ordering::Relaxed) != Self::STATE_UNINITIALIZED {
            return Ok(());
        }

        // Only set the initializer if it hasn't been set before.
        if initializer.is_none() {
            *initializer = Some(Box::new(f_init))
        }

        Ok(())
    }
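
    // A minimal wiring sketch (illustrative only; `open_keystore_db`,
    // `query_km_uuids_by_sec_level`, and `open_legacy_loader` are hypothetical placeholders,
    // not keystore2 APIs, and `async_task: Arc<AsyncTask>` is assumed to be provided by the
    // caller). The closure only runs once the first key load request arrives, after the data
    // partition is available:
    //
    //     let migrator = LegacyMigrator::new(async_task.clone());
    //     migrator.set_init(|| {
    //         let db = open_keystore_db();
    //         let sec_level_to_km_uuid = query_km_uuids_by_sec_level();
    //         let legacy_loader = Arc::new(open_legacy_loader());
    //         (db, sec_level_to_km_uuid, legacy_loader)
    //     })?;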

    /// This function is called by the migration requestor to check if it is worth
    /// making a migration request. It also transitions the state from UNINITIALIZED
    /// to READY or EMPTY on first use. The deferred initialization is necessary because
    /// Keystore 2.0 runs early during boot, when the data partition may not yet be mounted.
    /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
    /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
    /// was not initialized and cannot be initialized.
    fn check_state(&self) -> Result<u8> {
        let mut first_try = true;
        loop {
            match (self.state.load(Ordering::Relaxed), first_try) {
                (Self::STATE_EMPTY, _) => {
                    return Ok(Self::STATE_EMPTY);
                }
                (Self::STATE_UNINITIALIZED, true) => {
                    // If we find the legacy loader uninitialized, we grab the initializer lock,
                    // check if the legacy database is empty, and if not, schedule an initialization
                    // request. Coming out of the initializer lock, the state is either EMPTY or
                    // READY.
                    let mut initializer = self.initializer.lock().unwrap();

                    if let Some(initializer) = initializer.take() {
                        let (db, sec_level_to_km_uuid, legacy_loader) = (initializer)();

                        if legacy_loader.is_empty().context(
                            "In check_state: Trying to check if the legacy database is empty.",
                        )? {
                            self.state.store(Self::STATE_EMPTY, Ordering::Relaxed);
                            return Ok(Self::STATE_EMPTY);
                        }

                        self.async_task.queue_hi(move |shelf| {
                            shelf.get_or_put_with(|| LegacyMigratorState {
                                recently_migrated: Default::default(),
                                recently_migrated_super_key: Default::default(),
                                legacy_loader,
                                sec_level_to_km_uuid,
                                db,
                            });
                        });

                        // It is safe to set this here even though the async task may not yet have
                        // run because any thread observing this will not be able to schedule a
                        // task that can run before the initialization.
                        // Also we can only transition out of this state while having the
                        // initializer lock and having found an initializer.
                        self.state.store(Self::STATE_READY, Ordering::Relaxed);
                        return Ok(Self::STATE_READY);
                    } else {
                        // There is a chance that we just lost the race from state.load() to
                        // grabbing the initializer mutex. If that is the case the state must
                        // be EMPTY or READY after coming out of the lock. So we can give it
                        // one more try.
                        first_try = false;
                        continue;
                    }
                }
                (Self::STATE_UNINITIALIZED, false) => {
                    // Okay, tough luck. The legacy loader was really completely uninitialized.
                    return Err(Error::sys()).context(
                        "In check_state: Legacy loader should not be called uninitialized.",
                    );
                }
                (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
                (s, _) => panic!("Unknown legacy migrator state. {} ", s),
            }
        }
    }

    /// List all aliases for uid in the legacy database.
    pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
        let _wp = wd::watch_millis("LegacyMigrator::list_uid", 500);

        let uid = match (domain, namespace) {
            (Domain::APP, namespace) => namespace as u32,
            (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
            _ => return Ok(Vec::new()),
        };
        self.do_serialized(move |state| state.list_uid(uid)).unwrap_or_else(|| Ok(Vec::new())).map(
            |v| {
                v.into_iter()
                    .map(|alias| KeyDescriptor {
                        domain,
                        nspace: namespace,
                        alias: Some(alias),
                        blob: None,
                    })
                    .collect()
            },
        )
    }

    /// Sends the given closure to the migrator thread for execution after calling check_state.
    /// Returns None if the database was empty and the request was not executed.
    /// Otherwise returns Some with the result produced by the migration request.
    /// The loader state may transition to STATE_EMPTY during the execution of this function.
    fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
    where
        F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
    {
        // Short circuit if the database is empty or not initialized (error case).
        match self.check_state().context("In do_serialized: Checking state.") {
            Ok(LegacyMigrator::STATE_EMPTY) => return None,
            Ok(LegacyMigrator::STATE_READY) => {}
            Err(e) => return Some(Err(e)),
            Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
        }

        // We have established that there may be a key in the legacy database.
        // Now we schedule a migration request.
        let (sender, receiver) = channel();
        self.async_task.queue_hi(move |shelf| {
            // Get the migrator state from the shelf.
            // There may not be a state. This can happen if this migration request was scheduled
            // before a previous request established that the legacy database was empty
            // and removed the state from the shelf. Since we know now that the database
            // is empty, we can return None here.
            let (new_state, result) = if let Some(legacy_migrator_state) =
                shelf.get_downcast_mut::<LegacyMigratorState>()
            {
                let result = f(legacy_migrator_state);
                (legacy_migrator_state.check_empty(), Some(result))
            } else {
                (Self::STATE_EMPTY, None)
            };

            // If the migration request determined that the database is now empty, we discard
            // the state from the shelf to free up the resources we won't need any longer.
            if result.is_some() && new_state == Self::STATE_EMPTY {
                shelf.remove_downcast_ref::<LegacyMigratorState>();
            }

            // Send the result to the requester.
            if let Err(e) = sender.send((new_state, result)) {
                log::error!("In do_serialized. Error in sending the result. {:?}", e);
            }
        });

        let (new_state, result) = match receiver.recv() {
            Err(e) => {
                return Some(Err(e).context("In do_serialized. Failed to receive from the sender."))
            }
            Ok(r) => r,
        };

        // We can only transition to EMPTY but never back.
        // The migrator never creates any legacy blobs.
        if new_state == Self::STATE_EMPTY {
            self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
        }

        result
    }
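
    // A self-contained sketch of the request/response pattern used above (std-only and
    // illustrative; it spawns a fresh thread instead of queuing on the shared AsyncTask
    // worker, and is not keystore2 code): the caller hands a closure to a worker and blocks
    // on an mpsc channel until the worker reports the result back.
    //
    //     use std::sync::mpsc::channel;
    //     use std::thread;
    //
    //     fn run_serialized<T: Send + 'static>(f: impl FnOnce() -> T + Send + 'static) -> T {
    //         let (sender, receiver) = channel();
    //         thread::spawn(move || {
    //             // The worker executes the request and sends the result back.
    //             let _ = sender.send(f());
    //         });
    //         receiver.recv().expect("worker dropped the sender")
    //     }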

    /// Runs the key_accessor function and returns its result. If it returns an error and the
    /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
    /// the legacy database to the new database and runs the key_accessor function again if
    /// the migration request was successful.
    pub fn with_try_migrate<F, T>(
        &self,
        key: &KeyDescriptor,
        caller_uid: u32,
        key_accessor: F,
    ) -> Result<T>
    where
        F: Fn() -> Result<T>,
    {
        let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate", 500);

        // Access the key and return on success.
        match key_accessor() {
            Ok(result) => return Ok(result),
            Err(e) => match e.root_cause().downcast_ref::<Error>() {
                Some(&Error::Rc(ResponseCode::KEY_NOT_FOUND)) => {}
                _ => return Err(e),
            },
        }

        // Filter inputs. We can only load legacy APP domain keys, plus a few special cases
        // (currently Wi-Fi) that we transparently migrate into an SELINUX domain.
        let uid = match key {
            KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
            KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
                match *nspace {
                    Self::WIFI_NAMESPACE => Self::AID_WIFI,
                    _ => {
                        return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                            .context(format!("No legacy keys for namespace {}", nspace))
                    }
                }
            }
            _ => {
                return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                    .context("No legacy keys for key descriptor.")
            }
        };

        let key_clone = key.clone();
        let result = self
            .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));

        if let Some(result) = result {
            result?;
            // After successful migration try again.
            key_accessor()
        } else {
            Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
        }
    }
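
    // Illustrative call pattern (hypothetical; `load_key_entry_from_db` stands in for the
    // caller's own Keystore 2.0 database lookup and is not part of this module):
    //
    //     let entry = legacy_migrator.with_try_migrate(&key, caller_uid, || {
    //         // The first attempt hits the Keystore 2.0 database. If the root cause of the
    //         // failure is KEY_NOT_FOUND, the key is migrated and the closure is retried.
    //         load_key_entry_from_db(&key, caller_uid)
    //     })?;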

    /// Calls key_accessor and returns the result on success. If key_accessor returns Ok(None),
    /// this function makes a migration request and, on success, retries the key_accessor.
    pub fn with_try_migrate_super_key<F, T>(
        &self,
        user_id: u32,
        pw: &Password,
        mut key_accessor: F,
    ) -> Result<Option<T>>
    where
        F: FnMut() -> Result<Option<T>>,
    {
        let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate_super_key", 500);

        match key_accessor() {
            Ok(Some(result)) => return Ok(Some(result)),
            Ok(None) => {}
            Err(e) => return Err(e),
        }
        let pw = pw.try_clone().context("In with_try_migrate_super_key: Cloning password.")?;
        let result = self.do_serialized(move |migrator_state| {
            migrator_state.check_and_migrate_super_key(user_id, &pw)
        });

        if let Some(result) = result {
            result?;
            // After successful migration try again.
            key_accessor()
        } else {
            Ok(None)
        }
    }
369
Janis Danisevskisddd6e752021-02-22 18:46:55 -0800370 /// Deletes all keys belonging to the given namespace, migrating them into the database
Janis Danisevskiseed69842021-02-18 20:04:10 -0800371 /// for subsequent garbage collection if necessary.
Janis Danisevskisddd6e752021-02-22 18:46:55 -0800372 pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
Janis Danisevskis850d4862021-05-05 08:41:14 -0700373 let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_uid", 500);
374
Janis Danisevskisddd6e752021-02-22 18:46:55 -0800375 let uid = match (domain, nspace) {
376 (Domain::APP, nspace) => nspace as u32,
377 (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
378 // Nothing to do.
379 _ => return Ok(()),
380 };
381
Janis Danisevskiseed69842021-02-18 20:04:10 -0800382 let result = self.do_serialized(move |migrator_state| {
Janis Danisevskisddd6e752021-02-22 18:46:55 -0800383 migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
Janis Danisevskiseed69842021-02-18 20:04:10 -0800384 });
385
386 result.unwrap_or(Ok(()))
387 }
388
389 /// Deletes all keys belonging to the given android user, migrating them into the database
390 /// for subsequent garbage collection if necessary.
391 pub fn bulk_delete_user(
392 &self,
393 user_id: u32,
394 keep_non_super_encrypted_keys: bool,
395 ) -> Result<()> {
Janis Danisevskis850d4862021-05-05 08:41:14 -0700396 let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_user", 500);
397
Janis Danisevskiseed69842021-02-18 20:04:10 -0800398 let result = self.do_serialized(move |migrator_state| {
399 migrator_state
400 .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
401 });
402
403 result.unwrap_or(Ok(()))
404 }
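
    // Hypothetical call sites (the surrounding service logic is not shown in this module):
    //
    //     // Drop all legacy keys of a single app namespace, e.g. after an uninstall:
    //     legacy_migrator.bulk_delete_uid(Domain::APP, uid as i64)?;
    //
    //     // Drop a user's legacy keys; `true` keeps keys that are not super encrypted:
    //     legacy_migrator.bulk_delete_user(user_id, true)?;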

    /// Queries the legacy database for the presence of a super key for the given user.
    pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
        let result =
            self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
        result.unwrap_or(Ok(false))
    }
}

impl LegacyMigratorState {
    fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
        let sec_level = if is_strongbox {
            SecurityLevel::STRONGBOX
        } else {
            SecurityLevel::TRUSTED_ENVIRONMENT
        };

        self.sec_level_to_km_uuid.get(&sec_level).copied().ok_or_else(|| {
            anyhow::anyhow!(Error::sys()).context("In get_km_uuid: No KM instance for blob.")
        })
    }

    fn list_uid(&mut self, uid: u32) -> Result<Vec<String>> {
        self.legacy_loader
            .list_keystore_entries_for_uid(uid)
            .context("In list_uid: Trying to list legacy entries.")
    }

    /// This is a key migration request that must run in the migrator thread. This must
    /// be passed to do_serialized.
    fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
        let alias = key.alias.clone().ok_or_else(|| {
            anyhow::anyhow!(Error::sys()).context(concat!(
                "In check_and_migrate: Must be Some because ",
                "our caller must not have called us otherwise."
            ))
        })?;

        if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
            return Ok(());
        }

        if key.domain == Domain::APP {
            key.nspace = uid as i64;
        }

        // If the key is not found in the cache, try to load from the legacy database.
        let (km_blob_params, user_cert, ca_cert) = self
            .legacy_loader
            .load_by_uid_alias(uid, &alias, None)
            .context("In check_and_migrate: Trying to load legacy blob.")?;
        let result = match km_blob_params {
            Some((km_blob, params)) => {
                let is_strongbox = km_blob.is_strongbox();
                let (blob, mut blob_metadata) = match km_blob.take_value() {
                    BlobValue::Encrypted { iv, tag, data } => {
                        // Get super key id for user id.
                        let user_id = uid_to_android_user(uid as u32);

                        let super_key_id = match self
                            .db
                            .load_super_key(&USER_SUPER_KEY, user_id)
                            .context("In check_and_migrate: Failed to load super key")?
                        {
                            Some((_, entry)) => entry.id(),
                            None => {
                                // This might be the first time we access the super key,
                                // and it may not have been migrated yet. We cannot import
                                // the legacy super key now, because that requires
                                // re-encrypting it, which we can only do while the user is
                                // unlocked; and if the user were unlocked, the super key
                                // would already have been migrated. We can, however, check
                                // whether the key exists. If it does, we return LOCKED.
                                // Otherwise, we delete the key and return KEY_NOT_FOUND,
                                // because the key can never be unlocked again.
                                if self.legacy_loader.has_super_key(user_id) {
                                    return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
                                        "In check_and_migrate: Cannot migrate super key of this ",
                                        "key while user is locked."
                                    ));
                                } else {
                                    self.legacy_loader.remove_keystore_entry(uid, &alias).context(
                                        concat!(
                                            "In check_and_migrate: ",
                                            "Trying to remove obsolete key."
                                        ),
                                    )?;
                                    return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                                        .context("In check_and_migrate: Obsolete key.");
                                }
                            }
                        };

                        let mut blob_metadata = BlobMetaData::new();
                        blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
                        blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
                        blob_metadata
                            .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
                        (LegacyBlob::Vec(data), blob_metadata)
                    }
                    BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
                    _ => {
                        return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                            .context("In check_and_migrate: Legacy key has unexpected type.")
                    }
                };

                let km_uuid = self
                    .get_km_uuid(is_strongbox)
                    .context("In check_and_migrate: Trying to get KM UUID")?;
                blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));

                let mut metadata = KeyMetaData::new();
                let creation_date = DateTime::now()
                    .context("In check_and_migrate: Trying to make creation time.")?;
                metadata.add(KeyMetaEntry::CreationDate(creation_date));

                // Store legacy key in the database.
                self.db
                    .store_new_key(
                        &key,
                        KeyType::Client,
                        &params,
                        &(&blob, &blob_metadata),
                        &CertificateInfo::new(user_cert, ca_cert),
                        &metadata,
                        &km_uuid,
                    )
                    .context("In check_and_migrate.")?;
                Ok(())
            }
            None => {
                if let Some(ca_cert) = ca_cert {
                    self.db
                        .store_new_certificate(&key, KeyType::Client, &ca_cert, &KEYSTORE_UUID)
                        .context("In check_and_migrate: Failed to insert new certificate.")?;
                    Ok(())
                } else {
                    Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                        .context("In check_and_migrate: Legacy key not found.")
                }
            }
        };

        match result {
            Ok(()) => {
                // Add the key to the migrated_keys list.
                self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
                // Delete legacy key from the file system
                self.legacy_loader
                    .remove_keystore_entry(uid, &alias)
                    .context("In check_and_migrate: Trying to remove migrated key.")?;
                Ok(())
            }
            Err(e) => Err(e),
        }
    }

    fn check_and_migrate_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
        if self.recently_migrated_super_key.contains(&user_id) {
            return Ok(());
        }

        if let Some(super_key) = self
            .legacy_loader
            .load_super_key(user_id, pw)
            .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
        {
            let (blob, blob_metadata) =
                crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
                    .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;

            self.db
                .store_super_key(
                    user_id,
                    &USER_SUPER_KEY,
                    &blob,
                    &blob_metadata,
                    &KeyMetaData::new(),
                )
                .context(concat!(
                    "In check_and_migrate_super_key: ",
                    "Trying to insert legacy super_key into the database."
                ))?;
            self.legacy_loader.remove_super_key(user_id);
            self.recently_migrated_super_key.insert(user_id);
            Ok(())
        } else {
            Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                .context("In check_and_migrate_super_key: No key found to migrate.")
        }
    }

    /// Key migrator request to be run by do_serialized.
    /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
    fn bulk_delete(
        &mut self,
        bulk_delete_request: BulkDeleteRequest,
        keep_non_super_encrypted_keys: bool,
    ) -> Result<()> {
        let (aliases, user_id) = match bulk_delete_request {
            BulkDeleteRequest::Uid(uid) => (
                self.legacy_loader
                    .list_keystore_entries_for_uid(uid)
                    .context("In bulk_delete: Trying to get aliases for uid.")
                    .map(|aliases| {
                        let mut h = HashMap::<u32, HashSet<String>>::new();
                        h.insert(uid, aliases.into_iter().collect());
                        h
                    })?,
                uid_to_android_user(uid),
            ),
            BulkDeleteRequest::User(user_id) => (
                self.legacy_loader
                    .list_keystore_entries_for_user(user_id)
                    .context("In bulk_delete: Trying to get aliases for user_id.")?,
                user_id,
            ),
        };

        let super_key_id = self
            .db
            .load_super_key(&USER_SUPER_KEY, user_id)
            .context("In bulk_delete: Failed to load super key")?
            .map(|(_, entry)| entry.id());

        for (uid, alias) in aliases
            .into_iter()
            .map(|(uid, aliases)| aliases.into_iter().map(move |alias| (uid, alias)))
            .flatten()
        {
            let (km_blob_params, _, _) = self
                .legacy_loader
                .load_by_uid_alias(uid, &alias, None)
                .context("In bulk_delete: Trying to load legacy blob.")?;

            // Determine if the key needs special handling to be deleted.
            let (need_gc, is_super_encrypted) = km_blob_params
                .as_ref()
                .map(|(blob, params)| {
                    (
                        params.iter().any(|kp| {
                            KeyParameterValue::RollbackResistance == *kp.key_parameter_value()
                        }),
                        blob.is_encrypted(),
                    )
                })
                .unwrap_or((false, false));

            if keep_non_super_encrypted_keys && !is_super_encrypted {
                continue;
            }

            if need_gc {
                let mark_deleted = match km_blob_params
                    .map(|(blob, _)| (blob.is_strongbox(), blob.take_value()))
                {
                    Some((is_strongbox, BlobValue::Encrypted { iv, tag, data })) => {
                        let mut blob_metadata = BlobMetaData::new();
                        if let (Ok(km_uuid), Some(super_key_id)) =
                            (self.get_km_uuid(is_strongbox), super_key_id)
                        {
                            blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
                            blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
                            blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
                            blob_metadata
                                .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
                            Some((LegacyBlob::Vec(data), blob_metadata))
                        } else {
                            // Oh well - we tried our best, but if we cannot determine which
                            // KeyMint instance we have to send this blob to, we cannot
                            // do more than delete the key from the file system.
                            // And if we don't know which key wraps this key we cannot
                            // unwrap it for KeyMint either.
                            None
                        }
                    }
                    Some((_, BlobValue::Decrypted(data))) => {
                        Some((LegacyBlob::ZVec(data), BlobMetaData::new()))
                    }
                    _ => None,
                };

                if let Some((blob, blob_metadata)) = mark_deleted {
                    self.db.set_deleted_blob(&blob, &blob_metadata).context(concat!(
                        "In bulk_delete: Trying to insert deleted ",
                        "blob into the database for garbage collection."
                    ))?;
                }
            }

            self.legacy_loader
                .remove_keystore_entry(uid, &alias)
                .context("In bulk_delete: Trying to remove migrated key.")?;
        }
        Ok(())
    }

    fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
        Ok(self.recently_migrated_super_key.contains(&user_id)
            || self.legacy_loader.has_super_key(user_id))
    }

    fn check_empty(&self) -> u8 {
        if self.legacy_loader.is_empty().unwrap_or(false) {
            LegacyMigrator::STATE_EMPTY
        } else {
            LegacyMigrator::STATE_READY
        }
    }
}

enum LegacyBlob {
    Vec(Vec<u8>),
    ZVec(ZVec),
}

impl Deref for LegacyBlob {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        match self {
            Self::Vec(v) => v,
            Self::ZVec(v) => v,
        }
    }
}
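
// Because LegacyBlob derefs to [u8], both variants can be handed to code expecting a byte
// slice without copying. A minimal illustration (hypothetical helper, not part of this
// module):
//
//     fn blob_len(blob: &LegacyBlob) -> usize {
//         let bytes: &[u8] = blob; // deref coercion from &LegacyBlob to &[u8]
//         bytes.len()
//     }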