Merge "Keystore 2.0: Build java sources for AIDL interfaces."
diff --git a/OWNERS b/OWNERS
index fca66f8..7d1fd14 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,4 +1,5 @@
 swillden@google.com
 cbrubaker@google.com
 jdanis@google.com
-kroot@google.com
\ No newline at end of file
+kroot@google.com
+zeuthen@google.com
\ No newline at end of file
diff --git a/identity/Android.bp b/identity/Android.bp
index 5841bf6..d66f4ec 100644
--- a/identity/Android.bp
+++ b/identity/Android.bp
@@ -40,7 +40,7 @@
         "libbase",
         "libbinder",
         "libbinder_ndk",
-        "libkeystore_aidl",
+        "android.hardware.keymaster@4.0",
         "libcredstore_aidl",
         "libutils",
         "libhidlbase",
@@ -49,7 +49,6 @@
         "libkeystore-attestation-application-id",
         "android.hardware.security.keymint-V1-ndk_platform",
         "android.security.authorization-ndk_platform",
-        "PlatformProperties",
     ],
     static_libs: [
         "android.hardware.identity-V3-cpp",
diff --git a/identity/Credential.cpp b/identity/Credential.cpp
index 183ddeb..2e6b9c1 100644
--- a/identity/Credential.cpp
+++ b/identity/Credential.cpp
@@ -22,8 +22,6 @@
 
 #include <android/security/identity/ICredentialStore.h>
 
-#include <android/security/keystore/BnCredstoreTokenCallback.h>
-#include <android/security/keystore/IKeystoreService.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <keymasterV4_0/keymaster_utils.h>
@@ -37,7 +35,6 @@
 #include <aidl/android/hardware/security/secureclock/TimeStampToken.h>
 #include <aidl/android/security/authorization/AuthorizationTokens.h>
 #include <aidl/android/security/authorization/IKeystoreAuthorization.h>
-#include <android/sysprop/Keystore2Properties.sysprop.h>
 
 #include "Credential.h"
 #include "CredentialData.h"
@@ -52,9 +49,6 @@
 using std::promise;
 using std::tuple;
 
-using android::security::keystore::IKeystoreService;
-using namespace android::sysprop;
-
 using ::android::hardware::identity::IWritableIdentityCredential;
 
 using ::android::hardware::identity::support::ecKeyPairGetPkcs12;
@@ -168,83 +162,6 @@
     return true;
 }
 
-class CredstoreTokenCallback : public android::security::keystore::BnCredstoreTokenCallback,
-                               public promise<tuple<bool, vector<uint8_t>, vector<uint8_t>>> {
-  public:
-    CredstoreTokenCallback() {}
-    virtual Status onFinished(bool success, const vector<uint8_t>& authToken,
-                              const vector<uint8_t>& verificationToken) override {
-        this->set_value({success, authToken, verificationToken});
-        return Status::ok();
-    }
-};
-
-// Returns false if an error occurred communicating with keystore.
-//
-bool getTokensFromKeystore1(uint64_t challenge, uint64_t secureUserId,
-                            unsigned int authTokenMaxAgeMillis,
-                            AidlHardwareAuthToken& aidlAuthToken,
-                            AidlVerificationToken& aidlVerificationToken) {
-    sp<IServiceManager> sm = defaultServiceManager();
-    sp<IBinder> binder = sm->getService(String16("android.security.keystore"));
-    sp<IKeystoreService> keystore = interface_cast<IKeystoreService>(binder);
-    if (keystore == nullptr) {
-        return false;
-    }
-
-    sp<CredstoreTokenCallback> callback = new CredstoreTokenCallback();
-    auto future = callback->get_future();
-
-    Status status =
-        keystore->getTokensForCredstore(challenge, secureUserId, authTokenMaxAgeMillis, callback);
-    if (!status.isOk()) {
-        return false;
-    }
-
-    auto fstatus = future.wait_for(std::chrono::milliseconds(5000));
-    if (fstatus != std::future_status::ready) {
-        LOG(ERROR) << "Waited 5 seconds for tokens from keystore, aborting";
-        return false;
-    }
-    auto [success, returnedAuthToken, returnedVerificationToken] = future.get();
-    if (!success) {
-        LOG(ERROR) << "Error getting tokens from keystore1";
-        return false;
-    }
-    // It's entirely possible getTokensFromKeystore() succeeded but didn't
-    // return any tokens (in which case the returned byte-vectors are
-    // empty). For example, this can happen if no auth token is available
-    // which satifies e.g. |authTokenMaxAgeMillis|.
-    //
-    if (returnedAuthToken.size() > 0) {
-        HardwareAuthToken authToken =
-            android::hardware::keymaster::V4_0::support::hidlVec2AuthToken(returnedAuthToken);
-        // Convert from HIDL to AIDL...
-        aidlAuthToken.challenge = int64_t(authToken.challenge);
-        aidlAuthToken.userId = int64_t(authToken.userId);
-        aidlAuthToken.authenticatorId = int64_t(authToken.authenticatorId);
-        aidlAuthToken.authenticatorType = ::android::hardware::keymaster::HardwareAuthenticatorType(
-            int32_t(authToken.authenticatorType));
-        aidlAuthToken.timestamp.milliSeconds = int64_t(authToken.timestamp);
-        aidlAuthToken.mac = authToken.mac;
-    }
-
-    if (returnedVerificationToken.size() > 0) {
-        optional<VerificationToken> token =
-            android::hardware::keymaster::V4_0::support::deserializeVerificationToken(
-                returnedVerificationToken);
-        if (!token) {
-            LOG(ERROR) << "Error deserializing verification token";
-        }
-        aidlVerificationToken.challenge = token->challenge;
-        aidlVerificationToken.timestamp.milliSeconds = token->timestamp;
-        aidlVerificationToken.securityLevel =
-            ::android::hardware::keymaster::SecurityLevel(token->securityLevel);
-        aidlVerificationToken.mac = token->mac;
-    }
-    return true;
-}
-
 // Returns false if an error occurred communicating with keystore.
 //
 bool getTokensFromKeystore2(uint64_t challenge, uint64_t secureUserId,
@@ -286,8 +203,8 @@
                 LOG(ERROR) << "Error getting tokens from keystore2: " << result.getDescription();
                 return false;
             } else {
-                // Log the reason for not finding auth tokens.
-                LOG(INFO) << "Auth tokens not found: " << result.getDescription();
+                // Log the reason for not receiving auth tokens from keystore2.
+                LOG(INFO) << "Auth tokens were not received due to: " << result.getDescription();
             }
         }
         return true;
@@ -429,23 +346,11 @@
         // not a guarantee and it's also not required.
         //
 
-        auto keystore2_status = Keystore2Properties::keystore2_enabled();
-        if (keystore2_status.has_value() && keystore2_status.value()) {
-            if (!getTokensFromKeystore2(selectedChallenge_, data->getSecureUserId(),
-                                        authTokenMaxAgeMillis, aidlAuthToken,
-                                        aidlVerificationToken)) {
-                LOG(ERROR) << "Error getting tokens from keystore2";
-                return Status::fromServiceSpecificError(ICredentialStore::ERROR_GENERIC,
-                                                        "Error getting tokens from keystore2");
-            }
-        } else {
-            if (!getTokensFromKeystore1(selectedChallenge_, data->getSecureUserId(),
-                                        authTokenMaxAgeMillis, aidlAuthToken,
-                                        aidlVerificationToken)) {
-                LOG(ERROR) << "Error getting tokens from keystore";
-                return Status::fromServiceSpecificError(ICredentialStore::ERROR_GENERIC,
-                                                        "Error getting tokens from keystore");
-            }
+        if (!getTokensFromKeystore2(selectedChallenge_, data->getSecureUserId(),
+                                    authTokenMaxAgeMillis, aidlAuthToken, aidlVerificationToken)) {
+            LOG(ERROR) << "Error getting tokens from keystore2";
+            return Status::fromServiceSpecificError(ICredentialStore::ERROR_GENERIC,
+                                                    "Error getting tokens from keystore2");
         }
     }
 
diff --git a/identity/main.cpp b/identity/main.cpp
index b51bd28..08f2219 100644
--- a/identity/main.cpp
+++ b/identity/main.cpp
@@ -53,10 +53,5 @@
     CHECK(ret == ::android::OK) << "Couldn't register binder service";
     LOG(ERROR) << "Registered binder service";
 
-    // This is needed for binder callbacks from keystore on a ICredstoreTokenCallback binder.
-    android::ProcessState::self()->startThreadPool();
-
-    IPCThreadState::self()->joinThreadPool();
-
     return 0;
 }
diff --git a/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
index 0d4c30f..5c2d0b1 100644
--- a/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
+++ b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
@@ -133,4 +133,13 @@
      * @return The array of security levels.
      */
      SecurityLevel[] getSecurityLevels();
+
+    /**
+     * This method deletes all remotely provisioned attestation keys in the database, regardless
+     * of which stage of their life cycle they are in. This is primarily useful to facilitate
+     * testing.
+     *
+     * @return Number of keys deleted
+     */
+    long deleteAllKeys();
 }
diff --git a/keystore2/keystore2.rc b/keystore2/keystore2.rc
index 2d1f05a..82bf3b8 100644
--- a/keystore2/keystore2.rc
+++ b/keystore2/keystore2.rc
@@ -6,14 +6,8 @@
 #
 # See system/core/init/README.md for information on the init.rc language.
 
-# Start Keystore 2 conditionally
-# TODO b/171563717 Remove when Keystore 2 migration is complete.
-on property:persist.android.security.keystore2.enable=true
-    enable keystore2
-
 service keystore2 /system/bin/keystore2 /data/misc/keystore
     class early_hal
     user keystore
     group keystore readproc log
     writepid /dev/cpuset/foreground/tasks
-    disabled
diff --git a/keystore2/selinux/src/lib.rs b/keystore2/selinux/src/lib.rs
index 2b5091d..cc707e7 100644
--- a/keystore2/selinux/src/lib.rs
+++ b/keystore2/selinux/src/lib.rs
@@ -455,7 +455,6 @@
 
         check_keystore_perm!(add_auth);
         check_keystore_perm!(clear_ns);
-        check_keystore_perm!(get_state);
         check_keystore_perm!(lock);
         check_keystore_perm!(reset);
         check_keystore_perm!(unlock);
diff --git a/keystore2/src/apc.rs b/keystore2/src/apc.rs
index 767014e..f8259ea 100644
--- a/keystore2/src/apc.rs
+++ b/keystore2/src/apc.rs
@@ -31,7 +31,7 @@
     ExceptionCode, Interface, Result as BinderResult, SpIBinder, Status as BinderStatus, Strong,
 };
 use anyhow::{Context, Result};
-use binder::{IBinder, ThreadState};
+use binder::{IBinderInternal, ThreadState};
 use keystore2_apc_compat::ApcHal;
 use keystore2_selinux as selinux;
 use std::time::{Duration, Instant};
diff --git a/keystore2/src/async_task.rs b/keystore2/src/async_task.rs
index 2b36f1f..20a7458 100644
--- a/keystore2/src/async_task.rs
+++ b/keystore2/src/async_task.rs
@@ -89,8 +89,10 @@
 struct AsyncTaskState {
     state: State,
     thread: Option<thread::JoinHandle<()>>,
+    timeout: Duration,
     hi_prio_req: VecDeque<Box<dyn FnOnce(&mut Shelf) + Send>>,
     lo_prio_req: VecDeque<Box<dyn FnOnce(&mut Shelf) + Send>>,
+    idle_fns: Vec<Arc<dyn Fn(&mut Shelf) + Send + Sync>>,
     /// The store allows tasks to store state across invocations. It is passed to each invocation
     /// of each task. Tasks need to cooperate on the ids they use for storing state.
     shelf: Option<Shelf>,
@@ -107,25 +109,32 @@
 
 impl Default for AsyncTask {
     fn default() -> Self {
+        Self::new(Duration::from_secs(30))
+    }
+}
+
+impl AsyncTask {
+    /// Construct an [`AsyncTask`] with a specific timeout value. The worker thread
+    /// shuts down after it has been idle (no queued jobs) for this duration, and is
+    /// respawned when the next job is queued.
+    pub fn new(timeout: Duration) -> Self {
         Self {
             state: Arc::new((
                 Condvar::new(),
                 Mutex::new(AsyncTaskState {
                     state: State::Exiting,
                     thread: None,
+                    timeout,
                     hi_prio_req: VecDeque::new(),
                     lo_prio_req: VecDeque::new(),
+                    idle_fns: Vec::new(),
                     shelf: None,
                 }),
             )),
         }
     }
-}
 
-impl AsyncTask {
-    /// Adds a job to the high priority queue. High priority jobs are completed before
-    /// low priority jobs and can also overtake low priority jobs. But they cannot
-    /// preempt them.
+    /// Adds a one-off job to the high priority queue. High priority jobs are
+    /// completed before low priority jobs and can also overtake low priority
+    /// jobs. But they cannot preempt them.
     pub fn queue_hi<F>(&self, f: F)
     where
         F: for<'r> FnOnce(&'r mut Shelf) + Send + 'static,
@@ -133,10 +142,10 @@
         self.queue(f, true)
     }
 
-    /// Adds a job to the low priority queue. Low priority jobs are completed after
-    /// high priority. And they are not executed as long as high priority jobs are
-    /// present. Jobs always run to completion and are never preempted by high
-    /// priority jobs.
+    /// Adds a one-off job to the low priority queue. Low priority jobs are
+    /// completed after high priority jobs and are not executed as long as high
+    /// priority jobs are present. Jobs always run to completion and are never
+    /// preempted by high priority jobs.
     pub fn queue_lo<F>(&self, f: F)
     where
         F: FnOnce(&mut Shelf) + Send + 'static,
@@ -144,6 +153,17 @@
         self.queue(f, false)
     }
 
+    /// Adds an idle callback. This will be invoked whenever the worker becomes
+    /// idle (all high and low priority jobs have been performed).
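+    ///
+    /// A minimal usage sketch (the closures below are placeholders, not part of
+    /// this change):
+    ///
+    /// ```ignore
+    /// let at = AsyncTask::default();
+    /// at.add_idle(|_shelf| println!("worker went idle"));
+    /// at.queue_lo(|_shelf| println!("doing some work"));
+    /// // Once both queues drain, every registered idle callback runs once
+    /// // before the worker waits for further jobs or times out.
+    /// ```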
+    pub fn add_idle<F>(&self, f: F)
+    where
+        F: Fn(&mut Shelf) + Send + Sync + 'static,
+    {
+        let (ref _condvar, ref state) = *self.state;
+        let mut state = state.lock().unwrap();
+        state.idle_fns.push(Arc::new(f));
+    }
+
     fn queue<F>(&self, f: F, hi_prio: bool)
     where
         F: for<'r> FnOnce(&'r mut Shelf) + Send + 'static,
@@ -169,39 +189,66 @@
         }
 
         let cloned_state = self.state.clone();
+        let timeout_period = state.timeout;
 
         state.thread = Some(thread::spawn(move || {
             let (ref condvar, ref state) = *cloned_state;
+
+            enum Action {
+                QueuedFn(Box<dyn FnOnce(&mut Shelf) + Send>),
+                IdleFns(Vec<Arc<dyn Fn(&mut Shelf) + Send + Sync>>),
+            }
+            let mut done_idle = false;
+
             // When the worker starts, it takes the shelf and puts it on the stack.
             let mut shelf = state.lock().unwrap().shelf.take().unwrap_or_default();
             loop {
-                if let Some(f) = {
-                    let (mut state, timeout) = condvar
-                        .wait_timeout_while(
-                            state.lock().unwrap(),
-                            Duration::from_secs(30),
-                            |state| state.hi_prio_req.is_empty() && state.lo_prio_req.is_empty(),
-                        )
-                        .unwrap();
-                    match (
-                        state.hi_prio_req.pop_front(),
-                        state.lo_prio_req.is_empty(),
-                        timeout.timed_out(),
-                    ) {
-                        (Some(f), _, _) => Some(f),
-                        (None, false, _) => state.lo_prio_req.pop_front(),
-                        (None, true, true) => {
-                            // When the worker exits it puts the shelf back into the shared
-                            // state for the next worker to use. So state is preserved not
-                            // only across invocations but also across worker thread shut down.
-                            state.shelf = Some(shelf);
-                            state.state = State::Exiting;
-                            break;
+                if let Some(action) = {
+                    let state = state.lock().unwrap();
+                    if !done_idle && state.hi_prio_req.is_empty() && state.lo_prio_req.is_empty() {
+                        // No jobs queued so invoke the idle callbacks.
+                        Some(Action::IdleFns(state.idle_fns.clone()))
+                    } else {
+                        // Wait for either a queued job to arrive or a timeout.
+                        let (mut state, timeout) = condvar
+                            .wait_timeout_while(state, timeout_period, |state| {
+                                state.hi_prio_req.is_empty() && state.lo_prio_req.is_empty()
+                            })
+                            .unwrap();
+                        match (
+                            state.hi_prio_req.pop_front(),
+                            state.lo_prio_req.is_empty(),
+                            timeout.timed_out(),
+                        ) {
+                            (Some(f), _, _) => Some(Action::QueuedFn(f)),
+                            (None, false, _) => {
+                                state.lo_prio_req.pop_front().map(|f| Action::QueuedFn(f))
+                            }
+                            (None, true, true) => {
+                                // When the worker exits it puts the shelf back into the shared
+                                // state for the next worker to use. So state is preserved not
+                                // only across invocations but also across worker thread shut down.
+                                state.shelf = Some(shelf);
+                                state.state = State::Exiting;
+                                break;
+                            }
+                            (None, true, false) => None,
                         }
-                        (None, true, false) => None,
                     }
                 } {
-                    f(&mut shelf)
+                    // Now that the lock has been dropped, perform the action.
+                    match action {
+                        Action::QueuedFn(f) => {
+                            f(&mut shelf);
+                            done_idle = false;
+                        }
+                        Action::IdleFns(idle_fns) => {
+                            for idle_fn in idle_fns {
+                                idle_fn(&mut shelf);
+                            }
+                            done_idle = true;
+                        }
+                    }
                 }
             }
         }));
@@ -212,7 +259,11 @@
 #[cfg(test)]
 mod tests {
     use super::{AsyncTask, Shelf};
-    use std::sync::mpsc::channel;
+    use std::sync::{
+        mpsc::{channel, sync_channel, RecvTimeoutError},
+        Arc,
+    };
+    use std::time::Duration;
 
     #[test]
     fn test_shelf() {
@@ -306,6 +357,21 @@
     }
 
     #[test]
+    fn test_async_task_chain() {
+        let at = Arc::new(AsyncTask::default());
+        let (sender, receiver) = channel();
+        // Queue up a job that will queue up another job. This confirms
+        // that the job is not invoked with any internal AsyncTask locks held.
+        let at_clone = at.clone();
+        at.queue_hi(move |_shelf| {
+            at_clone.queue_lo(move |_shelf| {
+                sender.send(()).unwrap();
+            });
+        });
+        receiver.recv().unwrap();
+    }
+
+    #[test]
     #[should_panic]
     fn test_async_task_panic() {
         let at = AsyncTask::default();
@@ -319,4 +385,147 @@
         });
         done_receiver.recv().unwrap();
     }
+
+    #[test]
+    fn test_async_task_idle() {
+        let at = AsyncTask::new(Duration::from_secs(3));
+        // Need a SyncSender as it is Send+Sync.
+        let (idle_done_sender, idle_done_receiver) = sync_channel::<()>(3);
+        at.add_idle(move |_shelf| {
+            idle_done_sender.send(()).unwrap();
+        });
+
+        // Queue up some high-priority and low-priority jobs that take time.
+        for _i in 0..3 {
+            at.queue_lo(|_shelf| {
+                std::thread::sleep(Duration::from_millis(500));
+            });
+            at.queue_hi(|_shelf| {
+                std::thread::sleep(Duration::from_millis(500));
+            });
+        }
+        // Final low-priority job.
+        let (done_sender, done_receiver) = channel();
+        at.queue_lo(move |_shelf| {
+            done_sender.send(()).unwrap();
+        });
+
+        // Nothing happens until the last job completes.
+        assert_eq!(
+            idle_done_receiver.recv_timeout(Duration::from_secs(1)),
+            Err(RecvTimeoutError::Timeout)
+        );
+        done_receiver.recv().unwrap();
+        idle_done_receiver.recv_timeout(Duration::from_millis(1)).unwrap();
+
+        // Idle callback not executed again even if we wait for a while.
+        assert_eq!(
+            idle_done_receiver.recv_timeout(Duration::from_secs(3)),
+            Err(RecvTimeoutError::Timeout)
+        );
+
+        // However, if more work is done then there's another chance to go idle.
+        let (done_sender, done_receiver) = channel();
+        at.queue_hi(move |_shelf| {
+            std::thread::sleep(Duration::from_millis(500));
+            done_sender.send(()).unwrap();
+        });
+        // Idle callback not immediately executed, because the high priority
+        // job is taking a while.
+        assert_eq!(
+            idle_done_receiver.recv_timeout(Duration::from_millis(1)),
+            Err(RecvTimeoutError::Timeout)
+        );
+        done_receiver.recv().unwrap();
+        idle_done_receiver.recv_timeout(Duration::from_millis(1)).unwrap();
+    }
+
+    #[test]
+    fn test_async_task_multiple_idle() {
+        let at = AsyncTask::new(Duration::from_secs(3));
+        let (idle_sender, idle_receiver) = sync_channel::<i32>(5);
+        // Queue a high priority job to start things off
+        at.queue_hi(|_shelf| {
+            std::thread::sleep(Duration::from_millis(500));
+        });
+
+        // Multiple idle callbacks.
+        for i in 0..3 {
+            let idle_sender = idle_sender.clone();
+            at.add_idle(move |_shelf| {
+                idle_sender.send(i).unwrap();
+            });
+        }
+
+        // Nothing happens immediately.
+        assert_eq!(
+            idle_receiver.recv_timeout(Duration::from_millis(1)),
+            Err(RecvTimeoutError::Timeout)
+        );
+        // Wait for a moment and the idle jobs should have run.
+        std::thread::sleep(Duration::from_secs(1));
+
+        let mut results = Vec::new();
+        while let Ok(i) = idle_receiver.recv_timeout(Duration::from_millis(1)) {
+            results.push(i);
+        }
+        assert_eq!(results, [0, 1, 2]);
+    }
+
+    #[test]
+    fn test_async_task_idle_queues_job() {
+        let at = Arc::new(AsyncTask::new(Duration::from_secs(1)));
+        let at_clone = at.clone();
+        let (idle_sender, idle_receiver) = sync_channel::<i32>(100);
+        // Add an idle callback that queues a low-priority job.
+        at.add_idle(move |shelf| {
+            at_clone.queue_lo(|_shelf| {
+                // Slow things down so the channel doesn't fill up.
+                std::thread::sleep(Duration::from_millis(50));
+            });
+            let i = shelf.get_mut::<i32>();
+            idle_sender.send(*i).unwrap();
+            *i += 1;
+        });
+
+        // Nothing happens immediately.
+        assert_eq!(
+            idle_receiver.recv_timeout(Duration::from_millis(1500)),
+            Err(RecvTimeoutError::Timeout)
+        );
+
+        // Once we queue a normal job, things start.
+        at.queue_hi(|_shelf| {});
+        assert_eq!(0, idle_receiver.recv_timeout(Duration::from_millis(200)).unwrap());
+
+        // The idle callback queues a job, and completion of that job
+        // means the task is going idle again...so the idle callback will
+        // be called repeatedly.
+        assert_eq!(1, idle_receiver.recv_timeout(Duration::from_millis(100)).unwrap());
+        assert_eq!(2, idle_receiver.recv_timeout(Duration::from_millis(100)).unwrap());
+        assert_eq!(3, idle_receiver.recv_timeout(Duration::from_millis(100)).unwrap());
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_async_task_idle_panic() {
+        let at = AsyncTask::new(Duration::from_secs(1));
+        let (idle_sender, idle_receiver) = sync_channel::<()>(3);
+        // Add an idle callback that panics.
+        at.add_idle(move |_shelf| {
+            idle_sender.send(()).unwrap();
+            panic!("Panic from idle callback");
+        });
+        // Queue a job to trigger idleness and ensuing panic.
+        at.queue_hi(|_shelf| {});
+        idle_receiver.recv().unwrap();
+
+        // Queue another job afterwards to ensure that the async thread gets joined
+        // and the panic detected.
+        let (done_sender, done_receiver) = channel();
+        at.queue_hi(move |_shelf| {
+            done_sender.send(()).unwrap();
+        });
+        done_receiver.recv().unwrap();
+    }
 }
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index 75c5add..553746a 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -32,7 +32,8 @@
 use android_system_keystore2::aidl::android::system::keystore2::{
     ResponseCode::ResponseCode as KsResponseCode };
 use anyhow::{Context, Result};
-use binder::IBinder;
+use binder::IBinderInternal;
+use keystore2_crypto::Password;
 use keystore2_selinux as selinux;
 
 /// This is the Authorization error type, it wraps binder exceptions and the
@@ -128,10 +129,10 @@
         &self,
         lock_screen_event: LockScreenEvent,
         user_id: i32,
-        password: Option<&[u8]>,
+        password: Option<Password>,
     ) -> Result<()> {
         match (lock_screen_event, password) {
-            (LockScreenEvent::UNLOCK, Some(user_password)) => {
+            (LockScreenEvent::UNLOCK, Some(password)) => {
                 //This corresponds to the unlock() method in legacy keystore API.
                 //check permission
                 check_keystore_permission(KeystorePerm::unlock())
@@ -145,7 +146,7 @@
                             &LEGACY_MIGRATOR,
                             &SUPER_KEY,
                             user_id as u32,
-                            user_password,
+                            &password,
                         )
                     })
                     .context("In on_lock_screen_event: Unlock with password.")?
@@ -213,7 +214,10 @@
         user_id: i32,
         password: Option<&[u8]>,
     ) -> BinderResult<()> {
-        map_or_log_err(self.on_lock_screen_event(lock_screen_event, user_id, password), Ok)
+        map_or_log_err(
+            self.on_lock_screen_event(lock_screen_event, user_id, password.map(|pw| pw.into())),
+            Ok,
+        )
     }
 
     fn getAuthTokensForCredStore(
diff --git a/keystore2/src/crypto/lib.rs b/keystore2/src/crypto/lib.rs
index 77dab67..98e6eef 100644
--- a/keystore2/src/crypto/lib.rs
+++ b/keystore2/src/crypto/lib.rs
@@ -58,10 +58,15 @@
 
 /// Generate a salt.
 pub fn generate_salt() -> Result<Vec<u8>, Error> {
-    // Safety: salt has the same length as the requested number of random bytes.
-    let mut salt = vec![0; SALT_LENGTH];
-    if unsafe { randomBytes(salt.as_mut_ptr(), SALT_LENGTH) } {
-        Ok(salt)
+    generate_random_data(SALT_LENGTH)
+}
+
+/// Generate random data of the given size.
+pub fn generate_random_data(size: usize) -> Result<Vec<u8>, Error> {
+    // Safety: data has the same length as the requested number of random bytes.
+    let mut data = vec![0; size];
+    if unsafe { randomBytes(data.as_mut_ptr(), size) } {
+        Ok(data)
     } else {
         Err(Error::RandomNumberGenerationFailed)
     }
@@ -144,42 +149,68 @@
     }
 }
 
-/// Generates a key from the given password and salt.
-/// The salt must be exactly 16 bytes long.
-/// Two key sizes are accepted: 16 and 32 bytes.
-pub fn derive_key_from_password(
-    pw: &[u8],
-    salt: Option<&[u8]>,
-    key_length: usize,
-) -> Result<ZVec, Error> {
-    let salt: *const u8 = match salt {
-        Some(s) => {
-            if s.len() != SALT_LENGTH {
-                return Err(Error::InvalidSaltLength);
-            }
-            s.as_ptr()
-        }
-        None => std::ptr::null(),
-    };
+/// Represents a "password" that can be used to key the PBKDF2 algorithm.
+pub enum Password<'a> {
+    /// Borrow an existing byte array
+    Ref(&'a [u8]),
+    /// Use an owned ZVec to store the key
+    Owned(ZVec),
+}
 
-    match key_length {
-        AES_128_KEY_LENGTH | AES_256_KEY_LENGTH => {}
-        _ => return Err(Error::InvalidKeyLength),
+impl<'a> From<&'a [u8]> for Password<'a> {
+    fn from(pw: &'a [u8]) -> Self {
+        Self::Ref(pw)
+    }
+}
+
+impl<'a> Password<'a> {
+    fn get_key(&'a self) -> &'a [u8] {
+        match self {
+            Self::Ref(b) => b,
+            Self::Owned(z) => &*z,
+        }
     }
 
-    let mut result = ZVec::new(key_length)?;
+    /// Generate a key from the given password and salt.
+    /// The salt must be exactly 16 bytes long.
+    /// Two key sizes are accepted: 16 and 32 bytes.
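+    ///
+    /// A minimal sketch of the intended call pattern (the password bytes and salt
+    /// handling are placeholders):
+    ///
+    /// ```ignore
+    /// let pw: Password = (&b"xyzabc"[..]).into();
+    /// let salt = generate_salt()?; // 16 bytes
+    /// let key = pw.derive_key(Some(&salt), AES_256_KEY_LENGTH)?;
+    /// ```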
+    pub fn derive_key(&self, salt: Option<&[u8]>, key_length: usize) -> Result<ZVec, Error> {
+        let pw = self.get_key();
 
-    unsafe {
-        generateKeyFromPassword(
-            result.as_mut_ptr(),
-            result.len(),
-            pw.as_ptr() as *const std::os::raw::c_char,
-            pw.len(),
-            salt,
-        )
-    };
+        let salt: *const u8 = match salt {
+            Some(s) => {
+                if s.len() != SALT_LENGTH {
+                    return Err(Error::InvalidSaltLength);
+                }
+                s.as_ptr()
+            }
+            None => std::ptr::null(),
+        };
 
-    Ok(result)
+        match key_length {
+            AES_128_KEY_LENGTH | AES_256_KEY_LENGTH => {}
+            _ => return Err(Error::InvalidKeyLength),
+        }
+
+        let mut result = ZVec::new(key_length)?;
+
+        unsafe {
+            generateKeyFromPassword(
+                result.as_mut_ptr(),
+                result.len(),
+                pw.as_ptr() as *const std::os::raw::c_char,
+                pw.len(),
+                salt,
+            )
+        };
+
+        Ok(result)
+    }
+
+    /// Try to make another Password object with the same data.
+    pub fn try_clone(&self) -> Result<Password<'static>, Error> {
+        Ok(Password::Owned(ZVec::try_from(self.get_key())?))
+    }
 }
 
 /// Calls the boringssl HKDF_extract function.
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index 5f19cf0..0e8b3d7 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -733,6 +733,11 @@
         Self(get_current_time_in_seconds())
     }
 
+    /// Constructs a new MonotonicRawTime from a given number of seconds.
+    pub fn from_secs(val: i64) -> Self {
+        Self(val)
+    }
+
     /// Returns the integer value of MonotonicRawTime as i64
     pub fn seconds(&self) -> i64 {
         self.0
@@ -1352,15 +1357,11 @@
     }
 
     fn is_locked_error(e: &anyhow::Error) -> bool {
-        matches!(e.root_cause().downcast_ref::<rusqlite::ffi::Error>(),
-        Some(rusqlite::ffi::Error {
-            code: rusqlite::ErrorCode::DatabaseBusy,
-            ..
-        })
-        | Some(rusqlite::ffi::Error {
-            code: rusqlite::ErrorCode::DatabaseLocked,
-            ..
-        }))
+        matches!(
+            e.root_cause().downcast_ref::<rusqlite::ffi::Error>(),
+            Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseBusy, .. })
+                | Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseLocked, .. })
+        )
     }
 
     /// Creates a new key entry and allocates a new randomized id for the new key.
@@ -1789,6 +1790,33 @@
         .context("In delete_expired_attestation_keys: ")
     }
 
+    /// Deletes all remotely provisioned attestation keys in the system, regardless of the state
+    /// they are in. This is useful primarily as a testing mechanism.
+    pub fn delete_all_attestation_keys(&mut self) -> Result<i64> {
+        self.with_transaction(TransactionBehavior::Immediate, |tx| {
+            let mut stmt = tx
+                .prepare(
+                    "SELECT id FROM persistent.keyentry
+                    WHERE key_type IS ?;",
+                )
+                .context("Failed to prepare statement")?;
+            let keys_to_delete = stmt
+                .query_map(params![KeyType::Attestation], |row| Ok(row.get(0)?))?
+                .collect::<rusqlite::Result<Vec<i64>>>()
+                .context("Failed to execute statement")?;
+            let num_deleted = keys_to_delete
+                .iter()
+                .map(|id| Self::mark_unreferenced(&tx, *id))
+                .collect::<Result<Vec<bool>>>()
+                .context("Failed to execute mark_unreferenced on a keyid")?
+                .into_iter()
+                .filter(|result| *result)
+                .count() as i64;
+            Ok(num_deleted).do_gc(num_deleted != 0)
+        })
+        .context("In delete_all_attestation_keys: ")
+    }
+
     /// Counts the number of keys that will expire by the provided epoch date and the number of
     /// keys not currently assigned to a domain.
     pub fn get_attestation_pool_status(
@@ -3351,6 +3379,23 @@
     }
 
     #[test]
+    fn test_delete_all_attestation_keys() -> Result<()> {
+        let mut db = new_test_db()?;
+        load_attestation_key_pool(&mut db, 45 /* expiration */, 1 /* namespace */, 0x02)?;
+        load_attestation_key_pool(&mut db, 80 /* expiration */, 2 /* namespace */, 0x03)?;
+        db.create_key_entry(&Domain::APP, &42, &KEYSTORE_UUID)?;
+        let result = db.delete_all_attestation_keys()?;
+
+        // Give the garbage collector half a second to catch up.
+        std::thread::sleep(Duration::from_millis(500));
+
+        // Attestation keys should be deleted, and the regular key should remain.
+        assert_eq!(result, 2);
+
+        Ok(())
+    }
+
+    #[test]
     fn test_rebind_alias() -> Result<()> {
         fn extractor(
             ke: &KeyEntryRow,
@@ -4840,7 +4885,7 @@
     #[test]
     fn test_store_super_key() -> Result<()> {
         let mut db = new_test_db()?;
-        let pw = "xyzabc".as_bytes();
+        let pw: keystore2_crypto::Password = (&b"xyzabc"[..]).into();
         let super_key = keystore2_crypto::generate_aes256_key()?;
         let secret = String::from("keystore2 is great.");
         let secret_bytes = secret.into_bytes();
diff --git a/keystore2/src/entropy.rs b/keystore2/src/entropy.rs
new file mode 100644
index 0000000..de38187
--- /dev/null
+++ b/keystore2/src/entropy.rs
@@ -0,0 +1,98 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module holds functionality for retrieving and distributing entropy.
+
+use anyhow::{Context, Result};
+use log::error;
+use std::time::{Duration, Instant};
+
+static ENTROPY_SIZE: usize = 64;
+static MIN_FEED_INTERVAL_SECS: u64 = 30;
+
+#[derive(Default)]
+struct FeederInfo {
+    last_feed: Option<Instant>,
+}
+
+/// Register the entropy feeder as an idle callback.
+pub fn register_feeder() {
+    crate::globals::ASYNC_TASK.add_idle(|shelf| {
+        let mut info = shelf.get_mut::<FeederInfo>();
+        let now = Instant::now();
+        let feed_needed = match info.last_feed {
+            None => true,
+            Some(last) => now.duration_since(last) > Duration::from_secs(MIN_FEED_INTERVAL_SECS),
+        };
+        if feed_needed {
+            info.last_feed = Some(now);
+            feed_devices();
+        }
+    });
+}
+
+fn get_entropy(size: usize) -> Result<Vec<u8>> {
+    keystore2_crypto::generate_random_data(size).context("Retrieving entropy for KeyMint device")
+}
+
+/// Feed entropy to all known KeyMint devices.
+pub fn feed_devices() {
+    let km_devs = crate::globals::get_keymint_devices();
+    if km_devs.is_empty() {
+        return;
+    }
+    let data = match get_entropy(km_devs.len() * ENTROPY_SIZE) {
+        Ok(data) => data,
+        Err(e) => {
+            error!(
+                "Failed to retrieve {}*{} bytes of entropy: {:?}",
+                km_devs.len(),
+                ENTROPY_SIZE,
+                e
+            );
+            return;
+        }
+    };
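+    // Illustrative numbers: with 2 KeyMint devices, 2 * 64 = 128 bytes of entropy
+    // are fetched and device i is fed data[i * 64 .. (i + 1) * 64].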
+    for (i, km_dev) in km_devs.iter().enumerate() {
+        let offset = i * ENTROPY_SIZE;
+        let sub_data = &data[offset..(offset + ENTROPY_SIZE)];
+        if let Err(e) = km_dev.addRngEntropy(sub_data) {
+            error!("Failed to feed entropy to KeyMint device: {:?}", e);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::collections::HashSet;
+
+    #[test]
+    fn test_entropy_size() {
+        for size in &[0, 1, 4, 8, 256, 4096] {
+            let data = get_entropy(*size).expect("failed to get entropy");
+            assert_eq!(data.len(), *size);
+        }
+    }
+    #[test]
+    fn test_entropy_uniqueness() {
+        let count = 10;
+        let mut seen = HashSet::new();
+        for _i in 0..count {
+            let data = get_entropy(16).expect("failed to get entropy");
+            seen.insert(data);
+        }
+        assert_eq!(seen.len(), count);
+    }
+}
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 04bfbc9..54f7dc7 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -35,6 +35,7 @@
 use android_hardware_security_keymint::binder::{StatusCode, Strong};
 use android_security_compat::aidl::android::security::compat::IKeystoreCompatService::IKeystoreCompatService;
 use anyhow::{Context, Result};
+use binder::FromIBinder;
 use keystore2_vintf::get_aidl_instances;
 use lazy_static::lazy_static;
 use std::sync::{Arc, Mutex};
@@ -117,6 +118,10 @@
             .map(|(dev, hw_info)| ((*dev).clone(), (*hw_info).clone(), *uuid))
     }
 
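+    /// Returns all known devices that can be viewed through the requested
+    /// interface `T`, silently skipping any for which the interface cast fails.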
+    fn devices<T: FromIBinder + ?Sized>(&self) -> Vec<Strong<T>> {
+        self.devices_by_uuid.values().filter_map(|(asp, _)| asp.get_interface::<T>().ok()).collect()
+    }
+
     /// The requested security level and the security level of the actual implementation may
     /// differ. So we map the requested security level to the uuid of the implementation
     /// so that there cannot be any confusion as to which KeyMint instance is requested.
@@ -256,6 +261,11 @@
     }
 }
 
+/// Return all known keymint devices.
+pub fn get_keymint_devices() -> Vec<Strong<dyn IKeyMintDevice>> {
+    KEY_MINT_DEVICES.lock().unwrap().devices()
+}
+
 static TIME_STAMP_SERVICE_NAME: &str = "android.hardware.security.secureclock.ISecureClock";
 
 /// Make a new connection to a secure clock service.
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index 51c78b1..5d99449 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -16,6 +16,7 @@
 
 use keystore2::apc::ApcManager;
 use keystore2::authorization::AuthorizationManager;
+use keystore2::entropy;
 use keystore2::globals::ENFORCEMENTS;
 use keystore2::remote_provisioning::RemoteProvisioningService;
 use keystore2::service::KeystoreService;
@@ -74,6 +75,8 @@
             .unwrap_or_else(|e| error!("watch_boot_level failed: {}", e));
     });
 
+    entropy::register_feeder();
+
     info!("Starting thread pool now.");
     binder::ProcessState::start_thread_pool();
 
diff --git a/keystore2/src/km_compat/km_compat.cpp b/keystore2/src/km_compat/km_compat.cpp
index 3439d2f..26b099a 100644
--- a/keystore2/src/km_compat/km_compat.cpp
+++ b/keystore2/src/km_compat/km_compat.cpp
@@ -216,6 +216,8 @@
  */
 bool isNewAndKeystoreEnforceable(const KMV1::KeyParameter& param) {
     switch (param.tag) {
+    case KMV1::Tag::MAX_BOOT_LEVEL:
+        return true;
     case KMV1::Tag::USAGE_COUNT_LIMIT:
         return true;
     default:
@@ -548,11 +550,12 @@
                                         std::vector<uint8_t>* _aidl_return) {
     auto legacyUpgradeParams = convertKeyParametersToLegacy(in_inUpgradeParams);
     V4_0_ErrorCode errorCode;
+
     auto result =
-        mDevice->upgradeKey(in_inKeyBlobToUpgrade, legacyUpgradeParams,
+        mDevice->upgradeKey(prefixedKeyBlobRemovePrefix(in_inKeyBlobToUpgrade), legacyUpgradeParams,
                             [&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& upgradedKeyBlob) {
                                 errorCode = error;
-                                *_aidl_return = upgradedKeyBlob;
+                                *_aidl_return = keyBlobPrefix(upgradedKeyBlob, false);
                             });
     if (!result.isOk()) {
         LOG(ERROR) << __func__ << " transaction failed. " << result.description();
@@ -731,7 +734,7 @@
 
     KMV1::ErrorCode errorCode;
     auto result = mDevice->finish(
-        mOperationHandle, {} /* inParams */, input, signature, authToken, verificationToken,
+        mOperationHandle, inParams, input, signature, authToken, verificationToken,
         [&](V4_0_ErrorCode error, auto /* outParams */, const hidl_vec<uint8_t>& output) {
             errorCode = convert(error);
             *out_output = output;
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 3fc77b7..5f40ece 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -26,7 +26,7 @@
     SecurityLevel::SecurityLevel, Tag::Tag, TagType::TagType,
 };
 use anyhow::{Context, Result};
-use keystore2_crypto::{aes_gcm_decrypt, derive_key_from_password, ZVec};
+use keystore2_crypto::{aes_gcm_decrypt, Password, ZVec};
 use std::collections::{HashMap, HashSet};
 use std::{convert::TryInto, fs::File, path::Path, path::PathBuf};
 use std::{
@@ -1036,7 +1036,7 @@
     }
 
     /// Load and decrypt legacy super key blob.
-    pub fn load_super_key(&self, user_id: u32, pw: &[u8]) -> Result<Option<ZVec>> {
+    pub fn load_super_key(&self, user_id: u32, pw: &Password) -> Result<Option<ZVec>> {
         let path = self.make_super_key_filename(user_id);
         let blob = Self::read_generic_blob(&path)
             .context("In load_super_key: While loading super key.")?;
@@ -1046,7 +1046,8 @@
                 Blob {
                     value: BlobValue::PwEncrypted { iv, tag, data, salt, key_size }, ..
                 } => {
-                    let key = derive_key_from_password(pw, Some(&salt), key_size)
+                    let key = pw
+                        .derive_key(Some(&salt), key_size)
                         .context("In load_super_key: Failed to derive key from password.")?;
                     let blob = aes_gcm_decrypt(&data, &iv, &tag, &key).context(
                         "In load_super_key: while trying to decrypt legacy super key blob.",
@@ -1294,7 +1295,7 @@
             Some(&error::Error::Rc(ResponseCode::LOCKED))
         );
 
-        key_manager.unlock_user_key(&mut db, 0, PASSWORD, &legacy_blob_loader)?;
+        key_manager.unlock_user_key(&mut db, 0, &(PASSWORD.into()), &legacy_blob_loader)?;
 
         if let (Some((Blob { flags, value: _ }, _params)), Some(cert), Some(chain)) =
             legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_migrator.rs
index 1ae8719..7567070 100644
--- a/keystore2/src/legacy_migrator.rs
+++ b/keystore2/src/legacy_migrator.rs
@@ -29,9 +29,8 @@
 };
 use anyhow::{Context, Result};
 use core::ops::Deref;
-use keystore2_crypto::ZVec;
+use keystore2_crypto::{Password, ZVec};
 use std::collections::{HashMap, HashSet};
-use std::convert::TryInto;
 use std::sync::atomic::{AtomicU8, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::{Arc, Mutex};
@@ -334,7 +333,7 @@
     pub fn with_try_migrate_super_key<F, T>(
         &self,
         user_id: u32,
-        pw: &[u8],
+        pw: &Password,
         mut key_accessor: F,
     ) -> Result<Option<T>>
     where
@@ -345,12 +344,9 @@
             Ok(None) => {}
             Err(e) => return Err(e),
         }
-
-        let pw: ZVec = pw
-            .try_into()
-            .context("In with_try_migrate_super_key: copying the password into a zvec.")?;
+        let pw = pw.try_clone().context("In with_try_migrate_super_key: Cloning password.")?;
         let result = self.do_serialized(move |migrator_state| {
-            migrator_state.check_and_migrate_super_key(user_id, pw)
+            migrator_state.check_and_migrate_super_key(user_id, &pw)
         });
 
         if let Some(result) = result {
@@ -550,7 +546,7 @@
         }
     }
 
-    fn check_and_migrate_super_key(&mut self, user_id: u32, pw: ZVec) -> Result<()> {
+    fn check_and_migrate_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
         if self.recently_migrated_super_key.contains(&user_id) {
             return Ok(());
         }
@@ -561,7 +557,7 @@
             .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
         {
             let (blob, blob_metadata) =
-                crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, &pw)
+                crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
                     .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;
 
             self.db.store_super_key(user_id, &(&blob, &blob_metadata)).context(concat!(
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index 8fef6cf..0e51eff 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -20,6 +20,7 @@
 pub mod authorization;
 pub mod database;
 pub mod enforcements;
+pub mod entropy;
 pub mod error;
 pub mod globals;
 /// Internal Representation of Key Parameter and convenience functions.
diff --git a/keystore2/src/operation.rs b/keystore2/src/operation.rs
index 4092684..0f5bcaf 100644
--- a/keystore2/src/operation.rs
+++ b/keystore2/src/operation.rs
@@ -135,7 +135,7 @@
     IKeystoreOperation::BnKeystoreOperation, IKeystoreOperation::IKeystoreOperation,
 };
 use anyhow::{anyhow, Context, Result};
-use binder::IBinder;
+use binder::IBinderInternal;
 use std::{
     collections::HashMap,
     sync::{Arc, Mutex, MutexGuard, Weak},
@@ -167,12 +167,14 @@
     outcome: Mutex<Outcome>,
     owner: u32, // Uid of the operation's owner.
     auth_info: Mutex<AuthInfo>,
+    forced: bool,
 }
 
 struct PruningInfo {
     last_usage: Instant,
     owner: u32,
     index: usize,
+    forced: bool,
 }
 
 // We don't expect more than 32KiB of data in `update`, `updateAad`, and `finish`.
@@ -185,6 +187,7 @@
         km_op: binder::Strong<dyn IKeyMintOperation>,
         owner: u32,
         auth_info: AuthInfo,
+        forced: bool,
     ) -> Self {
         Self {
             index,
@@ -193,6 +196,7 @@
             outcome: Mutex::new(Outcome::Unknown),
             owner,
             auth_info: Mutex::new(auth_info),
+            forced,
         }
     }
 
@@ -218,6 +222,7 @@
             last_usage: *self.last_usage.lock().expect("In get_pruning_info."),
             owner: self.owner,
             index: self.index,
+            forced: self.forced,
         })
     }
 
@@ -465,6 +470,7 @@
         km_op: binder::public_api::Strong<dyn IKeyMintOperation>,
         owner: u32,
         auth_info: AuthInfo,
+        forced: bool,
     ) -> Arc<Operation> {
         // We use unwrap because we don't allow code that can panic while locked.
         let mut operations = self.operations.lock().expect("In create_operation.");
@@ -477,12 +483,13 @@
             s.upgrade().is_none()
         }) {
             Some(free_slot) => {
-                let new_op = Arc::new(Operation::new(index - 1, km_op, owner, auth_info));
+                let new_op = Arc::new(Operation::new(index - 1, km_op, owner, auth_info, forced));
                 *free_slot = Arc::downgrade(&new_op);
                 new_op
             }
             None => {
-                let new_op = Arc::new(Operation::new(operations.len(), km_op, owner, auth_info));
+                let new_op =
+                    Arc::new(Operation::new(operations.len(), km_op, owner, auth_info, forced));
                 operations.push(Arc::downgrade(&new_op));
                 new_op
             }
@@ -565,7 +572,7 @@
     /// ## Update
     /// We also allow callers to cannibalize their own sibling operations if no other
     /// slot can be found. In this case the least recently used sibling is pruned.
-    pub fn prune(&self, caller: u32) -> Result<(), Error> {
+    pub fn prune(&self, caller: u32, forced: bool) -> Result<(), Error> {
         loop {
             // Maps the uid of the owner to the number of operations that owner has
             // (running_siblings). More operations per owner lowers the pruning
@@ -590,7 +597,8 @@
                     }
                 });
 
-            let caller_malus = 1u64 + *owners.entry(caller).or_default();
+            // If the operation is forced, the caller has a malus of 0.
+            let caller_malus = if forced { 0 } else { 1u64 + *owners.entry(caller).or_default() };
 
             // We iterate through all operations computing the malus and finding
             // the candidate with the highest malus which must also be higher
@@ -604,7 +612,7 @@
             let mut oldest_caller_op: Option<CandidateInfo> = None;
             let candidate = pruning_info.iter().fold(
                 None,
-                |acc: Option<CandidateInfo>, &PruningInfo { last_usage, owner, index }| {
+                |acc: Option<CandidateInfo>, &PruningInfo { last_usage, owner, index, forced }| {
                     // Compute the age of the current operation.
                     let age = now
                         .checked_duration_since(last_usage)
@@ -624,12 +632,17 @@
                     }
 
                     // Compute the malus of the current operation.
-                    // Expect safety: Every owner in pruning_info was counted in
-                    // the owners map. So this unwrap cannot panic.
-                    let malus = *owners
-                        .get(&owner)
-                        .expect("This is odd. We should have counted every owner in pruning_info.")
-                        + ((age.as_secs() + 1) as f64).log(6.0).floor() as u64;
+                    let malus = if forced {
+                        // Forced operations have a malus of 0 and cannot be pruned,
+                        // not even by other forced operations.
+                        0
+                    } else {
+                        // Expect safety: Every owner in pruning_info was counted in
+                        // the owners map. So this unwrap cannot panic.
+                        *owners.get(&owner).expect(
+                            "This is odd. We should have counted every owner in pruning_info.",
+                        ) + ((age.as_secs() + 1) as f64).log(6.0).floor() as u64
+                    };
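+                    // Illustrative numbers: an owner with 3 running operations whose
+                    // candidate was last used 40 seconds ago gets a malus of
+                    // 3 + floor(log6(41)) = 3 + 2 = 5.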
 
                     // Now check if the current operation is a viable/better candidate than
                     // the one currently stored in the accumulator.
@@ -716,7 +729,7 @@
 impl KeystoreOperation {
     /// Creates a new operation instance wrapped in a
     /// BnKeystoreOperation proxy object. It also
-    /// calls `IBinder::set_requesting_sid` on the new interface, because
+    /// calls `IBinderInternal::set_requesting_sid` on the new interface, because
     /// we need it for checking Keystore permissions.
     pub fn new_native_binder(
         operation: Arc<Operation>,
diff --git a/keystore2/src/permission.rs b/keystore2/src/permission.rs
index 905b460..7f63834 100644
--- a/keystore2/src/permission.rs
+++ b/keystore2/src/permission.rs
@@ -675,7 +675,7 @@
         let shell_ctx = Context::new("u:r:shell:s0")?;
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::add_auth()));
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::clear_ns()));
-        assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::get_state()));
+        assert!(check_keystore_permission(&shell_ctx, KeystorePerm::get_state()).is_ok());
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::list()));
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::lock()));
         assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::reset()));
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index d6cc680..cc97573 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -367,6 +367,15 @@
     pub fn get_security_levels(&self) -> Result<Vec<SecurityLevel>> {
         Ok(self.device_by_sec_level.keys().cloned().collect())
     }
+
+    /// Deletes all attestation keys generated by the IRemotelyProvisionedComponent from the
+    /// device, regardless of where they are in the attestation key lifecycle.
+    pub fn delete_all_keys(&self) -> Result<i64> {
+        DB.with::<_, Result<i64>>(|db| {
+            let mut db = db.borrow_mut();
+            Ok(db.delete_all_attestation_keys()?)
+        })
+    }
 }
 
 impl binder::Interface for RemoteProvisioningService {}
@@ -422,4 +431,8 @@
     fn getSecurityLevels(&self) -> binder::public_api::Result<Vec<SecurityLevel>> {
         map_or_log_err(self.get_security_levels(), Ok)
     }
+
+    fn deleteAllKeys(&self) -> binder::public_api::Result<i64> {
+        map_or_log_err(self.delete_all_keys(), Ok)
+    }
 }
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index 6560d4d..d7a0a12 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -56,7 +56,7 @@
     utils::key_characteristics_to_internal,
 };
 use anyhow::{anyhow, Context, Result};
-use binder::{IBinder, Strong, ThreadState};
+use binder::{IBinderInternal, Strong, ThreadState};
 use keystore2_crypto::parse_subject_from_certificate;
 
 /// Implementation of the IKeystoreSecurityLevel Interface.
@@ -80,7 +80,7 @@
 impl KeystoreSecurityLevel {
     /// Creates a new security level instance wrapped in a
     /// BnKeystoreSecurityLevel proxy object. It also
-    /// calls `IBinder::set_requesting_sid` on the new interface, because
+    /// calls `IBinderInternal::set_requesting_sid` on the new interface, because
     /// we need it for checking keystore permissions.
     pub fn new_native_binder(
         security_level: SecurityLevel,
@@ -209,6 +209,11 @@
             Domain::BLOB => {
                 check_key_permission(KeyPerm::use_(), key, &None)
                     .context("In create_operation: checking use permission for Domain::BLOB.")?;
+                if forced {
+                    check_key_permission(KeyPerm::req_forced_op(), key, &None).context(
+                        "In create_operation: checking forced permission for Domain::BLOB.",
+                    )?;
+                }
                 (
                     match &key.blob {
                         Some(blob) => blob,
@@ -233,7 +238,13 @@
                                 KeyType::Client,
                                 KeyEntryLoadBits::KM,
                                 caller_uid,
-                                |k, av| check_key_permission(KeyPerm::use_(), k, &av),
+                                |k, av| {
+                                    check_key_permission(KeyPerm::use_(), k, &av)?;
+                                    if forced {
+                                        check_key_permission(KeyPerm::req_forced_op(), k, &av)?;
+                                    }
+                                    Ok(())
+                                },
                             )
                         })
                     })
@@ -270,17 +281,12 @@
                 purpose,
                 key_properties.as_ref(),
                 operation_parameters.as_ref(),
-                // TODO b/178222844 Replace this with the configuration returned by
-                //      KeyMintDevice::getHardwareInfo.
-                //      For now we assume that strongbox implementations need secure timestamps.
-                self.security_level == SecurityLevel::STRONGBOX,
+                self.hw_info.timestampTokenRequired,
             )
             .context("In create_operation.")?;
 
         let immediate_hat = immediate_hat.unwrap_or_default();
 
-        let user_id = uid_to_android_user(caller_uid);
-
         let km_blob = SUPER_KEY
             .unwrap_key_if_required(&blob_metadata, km_blob)
             .context("In create_operation. Failed to handle super encryption.")?;
@@ -304,7 +310,7 @@
                         &immediate_hat,
                     )) {
                         Err(Error::Km(ErrorCode::TOO_MANY_OPERATIONS)) => {
-                            self.operation_db.prune(caller_uid)?;
+                            self.operation_db.prune(caller_uid, forced)?;
                             continue;
                         }
                         v => return v,
@@ -317,7 +323,7 @@
 
         let operation = match begin_result.operation {
             Some(km_op) => {
-                self.operation_db.create_operation(km_op, caller_uid, auth_info)
+                self.operation_db.create_operation(km_op, caller_uid, auth_info, forced)
             },
             None => return Err(Error::sys()).context("In create_operation: Begin operation returned successfully, but did not return a valid operation."),
         };
@@ -335,6 +341,10 @@
                 0 => None,
                 _ => Some(KeyParameters { keyParameter: begin_result.params }),
             },
+            // An upgraded blob should only be returned if the caller has permission
+            // to use Domain::BLOB keys. If we got to this point, we already checked
+            // that the caller had that permission.
+            upgradedBlob: if key.domain == Domain::BLOB { upgraded_blob } else { None },
         })
     }
 
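
The forced-operation change repeats the same two-step check for Domain::BLOB keys and for database-backed keys: the caller must always hold use, and additionally req_forced_op when the operation is forced (i.e. allowed to trigger pruning of other operations). A self-contained sketch of that pattern follows; the enum, the check function, and the helper name are stand-ins for the real keystore2 permission machinery, not its actual API.

    use anyhow::Result;

    // Stand-ins for illustration; the real permission enum and check live in
    // keystore2's permission and utils modules.
    #[derive(Clone, Copy, Debug)]
    enum KeyPerm {
        Use,
        ReqForcedOp,
    }

    fn check_key_permission(_perm: KeyPerm) -> Result<()> {
        // The real check consults SELinux and the key's access vector.
        Ok(())
    }

    /// The pattern used in create_operation: `use` is always required, and
    /// `req_forced_op` is additionally required for forced operations.
    fn check_operation_permissions(forced: bool) -> Result<()> {
        check_key_permission(KeyPerm::Use)?;
        if forced {
            check_key_permission(KeyPerm::ReqForcedOp)?;
        }
        Ok(())
    }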
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index 3a4bf82..15b729d 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -43,7 +43,7 @@
     KeyDescriptor::KeyDescriptor, KeyEntryResponse::KeyEntryResponse, KeyMetadata::KeyMetadata,
 };
 use anyhow::{Context, Result};
-use binder::{IBinder, Strong, ThreadState};
+use binder::{IBinderInternal, Strong, ThreadState};
 use error::Error;
 use keystore2_selinux as selinux;
 
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 5ee685a..aa434d6 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -23,8 +23,8 @@
 use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
 use anyhow::{Context, Result};
 use keystore2_crypto::{
-    aes_gcm_decrypt, aes_gcm_encrypt, derive_key_from_password, generate_aes256_key, generate_salt,
-    ZVec, AES_256_KEY_LENGTH,
+    aes_gcm_decrypt, aes_gcm_encrypt, generate_aes256_key, generate_salt, Password, ZVec,
+    AES_256_KEY_LENGTH,
 };
 use std::ops::Deref;
 use std::{
@@ -130,7 +130,7 @@
         &self,
         db: &mut KeystoreDB,
         user: UserId,
-        pw: &[u8],
+        pw: &Password,
         legacy_blob_loader: &LegacyBlobLoader,
     ) -> Result<()> {
         let (_, entry) = db
@@ -233,7 +233,7 @@
         db: &mut KeystoreDB,
         legacy_migrator: &LegacyMigrator,
         user_id: u32,
-        pw: &[u8],
+        pw: &Password,
     ) -> Result<UserState> {
         let result = legacy_migrator
             .with_try_migrate_super_key(user_id, pw, || db.load_super_key(user_id))
@@ -261,7 +261,7 @@
         db: &mut KeystoreDB,
         legacy_migrator: &LegacyMigrator,
         user_id: u32,
-        pw: Option<&[u8]>,
+        pw: Option<&Password>,
     ) -> Result<UserState> {
         let super_key_exists_in_db =
             Self::super_key_exists_in_db_for_user(db, legacy_migrator, user_id).context(
@@ -296,7 +296,7 @@
         &self,
         user_id: u32,
         entry: KeyEntry,
-        pw: &[u8],
+        pw: &Password,
     ) -> Result<SuperKey> {
         let super_key = Self::extract_super_key_from_key_entry(entry, pw).context(
             "In populate_cache_from_super_key_blob. Failed to extract super key from key entry",
@@ -306,7 +306,7 @@
     }
 
     /// Extracts super key from the entry loaded from the database
-    pub fn extract_super_key_from_key_entry(entry: KeyEntry, pw: &[u8]) -> Result<SuperKey> {
+    pub fn extract_super_key_from_key_entry(entry: KeyEntry, pw: &Password) -> Result<SuperKey> {
         if let Some((blob, metadata)) = entry.key_blob_info() {
             let key = match (
                 metadata.encrypted_by(),
@@ -315,9 +315,9 @@
                 metadata.aead_tag(),
             ) {
                 (Some(&EncryptedBy::Password), Some(salt), Some(iv), Some(tag)) => {
-                    let key = derive_key_from_password(pw, Some(salt), AES_256_KEY_LENGTH).context(
-                    "In extract_super_key_from_key_entry: Failed to generate key from password.",
-                )?;
+                    let key = pw.derive_key(Some(salt), AES_256_KEY_LENGTH).context(
+                        "In extract_super_key_from_key_entry: Failed to generate key from password.",
+                    )?;
 
                     aes_gcm_decrypt(blob, iv, tag, &key).context(
                         "In extract_super_key_from_key_entry: Failed to decrypt key blob.",
@@ -344,9 +344,13 @@
     }
 
     /// Encrypts the super key from a key derived from the password, before storing in the database.
-    pub fn encrypt_with_password(super_key: &[u8], pw: &[u8]) -> Result<(Vec<u8>, BlobMetaData)> {
+    pub fn encrypt_with_password(
+        super_key: &[u8],
+        pw: &Password,
+    ) -> Result<(Vec<u8>, BlobMetaData)> {
         let salt = generate_salt().context("In encrypt_with_password: Failed to generate salt.")?;
-        let derived_key = derive_key_from_password(pw, Some(&salt), AES_256_KEY_LENGTH)
+        let derived_key = pw
+            .derive_key(Some(&salt), AES_256_KEY_LENGTH)
             .context("In encrypt_with_password: Failed to derive password.")?;
         let mut metadata = BlobMetaData::new();
         metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
@@ -514,7 +518,7 @@
         legacy_migrator: &LegacyMigrator,
         skm: &SuperKeyManager,
         user_id: u32,
-        password: Option<&[u8]>,
+        password: Option<&Password>,
     ) -> Result<UserState> {
         match skm.get_per_boot_key_by_user_id(user_id) {
             Some(super_key) => {
@@ -548,7 +552,7 @@
         legacy_migrator: &LegacyMigrator,
         skm: &SuperKeyManager,
         user_id: u32,
-        password: &[u8],
+        password: &Password,
     ) -> Result<UserState> {
         match skm.get_per_boot_key_by_user_id(user_id) {
             Some(super_key) => {
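
The Password type used above is provided by keystore2_crypto and replaces raw &[u8] passwords at the API boundary. Its definition is not part of this diff, so the sketch below is an assumption based on the call sites (pw.derive_key(salt, len) here and password.map(|pw| pw.into()) in user_manager.rs): a thin wrapper that owns the conversion from &[u8] and forwards key derivation; the real type presumably also uses zeroing memory.

    use anyhow::Result;

    /// Minimal sketch of a Password wrapper (assumption; the real type lives in
    /// keystore2_crypto and likely stores the bytes in zeroing memory such as ZVec).
    pub struct Password(Vec<u8>);

    impl From<&[u8]> for Password {
        fn from(pw: &[u8]) -> Self {
            Password(pw.to_vec())
        }
    }

    impl Password {
        /// Derive a key of `key_length` bytes from the password and optional salt.
        pub fn derive_key(&self, salt: Option<&[u8]>, key_length: usize) -> Result<Vec<u8>> {
            derive_key_from_password(&self.0, salt, key_length)
        }
    }

    // Stand-in for the password-based key derivation in keystore2_crypto; this
    // placeholder is for illustration only and must not be used for real keys.
    fn derive_key_from_password(pw: &[u8], _salt: Option<&[u8]>, key_length: usize) -> Result<Vec<u8>> {
        Ok(pw.iter().copied().cycle().take(key_length).collect())
    }

With the From impl in place, the binder entry point can keep its AIDL signature (Option<&[u8]>) and convert at the boundary, which is exactly what the password.map(|pw| pw.into()) change in user_manager.rs does.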
diff --git a/keystore2/src/user_manager.rs b/keystore2/src/user_manager.rs
index c17fa72..0cc2e92 100644
--- a/keystore2/src/user_manager.rs
+++ b/keystore2/src/user_manager.rs
@@ -28,7 +28,8 @@
 use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
 use android_system_keystore2::aidl::android::system::keystore2::ResponseCode::ResponseCode;
 use anyhow::{Context, Result};
-use binder::{IBinder, Strong};
+use binder::{IBinderInternal, Strong};
+use keystore2_crypto::Password;
 
 /// This struct is defined to implement the aforementioned AIDL interface.
 /// As of now, it is an empty struct.
@@ -42,7 +43,7 @@
         Ok(result)
     }
 
-    fn on_user_password_changed(user_id: i32, password: Option<&[u8]>) -> Result<()> {
+    fn on_user_password_changed(user_id: i32, password: Option<Password>) -> Result<()> {
         // Check permission. This function must return early if the check fails, which is
         // why the trailing '?' is essential.
         check_keystore_permission(KeystorePerm::change_password())
@@ -55,7 +56,7 @@
                     &LEGACY_MIGRATOR,
                     &SUPER_KEY,
                     user_id as u32,
-                    password,
+                    password.as_ref(),
                 )
             })
             .context("In on_user_password_changed.")?
@@ -121,7 +122,7 @@
 
 impl IKeystoreMaintenance for Maintenance {
     fn onUserPasswordChanged(&self, user_id: i32, password: Option<&[u8]>) -> BinderResult<()> {
-        map_or_log_err(Self::on_user_password_changed(user_id, password), Ok)
+        map_or_log_err(Self::on_user_password_changed(user_id, password.map(|pw| pw.into())), Ok)
     }
 
     fn onUserAdded(&self, user_id: i32) -> BinderResult<()> {
diff --git a/ondevice-signing/CertUtils.cpp b/ondevice-signing/CertUtils.cpp
index cbd1942..b0b75a6 100644
--- a/ondevice-signing/CertUtils.cpp
+++ b/ondevice-signing/CertUtils.cpp
@@ -147,8 +147,10 @@
 
     x509->signature->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07);
     x509->signature->flags |= ASN1_STRING_FLAG_BITS_LEFT;
-    auto f = fopen(path.c_str(), "wb");
-    // TODO error checking
+    auto f = fopen(path.c_str(), "wbe");
+    if (f == nullptr) {
+        return Error() << "Failed to open " << path;
+    }
     i2d_X509_fp(f, x509.get());
     fclose(f);
 
@@ -199,8 +201,12 @@
 
 Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::string& path) {
     X509* cert;
-    auto f = fopen(path.c_str(), "r");
+    auto f = fopen(path.c_str(), "re");
+    if (f == nullptr) {
+        return Error() << "Failed to open " << path;
+    }
     if (!d2i_X509_fp(f, &cert)) {
+        fclose(f);
         return Error() << "Unable to decode x509 cert at " << path;
     }
 
diff --git a/ondevice-signing/VerityUtils.cpp b/ondevice-signing/VerityUtils.cpp
index ff7de7e..cab92e2 100644
--- a/ondevice-signing/VerityUtils.cpp
+++ b/ondevice-signing/VerityUtils.cpp
@@ -74,8 +74,14 @@
 Result<std::vector<uint8_t>> createDigest(const std::string& path) {
     struct stat filestat;
     unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
+    if (fd < 0) {
+        return ErrnoError() << "Failed to open " << path;
+    }
 
-    stat(path.c_str(), &filestat);
+    int ret = stat(path.c_str(), &filestat);
+    if (ret < 0) {
+        return ErrnoError() << "Failed to stat " << path;
+    }
     struct libfsverity_merkle_tree_params params = {
         .version = 1,
         .hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
@@ -84,9 +90,13 @@
     };
 
     struct libfsverity_digest* digest;
-    libfsverity_compute_digest(&fd, &read_callback, &params, &digest);
-
-    return std::vector<uint8_t>(&digest->digest[0], &digest->digest[32]);
+    ret = libfsverity_compute_digest(&fd, &read_callback, &params, &digest);
+    if (ret < 0) {
+        return ErrnoError() << "Failed to compute fs-verity digest for " << path;
+    }
+    std::vector<uint8_t> digestVector(&digest->digest[0], &digest->digest[32]);
+    free(digest);
+    return digestVector;
 }
 
 namespace {
diff --git a/ondevice-signing/odsign_main.cpp b/ondevice-signing/odsign_main.cpp
index eeef868..33d04ca 100644
--- a/ondevice-signing/odsign_main.cpp
+++ b/ondevice-signing/odsign_main.cpp
@@ -26,6 +26,7 @@
 
 #include <android-base/file.h>
 #include <android-base/logging.h>
+#include <android-base/properties.h>
 #include <android-base/scopeguard.h>
 #include <logwrap/logwrap.h>
 
@@ -39,6 +40,7 @@
 using android::base::ErrnoError;
 using android::base::Error;
 using android::base::Result;
+using android::base::SetProperty;
 
 using OdsignInfo = ::odsign::proto::OdsignInfo;
 
@@ -54,7 +56,14 @@
 static const char* kFsVerityProcPath = "/proc/sys/fs/verity";
 
 static const bool kForceCompilation = false;
-static const bool kUseKeystore = false;
+static const bool kUseKeystore = true;
+
+static const char* kOdsignVerificationDoneProp = "odsign.verification.done";
+static const char* kOdsignKeyDoneProp = "odsign.key.done";
+
+static const char* kOdsignVerificationStatusProp = "odsign.verification.success";
+static const char* kOdsignVerificationStatusValid = "1";
+static const char* kOdsignVerificationStatusError = "0";
 
 Result<void> verifyExistingCert(const SigningKey& key) {
     if (access(kSigningKeyCert.c_str(), F_OK) < 0) {
@@ -237,23 +246,62 @@
     return {};
 }
 
-int main(int /* argc */, char** /* argv */) {
-    auto removeArtifacts = []() -> std::uintmax_t {
-        std::error_code ec;
-        auto num_removed = std::filesystem::remove_all(kArtArtifactsDir, ec);
-        if (ec) {
-            // TODO can't remove artifacts, signal Zygote shouldn't use them
-            LOG(ERROR) << "Can't remove " << kArtArtifactsDir << ": " << ec.message();
-            return 0;
-        } else {
-            if (num_removed > 0) {
-                LOG(INFO) << "Removed " << num_removed << " entries from " << kArtArtifactsDir;
-            }
-            return num_removed;
+static int removeArtifacts() {
+    std::error_code ec;
+    auto num_removed = std::filesystem::remove_all(kArtArtifactsDir, ec);
+    if (ec) {
+        LOG(ERROR) << "Can't remove " << kArtArtifactsDir << ": " << ec.message();
+        return 0;
+    } else {
+        if (num_removed > 0) {
+            LOG(INFO) << "Removed " << num_removed << " entries from " << kArtArtifactsDir;
         }
+        return num_removed;
+    }
+}
+
+static Result<void> verifyArtifacts(const SigningKey& key, bool supportsFsVerity) {
+    auto signInfo = getOdsignInfo(key);
+    // Tell init we're done with the key; this is a boot-time optimization,
+    // particularly for the no-fs-verity case, where verification is costly.
+    // If the files haven't been tampered with, which should be the common path,
+    // the verification will succeed and we won't need the key anymore. If the
+    // artifacts turn out to be invalid (e.g. not in fs-verity) or the hash
+    // doesn't match, we can't generate new artifacts without the key, so in
+    // those cases we remove the artifacts and use the JIT zygote for the
+    // current boot. We should recover automatically by the next boot.
+    SetProperty(kOdsignKeyDoneProp, "1");
+    if (!signInfo.ok()) {
+        return Error() << signInfo.error().message();
+    }
+    std::map<std::string, std::string> trusted_digests(signInfo->file_hashes().begin(),
+                                                       signInfo->file_hashes().end());
+    Result<void> integrityStatus;
+
+    if (supportsFsVerity) {
+        integrityStatus = verifyIntegrityFsVerity(trusted_digests);
+    } else {
+        integrityStatus = verifyIntegrityNoFsVerity(trusted_digests);
+    }
+    if (!integrityStatus.ok()) {
+        return Error() << integrityStatus.error().message();
+    }
+
+    return {};
+}
+
+int main(int /* argc */, char** /* argv */) {
+    auto errorScopeGuard = []() {
+        // In case we hit any error, remove the artifacts and tell Zygote not to use them
+        removeArtifacts();
+        // Tell init we don't need to use our key anymore
+        SetProperty(kOdsignKeyDoneProp, "1");
+        // Tell init we're done with verification, and that it was an error
+        SetProperty(kOdsignVerificationDoneProp, "1");
+        SetProperty(kOdsignVerificationStatusProp, kOdsignVerificationStatusError);
     };
-    // Make sure we delete the artifacts in all early (error) exit paths
-    auto scope_guard = android::base::make_scope_guard(removeArtifacts);
+    auto scope_guard = android::base::make_scope_guard(errorScopeGuard);
 
     SigningKey* key;
     if (kUseKeystore) {
@@ -301,35 +349,25 @@
         }
     }
 
-    auto signInfo = getOdsignInfo(*key);
-    if (!signInfo.ok()) {
-        int num_removed = removeArtifacts();
-        // Only a warning if there were artifacts to begin with, which suggests tampering or
-        // corruption
-        if (num_removed > 0) {
-            LOG(WARNING) << signInfo.error().message();
-        }
-    } else {
-        std::map<std::string, std::string> trusted_digests(signInfo->file_hashes().begin(),
-                                                           signInfo->file_hashes().end());
-        Result<void> integrityStatus;
-
-        if (supportsFsVerity) {
-            integrityStatus = verifyIntegrityFsVerity(trusted_digests);
-        } else {
-            integrityStatus = verifyIntegrityNoFsVerity(trusted_digests);
-        }
-        if (!integrityStatus.ok()) {
-            LOG(WARNING) << integrityStatus.error().message() << ", removing " << kArtArtifactsDir;
-            removeArtifacts();
-        }
-    }
-
     // Ask ART whether it considers the artifacts valid
     LOG(INFO) << "Asking odrefresh to verify artifacts (if present)...";
     bool artifactsValid = validateArtifacts();
     LOG(INFO) << "odrefresh said they are " << (artifactsValid ? "VALID" : "INVALID");
 
+    // A post-condition of validating artifacts is that if the ones on /system
+    // are used, kArtArtifactsDir is removed. Conversely, if kArtArtifactsDir
+    // exists, those are artifacts that will be used, and we should verify them.
+    int err = access(kArtArtifactsDir.c_str(), F_OK);
+    // If we receive any error other than ENOENT, be suspicious
+    bool artifactsPresent = (err == 0) || (err < 0 && errno != ENOENT);
+    if (artifactsPresent) {
+        auto verificationResult = verifyArtifacts(*key, supportsFsVerity);
+        if (!verificationResult.ok()) {
+            LOG(ERROR) << verificationResult.error().message();
+            return -1;
+        }
+    }
+
     if (!artifactsValid || kForceCompilation) {
         LOG(INFO) << "Starting compilation... ";
         bool ret = compileArtifacts(kForceCompilation);
@@ -355,10 +393,13 @@
         }
     }
 
-    // TODO we want to make sure Zygote only picks up the artifacts if we deemed
-    // everything was ok here. We could use a sysprop, or some other mechanism?
     LOG(INFO) << "On-device signing done.";
 
     scope_guard.Disable();
+    // At this point, we're done with the key for sure
+    SetProperty(kOdsignKeyDoneProp, "1");
+    // And we did a successful verification
+    SetProperty(kOdsignVerificationDoneProp, "1");
+    SetProperty(kOdsignVerificationStatusProp, kOdsignVerificationStatusValid);
     return 0;
 }