Merge "Set apexd.payload_metadata build property"
diff --git a/apex/Android.bp b/apex/Android.bp
index 9d6cc94..a2f272e 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -15,6 +15,7 @@
 
     // TODO(jiyong): make it updatable
     updatable: false,
+    future_updatable: true,
     platform_apis: true,
 
     system_ext_specific: true,
diff --git a/apex/product_packages.mk b/apex/product_packages.mk
index 1a431d5..322b73e 100644
--- a/apex/product_packages.mk
+++ b/apex/product_packages.mk
@@ -25,7 +25,6 @@
 
 # TODO(b/207336449): Figure out how to get these off /system
 PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST := \
-    system/lib64/libgfxstream_backend.so \
     system/framework/oat/%@service-compos.jar@classes.odex \
     system/framework/oat/%@service-compos.jar@classes.vdex \
 
diff --git a/apkdmverity/Android.bp b/apkdmverity/Android.bp
index 403e726..06d4500 100644
--- a/apkdmverity/Android.bp
+++ b/apkdmverity/Android.bp
@@ -18,7 +18,6 @@
         "liblibc",
         "libnix",
         "libnum_traits",
-        "librustutils",
         "libscopeguard",
         "libuuid",
     ],
diff --git a/apkdmverity/src/main.rs b/apkdmverity/src/main.rs
index a8a8f15..dbf3131 100644
--- a/apkdmverity/src/main.rs
+++ b/apkdmverity/src/main.rs
@@ -29,7 +29,6 @@
 use clap::{App, Arg};
 use idsig::{HashAlgorithm, V4Signature};
 use itertools::Itertools;
-use rustutils::system_properties;
 use std::fmt::Debug;
 use std::fs;
 use std::fs::File;
@@ -40,27 +39,26 @@
     let matches = App::new("apkdmverity")
         .about("Creates a dm-verity block device out of APK signed with APK signature scheme V4.")
         .arg(Arg::from_usage(
-            "--apk... <apk_path> <idsig_path> <name> \
-                            'Input APK file, idsig file, and the name of the block device. The APK \
-                            file must be signed using the APK signature scheme 4. The block device \
-                            is created at \"/dev/mapper/<name>\".'",
-        ))
+            "--apk... <apk_path> <idsig_path> <name> <root_hash> \
+                            'Input APK file, idsig file, name of the block device, and root hash. \
+                            The APK file must be signed using the APK signature scheme 4. The \
+                            block device is created at \"/dev/mapper/<name>\".' root_hash is \
+                            optional; the root hash from the idsig file is used when it is \"none\"."
+            ))
         .arg(Arg::with_name("verbose").short("v").long("verbose").help("Shows verbose output"))
         .get_matches();
 
     let apks = matches.values_of("apk").unwrap();
-    assert!(apks.len() % 3 == 0);
-
-    let roothash = if let Ok(val) = system_properties::read("microdroid_manager.apk_root_hash") {
-        Some(util::parse_hexstring(&val)?)
-    } else {
-        // This failure is not an error. We will use the roothash read from the idsig file.
-        None
-    };
+    assert!(apks.len() % 4 == 0);
 
     let verbose = matches.is_present("verbose");
 
-    for (apk, idsig, name) in apks.tuples() {
+    for (apk, idsig, name, roothash) in apks.tuples() {
+        let roothash = if roothash != "none" {
+            Some(util::parse_hexstring(roothash).expect("failed to parse roothash"))
+        } else {
+            None
+        };
         let ret = enable_verity(apk, idsig, name, roothash.as_deref())?;
         if verbose {
             println!(
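For clarity, a minimal standalone sketch (not part of the change; helper name hypothetical) of how the new four-value --apk groups are interpreted, with "none" deferring to the idsig file's root hash:

    // Hypothetical helper mirroring the tuples() loop above.
    fn group_apk_args(values: &[&str]) -> Vec<(String, String, String, Option<String>)> {
        assert!(values.len() % 4 == 0);
        values
            .chunks(4)
            .map(|v| {
                // "none" keeps the root hash unset so enable_verity falls back to the idsig file.
                let roothash = if v[3] == "none" { None } else { Some(v[3].to_string()) };
                (v[0].to_string(), v[1].to_string(), v[2].to_string(), roothash)
            })
            .collect()
    }

    fn main() {
        let groups = group_apk_args(&["a.apk", "a.apk.idsig", "microdroid-apk", "none"]);
        assert!(groups[0].3.is_none());
    }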
diff --git a/authfs/Android.bp b/authfs/Android.bp
index a6792b0..353b597 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -20,6 +20,7 @@
         "libfuse_rust",
         "liblibc",
         "liblog_rust",
+        "libnix",
         "libstructopt",
         "libthiserror",
     ],
diff --git a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
index 64828fb..43dee52 100644
--- a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
+++ b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
@@ -57,7 +57,7 @@
     long getFileSize(int fd);
 
     /**
-     * Open a file given the remote directory FD.
+     * Opens a file given the remote directory FD.
      *
      * @param pathname The file path to open. Must be a related path.
      * @return file A remote FD that represents the opened file.
@@ -65,20 +65,44 @@
     int openFileInDirectory(int dirFd, String pathname);
 
     /**
-     * Create a file given the remote directory FD.
+     * Creates a file given the remote directory FD.
      *
      * @param basename The file name to create. Must not contain directory separator.
+     * @param mode File mode of the new file. See open(2).
      * @return file A remote FD that represents the new created file.
      */
-    int createFileInDirectory(int dirFd, String basename);
+    int createFileInDirectory(int dirFd, String basename, int mode);
 
     /**
-     * Create a directory inside the given remote directory FD.
+     * Creates a directory inside the given remote directory FD.
      *
      * @param basename The directory name to create. Must not contain directory separator.
+     * @param mode File mode of the new directory. See mkdir(2).
      * @return file FD that represents the new created directory.
      */
-    int createDirectoryInDirectory(int dirFd, String basename);
+    int createDirectoryInDirectory(int dirFd, String basename, int mode);
+
+    /**
+     * Deletes a file in the given directory.
+     *
+     * @param basename The file name to delete. Must not contain directory separator.
+     */
+    void deleteFile(int dirFd, String basename);
+
+    /**
+     * Deletes a sub-directory in the given directory.
+     *
+     * @param basename The directory name to delete. Must not contain directory separator.
+     */
+    void deleteDirectory(int dirFd, String basename);
+
+    /**
+     * Changes mode of the FD.
+     *
+     * @param fd The FD to change.
+     * @param mode New file mode to pass to chmod(2)/fchmod(2).
+     */
+    void chmod(int fd, int mode);
 
     /** Filesystem stats that AuthFS is interested in.*/
     parcelable FsStat {
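As a rough illustration of how the new mode-aware methods compose on the client side, here is a hedged Rust sketch (the helper and its flow are hypothetical; `VirtFdService`/`VirtFdServiceStatus` are the aliases used by the authfs client code later in this change):

    // Hypothetical client-side flow: create a file with a requested mode, tighten it later,
    // and clean up a stale entry. Not part of this change.
    fn create_then_tighten(
        service: &VirtFdService,
        dir_fd: i32,
    ) -> Result<(), VirtFdServiceStatus> {
        let fd = service.createFileInDirectory(dir_fd, "output.bin", 0o600)?;
        // ... write the content through the existing write methods ...
        service.chmod(fd, 0o400)?; // drop write permission once the content is final
        service.deleteFile(dir_fd, "stale.bin")?;
        Ok(())
    }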
diff --git a/authfs/fd_server/src/aidl.rs b/authfs/fd_server/src/aidl.rs
index ddac2bc..c2206c8 100644
--- a/authfs/fd_server/src/aidl.rs
+++ b/authfs/fd_server/src/aidl.rs
@@ -17,8 +17,9 @@
 use anyhow::Result;
 use log::error;
 use nix::{
-    dir::Dir, errno::Errno, fcntl::openat, fcntl::OFlag, sys::stat::mkdirat, sys::stat::Mode,
-    sys::statvfs::statvfs, sys::statvfs::Statvfs,
+    dir::Dir, errno::Errno, fcntl::openat, fcntl::OFlag, sys::stat::fchmod, sys::stat::mkdirat,
+    sys::stat::mode_t, sys::stat::Mode, sys::statvfs::statvfs, sys::statvfs::Statvfs,
+    unistd::unlinkat, unistd::UnlinkatFlags,
 };
 use std::cmp::min;
 use std::collections::{btree_map, BTreeMap};
@@ -39,17 +40,8 @@
 };
 use binder_common::{new_binder_exception, new_binder_service_specific_error};
 
-fn validate_and_cast_offset(offset: i64) -> Result<u64, Status> {
-    offset.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
-}
-
-fn validate_and_cast_size(size: i32) -> Result<usize, Status> {
-    if size > MAX_REQUESTING_DATA {
-        Err(new_errno_error(Errno::EFBIG))
-    } else {
-        size.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
-    }
-}
+/// Bitflags of forbidden file modes, e.g. setuid, setgid and the sticky bit.
+const FORBIDDEN_MODES: Mode = Mode::from_bits_truncate(!0o777);
 
 /// Configuration of a file descriptor to be served/exposed/shared.
 pub enum FdConfig {
@@ -264,14 +256,14 @@
         })
     }
 
-    fn openFileInDirectory(&self, fd: i32, file_path: &str) -> BinderResult<i32> {
+    fn openFileInDirectory(&self, dir_fd: i32, file_path: &str) -> BinderResult<i32> {
         let path_buf = PathBuf::from(file_path);
         // Checks if the path is a simple, related path.
         if path_buf.components().any(|c| !matches!(c, Component::Normal(_))) {
             return Err(new_errno_error(Errno::EINVAL));
         }
 
-        self.insert_new_fd(fd, |config| match config {
+        self.insert_new_fd(dir_fd, |config| match config {
             FdConfig::InputDir(dir) => {
                 let file = open_readonly_at(dir.as_raw_fd(), &path_buf).map_err(new_errno_error)?;
 
@@ -288,20 +280,20 @@
         })
     }
 
-    fn createFileInDirectory(&self, fd: i32, basename: &str) -> BinderResult<i32> {
-        if basename.contains(MAIN_SEPARATOR) {
-            return Err(new_errno_error(Errno::EINVAL));
-        }
-        self.insert_new_fd(fd, |config| match config {
+    fn createFileInDirectory(&self, dir_fd: i32, basename: &str, mode: i32) -> BinderResult<i32> {
+        validate_basename(basename)?;
+
+        self.insert_new_fd(dir_fd, |config| match config {
             FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
             FdConfig::OutputDir(dir) => {
+                let mode = validate_file_mode(mode)?;
                 let new_fd = openat(
                     dir.as_raw_fd(),
                     basename,
                     // TODO(205172873): handle the case when the file already exist, e.g. truncate
                     // or fail, and possibly allow the client to specify. For now, always truncate.
                     OFlag::O_CREAT | OFlag::O_RDWR | OFlag::O_TRUNC,
-                    Mode::S_IRUSR | Mode::S_IWUSR,
+                    mode,
                 )
                 .map_err(new_errno_error)?;
                 // SAFETY: new_fd is just created and not an error.
@@ -312,14 +304,19 @@
         })
     }
 
-    fn createDirectoryInDirectory(&self, dir_fd: i32, basename: &str) -> BinderResult<i32> {
-        if basename.contains(MAIN_SEPARATOR) {
-            return Err(new_errno_error(Errno::EINVAL));
-        }
+    fn createDirectoryInDirectory(
+        &self,
+        dir_fd: i32,
+        basename: &str,
+        mode: i32,
+    ) -> BinderResult<i32> {
+        validate_basename(basename)?;
+
         self.insert_new_fd(dir_fd, |config| match config {
             FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
             FdConfig::OutputDir(_) => {
-                mkdirat(dir_fd, basename, Mode::S_IRWXU).map_err(new_errno_error)?;
+                let mode = validate_file_mode(mode)?;
+                mkdirat(dir_fd, basename, mode).map_err(new_errno_error)?;
                 let new_dir = Dir::openat(
                     dir_fd,
                     basename,
@@ -333,6 +330,44 @@
         })
     }
 
+    fn deleteFile(&self, dir_fd: i32, basename: &str) -> BinderResult<()> {
+        validate_basename(basename)?;
+
+        self.handle_fd(dir_fd, |config| match config {
+            FdConfig::OutputDir(_dir) => {
+                unlinkat(Some(dir_fd), basename, UnlinkatFlags::NoRemoveDir)
+                    .map_err(new_errno_error)?;
+                Ok(())
+            }
+            FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
+            _ => Err(new_errno_error(Errno::ENOTDIR)),
+        })
+    }
+
+    fn deleteDirectory(&self, dir_fd: i32, basename: &str) -> BinderResult<()> {
+        validate_basename(basename)?;
+
+        self.handle_fd(dir_fd, |config| match config {
+            FdConfig::OutputDir(_dir) => {
+                unlinkat(Some(dir_fd), basename, UnlinkatFlags::RemoveDir)
+                    .map_err(new_errno_error)?;
+                Ok(())
+            }
+            FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
+            _ => Err(new_errno_error(Errno::ENOTDIR)),
+        })
+    }
+
+    fn chmod(&self, fd: i32, mode: i32) -> BinderResult<()> {
+        self.handle_fd(fd, |config| match config {
+            FdConfig::ReadWrite(_) | FdConfig::OutputDir(_) => {
+                let mode = validate_file_mode(mode)?;
+                fchmod(fd, mode).map_err(new_errno_error)
+            }
+            _ => Err(new_errno_error(Errno::EACCES)),
+        })
+    }
+
     fn statfs(&self) -> BinderResult<FsStat> {
         let st = statvfs("/data").map_err(new_errno_error)?;
         try_into_fs_stat(st).map_err(|_e| new_errno_error(Errno::EINVAL))
@@ -368,3 +403,32 @@
     let new_file = unsafe { File::from_raw_fd(new_fd) };
     Ok(new_file)
 }
+
+fn validate_and_cast_offset(offset: i64) -> Result<u64, Status> {
+    offset.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
+}
+
+fn validate_and_cast_size(size: i32) -> Result<usize, Status> {
+    if size > MAX_REQUESTING_DATA {
+        Err(new_errno_error(Errno::EFBIG))
+    } else {
+        size.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
+    }
+}
+
+fn validate_basename(name: &str) -> BinderResult<()> {
+    if name.contains(MAIN_SEPARATOR) {
+        Err(new_errno_error(Errno::EINVAL))
+    } else {
+        Ok(())
+    }
+}
+
+fn validate_file_mode(mode: i32) -> BinderResult<Mode> {
+    let mode = Mode::from_bits(mode as mode_t).ok_or_else(|| new_errno_error(Errno::EINVAL))?;
+    if mode.intersects(FORBIDDEN_MODES) {
+        Err(new_errno_error(Errno::EPERM))
+    } else {
+        Ok(mode)
+    }
+}
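For reference, a small standalone sketch (assuming the nix crate; not part of the change) of how the mode policy above plays out for a few representative values:

    use nix::sys::stat::Mode;

    fn main() {
        // Same definition as FORBIDDEN_MODES above: setuid, setgid and the sticky bit.
        let forbidden = Mode::from_bits_truncate(!0o777);
        // Plain permission bits such as 0o640 are accepted.
        assert!(!Mode::from_bits(0o640).unwrap().intersects(forbidden));
        // 0o4755 carries the setuid bit, so validate_file_mode would report EPERM.
        assert!(Mode::from_bits(0o4755).unwrap().intersects(forbidden));
        // Bits outside the known mode bits make from_bits fail, which maps to EINVAL.
        assert!(Mode::from_bits(0o10_0000).is_none());
    }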
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index f5a3cba..f17b7e8 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -28,7 +28,7 @@
 use anyhow::{bail, Result};
 use binder_common::rpc_server::run_rpc_server;
 use log::debug;
-use nix::dir::Dir;
+use nix::{dir::Dir, sys::stat::umask, sys::stat::Mode};
 use std::collections::BTreeMap;
 use std::fs::File;
 use std::os::unix::io::FromRawFd;
@@ -157,10 +157,15 @@
     );
 
     let args = parse_args()?;
+
+    // Allow open/create/mkdir from authfs to create files with the expected mode. It would still
+    // be possible to apply a custom mask on creation and report the actual file mode back to
+    // authfs, but there is no demand for that now.
+    let old_umask = umask(Mode::empty());
+    debug!("Setting umask to 0 (old: {:03o})", old_umask.bits());
+
     let service = FdService::new_binder(args.fd_pool).as_binder();
-
     debug!("fd_server is starting as a rpc service.");
-
     let mut ready_fd = args.ready_fd;
     let retval = run_rpc_server(service, RPC_SERVICE_PORT, || {
         debug!("fd_server is ready");
diff --git a/authfs/src/file.rs b/authfs/src/file.rs
index 6353209..9bbf3ef 100644
--- a/authfs/src/file.rs
+++ b/authfs/src/file.rs
@@ -1,6 +1,8 @@
+mod attr;
 mod dir;
 mod remote_file;
 
+pub use attr::Attr;
 pub use dir::{InMemoryDir, RemoteDirEditor};
 pub use remote_file::{RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
 
diff --git a/authfs/src/file/attr.rs b/authfs/src/file/attr.rs
new file mode 100644
index 0000000..48084aa
--- /dev/null
+++ b/authfs/src/file/attr.rs
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use log::error;
+use nix::sys::stat::{mode_t, Mode, SFlag};
+use std::io;
+
+use super::VirtFdService;
+
+/// Default/assumed mode of files not created by authfs.
+///
+/// For files that are given to authfs as FDs (i.e. not created through authfs), their mode is
+/// unknown (or untrusted) until it is explicitly set. The default mode just makes the file
+/// readable/writable through VFS. Once the mode is set, the value on fd_server is expected to
+/// become consistent.
+const DEFAULT_FILE_MODE: Mode =
+    Mode::from_bits_truncate(Mode::S_IRUSR.bits() | Mode::S_IWUSR.bits());
+
+/// Default/assumed mode of directories not created by authfs.
+///
+/// See above.
+const DEFAULT_DIR_MODE: Mode = Mode::S_IRWXU;
+
+/// `Attr` maintains the local truth for attributes (e.g. mode and type) while allowing setting the
+/// remote attribute for the file description.
+pub struct Attr {
+    service: VirtFdService,
+    mode: Mode,
+    remote_fd: i32,
+    is_dir: bool,
+}
+
+impl Attr {
+    pub fn new_file(service: VirtFdService, remote_fd: i32) -> Attr {
+        Attr { service, mode: DEFAULT_FILE_MODE, remote_fd, is_dir: false }
+    }
+
+    pub fn new_dir(service: VirtFdService, remote_fd: i32) -> Attr {
+        Attr { service, mode: DEFAULT_DIR_MODE, remote_fd, is_dir: true }
+    }
+
+    pub fn new_file_with_mode(service: VirtFdService, remote_fd: i32, mode: mode_t) -> Attr {
+        Attr { service, mode: Mode::from_bits_truncate(mode), remote_fd, is_dir: false }
+    }
+
+    pub fn new_dir_with_mode(service: VirtFdService, remote_fd: i32, mode: mode_t) -> Attr {
+        Attr { service, mode: Mode::from_bits_truncate(mode), remote_fd, is_dir: true }
+    }
+
+    pub fn mode(&self) -> u32 {
+        self.mode.bits()
+    }
+
+    /// Sets the file mode.
+    ///
+    /// In addition to the actual file mode, `encoded_mode` also carries information about the
+    /// file type.
+    pub fn set_mode(&mut self, encoded_mode: u32) -> io::Result<()> {
+        let new_sflag = SFlag::from_bits_truncate(encoded_mode);
+        let new_mode = Mode::from_bits_truncate(encoded_mode);
+
+        let type_flag = if self.is_dir { SFlag::S_IFDIR } else { SFlag::S_IFREG };
+        if !type_flag.contains(new_sflag) {
+            return Err(io::Error::from_raw_os_error(libc::EINVAL));
+        }
+
+        // Request for update only if changing.
+        if new_mode != self.mode {
+            self.service.chmod(self.remote_fd, new_mode.bits() as i32).map_err(|e| {
+                error!(
+                    "Failed to chmod (fd: {}, mode: {:o}) on fd_server: {:?}",
+                    self.remote_fd, new_mode, e
+                );
+                io::Error::from_raw_os_error(libc::EIO)
+            })?;
+            self.mode = new_mode;
+        }
+        Ok(())
+    }
+}
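A hedged, standalone sketch (assuming the nix and libc crates; not part of the change) of the type check in `set_mode` above: FUSE passes the full `st_mode` in `setattr`, so the type bits must agree with the entry before the permission bits are accepted:

    use nix::sys::stat::SFlag;

    // Mirrors the check in Attr::set_mode: the encoded mode's type bits must match the entry type.
    fn type_matches(encoded_mode: u32, is_dir: bool) -> bool {
        let new_sflag = SFlag::from_bits_truncate(encoded_mode);
        let type_flag = if is_dir { SFlag::S_IFDIR } else { SFlag::S_IFREG };
        type_flag.contains(new_sflag)
    }

    fn main() {
        // chmod 0644 on a regular file arrives as S_IFREG | 0o644 and passes.
        assert!(type_matches(libc::S_IFREG | 0o644, /* is_dir */ false));
        // The same value against a directory entry fails, so set_mode returns EINVAL.
        assert!(!type_matches(libc::S_IFREG | 0o644, /* is_dir */ true));
    }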
diff --git a/authfs/src/file/dir.rs b/authfs/src/file/dir.rs
index 2eaaddd..f3cc6f8 100644
--- a/authfs/src/file/dir.rs
+++ b/authfs/src/file/dir.rs
@@ -14,17 +14,32 @@
  * limitations under the License.
  */
 
+use log::warn;
+use nix::sys::stat::Mode;
 use std::collections::{hash_map, HashMap};
+use std::ffi::{CString, OsString};
 use std::io;
+use std::os::unix::ffi::OsStringExt;
 use std::path::{Path, PathBuf};
 
+use super::attr::Attr;
 use super::remote_file::RemoteFileEditor;
 use super::{validate_basename, VirtFdService, VirtFdServiceStatus};
 use crate::fsverity::VerifiedFileEditor;
-use crate::fusefs::Inode;
+use crate::fusefs::{AuthFsDirEntry, Inode};
 
 const MAX_ENTRIES: u16 = 100; // Arbitrary limit
 
+struct InodeInfo {
+    inode: Inode,
+
+    // This information is duplicated since it is also available in `AuthFs::inode_table` via the
+    // type system, but keeping it here makes deletion simpler: otherwise we would need a mutable
+    // reference to the parent directory in the table while also querying the table for
+    // directory/file type checking.
+    is_dir: bool,
+}
+
 /// A remote directory backed by a remote directory FD, where the provider/fd_server is not
 /// trusted.
 ///
@@ -43,9 +58,9 @@
     service: VirtFdService,
     remote_dir_fd: i32,
 
-    /// Mapping of entry names to the corresponding inode number. The actual file/directory is
-    /// stored in the global pool in fusefs.
-    entries: HashMap<PathBuf, Inode>,
+    /// Mapping of entry names to the corresponding inode. The actual file/directory is stored in
+    /// the global pool in fusefs.
+    entries: HashMap<PathBuf, InodeInfo>,
 }
 
 impl RemoteDirEditor {
@@ -63,61 +78,132 @@
         &mut self,
         basename: &Path,
         inode: Inode,
-    ) -> io::Result<VerifiedFileEditor<RemoteFileEditor>> {
-        self.validate_argument(basename)?;
-
+        mode: libc::mode_t,
+    ) -> io::Result<(VerifiedFileEditor<RemoteFileEditor>, Attr)> {
+        let mode = self.validate_arguments(basename, mode)?;
         let basename_str =
             basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
         let new_fd = self
             .service
-            .createFileInDirectory(self.remote_dir_fd, basename_str)
+            .createFileInDirectory(self.remote_dir_fd, basename_str, mode as i32)
             .map_err(into_io_error)?;
 
         let new_remote_file =
             VerifiedFileEditor::new(RemoteFileEditor::new(self.service.clone(), new_fd));
-        self.entries.insert(basename.to_path_buf(), inode);
-        Ok(new_remote_file)
+        self.entries.insert(basename.to_path_buf(), InodeInfo { inode, is_dir: false });
+        let new_attr = Attr::new_file_with_mode(self.service.clone(), new_fd, mode);
+        Ok((new_remote_file, new_attr))
     }
 
     /// Creates a remote directory named `basename` with corresponding `inode` at the current
     /// directory.
-    pub fn mkdir(&mut self, basename: &Path, inode: Inode) -> io::Result<RemoteDirEditor> {
-        self.validate_argument(basename)?;
-
+    pub fn mkdir(
+        &mut self,
+        basename: &Path,
+        inode: Inode,
+        mode: libc::mode_t,
+    ) -> io::Result<(RemoteDirEditor, Attr)> {
+        let mode = self.validate_arguments(basename, mode)?;
         let basename_str =
             basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
         let new_fd = self
             .service
-            .createDirectoryInDirectory(self.remote_dir_fd, basename_str)
+            .createDirectoryInDirectory(self.remote_dir_fd, basename_str, mode as i32)
             .map_err(into_io_error)?;
 
         let new_remote_dir = RemoteDirEditor::new(self.service.clone(), new_fd);
-        self.entries.insert(basename.to_path_buf(), inode);
-        Ok(new_remote_dir)
+        self.entries.insert(basename.to_path_buf(), InodeInfo { inode, is_dir: true });
+        let new_attr = Attr::new_dir_with_mode(self.service.clone(), new_fd, mode);
+        Ok((new_remote_dir, new_attr))
+    }
+
+    /// Deletes a file.
+    pub fn delete_file(&mut self, basename: &Path) -> io::Result<Inode> {
+        let inode = self.force_delete_entry(basename, /* expect_dir */ false)?;
+
+        let basename_str =
+            basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+        if let Err(e) = self.service.deleteFile(self.remote_dir_fd, basename_str) {
+            // Ignore the error to honor the local state.
+            warn!("Deletion on the host is reportedly failed: {:?}", e);
+        }
+        Ok(inode)
+    }
+
+    /// Forcibly deletes a directory. The caller must only call this if `basename` is a directory
+    /// and it is empty.
+    pub fn force_delete_directory(&mut self, basename: &Path) -> io::Result<Inode> {
+        let inode = self.force_delete_entry(basename, /* expect_dir */ true)?;
+
+        let basename_str =
+            basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+        if let Err(e) = self.service.deleteDirectory(self.remote_dir_fd, basename_str) {
+            // Ignore the error to honor the local state.
+            warn!("Deletion on the host is reportedly failed: {:?}", e);
+        }
+        Ok(inode)
     }
 
     /// Returns the inode number of a file or directory named `name` previously created through
     /// `RemoteDirEditor`.
-    pub fn find_inode(&self, name: &Path) -> Option<Inode> {
-        self.entries.get(name).copied()
+    pub fn find_inode(&self, name: &Path) -> io::Result<Inode> {
+        self.entries
+            .get(name)
+            .map(|entry| entry.inode)
+            .ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
     }
 
-    fn validate_argument(&self, basename: &Path) -> io::Result<()> {
+    /// Returns whether the directory has an entry of the given name.
+    pub fn has_entry(&self, name: &Path) -> bool {
+        self.entries.contains_key(name)
+    }
+
+    pub fn retrieve_entries(&self) -> io::Result<Vec<AuthFsDirEntry>> {
+        self.entries
+            .iter()
+            .map(|(name, InodeInfo { inode, is_dir })| {
+                Ok(AuthFsDirEntry { inode: *inode, name: path_to_cstring(name)?, is_dir: *is_dir })
+            })
+            .collect::<io::Result<Vec<_>>>()
+    }
+
+    fn force_delete_entry(&mut self, basename: &Path, expect_dir: bool) -> io::Result<Inode> {
+        // Kernel should only give us a basename.
+        debug_assert!(validate_basename(basename).is_ok());
+
+        if let Some(entry) = self.entries.get(basename) {
+            match (expect_dir, entry.is_dir) {
+                (true, false) => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+                (false, true) => Err(io::Error::from_raw_os_error(libc::EISDIR)),
+                _ => {
+                    let inode = entry.inode;
+                    let _ = self.entries.remove(basename);
+                    Ok(inode)
+                }
+            }
+        } else {
+            Err(io::Error::from_raw_os_error(libc::ENOENT))
+        }
+    }
+
+    fn validate_arguments(&self, basename: &Path, mode: u32) -> io::Result<u32> {
         // Kernel should only give us a basename.
         debug_assert!(validate_basename(basename).is_ok());
 
         if self.entries.contains_key(basename) {
-            Err(io::Error::from_raw_os_error(libc::EEXIST))
-        } else if self.entries.len() >= MAX_ENTRIES.into() {
-            Err(io::Error::from_raw_os_error(libc::EMLINK))
-        } else {
-            Ok(())
+            return Err(io::Error::from_raw_os_error(libc::EEXIST));
         }
+
+        if self.entries.len() >= MAX_ENTRIES.into() {
+            return Err(io::Error::from_raw_os_error(libc::EMLINK));
+        }
+
+        Ok(Mode::from_bits_truncate(mode).bits())
     }
 }
 
 /// An in-memory directory representation of a directory structure.
-pub struct InMemoryDir(HashMap<PathBuf, Inode>);
+pub struct InMemoryDir(HashMap<PathBuf, InodeInfo>);
 
 impl InMemoryDir {
     /// Creates an empty instance of `InMemoryDir`.
@@ -131,16 +217,26 @@
         self.0.len() as u16 // limited to MAX_ENTRIES
     }
 
-    /// Adds an entry (name and the inode number) to the directory. Fails if already exists. The
+    /// Adds a directory name and its inode number to the directory. Fails if already exists. The
     /// caller is responsible for ensure the inode uniqueness.
-    pub fn add_entry(&mut self, basename: &Path, inode: Inode) -> io::Result<()> {
+    pub fn add_dir(&mut self, basename: &Path, inode: Inode) -> io::Result<()> {
+        self.add_entry(basename, InodeInfo { inode, is_dir: true })
+    }
+
+    /// Adds a file name and its inode number to the directory. Fails if already exists. The
+    /// caller is responsible for ensuring inode uniqueness.
+    pub fn add_file(&mut self, basename: &Path, inode: Inode) -> io::Result<()> {
+        self.add_entry(basename, InodeInfo { inode, is_dir: false })
+    }
+
+    fn add_entry(&mut self, basename: &Path, dir_entry: InodeInfo) -> io::Result<()> {
         validate_basename(basename)?;
         if self.0.len() >= MAX_ENTRIES.into() {
             return Err(io::Error::from_raw_os_error(libc::EMLINK));
         }
 
         if let hash_map::Entry::Vacant(entry) = self.0.entry(basename.to_path_buf()) {
-            entry.insert(inode);
+            entry.insert(dir_entry);
             Ok(())
         } else {
             Err(io::Error::from_raw_os_error(libc::EEXIST))
@@ -149,8 +245,22 @@
 
     /// Looks up an entry inode by name. `None` if not found.
     pub fn lookup_inode(&self, basename: &Path) -> Option<Inode> {
-        self.0.get(basename).copied()
+        self.0.get(basename).map(|entry| entry.inode)
     }
+
+    pub fn retrieve_entries(&self) -> io::Result<Vec<AuthFsDirEntry>> {
+        self.0
+            .iter()
+            .map(|(name, InodeInfo { inode, is_dir })| {
+                Ok(AuthFsDirEntry { inode: *inode, name: path_to_cstring(name)?, is_dir: *is_dir })
+            })
+            .collect::<io::Result<Vec<_>>>()
+    }
+}
+
+fn path_to_cstring(path: &Path) -> io::Result<CString> {
+    let bytes = OsString::from(path).into_vec();
+    CString::new(bytes).map_err(|_| io::Error::from_raw_os_error(libc::EILSEQ))
 }
 
 fn into_io_error(e: VirtFdServiceStatus) -> io::Error {
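A small standalone sketch (not part of the change) of the errno mapping performed by `force_delete_entry` above when the entry type does not match the requested operation:

    // unlink on a directory reports EISDIR; rmdir on a regular file reports ENOTDIR.
    fn delete_errno(expect_dir: bool, is_dir: bool) -> Option<i32> {
        match (expect_dir, is_dir) {
            (true, false) => Some(libc::ENOTDIR),
            (false, true) => Some(libc::EISDIR),
            _ => None, // types agree: the entry can be removed
        }
    }

    fn main() {
        assert_eq!(delete_errno(false, true), Some(libc::EISDIR));
        assert_eq!(delete_errno(true, true), None);
    }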
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index 549df1e..cbd24a9 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -14,49 +14,44 @@
  * limitations under the License.
  */
 
-use anyhow::{anyhow, bail, Result};
-use log::{debug, warn};
-use std::collections::{btree_map, BTreeMap};
-use std::convert::TryFrom;
-use std::ffi::{CStr, OsStr};
-use std::fs::OpenOptions;
-use std::io;
-use std::mem::{zeroed, MaybeUninit};
-use std::option::Option;
-use std::os::unix::{ffi::OsStrExt, io::AsRawFd};
-use std::path::{Component, Path, PathBuf};
-use std::sync::atomic::{AtomicU64, Ordering};
-use std::sync::Mutex;
-use std::time::Duration;
+mod mount;
 
+use anyhow::{anyhow, bail, Result};
 use fuse::filesystem::{
     Context, DirEntry, DirectoryIterator, Entry, FileSystem, FsOptions, GetxattrReply,
     SetattrValid, ZeroCopyReader, ZeroCopyWriter,
 };
-use fuse::mount::MountOption;
+use fuse::sys::OpenOptions as FuseOpenOptions;
+use log::{debug, error, warn};
+use std::collections::{btree_map, BTreeMap};
+use std::convert::{TryFrom, TryInto};
+use std::ffi::{CStr, CString, OsStr};
+use std::io;
+use std::mem::{zeroed, MaybeUninit};
+use std::option::Option;
+use std::os::unix::ffi::OsStrExt;
+use std::path::{Component, Path, PathBuf};
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
 
 use crate::common::{divide_roundup, ChunkedSizeIter, CHUNK_SIZE};
 use crate::file::{
-    validate_basename, InMemoryDir, RandomWrite, ReadByChunk, RemoteDirEditor, RemoteFileEditor,
-    RemoteFileReader, RemoteMerkleTreeReader,
+    validate_basename, Attr, InMemoryDir, RandomWrite, ReadByChunk, RemoteDirEditor,
+    RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader,
 };
 use crate::fsstat::RemoteFsStatsReader;
 use crate::fsverity::{VerifiedFileEditor, VerifiedFileReader};
 
+pub use self::mount::mount_and_enter_message_loop;
+use self::mount::MAX_WRITE_BYTES;
+
 pub type Inode = u64;
 type Handle = u64;
 
 const DEFAULT_METADATA_TIMEOUT: Duration = Duration::from_secs(5);
 const ROOT_INODE: Inode = 1;
 
-/// Maximum bytes in the write transaction to the FUSE device. This limits the maximum buffer
-/// size in a read request (including FUSE protocol overhead) that the filesystem writes to.
-const MAX_WRITE_BYTES: u32 = 65536;
-
-/// Maximum bytes in a read operation.
-/// TODO(victorhsieh): This option is deprecated by FUSE. Figure out if we can remove this.
-const MAX_READ_BYTES: u32 = 65536;
-
 /// `AuthFsEntry` defines the filesystem entry type supported by AuthFS.
 pub enum AuthFsEntry {
     /// A read-only directory (writable during initialization). Root directory is an example.
@@ -71,21 +66,143 @@
     UnverifiedReadonly { reader: RemoteFileReader, file_size: u64 },
     /// A file type that is initially empty, and the content is stored on a remote server. File
     /// integrity is guaranteed with private Merkle tree.
-    VerifiedNew { editor: VerifiedFileEditor<RemoteFileEditor> },
+    VerifiedNew { editor: VerifiedFileEditor<RemoteFileEditor>, attr: Attr },
     /// A directory type that is initially empty. One can create new file (`VerifiedNew`) and new
     /// directory (`VerifiedNewDirectory` itself) with integrity guaranteed within the VM.
-    VerifiedNewDirectory { dir: RemoteDirEditor },
+    VerifiedNewDirectory { dir: RemoteDirEditor, attr: Attr },
 }
 
+impl AuthFsEntry {
+    fn expect_empty_deletable_directory(&self) -> io::Result<()> {
+        match self {
+            AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+                if dir.number_of_entries() == 0 {
+                    Ok(())
+                } else {
+                    Err(io::Error::from_raw_os_error(libc::ENOTEMPTY))
+                }
+            }
+            AuthFsEntry::ReadonlyDirectory { .. } => {
+                Err(io::Error::from_raw_os_error(libc::EACCES))
+            }
+            _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+        }
+    }
+}
+
+struct InodeState {
+    /// Actual inode entry.
+    entry: AuthFsEntry,
+
+    /// Number of `Handle`s (i.e. file descriptors) that are currently referring to this inode.
+    ///
+    /// Technically, this does not matter to readonly entries, since they live forever. The
+    /// reference count is only needed for managing the lifetime of writable entries like `VerifiedNew`
+    /// and `VerifiedNewDirectory`. That is, when an entry is deleted, the actual entry needs to
+    /// stay alive until the reference count reaches zero.
+    ///
+    /// Note: This is not to be confused with hardlinks, which AuthFS doesn't currently implement.
+    handle_ref_count: u64,
+
+    /// Whether the inode is already unlinked, i.e. should be removed once `handle_ref_count` is
+    /// down to zero.
+    unlinked: bool,
+}
+
+impl InodeState {
+    fn new(entry: AuthFsEntry) -> Self {
+        InodeState { entry, handle_ref_count: 0, unlinked: false }
+    }
+
+    fn new_with_ref_count(entry: AuthFsEntry, handle_ref_count: u64) -> Self {
+        InodeState { entry, handle_ref_count, unlinked: false }
+    }
+}
+
+/// Data type in which a directory implementation presents its entries to `AuthFs`.
+#[derive(Clone)]
+pub struct AuthFsDirEntry {
+    pub inode: Inode,
+    pub name: CString,
+    pub is_dir: bool,
+}
+
+/// A snapshot of a directory's entries for supporting the `readdir` operation.
+///
+/// The `readdir` implementation is required by FUSE to not return any entries that have been
+/// returned previously (while it's fine to not return new entries). A snapshot is the easiest way to
+/// be compliant. See `fuse::filesystem::readdir` for more details.
+///
+/// A `DirEntriesSnapshot` is created on `opendir`, and is associated with the returned
+/// `Handle`/FD. The snapshot is deleted when the handle is released in `releasedir`.
+type DirEntriesSnapshot = Vec<AuthFsDirEntry>;
+
+/// An iterator for reading from `DirEntriesSnapshot`.
+pub struct DirEntriesSnapshotIterator {
+    /// A reference to the `DirEntriesSnapshot` in `AuthFs`.
+    snapshot: Arc<DirEntriesSnapshot>,
+
+    /// Offset of the last entry returned, used by `Self` to resume the read. 0 is reserved by
+    /// FUSE to mean reading from the beginning.
+    prev_offset: usize,
+}
+
+impl<'a> DirectoryIterator for DirEntriesSnapshotIterator {
+    fn next(&mut self) -> Option<DirEntry> {
+        // This iterator should not be the only reference to the snapshot. The snapshot should
+        // still be held in `dir_handle_table`, i.e. while the FD is not yet closed.
+        //
+        // This code is unreachable when `readdir` is called with a closed FD; a
+        // `DirEntriesSnapshotIterator` can only be created while the FD is still open (and even
+        // then it is short-lived, within `readdir`).
+        debug_assert!(Arc::strong_count(&self.snapshot) >= 2);
+
+        // Since 0 is reserved, let's use a 1-based index for the offset. This allows us to
+        // resume from the previous read in the snapshot easily.
+        let current_offset = if self.prev_offset == 0 {
+            1 // first element in the vector
+        } else {
+            self.prev_offset + 1 // next element in the vector
+        };
+        if current_offset > self.snapshot.len() {
+            None
+        } else {
+            let AuthFsDirEntry { inode, name, is_dir } = &self.snapshot[current_offset - 1];
+            let entry = DirEntry {
+                offset: current_offset as u64,
+                ino: *inode,
+                name,
+                type_: if *is_dir { libc::DT_DIR.into() } else { libc::DT_REG.into() },
+            };
+            self.prev_offset = current_offset;
+            Some(entry)
+        }
+    }
+}
+
+type DirHandleTable = BTreeMap<Handle, Arc<DirEntriesSnapshot>>;
+
 // AuthFS needs to be `Sync` to be accepted by fuse::worker::start_message_loop as a `FileSystem`.
 pub struct AuthFs {
-    /// Table for `Inode` to `AuthFsEntry` lookup. This needs to be `Sync` to be used in
+    /// Table for `Inode` to `InodeState` lookup. This needs to be `Sync` to be used in
     /// `fuse::worker::start_message_loop`.
-    inode_table: Mutex<BTreeMap<Inode, AuthFsEntry>>,
+    inode_table: Mutex<BTreeMap<Inode, InodeState>>,
 
     /// The next available inode number.
     next_inode: AtomicU64,
 
+    /// Table for `Handle` to `Arc<DirEntriesSnapshot>` lookup. On `opendir`, a new directory
+    /// handle is allocated and a snapshot of the current directory entries is taken. This is not
+    /// super efficient, but is the simplest way to be compliant with the FUSE contract (see
+    /// `fuse::filesystem::readdir`).
+    ///
+    /// Currently, no code locks `dir_handle_table` and `inode_table` at the same time to avoid
+    /// deadlock.
+    dir_handle_table: Mutex<DirHandleTable>,
+
+    /// The next available handle number.
+    next_handle: AtomicU64,
+
     /// A reader to access the remote filesystem stats, which is supposed to be of "the" output
     /// directory. We assume all output are stored in the same partition.
     remote_fs_stats_reader: RemoteFsStatsReader,
@@ -97,11 +214,16 @@
 impl AuthFs {
     pub fn new(remote_fs_stats_reader: RemoteFsStatsReader) -> AuthFs {
         let mut inode_table = BTreeMap::new();
-        inode_table.insert(ROOT_INODE, AuthFsEntry::ReadonlyDirectory { dir: InMemoryDir::new() });
+        inode_table.insert(
+            ROOT_INODE,
+            InodeState::new(AuthFsEntry::ReadonlyDirectory { dir: InMemoryDir::new() }),
+        );
 
         AuthFs {
             inode_table: Mutex::new(inode_table),
             next_inode: AtomicU64::new(ROOT_INODE + 1),
+            dir_handle_table: Mutex::new(BTreeMap::new()),
+            next_handle: AtomicU64::new(1),
             remote_fs_stats_reader,
         }
     }
@@ -135,10 +257,12 @@
                     Component::Normal(name) => {
                         let inode_table = self.inode_table.get_mut().unwrap();
                         // Locate the internal directory structure.
-                        let current_dir_entry =
-                            inode_table.get_mut(&current_dir_inode).ok_or_else(|| {
+                        let current_dir_entry = &mut inode_table
+                            .get_mut(&current_dir_inode)
+                            .ok_or_else(|| {
                                 anyhow!("Unknown directory inode {}", current_dir_inode)
-                            })?;
+                            })?
+                            .entry;
                         let dir = match current_dir_entry {
                             AuthFsEntry::ReadonlyDirectory { dir } => dir,
                             _ => unreachable!("Not a ReadonlyDirectory"),
@@ -152,8 +276,11 @@
                                 AuthFsEntry::ReadonlyDirectory { dir: InMemoryDir::new() };
 
                             // Actually update the tables.
-                            dir.add_entry(name.as_ref(), new_inode)?;
-                            if inode_table.insert(new_inode, new_dir_entry).is_some() {
+                            dir.add_dir(name.as_ref(), new_inode)?;
+                            if inode_table
+                                .insert(new_inode, InodeState::new(new_dir_entry))
+                                .is_some()
+                            {
                                 bail!("Unexpected to find a duplicated inode");
                             }
                             Ok(new_inode)
@@ -165,15 +292,16 @@
 
         // 2. Insert the entry to the parent directory, as well as the inode table.
         let inode_table = self.inode_table.get_mut().unwrap();
-        match inode_table.get_mut(&parent_inode).expect("previously returned inode") {
+        let inode_state = inode_table.get_mut(&parent_inode).expect("previously returned inode");
+        match &mut inode_state.entry {
             AuthFsEntry::ReadonlyDirectory { dir } => {
                 let basename =
                     path.file_name().ok_or_else(|| anyhow!("Bad file name: {:?}", path))?;
                 let new_inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
 
                 // Actually update the tables.
-                dir.add_entry(basename.as_ref(), new_inode)?;
-                if inode_table.insert(new_inode, entry).is_some() {
+                dir.add_file(basename.as_ref(), new_inode)?;
+                if inode_table.insert(new_inode, InodeState::new(entry)).is_some() {
                     bail!("Unexpected to find a duplicated inode");
                 }
                 Ok(new_inode)
@@ -192,12 +320,11 @@
         F: FnOnce(&AuthFsEntry) -> io::Result<R>,
     {
         let inode_table = self.inode_table.lock().unwrap();
-        let entry =
-            inode_table.get(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
-        handle_fn(entry)
+        handle_inode_locked(&inode_table, inode, |inode_state| handle_fn(&inode_state.entry))
     }
 
-    /// Adds a new entry `name` created by `create_fn` at `parent_inode`.
+    /// Adds a new entry `name` created by `create_fn` at `parent_inode`, with an initial ref count
+    /// of one.
     ///
     /// The operation involves two updates: adding the name with a new allocated inode to the
     /// parent directory, and insert the new inode and the actual `AuthFsEntry` to the global inode
@@ -205,7 +332,7 @@
     ///
     /// `create_fn` receives the parent directory, through which it can create the new entry at and
     /// register the new inode to. Its returned entry is then added to the inode table.
-    fn create_new_entry<F>(
+    fn create_new_entry_with_ref_count<F>(
         &self,
         parent_inode: Inode,
         name: &CStr,
@@ -215,20 +342,38 @@
         F: FnOnce(&mut AuthFsEntry, &Path, Inode) -> io::Result<AuthFsEntry>,
     {
         let mut inode_table = self.inode_table.lock().unwrap();
-        let parent_entry = inode_table
-            .get_mut(&parent_inode)
-            .ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
+        let (new_inode, new_file_entry) = handle_inode_mut_locked(
+            &mut inode_table,
+            &parent_inode,
+            |InodeState { entry, .. }| {
+                let new_inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
+                let basename: &Path = cstr_to_path(name);
+                let new_file_entry = create_fn(entry, basename, new_inode)?;
+                Ok((new_inode, new_file_entry))
+            },
+        )?;
 
-        let new_inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
-        let basename: &Path = cstr_to_path(name);
-        let new_file_entry = create_fn(parent_entry, basename, new_inode)?;
         if let btree_map::Entry::Vacant(entry) = inode_table.entry(new_inode) {
-            entry.insert(new_file_entry);
+            entry.insert(InodeState::new_with_ref_count(new_file_entry, 1));
             Ok(new_inode)
         } else {
             unreachable!("Unexpected duplication of inode {}", new_inode);
         }
     }
+
+    fn open_dir_store_snapshot(
+        &self,
+        dir_entries: Vec<AuthFsDirEntry>,
+    ) -> io::Result<(Option<Handle>, FuseOpenOptions)> {
+        let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
+        let mut dir_handle_table = self.dir_handle_table.lock().unwrap();
+        if let btree_map::Entry::Vacant(value) = dir_handle_table.entry(handle) {
+            value.insert(Arc::new(dir_entries));
+            Ok((Some(handle), FuseOpenOptions::empty()))
+        } else {
+            unreachable!("Unexpected to see new handle {} already existing in the table", handle);
+        }
+    }
 }
 
 fn check_access_mode(flags: u32, mode: libc::c_int) -> io::Result<()> {
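A hedged, standalone sketch of the offset convention used by `DirEntriesSnapshotIterator` above: offsets are 1-based indices into the snapshot, 0 meaning "start from the beginning", and each returned entry carries the offset at which to resume (not part of the change):

    // prev_offset is 0 for a fresh readdir, otherwise the offset of the last entry returned.
    fn next_entry<'a>(snapshot: &'a [&'a str], prev_offset: usize) -> Option<(&'a str, u64)> {
        let current_offset = prev_offset + 1; // 1-based index into the snapshot
        snapshot.get(current_offset - 1).map(|name| (*name, current_offset as u64))
    }

    fn main() {
        let snapshot = ["foo", "bar"];
        assert_eq!(next_entry(&snapshot, 0), Some(("foo", 1)));
        assert_eq!(next_entry(&snapshot, 1), Some(("bar", 2)));
        assert_eq!(next_entry(&snapshot, 2), None);
    }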
@@ -250,7 +395,7 @@
 #[allow(clippy::enum_variant_names)]
 enum AccessMode {
     ReadOnly,
-    ReadWrite,
+    Variable(u32),
 }
 
 fn create_stat(
@@ -263,10 +408,11 @@
 
     st.st_ino = ino;
     st.st_mode = match access_mode {
-        // Until needed, let's just grant the owner access.
-        // TODO(205169366): Implement mode properly.
-        AccessMode::ReadOnly => libc::S_IFREG | libc::S_IRUSR,
-        AccessMode::ReadWrite => libc::S_IFREG | libc::S_IRUSR | libc::S_IWUSR,
+        AccessMode::ReadOnly => {
+            // Until needed, let's just grant the owner access.
+            libc::S_IFREG | libc::S_IRUSR
+        }
+        AccessMode::Variable(mode) => libc::S_IFREG | mode,
     };
     st.st_nlink = 1;
     st.st_uid = 0;
@@ -280,18 +426,22 @@
     Ok(st)
 }
 
-fn create_dir_stat(ino: libc::ino_t, file_number: u16) -> io::Result<libc::stat64> {
+fn create_dir_stat(
+    ino: libc::ino_t,
+    file_number: u16,
+    access_mode: AccessMode,
+) -> io::Result<libc::stat64> {
     // SAFETY: stat64 is a plan C struct without pointer.
     let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
 
     st.st_ino = ino;
-    // TODO(205169366): Implement mode properly.
-    st.st_mode = libc::S_IFDIR
-        | libc::S_IXUSR
-        | libc::S_IWUSR
-        | libc::S_IRUSR
-        | libc::S_IXGRP
-        | libc::S_IXOTH;
+    st.st_mode = match access_mode {
+        AccessMode::ReadOnly => {
+            // Until needed, let's just grant the owner access and search to group and others.
+            libc::S_IFDIR | libc::S_IXUSR | libc::S_IRUSR | libc::S_IXGRP | libc::S_IXOTH
+        }
+        AccessMode::Variable(mode) => libc::S_IFDIR | mode,
+    };
 
     // 2 extra for . and ..
     st.st_nlink = file_number
@@ -343,19 +493,10 @@
     Ok(total)
 }
 
-// TODO(205715172): Support enumerating directory entries.
-pub struct EmptyDirectoryIterator {}
-
-impl DirectoryIterator for EmptyDirectoryIterator {
-    fn next(&mut self) -> Option<DirEntry> {
-        None
-    }
-}
-
 impl FileSystem for AuthFs {
     type Inode = Inode;
     type Handle = Handle;
-    type DirIter = EmptyDirectoryIterator;
+    type DirIter = DirEntriesSnapshotIterator;
 
     fn max_buffer_size(&self) -> u32 {
         MAX_WRITE_BYTES
@@ -368,39 +509,49 @@
     }
 
     fn lookup(&self, _ctx: Context, parent: Inode, name: &CStr) -> io::Result<Entry> {
-        // Look up the entry's inode number in parent directory.
-        let inode = self.handle_inode(&parent, |parent_entry| match parent_entry {
-            AuthFsEntry::ReadonlyDirectory { dir } => {
-                let path = cstr_to_path(name);
-                dir.lookup_inode(path).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
-            }
-            AuthFsEntry::VerifiedNewDirectory { dir } => {
-                let path = cstr_to_path(name);
-                dir.find_inode(path).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
-            }
-            _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
-        })?;
+        let mut inode_table = self.inode_table.lock().unwrap();
 
-        // Normally, `lookup` is required to increase a reference count for the inode (while
-        // `forget` will decrease it). It is not yet necessary until we start to support
-        // deletion (only for `VerifiedNewDirectory`).
+        // Look up the entry's inode number in parent directory.
+        let inode =
+            handle_inode_locked(&inode_table, &parent, |inode_state| match &inode_state.entry {
+                AuthFsEntry::ReadonlyDirectory { dir } => {
+                    let path = cstr_to_path(name);
+                    dir.lookup_inode(path).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+                }
+                AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+                    let path = cstr_to_path(name);
+                    dir.find_inode(path)
+                }
+                _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+            })?;
 
         // Create the entry's stat if found.
-        let st = self.handle_inode(&inode, |entry| match entry {
-            AuthFsEntry::ReadonlyDirectory { dir } => {
-                create_dir_stat(inode, dir.number_of_entries())
-            }
-            AuthFsEntry::UnverifiedReadonly { file_size, .. }
-            | AuthFsEntry::VerifiedReadonly { file_size, .. } => {
-                create_stat(inode, *file_size, AccessMode::ReadOnly)
-            }
-            AuthFsEntry::VerifiedNew { editor } => {
-                create_stat(inode, editor.size(), AccessMode::ReadWrite)
-            }
-            AuthFsEntry::VerifiedNewDirectory { dir } => {
-                create_dir_stat(inode, dir.number_of_entries())
-            }
-        })?;
+        let st = handle_inode_mut_locked(
+            &mut inode_table,
+            &inode,
+            |InodeState { entry, handle_ref_count, .. }| {
+                let st = match entry {
+                    AuthFsEntry::ReadonlyDirectory { dir } => {
+                        create_dir_stat(inode, dir.number_of_entries(), AccessMode::ReadOnly)
+                    }
+                    AuthFsEntry::UnverifiedReadonly { file_size, .. }
+                    | AuthFsEntry::VerifiedReadonly { file_size, .. } => {
+                        create_stat(inode, *file_size, AccessMode::ReadOnly)
+                    }
+                    AuthFsEntry::VerifiedNew { editor, attr, .. } => {
+                        create_stat(inode, editor.size(), AccessMode::Variable(attr.mode()))
+                    }
+                    AuthFsEntry::VerifiedNewDirectory { dir, attr } => create_dir_stat(
+                        inode,
+                        dir.number_of_entries(),
+                        AccessMode::Variable(attr.mode()),
+                    ),
+                }?;
+                *handle_ref_count += 1;
+                Ok(st)
+            },
+        )?;
+
         Ok(Entry {
             inode,
             generation: 0,
@@ -410,6 +561,38 @@
         })
     }
 
+    fn forget(&self, _ctx: Context, inode: Self::Inode, count: u64) {
+        let mut inode_table = self.inode_table.lock().unwrap();
+        let delete_now = handle_inode_mut_locked(
+            &mut inode_table,
+            &inode,
+            |InodeState { handle_ref_count, unlinked, .. }| {
+                if count > *handle_ref_count {
+                    error!(
+                        "Trying to decrease refcount of inode {} by {} (> current {})",
+                        inode, count, *handle_ref_count
+                    );
+                    panic!(); // the error! above already logged the details to logcat
+                }
+                *handle_ref_count = handle_ref_count.saturating_sub(count);
+                Ok(*unlinked && *handle_ref_count == 0)
+            },
+        );
+
+        match delete_now {
+            Ok(true) => {
+                let _ = inode_table.remove(&inode).expect("Removed an existing entry");
+            }
+            Ok(false) => { /* Let the inode stay */ }
+            Err(e) => {
+                warn!(
+                    "Unexpected failure when tries to forget an inode {} by refcount {}: {:?}",
+                    inode, count, e
+                );
+            }
+        }
+    }
+
     fn getattr(
         &self,
         _ctx: Context,
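For clarity, a minimal standalone sketch (not part of the change) of the lifetime rule that `lookup` and `forget` above implement together: an inode entry is dropped only once it is both unlinked and no longer referenced by any handle:

    struct RefState {
        handle_ref_count: u64,
        unlinked: bool,
    }

    // Returns true when the caller should actually remove the inode from the table.
    fn forget(state: &mut RefState, count: u64) -> bool {
        state.handle_ref_count = state.handle_ref_count.saturating_sub(count);
        state.unlinked && state.handle_ref_count == 0
    }

    fn main() {
        // Two lookups were issued, then the file was unlinked.
        let mut state = RefState { handle_ref_count: 2, unlinked: true };
        assert!(!forget(&mut state, 1)); // still referenced by one kernel handle
        assert!(forget(&mut state, 1)); // last reference gone: delete now
    }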
@@ -420,18 +603,20 @@
             Ok((
                 match config {
                     AuthFsEntry::ReadonlyDirectory { dir } => {
-                        create_dir_stat(inode, dir.number_of_entries())
+                        create_dir_stat(inode, dir.number_of_entries(), AccessMode::ReadOnly)
                     }
                     AuthFsEntry::UnverifiedReadonly { file_size, .. }
                     | AuthFsEntry::VerifiedReadonly { file_size, .. } => {
                         create_stat(inode, *file_size, AccessMode::ReadOnly)
                     }
-                    AuthFsEntry::VerifiedNew { editor } => {
-                        create_stat(inode, editor.size(), AccessMode::ReadWrite)
+                    AuthFsEntry::VerifiedNew { editor, attr, .. } => {
+                        create_stat(inode, editor.size(), AccessMode::Variable(attr.mode()))
                     }
-                    AuthFsEntry::VerifiedNewDirectory { dir } => {
-                        create_dir_stat(inode, dir.number_of_entries())
-                    }
+                    AuthFsEntry::VerifiedNewDirectory { dir, attr } => create_dir_stat(
+                        inode,
+                        dir.number_of_entries(),
+                        AccessMode::Variable(attr.mode()),
+                    ),
                 }?,
                 DEFAULT_METADATA_TIMEOUT,
             ))
@@ -443,7 +628,7 @@
         _ctx: Context,
         inode: Self::Inode,
         flags: u32,
-    ) -> io::Result<(Option<Self::Handle>, fuse::sys::OpenOptions)> {
+    ) -> io::Result<(Option<Self::Handle>, FuseOpenOptions)> {
         // Since file handle is not really used in later operations (which use Inode directly),
         // return None as the handle.
         self.handle_inode(&inode, |config| {
@@ -452,8 +637,8 @@
                     check_access_mode(flags, libc::O_RDONLY)?;
                 }
                 AuthFsEntry::VerifiedNew { .. } => {
-                    // No need to check access modes since all the modes are allowed to the
-                    // read-writable file.
+                    // TODO(victorhsieh): Implement ACL check using the attr and ctx. Always allow
+                    // for now.
                 }
                 AuthFsEntry::ReadonlyDirectory { .. }
                 | AuthFsEntry::VerifiedNewDirectory { .. } => {
@@ -463,7 +648,7 @@
             }
             // Always cache the file content. There is currently no need to support direct I/O or
             // avoid the cache buffer. Memory mapping is only possible with cache enabled.
-            Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
+            Ok((None, FuseOpenOptions::KEEP_CACHE))
         })
     }
 
@@ -472,37 +657,38 @@
         _ctx: Context,
         parent: Self::Inode,
         name: &CStr,
-        _mode: u32,
+        mode: u32,
         _flags: u32,
-        _umask: u32,
-    ) -> io::Result<(Entry, Option<Self::Handle>, fuse::sys::OpenOptions)> {
-        // TODO(205169366): Implement mode properly.
+        umask: u32,
+    ) -> io::Result<(Entry, Option<Self::Handle>, FuseOpenOptions)> {
         // TODO(205172873): handle O_TRUNC and O_EXCL properly.
-        let new_inode =
-            self.create_new_entry(parent, name, |parent_entry, basename, new_inode| {
-                match parent_entry {
-                    AuthFsEntry::VerifiedNewDirectory { dir } => {
-                        if dir.find_inode(basename).is_some() {
-                            return Err(io::Error::from_raw_os_error(libc::EEXIST));
-                        }
-                        let new_file = dir.create_file(basename, new_inode)?;
-                        Ok(AuthFsEntry::VerifiedNew { editor: new_file })
+        let new_inode = self.create_new_entry_with_ref_count(
+            parent,
+            name,
+            |parent_entry, basename, new_inode| match parent_entry {
+                AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+                    if dir.has_entry(basename) {
+                        return Err(io::Error::from_raw_os_error(libc::EEXIST));
                     }
-                    _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+                    let mode = mode & !umask;
+                    let (new_file, new_attr) = dir.create_file(basename, new_inode, mode)?;
+                    Ok(AuthFsEntry::VerifiedNew { editor: new_file, attr: new_attr })
                 }
-            })?;
+                _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+            },
+        )?;
 
         Ok((
             Entry {
                 inode: new_inode,
                 generation: 0,
-                attr: create_stat(new_inode, /* file_size */ 0, AccessMode::ReadWrite)?,
+                attr: create_stat(new_inode, /* file_size */ 0, AccessMode::Variable(mode))?,
                 entry_timeout: DEFAULT_METADATA_TIMEOUT,
                 attr_timeout: DEFAULT_METADATA_TIMEOUT,
             },
             // See also `open`.
             /* handle */ None,
-            fuse::sys::OpenOptions::KEEP_CACHE,
+            FuseOpenOptions::KEEP_CACHE,
         ))
     }
 
@@ -525,12 +711,15 @@
                 AuthFsEntry::UnverifiedReadonly { reader, file_size } => {
                     read_chunks(w, reader, *file_size, offset, size)
                 }
-                AuthFsEntry::VerifiedNew { editor } => {
+                AuthFsEntry::VerifiedNew { editor, .. } => {
                     // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
                     // request a read even if the file is open with O_WRONLY.
                     read_chunks(w, editor, editor.size(), offset, size)
                 }
-                _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+                AuthFsEntry::ReadonlyDirectory { .. }
+                | AuthFsEntry::VerifiedNewDirectory { .. } => {
+                    Err(io::Error::from_raw_os_error(libc::EISDIR))
+                }
             }
         })
     }
@@ -548,12 +737,17 @@
         _flags: u32,
     ) -> io::Result<usize> {
         self.handle_inode(&inode, |config| match config {
-            AuthFsEntry::VerifiedNew { editor } => {
+            AuthFsEntry::VerifiedNew { editor, .. } => {
                 let mut buf = vec![0; size as usize];
                 r.read_exact(&mut buf)?;
                 editor.write_at(&buf, offset)
             }
-            _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+            AuthFsEntry::VerifiedReadonly { .. } | AuthFsEntry::UnverifiedReadonly { .. } => {
+                Err(io::Error::from_raw_os_error(libc::EPERM))
+            }
+            AuthFsEntry::ReadonlyDirectory { .. } | AuthFsEntry::VerifiedNewDirectory { .. } => {
+                Err(io::Error::from_raw_os_error(libc::EISDIR))
+            }
         })
     }
 
@@ -561,55 +755,51 @@
         &self,
         _ctx: Context,
         inode: Inode,
-        attr: libc::stat64,
+        in_attr: libc::stat64,
         _handle: Option<Handle>,
         valid: SetattrValid,
     ) -> io::Result<(libc::stat64, Duration)> {
-        self.handle_inode(&inode, |config| {
-            match config {
-                AuthFsEntry::VerifiedNew { editor } => {
-                    // Initialize the default stat.
-                    let mut new_attr = create_stat(inode, editor.size(), AccessMode::ReadWrite)?;
-                    // `valid` indicates what fields in `attr` are valid. Update to return correctly.
-                    if valid.contains(SetattrValid::SIZE) {
-                        // st_size is i64, but the cast should be safe since kernel should not give a
-                        // negative size.
-                        debug_assert!(attr.st_size >= 0);
-                        new_attr.st_size = attr.st_size;
-                        editor.resize(attr.st_size as u64)?;
-                    }
+        let mut inode_table = self.inode_table.lock().unwrap();
+        handle_inode_mut_locked(&mut inode_table, &inode, |InodeState { entry, .. }| match entry {
+            AuthFsEntry::VerifiedNew { editor, attr } => {
+                check_unsupported_setattr_request(valid)?;
 
-                    if valid.contains(SetattrValid::MODE) {
-                        warn!("Changing st_mode is not currently supported");
-                        return Err(io::Error::from_raw_os_error(libc::ENOSYS));
-                    }
-                    if valid.contains(SetattrValid::UID) {
-                        warn!("Changing st_uid is not currently supported");
-                        return Err(io::Error::from_raw_os_error(libc::ENOSYS));
-                    }
-                    if valid.contains(SetattrValid::GID) {
-                        warn!("Changing st_gid is not currently supported");
-                        return Err(io::Error::from_raw_os_error(libc::ENOSYS));
-                    }
-                    if valid.contains(SetattrValid::CTIME) {
-                        debug!(
-                            "Ignoring ctime change as authfs does not maintain timestamp currently"
-                        );
-                    }
-                    if valid.intersects(SetattrValid::ATIME | SetattrValid::ATIME_NOW) {
-                        debug!(
-                            "Ignoring atime change as authfs does not maintain timestamp currently"
-                        );
-                    }
-                    if valid.intersects(SetattrValid::MTIME | SetattrValid::MTIME_NOW) {
-                        debug!(
-                            "Ignoring mtime change as authfs does not maintain timestamp currently"
-                        );
-                    }
-                    Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
+                // Initialize the default stat.
+                let mut new_attr =
+                    create_stat(inode, editor.size(), AccessMode::Variable(attr.mode()))?;
+                // `valid` indicates which fields in `in_attr` are valid. Update to return correctly.
+                if valid.contains(SetattrValid::SIZE) {
+                    // st_size is i64, but the cast should be safe since the kernel should not give
+                    // a negative size.
+                    debug_assert!(in_attr.st_size >= 0);
+                    new_attr.st_size = in_attr.st_size;
+                    editor.resize(in_attr.st_size as u64)?;
                 }
-                _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+                if valid.contains(SetattrValid::MODE) {
+                    attr.set_mode(in_attr.st_mode)?;
+                    new_attr.st_mode = in_attr.st_mode;
+                }
+                Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
             }
+            AuthFsEntry::VerifiedNewDirectory { dir, attr } => {
+                check_unsupported_setattr_request(valid)?;
+                if valid.contains(SetattrValid::SIZE) {
+                    return Err(io::Error::from_raw_os_error(libc::EISDIR));
+                }
+
+                // Initialize the default stat.
+                let mut new_attr = create_dir_stat(
+                    inode,
+                    dir.number_of_entries(),
+                    AccessMode::Variable(attr.mode()),
+                )?;
+                if valid.contains(SetattrValid::MODE) {
+                    attr.set_mode(in_attr.st_mode)?;
+                    new_attr.st_mode = in_attr.st_mode;
+                }
+                Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
+            }
+            _ => Err(io::Error::from_raw_os_error(libc::EPERM)),
         })
     }
 
@@ -622,7 +812,7 @@
     ) -> io::Result<GetxattrReply> {
         self.handle_inode(&inode, |config| {
             match config {
-                AuthFsEntry::VerifiedNew { editor } => {
+                AuthFsEntry::VerifiedNew { editor, .. } => {
                     // FUSE ioctl is limited, thus we can't implement fs-verity ioctls without a kernel
                     // change (see b/196635431). Until it's possible, use xattr to expose what we need
                     // as an authfs specific API.
@@ -652,36 +842,146 @@
         _ctx: Context,
         parent: Self::Inode,
         name: &CStr,
-        _mode: u32,
-        _umask: u32,
+        mode: u32,
+        umask: u32,
     ) -> io::Result<Entry> {
-        // TODO(205169366): Implement mode properly.
-        let new_inode =
-            self.create_new_entry(parent, name, |parent_entry, basename, new_inode| {
-                match parent_entry {
-                    AuthFsEntry::VerifiedNewDirectory { dir } => {
-                        if dir.find_inode(basename).is_some() {
-                            return Err(io::Error::from_raw_os_error(libc::EEXIST));
-                        }
-                        let new_dir = dir.mkdir(basename, new_inode)?;
-                        Ok(AuthFsEntry::VerifiedNewDirectory { dir: new_dir })
+        let new_inode = self.create_new_entry_with_ref_count(
+            parent,
+            name,
+            |parent_entry, basename, new_inode| match parent_entry {
+                AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+                    if dir.has_entry(basename) {
+                        return Err(io::Error::from_raw_os_error(libc::EEXIST));
                     }
-                    AuthFsEntry::ReadonlyDirectory { .. } => {
-                        Err(io::Error::from_raw_os_error(libc::EACCES))
-                    }
-                    _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+                    let mode = mode & !umask;
+                    let (new_dir, new_attr) = dir.mkdir(basename, new_inode, mode)?;
+                    Ok(AuthFsEntry::VerifiedNewDirectory { dir: new_dir, attr: new_attr })
                 }
-            })?;
+                AuthFsEntry::ReadonlyDirectory { .. } => {
+                    Err(io::Error::from_raw_os_error(libc::EACCES))
+                }
+                _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+            },
+        )?;
 
         Ok(Entry {
             inode: new_inode,
             generation: 0,
-            attr: create_dir_stat(new_inode, /* file_number */ 0)?,
+            attr: create_dir_stat(new_inode, /* file_number */ 0, AccessMode::Variable(mode))?,
             entry_timeout: DEFAULT_METADATA_TIMEOUT,
             attr_timeout: DEFAULT_METADATA_TIMEOUT,
         })
     }
 
+    fn unlink(&self, _ctx: Context, parent: Self::Inode, name: &CStr) -> io::Result<()> {
+        let mut inode_table = self.inode_table.lock().unwrap();
+        handle_inode_mut_locked(
+            &mut inode_table,
+            &parent,
+            |InodeState { entry, unlinked, .. }| match entry {
+                AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+                    let basename: &Path = cstr_to_path(name);
+                    // Delete the file from both the local and remote directories.
+                    let _inode = dir.delete_file(basename)?;
+                    *unlinked = true;
+                    Ok(())
+                }
+                AuthFsEntry::ReadonlyDirectory { .. } => {
+                    Err(io::Error::from_raw_os_error(libc::EACCES))
+                }
+                AuthFsEntry::VerifiedNew { .. } => {
+                    // Deleting an entry in the filesystem root is not currently supported.
+                    Err(io::Error::from_raw_os_error(libc::ENOSYS))
+                }
+                AuthFsEntry::UnverifiedReadonly { .. } | AuthFsEntry::VerifiedReadonly { .. } => {
+                    Err(io::Error::from_raw_os_error(libc::ENOTDIR))
+                }
+            },
+        )
+    }
+
+    fn rmdir(&self, _ctx: Context, parent: Self::Inode, name: &CStr) -> io::Result<()> {
+        let mut inode_table = self.inode_table.lock().unwrap();
+
+        // Check before the actual removal, using a read-only borrow.
+        handle_inode_locked(&inode_table, &parent, |inode_state| match &inode_state.entry {
+            AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+                let basename: &Path = cstr_to_path(name);
+                let existing_inode = dir.find_inode(basename)?;
+                handle_inode_locked(&inode_table, &existing_inode, |inode_state| {
+                    inode_state.entry.expect_empty_deletable_directory()
+                })
+            }
+            AuthFsEntry::ReadonlyDirectory { .. } => {
+                Err(io::Error::from_raw_os_error(libc::EACCES))
+            }
+            _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+        })?;
+
+        // Look up again, this time with a mutable borrow. This needs to be done separately
+        // because the previous, read-only lookup borrows multiple entry references in the table.
+        handle_inode_mut_locked(
+            &mut inode_table,
+            &parent,
+            |InodeState { entry, unlinked, .. }| match entry {
+                AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+                    let basename: &Path = cstr_to_path(name);
+                    let _inode = dir.force_delete_directory(basename)?;
+                    *unlinked = true;
+                    Ok(())
+                }
+                _ => unreachable!("Mismatched entry type that was just checked"),
+            },
+        )
+    }
+
+    fn opendir(
+        &self,
+        _ctx: Context,
+        inode: Self::Inode,
+        _flags: u32,
+    ) -> io::Result<(Option<Self::Handle>, FuseOpenOptions)> {
+        let entries = self.handle_inode(&inode, |config| match config {
+            AuthFsEntry::VerifiedNewDirectory { dir, .. } => dir.retrieve_entries(),
+            AuthFsEntry::ReadonlyDirectory { dir } => dir.retrieve_entries(),
+            _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+        })?;
+        self.open_dir_store_snapshot(entries)
+    }
+
+    fn readdir(
+        &self,
+        _ctx: Context,
+        _inode: Self::Inode,
+        handle: Self::Handle,
+        _size: u32,
+        offset: u64,
+    ) -> io::Result<Self::DirIter> {
+        let dir_handle_table = self.dir_handle_table.lock().unwrap();
+        if let Some(entry) = dir_handle_table.get(&handle) {
+            Ok(DirEntriesSnapshotIterator {
+                snapshot: entry.clone(),
+                prev_offset: offset.try_into().unwrap(),
+            })
+        } else {
+            Err(io::Error::from_raw_os_error(libc::EBADF))
+        }
+    }
+
+    fn releasedir(
+        &self,
+        _ctx: Context,
+        inode: Self::Inode,
+        _flags: u32,
+        handle: Self::Handle,
+    ) -> io::Result<()> {
+        let mut dir_handle_table = self.dir_handle_table.lock().unwrap();
+        if dir_handle_table.remove(&handle).is_none() {
+            unreachable!("Unknown directory handle {}, inode {}", handle, inode);
+        }
+        Ok(())
+    }
+
     fn statfs(&self, _ctx: Context, _inode: Self::Inode) -> io::Result<libc::statvfs64> {
         let remote_stat = self.remote_fs_stats_reader.statfs()?;
 
@@ -709,34 +1009,55 @@
     }
 }
 
-/// Mount and start the FUSE instance. This requires CAP_SYS_ADMIN.
-pub fn loop_forever(
-    authfs: AuthFs,
-    mountpoint: &Path,
-    extra_options: &Option<String>,
-) -> Result<(), fuse::Error> {
-    let dev_fuse = OpenOptions::new()
-        .read(true)
-        .write(true)
-        .open("/dev/fuse")
-        .expect("Failed to open /dev/fuse");
-
-    let mut mount_options = vec![
-        MountOption::FD(dev_fuse.as_raw_fd()),
-        MountOption::RootMode(libc::S_IFDIR | libc::S_IXUSR | libc::S_IXGRP | libc::S_IXOTH),
-        MountOption::AllowOther,
-        MountOption::UserId(0),
-        MountOption::GroupId(0),
-        MountOption::MaxRead(MAX_READ_BYTES),
-    ];
-    if let Some(value) = extra_options {
-        mount_options.push(MountOption::Extra(value));
+fn handle_inode_locked<F, R>(
+    inode_table: &BTreeMap<Inode, InodeState>,
+    inode: &Inode,
+    handle_fn: F,
+) -> io::Result<R>
+where
+    F: FnOnce(&InodeState) -> io::Result<R>,
+{
+    if let Some(inode_state) = inode_table.get(inode) {
+        handle_fn(inode_state)
+    } else {
+        Err(io::Error::from_raw_os_error(libc::ENOENT))
     }
+}
 
-    fuse::mount(mountpoint, "authfs", libc::MS_NOSUID | libc::MS_NODEV, &mount_options)
-        .expect("Failed to mount fuse");
+fn handle_inode_mut_locked<F, R>(
+    inode_table: &mut BTreeMap<Inode, InodeState>,
+    inode: &Inode,
+    handle_fn: F,
+) -> io::Result<R>
+where
+    F: FnOnce(&mut InodeState) -> io::Result<R>,
+{
+    if let Some(inode_state) = inode_table.get_mut(inode) {
+        handle_fn(inode_state)
+    } else {
+        Err(io::Error::from_raw_os_error(libc::ENOENT))
+    }
+}
 
-    fuse::worker::start_message_loop(dev_fuse, MAX_WRITE_BYTES, MAX_READ_BYTES, authfs)
+fn check_unsupported_setattr_request(valid: SetattrValid) -> io::Result<()> {
+    if valid.contains(SetattrValid::UID) {
+        warn!("Changing st_uid is not currently supported");
+        return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+    }
+    if valid.contains(SetattrValid::GID) {
+        warn!("Changing st_gid is not currently supported");
+        return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+    }
+    if valid.intersects(
+        SetattrValid::CTIME
+            | SetattrValid::ATIME
+            | SetattrValid::ATIME_NOW
+            | SetattrValid::MTIME
+            | SetattrValid::MTIME_NOW,
+    ) {
+        debug!("Ignoring ctime/atime/mtime change as authfs does not currently maintain timestamps");
+    }
+    Ok(())
 }
 
 fn cstr_to_path(cstr: &CStr) -> &Path {
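The rmdir implementation above deliberately splits the operation into two lookups: a read-only pass that inspects both the parent and the target entry, followed by a mutable pass that performs the removal. A minimal sketch of the same borrow pattern over a BTreeMap (hypothetical types, not the actual authfs structs):

    use std::collections::{BTreeMap, HashMap};
    use std::io;

    struct Entry {
        children: HashMap<String, u64>,
    }

    fn not_found() -> io::Error {
        io::Error::from_raw_os_error(libc::ENOENT)
    }

    fn remove_empty_dir(table: &mut BTreeMap<u64, Entry>, parent: u64, name: &str) -> io::Result<()> {
        // Pass 1: immutable borrows only. Resolving the child requires a reference into the
        // parent entry, and checking it requires a second reference into the same table.
        let child = {
            let parent_entry = table.get(&parent).ok_or_else(not_found)?;
            let child = *parent_entry.children.get(name).ok_or_else(not_found)?;
            let child_entry = table.get(&child).ok_or_else(not_found)?;
            if !child_entry.children.is_empty() {
                return Err(io::Error::from_raw_os_error(libc::ENOTEMPTY));
            }
            child
        };
        // Pass 2: a single mutable borrow at a time performs the actual removal.
        table.get_mut(&parent).unwrap().children.remove(name);
        table.remove(&child);
        Ok(())
    }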
diff --git a/authfs/src/fusefs/mount.rs b/authfs/src/fusefs/mount.rs
new file mode 100644
index 0000000..e7f8c94
--- /dev/null
+++ b/authfs/src/fusefs/mount.rs
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use fuse::mount::MountOption;
+use std::fs::OpenOptions;
+use std::os::unix::io::AsRawFd;
+use std::path::Path;
+
+use super::AuthFs;
+
+/// Maximum bytes in a write transaction to the FUSE device. This limits the maximum buffer size
+/// (including FUSE protocol overhead) that the filesystem writes back for a read request.
+pub const MAX_WRITE_BYTES: u32 = 65536;
+
+/// Maximum bytes in a read operation.
+/// TODO(victorhsieh): This option is deprecated by FUSE. Figure out if we can remove this.
+const MAX_READ_BYTES: u32 = 65536;
+
+/// Mount and start the FUSE instance to handle messages. This requires CAP_SYS_ADMIN.
+pub fn mount_and_enter_message_loop(
+    authfs: AuthFs,
+    mountpoint: &Path,
+    extra_options: &Option<String>,
+) -> Result<(), fuse::Error> {
+    let dev_fuse = OpenOptions::new()
+        .read(true)
+        .write(true)
+        .open("/dev/fuse")
+        .expect("Failed to open /dev/fuse");
+
+    let mut mount_options = vec![
+        MountOption::FD(dev_fuse.as_raw_fd()),
+        MountOption::RootMode(libc::S_IFDIR | libc::S_IXUSR | libc::S_IXGRP | libc::S_IXOTH),
+        MountOption::AllowOther,
+        MountOption::UserId(0),
+        MountOption::GroupId(0),
+        MountOption::MaxRead(MAX_READ_BYTES),
+    ];
+    if let Some(value) = extra_options {
+        mount_options.push(MountOption::Extra(value));
+    }
+
+    fuse::mount(mountpoint, "authfs", libc::MS_NOSUID | libc::MS_NODEV, &mount_options)
+        .expect("Failed to mount fuse");
+
+    fuse::worker::start_message_loop(dev_fuse, MAX_WRITE_BYTES, MAX_READ_BYTES, authfs)
+}
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 00a4614..421cc02 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -43,7 +43,7 @@
 
 use auth::FakeAuthenticator;
 use file::{
-    InMemoryDir, RemoteDirEditor, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader,
+    Attr, InMemoryDir, RemoteDirEditor, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader,
 };
 use fsstat::RemoteFsStatsReader;
 use fsverity::{VerifiedFileEditor, VerifiedFileReader};
@@ -194,16 +194,20 @@
     service: file::VirtFdService,
     remote_fd: i32,
 ) -> Result<AuthFsEntry> {
-    let remote_file = RemoteFileEditor::new(service, remote_fd);
-    Ok(AuthFsEntry::VerifiedNew { editor: VerifiedFileEditor::new(remote_file) })
+    let remote_file = RemoteFileEditor::new(service.clone(), remote_fd);
+    Ok(AuthFsEntry::VerifiedNew {
+        editor: VerifiedFileEditor::new(remote_file),
+        attr: Attr::new_file(service, remote_fd),
+    })
 }
 
 fn new_remote_new_verified_dir_entry(
     service: file::VirtFdService,
     remote_fd: i32,
 ) -> Result<AuthFsEntry> {
-    let dir = RemoteDirEditor::new(service, remote_fd);
-    Ok(AuthFsEntry::VerifiedNewDirectory { dir })
+    let dir = RemoteDirEditor::new(service.clone(), remote_fd);
+    let attr = Attr::new_dir(service, remote_fd);
+    Ok(AuthFsEntry::VerifiedNewDirectory { dir, attr })
 }
 
 fn prepare_root_dir_entries(
@@ -311,7 +315,7 @@
     let mut authfs = AuthFs::new(RemoteFsStatsReader::new(service.clone()));
     prepare_root_dir_entries(service, &mut authfs, &args)?;
 
-    fusefs::loop_forever(authfs, &args.mount_point, &args.extra_options)?;
+    fusefs::mount_and_enter_message_loop(authfs, &args.mount_point, &args.extra_options)?;
     bail!("Unexpected exit after the handler loop")
 }
 
diff --git a/authfs/tests/Android.bp b/authfs/tests/Android.bp
index 92fa428..6b3a474 100644
--- a/authfs/tests/Android.bp
+++ b/authfs/tests/Android.bp
@@ -38,10 +38,10 @@
     rustlibs: [
         "libandroid_logger",
         "libanyhow",
-        "liblibc",
         "libclap",
         "libcommand_fds",
         "liblog_rust",
+        "libnix",
     ],
     test_suites: ["general-tests"],
     test_harness: false,
diff --git a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
index 2d7668a..101a349 100644
--- a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
+++ b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
@@ -386,6 +386,51 @@
     }
 
     @Test
+    public void testOutputDirectory_CanDeleteFile() throws Exception {
+        // Setup
+        String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        sAndroid.run("mkdir " + androidOutputDir);
+        runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        runOnMicrodroid("echo -n foo > " + authfsOutputDir + "/file");
+        runOnMicrodroid("test -f " + authfsOutputDir + "/file");
+        sAndroid.run("test -f " + androidOutputDir + "/file");
+
+        // Action & Verify
+        runOnMicrodroid("rm " + authfsOutputDir + "/file");
+        runOnMicrodroid("test ! -f " + authfsOutputDir + "/file");
+        sAndroid.run("test ! -f " + androidOutputDir + "/file");
+    }
+
+    @Test
+    public void testOutputDirectory_CanDeleteDirectoryOnlyIfEmpty() throws Exception {
+        // Setup
+        String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        sAndroid.run("mkdir " + androidOutputDir);
+        runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        runOnMicrodroid("mkdir -p " + authfsOutputDir + "/dir/dir2");
+        runOnMicrodroid("echo -n foo > " + authfsOutputDir + "/dir/file");
+        sAndroid.run("test -d " + androidOutputDir + "/dir/dir2");
+
+        // Action & Verify
+        runOnMicrodroid("rmdir " + authfsOutputDir + "/dir/dir2");
+        runOnMicrodroid("test ! -d " + authfsOutputDir + "/dir/dir2");
+        sAndroid.run("test ! -d " + androidOutputDir + "/dir/dir2");
+        // Can only delete a directory if empty
+        assertFailedOnMicrodroid("rmdir " + authfsOutputDir + "/dir");
+        runOnMicrodroid("test -d " + authfsOutputDir + "/dir");  // still there
+        runOnMicrodroid("rm " + authfsOutputDir + "/dir/file");
+        runOnMicrodroid("rmdir " + authfsOutputDir + "/dir");
+        runOnMicrodroid("test ! -d " + authfsOutputDir + "/dir");
+        sAndroid.run("test ! -d " + androidOutputDir + "/dir");
+    }
+
+    @Test
     public void testOutputDirectory_CannotRecreateDirectoryIfNameExists() throws Exception {
         // Setup
         String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
@@ -408,6 +453,40 @@
     }
 
     @Test
+    public void testOutputDirectory_WriteToFdOfDeletedFile() throws Exception {
+        // Setup
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+        sAndroid.run("mkdir " + androidOutputDir);
+        runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        // Create a file with some data. Test the existence.
+        String outputPath = authfsOutputDir + "/out";
+        String androidOutputPath = androidOutputDir + "/out";
+        runOnMicrodroid("echo -n 123 > " + outputPath);
+        runOnMicrodroid("test -f " + outputPath);
+        sAndroid.run("test -f " + androidOutputPath);
+
+        // Action
+        String output = runOnMicrodroid(
+                // Open the file for append and read
+                "exec 4>>" + outputPath + " 5<" + outputPath + "; "
+                // Delete the file from the directory
+                + "rm " + outputPath + "; "
+                // Append more data to the file descriptor
+                + "echo -n 456 >&4; "
+                // Print the whole file from the file descriptor
+                + "cat <&5");
+
+        // Verify
+        // The output contains all written data, even though the file has been deleted.
+        assertEquals("123456", output);
+        runOnMicrodroid("test ! -f " + outputPath);
+        sAndroid.run("test ! -f " + androidOutputDir + "/out");
+    }
+
+    @Test
     public void testInputDirectory_CanReadFile() throws Exception {
         // Setup
         String authfsInputDir = MOUNT_DIR + "/3";
@@ -440,6 +519,108 @@
     }
 
     @Test
+    public void testReadOutputDirectory() throws Exception {
+        // Setup
+        runFdServerOnAndroid("--open-dir 3:" + TEST_OUTPUT_DIR, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        // Action
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        runOnMicrodroid("mkdir -p " + authfsOutputDir + "/dir/dir2/dir3");
+        runOnMicrodroid("touch " + authfsOutputDir + "/dir/dir2/dir3/file1");
+        runOnMicrodroid("touch " + authfsOutputDir + "/dir/dir2/dir3/file2");
+        runOnMicrodroid("touch " + authfsOutputDir + "/dir/dir2/dir3/file3");
+        runOnMicrodroid("touch " + authfsOutputDir + "/file");
+
+        // Verify
+        String[] actual = runOnMicrodroid("cd " + authfsOutputDir + "; find |sort").split("\n");
+        String[] expected = new String[] {
+                ".",
+                "./dir",
+                "./dir/dir2",
+                "./dir/dir2/dir3",
+                "./dir/dir2/dir3/file1",
+                "./dir/dir2/dir3/file2",
+                "./dir/dir2/dir3/file3",
+                "./file"};
+        assertEquals(expected, actual);
+
+        // Add more entries.
+        runOnMicrodroid("mkdir -p " + authfsOutputDir + "/dir2");
+        runOnMicrodroid("touch " + authfsOutputDir + "/file2");
+        // Check new entries. Also check that the types are correct.
+        actual = runOnMicrodroid(
+                "cd " + authfsOutputDir + "; find -maxdepth 1 -type f |sort").split("\n");
+        expected = new String[] {"./file", "./file2"};
+        assertEquals(expected, actual);
+        actual = runOnMicrodroid(
+                "cd " + authfsOutputDir + "; find -maxdepth 1 -type d |sort").split("\n");
+        expected = new String[] {".", "./dir", "./dir2"};
+        assertEquals(expected, actual);
+    }
+
+    @Test
+    public void testChmod_File() throws Exception {
+        // Setup
+        runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/file", "--rw-fds 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
+
+        // Action & Verify
+        // Change mode
+        runOnMicrodroid("chmod 321 " + MOUNT_DIR + "/3");
+        expectFileMode("--wx-w---x", MOUNT_DIR + "/3", TEST_OUTPUT_DIR + "/file");
+        // Can't set the disallowed bits
+        assertFailedOnMicrodroid("chmod +s " + MOUNT_DIR + "/3");
+        assertFailedOnMicrodroid("chmod +t " + MOUNT_DIR + "/3");
+    }
+
+    @Test
+    public void testChmod_Dir() throws Exception {
+        // Setup
+        runFdServerOnAndroid("--open-dir 3:" + TEST_OUTPUT_DIR, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        // Action & Verify
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        // Create with umask
+        runOnMicrodroid("umask 000; mkdir " + authfsOutputDir + "/dir");
+        runOnMicrodroid("umask 022; mkdir " + authfsOutputDir + "/dir/dir2");
+        expectFileMode("drwxrwxrwx", authfsOutputDir + "/dir", TEST_OUTPUT_DIR + "/dir");
+        expectFileMode("drwxr-xr-x", authfsOutputDir + "/dir/dir2", TEST_OUTPUT_DIR + "/dir/dir2");
+        // Change mode
+        runOnMicrodroid("chmod -w " + authfsOutputDir + "/dir/dir2");
+        expectFileMode("dr-xr-xr-x", authfsOutputDir + "/dir/dir2", TEST_OUTPUT_DIR + "/dir/dir2");
+        runOnMicrodroid("chmod 321 " + authfsOutputDir + "/dir");
+        expectFileMode("d-wx-w---x", authfsOutputDir + "/dir", TEST_OUTPUT_DIR + "/dir");
+        // Can't set the disallowed bits
+        assertFailedOnMicrodroid("chmod +s " + authfsOutputDir + "/dir/dir2");
+        assertFailedOnMicrodroid("chmod +t " + authfsOutputDir + "/dir");
+    }
+
+    @Test
+    public void testChmod_FileInOutputDirectory() throws Exception {
+        // Setup
+        runFdServerOnAndroid("--open-dir 3:" + TEST_OUTPUT_DIR, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        // Action & Verify
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        // Create with umask
+        runOnMicrodroid("umask 000; echo -n foo > " + authfsOutputDir + "/file");
+        runOnMicrodroid("umask 022; echo -n foo > " + authfsOutputDir + "/file2");
+        expectFileMode("-rw-rw-rw-", authfsOutputDir + "/file", TEST_OUTPUT_DIR + "/file");
+        expectFileMode("-rw-r--r--", authfsOutputDir + "/file2", TEST_OUTPUT_DIR + "/file2");
+        // Change mode
+        runOnMicrodroid("chmod -w " + authfsOutputDir + "/file");
+        expectFileMode("-r--r--r--", authfsOutputDir + "/file", TEST_OUTPUT_DIR + "/file");
+        runOnMicrodroid("chmod 321 " + authfsOutputDir + "/file2");
+        expectFileMode("--wx-w---x", authfsOutputDir + "/file2", TEST_OUTPUT_DIR + "/file2");
+        // Can't set the disallowed bits
+        assertFailedOnMicrodroid("chmod +s " + authfsOutputDir + "/file");
+        assertFailedOnMicrodroid("chmod +t " + authfsOutputDir + "/file2");
+    }
+
+    @Test
     public void testStatfs() throws Exception {
         // Setup
         runFdServerOnAndroid("--open-dir 3:" + TEST_OUTPUT_DIR, "--rw-dirs 3");
@@ -492,6 +673,15 @@
         }
     }
 
+    private void expectFileMode(String expected, String microdroidPath, String androidPath)
+            throws DeviceNotAvailableException {
+        String actual = runOnMicrodroid("stat -c '%A' " + microdroidPath);
+        assertEquals("Inconsistent mode for " + microdroidPath, expected, actual);
+
+        actual = sAndroid.run("stat -c '%A' " + androidPath);
+        assertEquals("Inconsistent mode for " + androidPath + " (android)", expected, actual);
+    }
+
     private void resizeFileOnMicrodroid(String path, long size) {
         runOnMicrodroid("truncate -c -s " + size + " " + path);
     }
diff --git a/authfs/tests/open_then_run.rs b/authfs/tests/open_then_run.rs
index fca8953..a540f9d 100644
--- a/authfs/tests/open_then_run.rs
+++ b/authfs/tests/open_then_run.rs
@@ -22,8 +22,9 @@
 use clap::{App, Arg, Values};
 use command_fds::{CommandFdExt, FdMapping};
 use log::{debug, error};
+use nix::{dir::Dir, fcntl::OFlag, sys::stat::Mode};
 use std::fs::{File, OpenOptions};
-use std::os::unix::{fs::OpenOptionsExt, io::AsRawFd, io::RawFd};
+use std::os::unix::io::{AsRawFd, RawFd};
 use std::process::Command;
 
 // `PseudoRawFd` is just an integer and not necessarily backed by a real FD. It is used to denote
@@ -31,30 +32,31 @@
 // with this alias is to improve readability by distinguishing from actual RawFd.
 type PseudoRawFd = RawFd;
 
-struct FileMapping {
-    file: File,
+struct FileMapping<T: AsRawFd> {
+    file: T,
     target_fd: PseudoRawFd,
 }
 
-impl FileMapping {
+impl<T: AsRawFd> FileMapping<T> {
     fn as_fd_mapping(&self) -> FdMapping {
         FdMapping { parent_fd: self.file.as_raw_fd(), child_fd: self.target_fd }
     }
 }
 
 struct Args {
-    ro_files: Vec<FileMapping>,
-    rw_files: Vec<FileMapping>,
-    dir_files: Vec<FileMapping>,
+    ro_files: Vec<FileMapping<File>>,
+    rw_files: Vec<FileMapping<File>>,
+    dir_files: Vec<FileMapping<Dir>>,
     cmdline_args: Vec<String>,
 }
 
-fn parse_and_create_file_mapping<F>(
+fn parse_and_create_file_mapping<F, T>(
     values: Option<Values<'_>>,
     opener: F,
-) -> Result<Vec<FileMapping>>
+) -> Result<Vec<FileMapping<T>>>
 where
-    F: Fn(&str) -> Result<File>,
+    F: Fn(&str) -> Result<T>,
+    T: AsRawFd,
 {
     if let Some(options) = values {
         options
@@ -116,19 +118,8 @@
     })?;
 
     let dir_files = parse_and_create_file_mapping(matches.values_of("open-dir"), |path| {
-        // The returned FD represents a path (that's supposed to be a directory), and is not really
-        // a file. It's better to use std::os::unix::io::OwnedFd but it's currently experimental.
-        // Ideally, all FDs opened by this program should be `OwnedFd` since we are only opening
-        // them for the provided program, and are not supposed to do anything else.
-        OpenOptions::new()
-            .custom_flags(libc::O_PATH | libc::O_DIRECTORY)
-            // The custom flags above is not taken into consideration by the unix implementation of
-            // OpenOptions for flag validation. So even though the man page of open(2) says that
-            // most flags include access mode are ignored, we still need to set a "valid" mode to
-            // make the library happy. The value does not appear to matter elsewhere in the library.
-            .read(true)
-            .open(path)
-            .with_context(|| format!("Open {} directory as path", path))
+        Dir::open(path, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::S_IRWXU)
+            .with_context(|| format!("Open {} directory", path))
     })?;
 
     let cmdline_args: Vec<_> = matches.values_of("args").unwrap().map(|s| s.to_string()).collect();
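For context, a short sketch of how these generic FileMappings typically end up wired into the child process via the command_fds crate (the shell command and fd number here are illustrative, not taken from the test helper):

    use command_fds::{CommandFdExt, FdMapping};
    use std::fs::File;
    use std::os::unix::io::AsRawFd;
    use std::process::Command;

    fn spawn_with_inherited_fd(file: &File) -> std::io::Result<std::process::Child> {
        let mut command = Command::new("/system/bin/sh");
        command.arg("-c").arg("cat <&9");
        // Expose the already-open file to the child process as fd 9.
        command
            .fd_mappings(vec![FdMapping { parent_fd: file.as_raw_fd(), child_fd: 9 }])
            .expect("fd mapping collision");
        command.spawn()
    }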
diff --git a/compos/Android.bp b/compos/Android.bp
index b9fcfff..783ba22 100644
--- a/compos/Android.bp
+++ b/compos/Android.bp
@@ -50,6 +50,8 @@
         "liblog_rust",
         "libminijail_rust",
         "libnix",
+        "libodsign_proto_rust",
+        "libprotobuf",
         "libring",
         "libscopeguard",
     ],
diff --git a/compos/aidl/com/android/compos/ICompOsService.aidl b/compos/aidl/com/android/compos/ICompOsService.aidl
index 7af2ada..194180b 100644
--- a/compos/aidl/com/android/compos/ICompOsService.aidl
+++ b/compos/aidl/com/android/compos/ICompOsService.aidl
@@ -46,13 +46,18 @@
      *
      * The execution is based on the VM's APEX mounts, files on Android's /system (by accessing
      * through systemDirFd over AuthFS), and *CLASSPATH derived in the VM, to generate the same
-     * odrefresh output aritfacts to the output directory (through outputDirFd).
+     * odrefresh output artifacts to the output directory (through outputDirFd).
      *
-     * The caller/Android is allowed to specify the zygote arch (ro.zygote).
-     *
-     * @return a CompilationResult
+     * @param systemDirFd An fd referring to /system
+     * @param outputDirFd An fd referring to the output directory, ART_APEX_DATA
+     * @param stagingDirFd An fd referring to the staging directory, e.g. ART_APEX_DATA/staging
+     * @param targetDirName The sub-directory of the output directory to which artifacts are to be
+     *                      written (e.g. dalvik-cache)
+     * @param zygoteArch The zygote architecture (ro.zygote)
+     * @return odrefresh exit code
      */
-    CompilationResult odrefresh(int systemDirFd, int outputDirFd, String zygoteArch);
+    byte odrefresh(int systemDirFd, int outputDirFd, int stagingDirFd, String targetDirName,
+            String zygoteArch);
 
     /**
      * Run dex2oat command with provided args, in a context that may be specified in FdAnnotation,
diff --git a/compos/apex/Android.bp b/compos/apex/Android.bp
index 43e75e4..f40da9c 100644
--- a/compos/apex/Android.bp
+++ b/compos/apex/Android.bp
@@ -35,6 +35,7 @@
 
     // TODO(victorhsieh): make it updatable
     updatable: false,
+    future_updatable: true,
     platform_apis: true,
 
     system_ext_specific: true,
diff --git a/compos/apk/assets/vm_config_staged.json b/compos/apk/assets/vm_config_staged.json
new file mode 100644
index 0000000..9c81e4e
--- /dev/null
+++ b/compos/apk/assets/vm_config_staged.json
@@ -0,0 +1,31 @@
+{
+  "version": 1,
+  "os": {
+    "name": "microdroid"
+  },
+  "task": {
+    "type": "executable",
+    "command": "/apex/com.android.compos/bin/compsvc",
+    "args": [
+      "--log_to_stderr"
+    ]
+  },
+  "prefer_staged": true,
+  "apexes": [
+    {
+      "name": "com.android.art"
+    },
+    {
+      "name": "com.android.compos"
+    },
+    {
+      "name": "{DEX2OATBOOTCLASSPATH}"
+    },
+    {
+      "name": "{BOOTCLASSPATH}"
+    },
+    {
+      "name": "{SYSTEMSERVERCLASSPATH}"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/compos/common/Android.bp b/compos/common/Android.bp
index 5893fd6..7c61d94 100644
--- a/compos/common/Android.bp
+++ b/compos/common/Android.bp
@@ -14,8 +14,10 @@
         "libbinder_rpc_unstable_bindgen",
         "libbinder_rs",
         "liblog_rust",
+        "libnum_traits",
         "librustutils",
     ],
+    proc_macros: ["libnum_derive"],
     shared_libs: [
         "libbinder_rpc_unstable",
     ],
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index 9e419f5..9c23fac 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -17,7 +17,7 @@
 //! Support for starting CompOS in a VM and connecting to the service
 
 use crate::timeouts::timeouts;
-use crate::{COMPOS_APEX_ROOT, COMPOS_DATA_ROOT, COMPOS_VSOCK_PORT};
+use crate::{COMPOS_APEX_ROOT, COMPOS_DATA_ROOT, COMPOS_VSOCK_PORT, DEFAULT_VM_CONFIG_PATH};
 use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
     IVirtualMachine::IVirtualMachine,
     IVirtualMachineCallback::{BnVirtualMachineCallback, IVirtualMachineCallback},
@@ -56,6 +56,8 @@
 pub struct VmParameters {
     /// Whether the VM should be debuggable.
     pub debug_mode: bool,
+    /// If present, overrides the path to the VM config JSON file
+    pub config_path: Option<String>,
 }
 
 impl VmInstance {
@@ -85,27 +87,31 @@
             .context("Failed to open config APK idsig file")?;
         let idsig_fd = ParcelFileDescriptor::new(idsig_fd);
 
-        let (log_fd, debug_level) = if parameters.debug_mode {
-            // Console output and the system log output from the VM are redirected to this file.
-            let log_fd =
-                File::create(data_dir.join("vm.log")).context("Failed to create log file")?;
+        let (console_fd, log_fd, debug_level) = if parameters.debug_mode {
+            // Console output and the system log output from the VM are redirected to files.
+            let console_fd = File::create(data_dir.join("vm_console.log"))
+                .context("Failed to create console log file")?;
+            let log_fd = File::create(data_dir.join("vm.log"))
+                .context("Failed to create system log file")?;
+            let console_fd = ParcelFileDescriptor::new(console_fd);
             let log_fd = ParcelFileDescriptor::new(log_fd);
-            (Some(log_fd), DebugLevel::FULL)
+            (Some(console_fd), Some(log_fd), DebugLevel::FULL)
         } else {
-            (None, DebugLevel::NONE)
+            (None, None, DebugLevel::NONE)
         };
 
+        let config_path = parameters.config_path.as_deref().unwrap_or(DEFAULT_VM_CONFIG_PATH);
         let config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
             apk: Some(apk_fd),
             idsig: Some(idsig_fd),
             instanceImage: Some(instance_fd),
-            configPath: "assets/vm_config.json".to_owned(),
+            configPath: config_path.to_owned(),
             debugLevel: debug_level,
             ..Default::default()
         });
 
         let vm = service
-            .createVm(&config, log_fd.as_ref(), log_fd.as_ref())
+            .createVm(&config, console_fd.as_ref(), log_fd.as_ref())
             .context("Failed to create VM")?;
         let vm_state = Arc::new(VmStateMonitor::default());
 
diff --git a/compos/common/lib.rs b/compos/common/lib.rs
index 4bfa81f..5c28379 100644
--- a/compos/common/lib.rs
+++ b/compos/common/lib.rs
@@ -17,6 +17,7 @@
 //! Common items used by CompOS server and/or clients
 
 pub mod compos_client;
+pub mod odrefresh;
 pub mod timeouts;
 
 /// Special CID indicating "any".
@@ -52,3 +53,10 @@
 
 /// The file that holds the instance image for a CompOS instance.
 pub const INSTANCE_IMAGE_FILE: &str = "instance.img";
+
+/// The path within our config APK of our default VM configuration file, used at boot time.
+pub const DEFAULT_VM_CONFIG_PATH: &str = "assets/vm_config.json";
+
+/// The path within our config APK of the VM configuration file we use when compiling staged
+/// APEXes before reboot.
+pub const PREFER_STAGED_VM_CONFIG_PATH: &str = "assets/vm_config_staged.json";
diff --git a/compos/common/odrefresh.rs b/compos/common/odrefresh.rs
new file mode 100644
index 0000000..7838b69
--- /dev/null
+++ b/compos/common/odrefresh.rs
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Helpers for running odrefresh
+
+use num_derive::FromPrimitive;
+use num_traits::FromPrimitive;
+
+/// The path to the odrefresh binary
+pub const ODREFRESH_PATH: &str = "/apex/com.android.art/bin/odrefresh";
+
+// TODO: What if this changes?
+const EX_MAX: i8 = 78;
+
+/// The defined odrefresh exit codes - see art/odrefresh/include/odrefresh/odrefresh.h
+#[derive(Debug, PartialEq, Eq, FromPrimitive)]
+#[repr(i8)]
+pub enum ExitCode {
+    /// No compilation required, all artifacts look good
+    Okay = 0i8,
+    /// Compilation required
+    CompilationRequired = EX_MAX + 1,
+    /// New artifacts successfully generated
+    CompilationSuccess = EX_MAX + 2,
+    /// Compilation failed
+    CompilationFailed = EX_MAX + 3,
+    /// Removal of existing invalid artifacts failed
+    CleanupFailed = EX_MAX + 4,
+}
+
+impl ExitCode {
+    /// Map an integer to the corresponding ExitCode enum, if there is one
+    pub fn from_i32(exit_code: i32) -> Option<Self> {
+        FromPrimitive::from_i32(exit_code)
+    }
+}
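A brief usage sketch for the ExitCode mapping above, assuming ODREFRESH_PATH and ExitCode from this module are in scope; the --check invocation is illustrative and not part of this change:

    use anyhow::{anyhow, Result};
    use std::process::Command;

    fn check_artifacts() -> Result<ExitCode> {
        // Run odrefresh and translate its raw exit status into the typed enum.
        let status = Command::new(ODREFRESH_PATH).arg("--check").status()?;
        let raw = status.code().ok_or_else(|| anyhow!("odrefresh terminated by signal"))?;
        ExitCode::from_i32(raw).ok_or_else(|| anyhow!("Unexpected odrefresh exit code: {}", raw))
    }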
diff --git a/compos/compos_key_cmd/Android.bp b/compos/compos_key_cmd/Android.bp
index 36c1b5c..9d5a490 100644
--- a/compos/compos_key_cmd/Android.bp
+++ b/compos/compos_key_cmd/Android.bp
@@ -8,7 +8,7 @@
     apex_available: ["com.android.compos"],
 
     static_libs: [
-        "lib_compos_proto",
+        "lib_odsign_proto",
     ],
 
     shared_libs: [
diff --git a/compos/compos_key_cmd/compos_key_cmd.cpp b/compos/compos_key_cmd/compos_key_cmd.cpp
index 560eb09..f8b3d16 100644
--- a/compos/compos_key_cmd/compos_key_cmd.cpp
+++ b/compos/compos_key_cmd/compos_key_cmd.cpp
@@ -39,12 +39,13 @@
 #include <condition_variable>
 #include <filesystem>
 #include <iostream>
+#include <map>
 #include <mutex>
 #include <string>
 #include <string_view>
 #include <thread>
 
-#include "compos_signature.pb.h"
+#include "odsign_info.pb.h"
 
 using namespace std::literals;
 
@@ -62,10 +63,11 @@
 using android::base::Fdopen;
 using android::base::Result;
 using android::base::unique_fd;
-using compos::proto::Signature;
+using android::base::WriteFully;
 using ndk::ScopedAStatus;
 using ndk::ScopedFileDescriptor;
 using ndk::SharedRefBase;
+using odsign::proto::OdsignInfo;
 
 constexpr unsigned int kRpcPort = 6432;
 
@@ -74,8 +76,9 @@
 constexpr const char* kConfigApkIdsigPath =
         "/apex/com.android.compos/etc/CompOSPayloadApp.apk.idsig";
 
-// This is a path inside the APK
-constexpr const char* kConfigFilePath = "assets/vm_config.json";
+// These are paths inside the APK
+constexpr const char* kDefaultConfigFilePath = "assets/vm_config.json";
+constexpr const char* kPreferStagedConfigFilePath = "assets/vm_config_staged.json";
 
 static bool writeBytesToFile(const std::vector<uint8_t>& bytes, const std::string& path) {
     std::string str(bytes.begin(), bytes.end());
@@ -151,9 +154,13 @@
 
     ::ndk::ScopedAStatus onError(int32_t in_cid, int32_t in_error_code,
                                  const std::string& in_message) override {
-        // For now, just log the error as onDied() will follow.
         LOG(WARNING) << "VM error! cid = " << in_cid << ", error_code = " << in_error_code
                      << ", message = " << in_message;
+        {
+            std::unique_lock lock(mMutex);
+            mDied = true;
+        }
+        mCv.notify_all();
         return ScopedAStatus::ok();
     }
 
@@ -186,11 +193,12 @@
 class TargetVm {
 public:
     TargetVm(int cid, const std::string& logFile, const std::string& instanceImageFile,
-             bool debuggable)
+             bool debuggable, bool preferStaged)
           : mCid(cid),
             mLogFile(logFile),
             mInstanceImageFile(instanceImageFile),
-            mDebuggable(debuggable) {}
+            mDebuggable(debuggable),
+            mPreferStaged(preferStaged) {}
 
     // Returns 0 if we are to connect to a local service, otherwise the CID of
     // either an existing VM or a VM we have started, depending on the command
@@ -251,7 +259,7 @@
         appConfig.apk = std::move(apkFd);
         appConfig.idsig = std::move(idsigFd);
         appConfig.instanceImage = std::move(instanceFd);
-        appConfig.configPath = kConfigFilePath;
+        appConfig.configPath = mPreferStaged ? kPreferStagedConfigFilePath : kDefaultConfigFilePath;
         appConfig.debugLevel = mDebuggable ? VirtualMachineAppConfig::DebugLevel::FULL
                                            : VirtualMachineAppConfig::DebugLevel::NONE;
         appConfig.memoryMib = 0; // Use default
@@ -297,6 +305,7 @@
     const std::string mLogFile;
     const std::string mInstanceImageFile;
     const bool mDebuggable;
+    const bool mPreferStaged;
     std::shared_ptr<Callback> mCallback;
     std::shared_ptr<IVirtualMachine> mVm;
 };
@@ -396,21 +405,12 @@
     return result;
 }
 
-static Result<void> signFile(ICompOsService* service, const std::string& file) {
+static Result<std::vector<uint8_t>> computeDigest(const std::string& file) {
     unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
     if (!fd.ok()) {
         return ErrnoError() << "Failed to open";
     }
 
-    std::filesystem::path signature_path{file};
-    signature_path += ".signature";
-    unique_fd out_fd(TEMP_FAILURE_RETRY(open(signature_path.c_str(),
-                                             O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC,
-                                             S_IRUSR | S_IWUSR | S_IRGRP)));
-    if (!out_fd.ok()) {
-        return ErrnoError() << "Unable to create signature file";
-    }
-
     struct stat filestat;
     if (fstat(fd, &filestat) != 0) {
         return ErrnoError() << "Failed to fstat";
@@ -436,34 +436,34 @@
     }
     std::unique_ptr<libfsverity_digest, decltype(&std::free)> digestOwner{digest, std::free};
 
-    std::vector<uint8_t> buffer(sizeof(fsverity_formatted_digest) + digest->digest_size);
-    auto to_be_signed = new (buffer.data()) fsverity_formatted_digest;
-    memcpy(to_be_signed->magic, "FSVerity", sizeof(to_be_signed->magic));
-    to_be_signed->digest_algorithm = __cpu_to_le16(digest->digest_algorithm);
-    to_be_signed->digest_size = __cpu_to_le16(digest->digest_size);
-    memcpy(to_be_signed->digest, digest->digest, digest->digest_size);
-
-    std::vector<uint8_t> signature;
-    auto status = service->sign(buffer, &signature);
-    if (!status.isOk()) {
-        return Error() << "Failed to sign: " << status.getDescription();
-    }
-
-    Signature compos_signature;
-    compos_signature.set_digest(digest->digest, digest->digest_size);
-    compos_signature.set_signature(signature.data(), signature.size());
-    if (!compos_signature.SerializeToFileDescriptor(out_fd.get())) {
-        return Error() << "Failed to write signature";
-    }
-    if (close(out_fd.release()) != 0) {
-        return ErrnoError() << "Failed to close signature file";
-    }
-
-    return {};
+    return std::vector(&digest->digest[0], &digest->digest[digest->digest_size]);
 }
 
-static Result<void> sign(TargetVm& vm, const std::string& blob_file,
-                         const std::vector<std::string>& files) {
+static std::string toHex(const std::vector<uint8_t>& digest) {
+    std::stringstream ss;
+    for (auto it = digest.begin(); it != digest.end(); ++it) {
+        ss << std::setfill('0') << std::setw(2) << std::hex << static_cast<unsigned>(*it);
+    }
+    return ss.str();
+}
+
+static Result<void> signInfo(TargetVm& vm, const std::string& blob_file,
+                             const std::string& info_file, const std::vector<std::string>& files) {
+    unique_fd info_fd(
+            TEMP_FAILURE_RETRY(open(info_file.c_str(), O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC,
+                                    S_IRUSR | S_IWUSR | S_IRGRP)));
+    if (!info_fd.ok()) {
+        return ErrnoError() << "Unable to create " << info_file;
+    }
+
+    std::string signature_file = info_file + ".signature";
+    unique_fd signature_fd(TEMP_FAILURE_RETRY(open(signature_file.c_str(),
+                                                   O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC,
+                                                   S_IRUSR | S_IWUSR | S_IRGRP)));
+    if (!signature_fd.ok()) {
+        return ErrnoError() << "Unable to create " << signature_file;
+    }
+
     auto cid = vm.resolveCid();
     if (!cid.ok()) {
         return cid.error();
@@ -478,17 +478,45 @@
         return blob.error();
     }
 
-    auto status = service->initializeSigningKey(blob.value());
-    if (!status.isOk()) {
-        return Error() << "Failed to initialize signing key: " << status.getDescription();
+    auto initialized = service->initializeSigningKey(blob.value());
+    if (!initialized.isOk()) {
+        return Error() << "Failed to initialize signing key: " << initialized.getDescription();
     }
 
+    std::map<std::string, std::string> file_digests;
+
     for (auto& file : files) {
-        auto result = signFile(service.get(), file);
-        if (!result.ok()) {
-            return Error() << result.error() << ": " << file;
+        auto digest = computeDigest(file);
+        if (!digest.ok()) {
+            return digest.error();
         }
+        file_digests.emplace(file, toHex(*digest));
     }
+
+    OdsignInfo info;
+    info.mutable_file_hashes()->insert(file_digests.begin(), file_digests.end());
+
+    std::vector<uint8_t> serialized(info.ByteSizeLong());
+    if (!info.SerializeToArray(serialized.data(), serialized.size())) {
+        return Error() << "Failed to serialize protobuf";
+    }
+
+    if (!WriteFully(info_fd, serialized.data(), serialized.size()) ||
+        close(info_fd.release()) != 0) {
+        return Error() << "Failed to write info file";
+    }
+
+    std::vector<uint8_t> signature;
+    auto status = service->sign(serialized, &signature);
+    if (!status.isOk()) {
+        return Error() << "Failed to sign: " << status.getDescription();
+    }
+
+    if (!WriteFully(signature_fd, signature.data(), signature.size()) ||
+        close(signature_fd.release()) != 0) {
+        return Error() << "Failed to write signature";
+    }
+
     return {};
 }
 
@@ -543,17 +571,25 @@
     std::string imageFile;
     std::string logFile;
     bool debuggable = false;
+    bool preferStaged = false;
 
     for (;;) {
+        // Options with no associated value
         if (argc >= 2) {
             if (argv[1] == "--debug"sv) {
                 debuggable = true;
                 argc -= 1;
                 argv += 1;
                 continue;
+            } else if (argv[1] == "--staged"sv) {
+                preferStaged = true;
+                argc -= 1;
+                argv += 1;
+                continue;
             }
         }
         if (argc < 3) break;
+        // Options requiring a value
         if (argv[1] == "--cid"sv) {
             cid = atoi(argv[2]);
             if (cid == 0) {
@@ -571,7 +607,7 @@
         argv += 2;
     }
 
-    TargetVm vm(cid, logFile, imageFile, debuggable);
+    TargetVm vm(cid, logFile, imageFile, debuggable, preferStaged);
 
     if (argc == 4 && argv[1] == "generate"sv) {
         auto result = generate(vm, argv[2], argv[3]);
@@ -592,11 +628,13 @@
         } else {
             std::cerr << result.error() << '\n';
         }
-    } else if (argc >= 4 && argv[1] == "sign"sv) {
-        const std::vector<std::string> files{&argv[3], &argv[argc]};
-        auto result = sign(vm, argv[2], files);
+    } else if (argc >= 5 && argv[1] == "sign-info"sv) {
+        const std::string blob_file = argv[2];
+        const std::string info_file = argv[3];
+        const std::vector<std::string> files{&argv[4], &argv[argc]};
+        auto result = signInfo(vm, blob_file, info_file, files);
         if (result.ok()) {
-            std::cerr << "All signatures generated.\n";
+            std::cerr << "Info file generated and signed.\n";
             return 0;
         } else {
             std::cerr << result.error() << '\n';
@@ -616,20 +654,23 @@
             std::cerr << result.error() << '\n';
         }
     } else {
-        std::cerr << "Usage: compos_key_cmd [OPTIONS] generate|verify|sign|make-instance|init-key\n"
+        std::cerr << "Usage: compos_key_cmd [OPTIONS] COMMAND\n"
+                  << "Where COMMAND can be:\n"
+                  << "  make-instance <image file> Create an empty instance image file for a VM.\n"
                   << "  generate <blob file> <public key file> Generate new key pair and write\n"
                   << "    the private key blob and public key to the specified files.\n "
                   << "  verify <blob file> <public key file> Verify that the content of the\n"
                   << "    specified private key blob and public key files are valid.\n "
                   << "  init-key <blob file> Initialize the service key.\n"
-                  << "  sign <blob file> <files to be signed> Generate signatures for one or\n"
-                  << "    more files using the supplied private key blob. Signature is stored in\n"
-                  << "    <filename>.signature\n"
-                  << "  make-instance <image file> Create an empty instance image file for a VM.\n"
+                  << "  sign-info <blob file> <info file> <files to be signed> Generate\n"
+                  << "    an info file listing the paths and root digests of each of the files to\n"
+                  << "    be signed, along with a signature of that file.\n"
                   << "\n"
-                  << "OPTIONS: --log <log file> --debug (--cid <cid> | --start <image file>)\n"
+                  << "OPTIONS: --log <log file> --debug --staged\n"
+                  << "    (--cid <cid> | --start <image file>)\n"
                   << "  Specify --log to write VM log to a file rather than stdout.\n"
                   << "  Specify --debug with --start to make the VM fully debuggable.\n"
+                  << "  Specify --staged with --start to prefer staged APEXes in the VM.\n"
                   << "  Specify --cid to connect to a VM rather than the host.\n"
                   << "  Specify --start to start a VM from the given instance image file and\n "
                   << "    connect to that.\n";
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index 735b9a5..3190395 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -20,13 +20,11 @@
         "libcomposd_native_rust",
         "libminijail_rust",
         "libnix",
-        "libnum_traits",
         "liblibc",
         "liblog_rust",
         "librustutils",
         "libshared_child",
     ],
-    proc_macros: ["libnum_derive"],
     apex_available: [
         "com.android.compos",
     ],
diff --git a/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl b/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
index 5e72cd2..ec5f2f5 100644
--- a/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
+++ b/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
@@ -50,7 +50,9 @@
      * and writes the results to a test directory to avoid disrupting any real artifacts in
      * existence.
      *
-     * TODO(205750213): Change the API to async.
+     * Compilation continues in the background, and success/failure is reported via the supplied
+     * callback, unless the returned ICompilationTask is cancelled. The caller should maintain
+     * a reference to the ICompilationTask until compilation completes or is cancelled.
      */
-    byte startTestOdrefresh();
+    ICompilationTask startAsyncOdrefresh(ICompilationTaskCallback callback);
 }
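
The comment above defines the asynchronous contract: the caller keeps the returned ICompilationTask alive (cancelling it, or dropping the last reference, abandons the compilation) and learns the outcome through the callback. A minimal sketch of a Rust client of this interface, assuming the usual AIDL-generated bindings (BnCompilationTaskCallback and the exact import paths are inferred from the interface names above, not shown in this change):

    use android_system_composd::aidl::android::system::composd::{
        ICompilationTask::ICompilationTask,
        ICompilationTaskCallback::{BnCompilationTaskCallback, ICompilationTaskCallback},
        IIsolatedCompilationService::IIsolatedCompilationService,
    };
    use android_system_composd::binder::{
        wait_for_interface, BinderFeatures, Interface, Result as BinderResult, Strong,
    };
    use anyhow::{Context, Result};

    struct PrintingCallback;
    impl Interface for PrintingCallback {}
    impl ICompilationTaskCallback for PrintingCallback {
        fn onSuccess(&self) -> BinderResult<()> {
            println!("Compilation succeeded");
            Ok(())
        }
        fn onFailure(&self) -> BinderResult<()> {
            println!("Compilation failed");
            Ok(())
        }
    }

    fn start_odrefresh() -> Result<Strong<dyn ICompilationTask>> {
        let service = wait_for_interface::<dyn IIsolatedCompilationService>("android.system.composd")
            .context("Failed to connect to composd service")?;
        let callback =
            BnCompilationTaskCallback::new_binder(PrintingCallback, BinderFeatures::default());
        // Keep the returned task alive until the callback fires; calling cancel() or dropping
        // the last reference abandons the compilation.
        service.startAsyncOdrefresh(&callback).context("Starting odrefresh")
    }
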
diff --git a/compos/composd/native/Android.bp b/compos/composd/native/Android.bp
index ad0afd9..135f4d4 100644
--- a/compos/composd/native/Android.bp
+++ b/compos/composd/native/Android.bp
@@ -7,12 +7,17 @@
     crate_name: "composd_native",
     srcs: ["lib.rs"],
     rustlibs: [
+        "libanyhow",
         "libcxx",
+        "liblibc",
     ],
     static_libs: [
         "libcomposd_native_cpp",
     ],
-    shared_libs: ["libcrypto"],
+    shared_libs: [
+        "libartpalette-system",
+        "libcrypto",
+    ],
     apex_available: ["com.android.compos"],
 }
 
diff --git a/compos/composd/native/lib.rs b/compos/composd/native/lib.rs
index ace9600..cbec7fd 100644
--- a/compos/composd/native/lib.rs
+++ b/compos/composd/native/lib.rs
@@ -12,12 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Bindings native helpers for composd.
+//! Native helpers for composd.
 
-pub use ffi::*;
+pub use art::*;
+pub use crypto::*;
 
 #[cxx::bridge]
-mod ffi {
+mod crypto {
     /// Contains either a key or a reason why the key could not be extracted.
     struct KeyResult {
         /// The extracted key. If empty, the attempt to extract the key failed.
@@ -36,3 +37,38 @@
         fn extract_rsa_public_key(der_certificate: &[u8]) -> KeyResult;
     }
 }
+
+mod art {
+    use anyhow::{anyhow, Result};
+    use libc::c_char;
+    use std::ffi::{CStr, OsStr};
+    use std::io::Error;
+    use std::os::unix::ffi::OsStrExt;
+    use std::path::Path;
+    use std::ptr::null;
+
+    // From libartpalette(-system)
+    extern "C" {
+        fn PaletteCreateOdrefreshStagingDirectory(out_staging_dir: *mut *const c_char) -> i32;
+    }
+    const PALETTE_STATUS_OK: i32 = 0;
+    const PALETTE_STATUS_CHECK_ERRNO: i32 = 1;
+
+    /// Creates and returns the staging directory for odrefresh.
+    pub fn palette_create_odrefresh_staging_directory() -> Result<&'static Path> {
+        let mut staging_dir: *const c_char = null();
+        // SAFETY: The C function always returns a non-null C string (after creating the directory).
+        let status = unsafe { PaletteCreateOdrefreshStagingDirectory(&mut staging_dir) };
+        match status {
+            PALETTE_STATUS_OK => {
+                // SAFETY: The previously returned `*const c_char` should point to a legitimate C
+                // string.
+                let cstr = unsafe { CStr::from_ptr(staging_dir) };
+                let path = OsStr::from_bytes(cstr.to_bytes()).as_ref();
+                Ok(path)
+            }
+            PALETTE_STATUS_CHECK_ERRNO => Err(anyhow!(Error::last_os_error().to_string())),
+            _ => Err(anyhow!("Failed with palette status {}", status)),
+        }
+    }
+}
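
A short sketch of how this binding gets used (mirroring odrefresh_task.rs later in this change): the returned path is opened as a directory FD so it can be served into the VM. Note the &'static Path borrows a C string owned by libartpalette, which is assumed to remain valid for the life of the process.

    use anyhow::{Context, Result};
    use std::fs::{File, OpenOptions};
    use std::os::unix::fs::OpenOptionsExt;

    fn open_staging_dir() -> Result<File> {
        // Ask libartpalette to create (if necessary) and return the odrefresh staging directory.
        let staging_path = composd_native::palette_create_odrefresh_staging_directory()?;
        // Open it as a directory FD; O_DIRECTORY can only be combined with read access.
        OpenOptions::new()
            .custom_flags(libc::O_DIRECTORY)
            .read(true)
            .open(staging_path)
            .with_context(|| format!("Failed to open {:?}", staging_path))
    }
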
diff --git a/compos/composd/src/compilation_task.rs b/compos/composd/src/compilation_task.rs
index 18f5aac..871c4fb 100644
--- a/compos/composd/src/compilation_task.rs
+++ b/compos/composd/src/compilation_task.rs
@@ -15,16 +15,19 @@
  */
 
 use crate::instance_starter::CompOsInstance;
-use crate::odrefresh::{self, Odrefresh};
+use crate::odrefresh::Odrefresh;
 use android_system_composd::aidl::android::system::composd::{
     ICompilationTask::ICompilationTask, ICompilationTaskCallback::ICompilationTaskCallback,
 };
 use android_system_composd::binder::{Interface, Result as BinderResult, Strong};
 use anyhow::Result;
+use compos_common::odrefresh::ExitCode;
 use log::{error, warn};
 use std::sync::{Arc, Mutex};
 use std::thread;
 
+// TODO: Delete
+
 #[derive(Clone)]
 pub struct CompilationTask {
     running_task: Arc<Mutex<Option<RunningTask>>>,
@@ -91,7 +94,7 @@
             // We don't do the callback if cancel has already happened.
             if let Some(task) = task {
                 let result = match exit_code {
-                    Ok(odrefresh::ExitCode::CompilationSuccess) => task.callback.onSuccess(),
+                    Ok(ExitCode::CompilationSuccess) => task.callback.onSuccess(),
                     Ok(exit_code) => {
                         error!("Unexpected odrefresh result: {:?}", exit_code);
                         task.callback.onFailure()
diff --git a/compos/composd/src/composd_main.rs b/compos/composd/src/composd_main.rs
index 67b5974..2915a58 100644
--- a/compos/composd/src/composd_main.rs
+++ b/compos/composd/src/composd_main.rs
@@ -24,6 +24,7 @@
 mod instance_starter;
 mod internal_service;
 mod odrefresh;
+mod odrefresh_task;
 mod service;
 mod util;
 
diff --git a/compos/composd/src/instance_manager.rs b/compos/composd/src/instance_manager.rs
index 767e9f7..24ae576 100644
--- a/compos/composd/src/instance_manager.rs
+++ b/compos/composd/src/instance_manager.rs
@@ -23,7 +23,7 @@
 use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
 use compos_aidl_interface::binder::Strong;
 use compos_common::compos_client::VmParameters;
-use compos_common::{PENDING_INSTANCE_DIR, TEST_INSTANCE_DIR};
+use compos_common::{PENDING_INSTANCE_DIR, PREFER_STAGED_VM_CONFIG_PATH, TEST_INSTANCE_DIR};
 use std::sync::{Arc, Mutex, Weak};
 use virtualizationservice::IVirtualizationService::IVirtualizationService;
 
@@ -44,11 +44,13 @@
     }
 
     pub fn start_pending_instance(&self) -> Result<Arc<CompOsInstance>> {
-        self.start_instance(PENDING_INSTANCE_DIR, VmParameters::default())
+        let config_path = Some(PREFER_STAGED_VM_CONFIG_PATH.to_owned());
+        let vm_parameters = VmParameters { config_path, ..Default::default() };
+        self.start_instance(PENDING_INSTANCE_DIR, vm_parameters)
     }
 
     pub fn start_test_instance(&self) -> Result<Arc<CompOsInstance>> {
-        let vm_parameters = VmParameters { debug_mode: true };
+        let vm_parameters = VmParameters { debug_mode: true, ..Default::default() };
         self.start_instance(TEST_INSTANCE_DIR, vm_parameters)
     }
 
diff --git a/compos/composd/src/odrefresh.rs b/compos/composd/src/odrefresh.rs
index 9debf00..f06a4b2 100644
--- a/compos/composd/src/odrefresh.rs
+++ b/compos/composd/src/odrefresh.rs
@@ -16,29 +16,15 @@
 
 //! Handle the details of executing odrefresh to generate compiled artifacts.
 
+// TODO: Delete
+
 use anyhow::{bail, Context, Result};
+use compos_common::odrefresh::{ExitCode, ODREFRESH_PATH};
 use compos_common::timeouts::{need_extra_time, EXTENDED_TIMEOUTS};
 use compos_common::VMADDR_CID_ANY;
-use num_derive::FromPrimitive;
-use num_traits::FromPrimitive;
 use shared_child::SharedChild;
 use std::process::Command;
 
-// TODO: What if this changes?
-const EX_MAX: i32 = 78;
-const ODREFRESH_BIN: &str = "/apex/com.android.art/bin/odrefresh";
-
-#[derive(Debug, PartialEq, Eq, FromPrimitive)]
-#[repr(i32)]
-pub enum ExitCode {
-    // Copied from art/odrefresh/include/odrefresh/odrefresh.h
-    Okay = 0i32,
-    CompilationRequired = EX_MAX + 1,
-    CompilationSuccess = EX_MAX + 2,
-    CompilationFailed = EX_MAX + 3,
-    CleanupFailed = EX_MAX + 4,
-}
-
 pub struct Odrefresh {
     child: SharedChild,
 }
@@ -54,7 +40,7 @@
 
     fn spawn_odrefresh(target_dir: &str, compile_arg: &str) -> Result<Self> {
         // We don`t need to capture stdout/stderr - odrefresh writes to the log
-        let mut cmdline = Command::new(ODREFRESH_BIN);
+        let mut cmdline = Command::new(ODREFRESH_PATH);
         if need_extra_time()? {
             cmdline
                 .arg(format!(
@@ -77,7 +63,7 @@
     pub fn wait_for_exit(&self) -> Result<ExitCode> {
         // No timeout here - but clients can kill the process, which will end the wait.
         let status = self.child.wait()?;
-        if let Some(exit_code) = status.code().and_then(FromPrimitive::from_i32) {
+        if let Some(exit_code) = status.code().and_then(ExitCode::from_i32) {
             Ok(exit_code)
         } else {
             bail!("odrefresh exited with {}", status)
diff --git a/compos/composd/src/odrefresh_task.rs b/compos/composd/src/odrefresh_task.rs
new file mode 100644
index 0000000..9b70248
--- /dev/null
+++ b/compos/composd/src/odrefresh_task.rs
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Handle running odrefresh in the VM, with an async interface to allow cancellation
+
+use crate::fd_server_helper::FdServerConfig;
+use crate::instance_starter::CompOsInstance;
+use android_system_composd::aidl::android::system::composd::{
+    ICompilationTask::ICompilationTask, ICompilationTaskCallback::ICompilationTaskCallback,
+};
+use android_system_composd::binder::{Interface, Result as BinderResult, Strong};
+use anyhow::{bail, Context, Result};
+use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
+use compos_common::odrefresh::ExitCode;
+use log::{error, warn};
+use rustutils::system_properties;
+use std::fs::{File, OpenOptions};
+use std::os::unix::fs::OpenOptionsExt;
+use std::os::unix::io::AsRawFd;
+use std::path::Path;
+use std::sync::{Arc, Mutex};
+use std::thread;
+
+const ART_APEX_DATA: &str = "/data/misc/apexdata/com.android.art";
+
+#[derive(Clone)]
+pub struct OdrefreshTask {
+    running_task: Arc<Mutex<Option<RunningTask>>>,
+}
+
+impl Interface for OdrefreshTask {}
+
+impl ICompilationTask for OdrefreshTask {
+    fn cancel(&self) -> BinderResult<()> {
+        let task = self.take();
+        // Drop the VM, which should end compilation - and cause our thread to exit
+        drop(task);
+        Ok(())
+    }
+}
+
+struct RunningTask {
+    callback: Strong<dyn ICompilationTaskCallback>,
+    #[allow(dead_code)] // Keeps the CompOS VM alive
+    comp_os: Arc<CompOsInstance>,
+}
+
+impl OdrefreshTask {
+    /// Return the current running task, if any, removing it from this OdrefreshTask.
+    /// Once removed, meaning the task has ended or been cancelled, further calls will always
+    /// return None.
+    fn take(&self) -> Option<RunningTask> {
+        self.running_task.lock().unwrap().take()
+    }
+
+    pub fn start(
+        comp_os: Arc<CompOsInstance>,
+        target_dir_name: String,
+        callback: &Strong<dyn ICompilationTaskCallback>,
+    ) -> Result<OdrefreshTask> {
+        let service = comp_os.get_service();
+        let task = RunningTask { comp_os, callback: callback.clone() };
+        let task = OdrefreshTask { running_task: Arc::new(Mutex::new(Some(task))) };
+
+        task.clone().start_thread(service, target_dir_name);
+
+        Ok(task)
+    }
+
+    fn start_thread(self, service: Strong<dyn ICompOsService>, target_dir_name: String) {
+        thread::spawn(move || {
+            let exit_code = run_in_vm(service, &target_dir_name);
+
+            let task = self.take();
+            // We don't do the callback if cancel has already happened.
+            if let Some(task) = task {
+                let result = match exit_code {
+                    Ok(ExitCode::CompilationSuccess) => task.callback.onSuccess(),
+                    Ok(exit_code) => {
+                        error!("Unexpected odrefresh result: {:?}", exit_code);
+                        task.callback.onFailure()
+                    }
+                    Err(e) => {
+                        error!("Running odrefresh failed: {:?}", e);
+                        task.callback.onFailure()
+                    }
+                };
+                if let Err(e) = result {
+                    warn!("Failed to deliver callback: {:?}", e);
+                }
+            }
+        });
+    }
+}
+
+fn run_in_vm(service: Strong<dyn ICompOsService>, target_dir_name: &str) -> Result<ExitCode> {
+    let staging_dir = open_dir(composd_native::palette_create_odrefresh_staging_directory()?)?;
+    let system_dir = open_dir(Path::new("/system"))?;
+    let output_dir = open_dir(Path::new(ART_APEX_DATA))?;
+
+    // Spawn a fd_server to serve the FDs.
+    let fd_server_config = FdServerConfig {
+        ro_dir_fds: vec![system_dir.as_raw_fd()],
+        rw_dir_fds: vec![staging_dir.as_raw_fd(), output_dir.as_raw_fd()],
+        ..Default::default()
+    };
+    let fd_server_raii = fd_server_config.into_fd_server()?;
+
+    let zygote_arch = system_properties::read("ro.zygote")?;
+    let exit_code = service.odrefresh(
+        system_dir.as_raw_fd(),
+        output_dir.as_raw_fd(),
+        staging_dir.as_raw_fd(),
+        target_dir_name,
+        &zygote_arch,
+    )?;
+
+    drop(fd_server_raii);
+    if let Some(exit_code) = ExitCode::from_i32(exit_code.into()) {
+        Ok(exit_code)
+    } else {
+        bail!("odrefresh exited with {}", exit_code)
+    }
+}
+
+/// Returns an owned FD of the directory. It currently returns a `File` as a FD owner, but
+/// it would be better to use `std::os::unix::io::OwnedFd` once/if it becomes standard.
+fn open_dir(path: &Path) -> Result<File> {
+    OpenOptions::new()
+        .custom_flags(libc::O_DIRECTORY)
+        .read(true) // O_DIRECTORY can only be opened with read
+        .open(path)
+        .with_context(|| format!("Failed to open {:?} directory as path fd", path))
+}
diff --git a/compos/composd/src/service.rs b/compos/composd/src/service.rs
index aa96ddf..23c411b 100644
--- a/compos/composd/src/service.rs
+++ b/compos/composd/src/service.rs
@@ -18,9 +18,8 @@
 //! desired.
 
 use crate::compilation_task::CompilationTask;
-use crate::fd_server_helper::FdServerConfig;
 use crate::instance_manager::InstanceManager;
-use crate::instance_starter::CompOsInstance;
+use crate::odrefresh_task::OdrefreshTask;
 use crate::util::to_binder_result;
 use android_system_composd::aidl::android::system::composd::{
     ICompilationTask::{BnCompilationTask, ICompilationTask},
@@ -31,12 +30,7 @@
     self, BinderFeatures, ExceptionCode, Interface, Status, Strong, ThreadState,
 };
 use anyhow::{Context, Result};
-use compos_common::COMPOS_DATA_ROOT;
-use rustutils::{system_properties, users::AID_ROOT, users::AID_SYSTEM};
-use std::fs::{create_dir, File, OpenOptions};
-use std::os::unix::fs::OpenOptionsExt;
-use std::os::unix::io::AsRawFd;
-use std::path::{Path, PathBuf};
+use rustutils::{users::AID_ROOT, users::AID_SYSTEM};
 use std::sync::Arc;
 
 pub struct IsolatedCompilationService {
@@ -69,9 +63,12 @@
         to_binder_result(self.do_start_test_compile(callback))
     }
 
-    fn startTestOdrefresh(&self) -> binder::Result<i8> {
+    fn startAsyncOdrefresh(
+        &self,
+        callback: &Strong<dyn ICompilationTaskCallback>,
+    ) -> binder::Result<Strong<dyn ICompilationTask>> {
         check_permissions()?;
-        to_binder_result(self.do_odrefresh_for_test())
+        to_binder_result(self.do_start_async_odrefresh(callback))
     }
 }
 
@@ -99,38 +96,16 @@
         Ok(BnCompilationTask::new_binder(task, BinderFeatures::default()))
     }
 
-    fn do_odrefresh_for_test(&self) -> Result<i8> {
-        let mut staging_dir_path = PathBuf::from(COMPOS_DATA_ROOT);
-        staging_dir_path.push("test-artifacts");
-        to_binder_result(create_dir(&staging_dir_path))?;
+    fn do_start_async_odrefresh(
+        &self,
+        callback: &Strong<dyn ICompilationTaskCallback>,
+    ) -> Result<Strong<dyn ICompilationTask>> {
+        let comp_os = self.instance_manager.start_test_instance().context("Starting CompOS")?;
 
-        let compos = self
-            .instance_manager
-            .start_test_instance()
-            .context("Starting CompOS for odrefresh test")?;
-        self.do_odrefresh(compos, &staging_dir_path)
-    }
+        let target_dir_name = "test-artifacts".to_owned();
+        let task = OdrefreshTask::start(comp_os, target_dir_name, callback)?;
 
-    fn do_odrefresh(&self, compos: Arc<CompOsInstance>, staging_dir_path: &Path) -> Result<i8> {
-        let output_dir = open_dir_path(staging_dir_path)?;
-        let system_dir = open_dir_path(Path::new("/system"))?;
-
-        // Spawn a fd_server to serve the FDs.
-        let fd_server_config = FdServerConfig {
-            ro_dir_fds: vec![system_dir.as_raw_fd()],
-            rw_dir_fds: vec![output_dir.as_raw_fd()],
-            ..Default::default()
-        };
-        let fd_server_raii = fd_server_config.into_fd_server()?;
-
-        let zygote_arch = system_properties::read("ro.zygote")?;
-        let result = compos.get_service().odrefresh(
-            system_dir.as_raw_fd(),
-            output_dir.as_raw_fd(),
-            &zygote_arch,
-        );
-        drop(fd_server_raii);
-        Ok(result?.exitCode)
+        Ok(BnCompilationTask::new_binder(task, BinderFeatures::default()))
     }
 }
 
@@ -143,17 +118,3 @@
         Ok(())
     }
 }
-
-/// Returns an owned FD of the directory path. It currently returns a `File` as a FD owner, but
-/// it's better to use `std::os::unix::io::OwnedFd` once/if it becomes standard.
-fn open_dir_path(path: &Path) -> Result<File> {
-    OpenOptions::new()
-        .custom_flags(libc::O_PATH | libc::O_DIRECTORY)
-        // The custom flags above is not taken into consideration by the unix implementation of
-        // OpenOptions for flag validation. So even though the man page of open(2) says that
-        // most flags include access mode are ignored, we still need to set a "valid" mode to
-        // make the library happy. The value does not appear to matter elsewhere in the library.
-        .read(true)
-        .open(path)
-        .with_context(|| format!("Failed to open {:?} directory as path fd", path))
-}
diff --git a/compos/composd_cmd/composd_cmd.rs b/compos/composd_cmd/composd_cmd.rs
index f22dc13..41e2b1a 100644
--- a/compos/composd_cmd/composd_cmd.rs
+++ b/compos/composd_cmd/composd_cmd.rs
@@ -38,7 +38,7 @@
             .index(1)
             .takes_value(true)
             .required(true)
-            .possible_values(&["staged-apex-compile", "forced-compile-test", "forced-odrefresh"]),
+            .possible_values(&["staged-apex-compile", "forced-compile-test", "async-odrefresh"]),
     );
     let args = app.get_matches();
     let command = args.value_of("command").unwrap();
@@ -48,7 +48,7 @@
     match command {
         "staged-apex-compile" => run_staged_apex_compile()?,
         "forced-compile-test" => run_forced_compile_for_test()?,
-        "forced-odrefresh" => run_forced_odrefresh_for_test()?,
+        "async-odrefresh" => run_async_odrefresh_for_test()?,
         _ => panic!("Unexpected command {}", command),
     }
 
@@ -113,6 +113,10 @@
     run_async_compilation(|service, callback| service.startTestCompile(callback))
 }
 
+fn run_async_odrefresh_for_test() -> Result<()> {
+    run_async_compilation(|service, callback| service.startAsyncOdrefresh(callback))
+}
+
 fn run_async_compilation<F>(start_compile_fn: F) -> Result<()>
 where
     F: FnOnce(
@@ -152,11 +156,3 @@
         }
     }
 }
-
-fn run_forced_odrefresh_for_test() -> Result<()> {
-    let service = wait_for_interface::<dyn IIsolatedCompilationService>("android.system.composd")
-        .context("Failed to connect to composd service")?;
-    let compilation_result = service.startTestOdrefresh().context("Compilation failed")?;
-    println!("odrefresh exit code: {:?}", compilation_result);
-    Ok(())
-}
diff --git a/compos/service/Android.bp b/compos/service/Android.bp
index 6270c9a..336ae9b 100644
--- a/compos/service/Android.bp
+++ b/compos/service/Android.bp
@@ -12,6 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
 java_library {
     name: "service-compos",
     srcs: [
diff --git a/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java b/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
index 685d60c..f801a8d 100644
--- a/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
+++ b/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
@@ -18,8 +18,11 @@
 
 import static java.util.Objects.requireNonNull;
 
+import android.app.job.JobInfo;
 import android.app.job.JobParameters;
+import android.app.job.JobScheduler;
 import android.app.job.JobService;
+import android.content.ComponentName;
 import android.os.IBinder;
 import android.os.RemoteException;
 import android.os.ServiceManager;
@@ -28,6 +31,7 @@
 import android.system.composd.IIsolatedCompilationService;
 import android.util.Log;
 
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
 /**
@@ -37,35 +41,66 @@
  */
 public class IsolatedCompilationJobService extends JobService {
     private static final String TAG = IsolatedCompilationJobService.class.getName();
+    private static final int DAILY_JOB_ID = 5132250;
+    private static final int STAGED_APEX_JOB_ID = 5132251;
 
     private final AtomicReference<CompilationJob> mCurrentJob = new AtomicReference<>();
 
+    static void scheduleDailyJob(JobScheduler scheduler) {
+        // TODO(b/205296305) Remove this
+        ComponentName serviceName =
+                new ComponentName("android", IsolatedCompilationJobService.class.getName());
+
+        int result = scheduler.schedule(new JobInfo.Builder(DAILY_JOB_ID, serviceName)
+                .setRequiresDeviceIdle(true)
+                .setRequiresCharging(true)
+                .setPeriodic(TimeUnit.DAYS.toMillis(1))
+                .build());
+        if (result != JobScheduler.RESULT_SUCCESS) {
+            Log.e(TAG, "Failed to schedule daily job");
+        }
+    }
+
+    static void scheduleStagedApexJob(JobScheduler scheduler) {
+        ComponentName serviceName =
+                new ComponentName("android", IsolatedCompilationJobService.class.getName());
+
+        int result = scheduler.schedule(new JobInfo.Builder(STAGED_APEX_JOB_ID, serviceName)
+                // Wait in case more APEXes are staged
+                .setMinimumLatency(TimeUnit.MINUTES.toMillis(60))
+                // We consume CPU, power, and storage
+                .setRequiresDeviceIdle(true)
+                .setRequiresCharging(true)
+                .setRequiresStorageNotLow(true)
+                .build());
+        if (result != JobScheduler.RESULT_SUCCESS) {
+            Log.e(TAG, "Failed to schedule staged APEX job");
+        }
+    }
+
+    static boolean isStagedApexJobScheduled(JobScheduler scheduler) {
+        return scheduler.getPendingJob(STAGED_APEX_JOB_ID) != null;
+    }
+
     @Override
     public boolean onStartJob(JobParameters params) {
-        Log.i(TAG, "starting job");
+        int jobId = params.getJobId();
 
-        CompilationJob oldJob = mCurrentJob.getAndSet(null);
-        if (oldJob != null) {
-            // This should probably never happen, but just in case
-            oldJob.stop();
-        }
+        Log.i(TAG, "Starting job " + jobId);
 
         // This function (and onStopJob) are only ever called on the main thread, so we don't have
         // to worry about two starts at once, or start and stop happening at once. But onCompletion
         // can be called on any thread, so we need to be careful with that.
 
-        CompilationCallback callback = new CompilationCallback() {
-            @Override
-            public void onSuccess() {
-                onCompletion(params, true);
-            }
+        CompilationJob oldJob = mCurrentJob.get();
+        if (oldJob != null) {
+            // We're already running a job, so give up on this one
+            Log.w(TAG, "Another job is in progress, skipping");
+            return false;  // Already finished
+        }
 
-            @Override
-            public void onFailure() {
-                onCompletion(params, false);
-            }
-        };
-        CompilationJob newJob = new CompilationJob(callback);
+        CompilationJob newJob = new CompilationJob(IsolatedCompilationJobService.this::onCompletion,
+                params);
         mCurrentJob.set(newJob);
 
         // This can take some time - we need to start up a VM - so we do it on a separate
@@ -75,9 +110,10 @@
             @Override
             public void run() {
                 try {
-                    newJob.start();
+                    newJob.start(jobId);
                 } catch (RuntimeException e) {
                     Log.e(TAG, "Starting CompilationJob failed", e);
+                    mCurrentJob.set(null);
                     newJob.stop(); // Just in case it managed to start before failure
                     jobFinished(params, /*wantReschedule=*/ false);
                 }
@@ -112,23 +148,23 @@
     }
 
     interface CompilationCallback {
-        void onSuccess();
-
-        void onFailure();
+        void onCompletion(JobParameters params, boolean succeeded);
     }
 
     static class CompilationJob extends ICompilationTaskCallback.Stub
             implements IBinder.DeathRecipient {
         private final AtomicReference<ICompilationTask> mTask = new AtomicReference<>();
         private final CompilationCallback mCallback;
+        private final JobParameters mParams;
         private volatile boolean mStopRequested = false;
         private volatile boolean mCanceled = false;
 
-        CompilationJob(CompilationCallback callback) {
+        CompilationJob(CompilationCallback callback, JobParameters params) {
             mCallback = requireNonNull(callback);
+            mParams = params;
         }
 
-        void start() {
+        void start(int jobId) {
             IBinder binder = ServiceManager.waitForService("android.system.composd");
             IIsolatedCompilationService composd =
                     IIsolatedCompilationService.Stub.asInterface(binder);
@@ -138,8 +174,12 @@
             }
 
             try {
-                // TODO(b/205296305) Call startStagedApexCompile instead
-                ICompilationTask composTask = composd.startTestCompile(this);
+                ICompilationTask composTask;
+                if (jobId == DAILY_JOB_ID) {
+                    composTask = composd.startTestCompile(this);
+                } else {
+                    composTask = composd.startStagedApexCompile(this);
+                }
                 mTask.set(composTask);
                 composTask.asBinder().linkToDeath(this, 0);
             } catch (RemoteException e) {
@@ -180,17 +220,18 @@
 
         @Override
         public void onSuccess() {
-            mTask.set(null);
-            if (!mCanceled) {
-                mCallback.onSuccess();
-            }
+            onCompletion(true);
         }
 
         @Override
         public void onFailure() {
+            onCompletion(false);
+        }
+
+        private void onCompletion(boolean succeeded) {
             mTask.set(null);
             if (!mCanceled) {
-                mCallback.onFailure();
+                mCallback.onCompletion(mParams, succeeded);
             }
         }
     }
diff --git a/compos/service/java/com/android/server/compos/IsolatedCompilationService.java b/compos/service/java/com/android/server/compos/IsolatedCompilationService.java
index cbc3371..6918572 100644
--- a/compos/service/java/com/android/server/compos/IsolatedCompilationService.java
+++ b/compos/service/java/com/android/server/compos/IsolatedCompilationService.java
@@ -17,17 +17,20 @@
 package com.android.server.compos;
 
 import android.annotation.NonNull;
-import android.app.job.JobInfo;
 import android.app.job.JobScheduler;
-import android.content.ComponentName;
 import android.content.Context;
+import android.content.pm.ApexStagedEvent;
+import android.content.pm.IPackageManagerNative;
+import android.content.pm.IStagedApexObserver;
+import android.content.pm.StagedApexInfo;
+import android.os.RemoteException;
+import android.os.ServiceManager;
 import android.provider.DeviceConfig;
 import android.util.Log;
 
 import com.android.server.SystemService;
 
 import java.io.File;
-import java.util.concurrent.TimeUnit;
 
 /**
  * A system service responsible for performing Isolated Compilation (compiling boot & system server
@@ -37,8 +40,6 @@
  */
 public class IsolatedCompilationService extends SystemService {
     private static final String TAG = IsolatedCompilationService.class.getName();
-    private static final int JOB_ID = 5132250;
-    private static final long JOB_PERIOD_MILLIS = TimeUnit.DAYS.toMillis(1);
 
     public IsolatedCompilationService(@NonNull Context context) {
         super(context);
@@ -59,24 +60,15 @@
             return;
         }
 
-        ComponentName serviceName =
-                new ComponentName("android", IsolatedCompilationJobService.class.getName());
 
         JobScheduler scheduler = getContext().getSystemService(JobScheduler.class);
         if (scheduler == null) {
             Log.e(TAG, "No scheduler");
             return;
         }
-        int result =
-                scheduler.schedule(
-                        new JobInfo.Builder(JOB_ID, serviceName)
-                                .setRequiresDeviceIdle(true)
-                                .setRequiresCharging(true)
-                                .setPeriodic(JOB_PERIOD_MILLIS)
-                                .build());
-        if (result != JobScheduler.RESULT_SUCCESS) {
-            Log.e(TAG, "Failed to schedule job");
-        }
+
+        IsolatedCompilationJobService.scheduleDailyJob(scheduler);
+        StagedApexObserver.registerForStagedApexUpdates(scheduler);
     }
 
     private static boolean isIsolatedCompilationSupported() {
@@ -94,4 +86,66 @@
 
         return true;
     }
+
+    private static class StagedApexObserver extends IStagedApexObserver.Stub {
+        private final JobScheduler mScheduler;
+        private final IPackageManagerNative mPackageNative;
+
+        static void registerForStagedApexUpdates(JobScheduler scheduler) {
+            final IPackageManagerNative packageNative = IPackageManagerNative.Stub.asInterface(
+                    ServiceManager.getService("package_native"));
+            if (packageNative == null) {
+                Log.e(TAG, "No IPackageManagerNative");
+                return;
+            }
+
+            StagedApexObserver observer = new StagedApexObserver(scheduler, packageNative);
+            try {
+                packageNative.registerStagedApexObserver(observer);
+                // In the unlikely event that an APEX has been staged before we get here, we may
+                // have to schedule compilation immediately.
+                observer.checkModules(packageNative.getStagedApexModuleNames());
+            } catch (RemoteException e) {
+                Log.e(TAG, "Failed to initialize observer", e);
+            }
+        }
+
+        private StagedApexObserver(JobScheduler scheduler,
+                IPackageManagerNative packageNative) {
+            mScheduler = scheduler;
+            mPackageNative = packageNative;
+        }
+
+        @Override
+        public void onApexStaged(ApexStagedEvent event) {
+            Log.d(TAG, "onApexStaged");
+            checkModules(event.stagedApexModuleNames);
+        }
+
+        void checkModules(String[] moduleNames) {
+            if (IsolatedCompilationJobService.isStagedApexJobScheduled(mScheduler)) {
+                Log.d(TAG, "Job already scheduled");
+                // We're going to run anyway, so we don't need to check this update
+                return;
+            }
+            boolean needCompilation = false;
+            for (String moduleName : moduleNames) {
+                try {
+                    StagedApexInfo apexInfo = mPackageNative.getStagedApexInfo(moduleName);
+                    if (apexInfo != null && (apexInfo.hasBootClassPathJars
+                            || apexInfo.hasDex2OatBootClassPathJars
+                            || apexInfo.hasSystemServerClassPathJars)) {
+                        Log.i(TAG, "Classpath affecting module updated: " + moduleName);
+                        needCompilation = true;
+                        break;
+                    }
+                } catch (RemoteException e) {
+                    Log.w(TAG, "Failed to get getStagedApexInfo for " + moduleName);
+                }
+            }
+            if (needCompilation) {
+                IsolatedCompilationJobService.scheduleStagedApexJob(mScheduler);
+            }
+        }
+    }
 }
diff --git a/compos/src/artifact_signer.rs b/compos/src/artifact_signer.rs
new file mode 100644
index 0000000..ce32d6b
--- /dev/null
+++ b/compos/src/artifact_signer.rs
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Support for generating and signing an info file listing names and digests of generated
+//! artifacts.
+
+#![allow(dead_code)] // Will be used soon
+
+use crate::compos_key_service::Signer;
+use crate::fsverity;
+use anyhow::{anyhow, Context, Result};
+use odsign_proto::odsign_info::OdsignInfo;
+use protobuf::Message;
+use std::fs::File;
+use std::io::Write;
+use std::os::unix::io::AsRawFd;
+use std::path::Path;
+
+const TARGET_DIRECTORY: &str = "/data/misc/apexdata/com.android.art/dalvik-cache";
+const SIGNATURE_EXTENSION: &str = ".signature";
+
+/// Accumulates and then signs information about generated artifacts.
+pub struct ArtifactSigner<'a> {
+    base_directory: &'a Path,
+    file_digests: Vec<(String, String)>, // (File name, digest in hex)
+}
+
+impl<'a> ArtifactSigner<'a> {
+    /// base_directory specifies the directory under which the artifacts are currently located;
+    /// they will eventually be moved under TARGET_DIRECTORY once they are verified and activated.
+    pub fn new(base_directory: &'a Path) -> Self {
+        Self { base_directory, file_digests: Vec::new() }
+    }
+
+    pub fn add_artifact(&mut self, path: &Path) -> Result<()> {
+        // The path we store is where the file will be when it is verified, not where it is now.
+        let suffix = path
+            .strip_prefix(&self.base_directory)
+            .context("Artifacts must be under base directory")?;
+        let target_path = Path::new(TARGET_DIRECTORY).join(suffix);
+        let target_path = target_path.to_str().ok_or_else(|| anyhow!("Invalid path"))?;
+
+        let file = File::open(path).with_context(|| format!("Opening {}", path.display()))?;
+        let digest = fsverity::measure(file.as_raw_fd())?;
+        let digest = to_hex_string(&digest);
+
+        self.file_digests.push((target_path.to_owned(), digest));
+        Ok(())
+    }
+
+    /// Consume this ArtifactSigner and write details of all its artifacts to the given path,
+    /// with an accompanying signature file.
+    pub fn write_info_and_signature(self, signer: Signer, info_path: &Path) -> Result<()> {
+        let mut info = OdsignInfo::new();
+        info.mut_file_hashes().extend(self.file_digests.into_iter());
+        let bytes = info.write_to_bytes()?;
+
+        let signature = signer.sign(&bytes)?;
+
+        let mut file = File::create(info_path)?;
+        file.write_all(&bytes)?;
+
+        let mut signature_name = info_path.file_name().unwrap().to_owned();
+        signature_name.push(SIGNATURE_EXTENSION);
+        let signature_path = info_path.with_file_name(&signature_name);
+        let mut signature_file = File::create(&signature_path)?;
+        signature_file.write_all(&signature)?;
+
+        Ok(())
+    }
+}
+
+fn to_hex_string(buf: &[u8]) -> String {
+    buf.iter().map(|b| format!("{:02x}", b)).collect()
+}
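
A minimal usage sketch (mirroring how compilation.rs below drives this type): collect the generated files, then write compos.info together with compos.info.signature. The directory and file names here are purely illustrative; the Signer comes from compos_key_service.

    use crate::compos_key_service::Signer;
    use anyhow::Result;
    use std::path::Path;

    fn sign_artifacts(signer: Signer) -> Result<()> {
        // Hypothetical staging location; in practice this is the odrefresh output directory.
        let target_dir = Path::new("/example/staging/dalvik-cache");

        let mut artifact_signer = ArtifactSigner::new(target_dir);
        // Paths must be under target_dir; they are recorded as their eventual locations
        // under TARGET_DIRECTORY.
        artifact_signer.add_artifact(&target_dir.join("x86_64/boot.oat"))?;
        artifact_signer.add_artifact(&target_dir.join("x86_64/boot.vdex"))?;

        // Writes compos.info and compos.info.signature alongside each other.
        artifact_signer.write_info_and_signature(signer, &target_dir.join("compos.info"))
    }
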
diff --git a/compos/src/compilation.rs b/compos/src/compilation.rs
index 44b4049..cf6f30a 100644
--- a/compos/src/compilation.rs
+++ b/compos/src/compilation.rs
@@ -15,13 +15,15 @@
  */
 
 use anyhow::{anyhow, bail, Context, Result};
-use log::error;
+use log::{debug, error, info};
 use minijail::{self, Minijail};
 use std::env;
-use std::fs::{create_dir, File};
+use std::fs::{read_dir, File};
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::path::{Path, PathBuf};
 
+use crate::artifact_signer::ArtifactSigner;
+use crate::compos_key_service::Signer;
 use crate::fsverity;
 use authfs_aidl_interface::aidl::com::android::virt::fs::{
     AuthFsConfig::{
@@ -34,6 +36,7 @@
 };
 use authfs_aidl_interface::binder::{ParcelFileDescriptor, Strong};
 use compos_aidl_interface::aidl::com::android::compos::FdAnnotation::FdAnnotation;
+use compos_common::odrefresh::ExitCode;
 
 const FD_SERVER_PORT: i32 = 3264; // TODO: support dynamic port
 
@@ -58,64 +61,121 @@
     image: ParcelFileDescriptor,
 }
 
-pub fn odrefresh(
-    odrefresh_path: &Path,
+pub struct OdrefreshContext<'a> {
     system_dir_fd: i32,
     output_dir_fd: i32,
-    zygote_arch: &str,
+    staging_dir_fd: i32,
+    target_dir_name: &'a str,
+    zygote_arch: &'a str,
+}
+
+impl<'a> OdrefreshContext<'a> {
+    pub fn new(
+        system_dir_fd: i32,
+        output_dir_fd: i32,
+        staging_dir_fd: i32,
+        target_dir_name: &'a str,
+        zygote_arch: &'a str,
+    ) -> Result<Self> {
+        if system_dir_fd < 0 || output_dir_fd < 0 || staging_dir_fd < 0 {
+            bail!("The remote FDs are expected to be non-negative");
+        }
+        if zygote_arch != "zygote64" && zygote_arch != "zygote64_32" {
+            bail!("Invalid zygote arch");
+        }
+        Ok(Self { system_dir_fd, output_dir_fd, staging_dir_fd, target_dir_name, zygote_arch })
+    }
+}
+
+pub fn odrefresh(
+    odrefresh_path: &Path,
+    context: OdrefreshContext,
     authfs_service: Strong<dyn IAuthFsService>,
-) -> Result<CompilerOutput> {
+    signer: Signer,
+) -> Result<ExitCode> {
     // Mount authfs (via authfs_service). The authfs instance unmounts once the `authfs` variable
     // is out of scope.
     let authfs_config = AuthFsConfig {
         port: FD_SERVER_PORT,
         inputDirFdAnnotations: vec![InputDirFdAnnotation {
-            fd: system_dir_fd,
+            fd: context.system_dir_fd,
             // TODO(206869687): Replace /dev/null with the real path when possible.
             manifestPath: "/dev/null".to_string(),
             prefix: "/system".to_string(),
         }],
-        outputDirFdAnnotations: vec![OutputDirFdAnnotation { fd: output_dir_fd }],
+        outputDirFdAnnotations: vec![
+            OutputDirFdAnnotation { fd: context.output_dir_fd },
+            OutputDirFdAnnotation { fd: context.staging_dir_fd },
+        ],
         ..Default::default()
     };
     let authfs = authfs_service.mount(&authfs_config)?;
     let mountpoint = PathBuf::from(authfs.getMountPoint()?);
 
     let mut android_root = mountpoint.clone();
-    android_root.push(system_dir_fd.to_string());
+    android_root.push(context.system_dir_fd.to_string());
     android_root.push("system");
     env::set_var("ANDROID_ROOT", &android_root);
+    debug!("ANDROID_ROOT={:?}", &android_root);
 
-    let mut art_apex_data = mountpoint.clone();
-    art_apex_data.push(output_dir_fd.to_string());
+    let art_apex_data = mountpoint.join(context.output_dir_fd.to_string());
     env::set_var("ART_APEX_DATA", &art_apex_data);
+    debug!("ART_APEX_DATA={:?}", &art_apex_data);
 
-    let mut staging_dir = mountpoint;
-    staging_dir.push(output_dir_fd.to_string());
-    staging_dir.push("staging");
-    create_dir(&staging_dir)
-        .with_context(|| format!("Create staging directory {}", staging_dir.display()))?;
+    let staging_dir = mountpoint.join(context.staging_dir_fd.to_string());
 
     let args = vec![
         "odrefresh".to_string(),
-        format!("--zygote-arch={}", zygote_arch),
+        format!("--zygote-arch={}", context.zygote_arch),
+        format!("--dalvik-cache={}", context.target_dir_name),
         "--no-refresh".to_string(),
         format!("--staging-dir={}", staging_dir.display()),
         "--force-compile".to_string(),
     ];
+    debug!("Running odrefresh with args: {:?}", &args);
     let jail = spawn_jailed_task(odrefresh_path, &args, Vec::new() /* fd_mapping */)
         .context("Spawn odrefresh")?;
-    match jail.wait() {
-        // TODO(161471326): On success, sign all files in the output directory.
-        Ok(()) => Ok(CompilerOutput::ExitCode(0)),
-        Err(minijail::Error::ReturnCode(exit_code)) => {
-            error!("odrefresh failed with exit code {}", exit_code);
-            Ok(CompilerOutput::ExitCode(exit_code as i8))
-        }
+    let exit_code = match jail.wait() {
+        Ok(_) => Result::<u8>::Ok(0),
+        Err(minijail::Error::ReturnCode(exit_code)) => Ok(exit_code),
         Err(e) => {
             bail!("Unexpected minijail error: {}", e)
         }
+    }?;
+
+    let exit_code = ExitCode::from_i32(exit_code.into())
+        .ok_or_else(|| anyhow!("Unexpected odrefresh exit code: {}", exit_code))?;
+    info!("odrefresh exited with {:?}", exit_code);
+
+    if exit_code == ExitCode::CompilationSuccess {
+        // authfs only shows us the files we created, so it's ok to just sign everything under
+        // the target directory.
+        let target_dir = art_apex_data.join(context.target_dir_name);
+        let mut artifact_signer = ArtifactSigner::new(&target_dir);
+        add_artifacts(&target_dir, &mut artifact_signer)?;
+
+        artifact_signer.write_info_and_signature(signer, &target_dir.join("compos.info"))?;
     }
+
+    Ok(exit_code)
+}
+
+fn add_artifacts(target_dir: &Path, artifact_signer: &mut ArtifactSigner) -> Result<()> {
+    for entry in
+        read_dir(&target_dir).with_context(|| format!("Traversing {}", target_dir.display()))?
+    {
+        let entry = entry?;
+        let file_type = entry.file_type()?;
+        if file_type.is_dir() {
+            add_artifacts(&entry.path(), artifact_signer)?;
+        } else if file_type.is_file() {
+            artifact_signer.add_artifact(&entry.path())?;
+        } else {
+            // authfs shouldn't create anything else, but just in case
+            bail!("Unexpected file type in artifacts: {:?}", entry);
+        }
+    }
+    Ok(())
 }
 
 /// Runs the compiler with given flags with file descriptors described in `fd_annotation` retrieved
diff --git a/compos/src/compos_key_service.rs b/compos/src/compos_key_service.rs
index 4a1566d..086a162 100644
--- a/compos/src/compos_key_service.rs
+++ b/compos/src/compos_key_service.rs
@@ -77,7 +77,7 @@
         })
     }
 
-    pub fn do_generate(&self) -> Result<CompOsKeyData> {
+    pub fn generate(&self) -> Result<CompOsKeyData> {
         let key_descriptor = BLOB_KEY_DESCRIPTOR;
         let key_parameters =
             [PURPOSE_SIGN, ALGORITHM, PADDING, DIGEST, KEY_SIZE, EXPONENT, NO_AUTH_REQUIRED];
@@ -97,11 +97,11 @@
         }
     }
 
-    pub fn do_verify(&self, key_blob: &[u8], public_key: &[u8]) -> Result<()> {
+    pub fn verify(&self, key_blob: &[u8], public_key: &[u8]) -> Result<()> {
         let mut data = [0u8; 32];
         self.random.fill(&mut data).context("No random data")?;
 
-        let signature = self.do_sign(key_blob, &data)?;
+        let signature = self.new_signer(key_blob).sign(&data)?;
 
         let public_key =
             signature::UnparsedPublicKey::new(&signature::RSA_PKCS1_2048_8192_SHA256, public_key);
@@ -110,8 +110,19 @@
         Ok(())
     }
 
-    pub fn do_sign(&self, key_blob: &[u8], data: &[u8]) -> Result<Vec<u8>> {
-        let key_descriptor = KeyDescriptor { blob: Some(key_blob.to_vec()), ..BLOB_KEY_DESCRIPTOR };
+    pub fn new_signer(&self, key_blob: &[u8]) -> Signer {
+        Signer { key_blob: key_blob.to_vec(), security_level: self.security_level.clone() }
+    }
+}
+
+pub struct Signer {
+    key_blob: Vec<u8>,
+    security_level: Strong<dyn IKeystoreSecurityLevel>,
+}
+
+impl Signer {
+    pub fn sign(self, data: &[u8]) -> Result<Vec<u8>> {
+        let key_descriptor = KeyDescriptor { blob: Some(self.key_blob), ..BLOB_KEY_DESCRIPTOR };
         let operation_parameters = [PURPOSE_SIGN, ALGORITHM, PADDING, DIGEST];
         let forced = false;
 
diff --git a/compos/src/compsvc.rs b/compos/src/compsvc.rs
index 0a15876..aa4b9bd 100644
--- a/compos/src/compsvc.rs
+++ b/compos/src/compsvc.rs
@@ -24,10 +24,10 @@
 use std::default::Default;
 use std::env;
 use std::path::PathBuf;
-use std::sync::{Arc, RwLock};
+use std::sync::RwLock;
 
-use crate::compilation::{compile_cmd, odrefresh, CompilerOutput};
-use crate::compos_key_service::CompOsKeyService;
+use crate::compilation::{compile_cmd, odrefresh, CompilerOutput, OdrefreshContext};
+use crate::compos_key_service::{CompOsKeyService, Signer};
 use crate::fsverity;
 use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFsService::IAuthFsService;
 use compos_aidl_interface::aidl::com::android::compos::{
@@ -39,10 +39,10 @@
 use compos_aidl_interface::binder::{
     BinderFeatures, ExceptionCode, Interface, Result as BinderResult, Strong,
 };
+use compos_common::odrefresh::ODREFRESH_PATH;
 
 const AUTHFS_SERVICE_NAME: &str = "authfs_service";
 const DEX2OAT_PATH: &str = "/apex/com.android.art/bin/dex2oat64";
-const ODREFRESH_PATH: &str = "/apex/com.android.art/bin/odrefresh";
 
 /// Constructs a binder object that implements ICompOsService.
 pub fn new_binder() -> Result<Strong<dyn ICompOsService>> {
@@ -50,7 +50,7 @@
         dex2oat_path: PathBuf::from(DEX2OAT_PATH),
         odrefresh_path: PathBuf::from(ODREFRESH_PATH),
         key_service: CompOsKeyService::new()?,
-        key_blob: Arc::new(RwLock::new(Vec::new())),
+        key_blob: RwLock::new(Vec::new()),
     };
     Ok(BnCompOsService::new_binder(service, BinderFeatures::default()))
 }
@@ -59,20 +59,27 @@
     dex2oat_path: PathBuf,
     odrefresh_path: PathBuf,
     key_service: CompOsKeyService,
-    key_blob: Arc<RwLock<Vec<u8>>>,
+    key_blob: RwLock<Vec<u8>>,
 }
 
 impl CompOsService {
     fn generate_raw_fsverity_signature(
         &self,
-        key_blob: &[u8],
         fsverity_digest: &fsverity::Sha256Digest,
-    ) -> Vec<u8> {
+    ) -> BinderResult<Vec<u8>> {
         let formatted_digest = fsverity::to_formatted_digest(fsverity_digest);
-        self.key_service.do_sign(key_blob, &formatted_digest[..]).unwrap_or_else(|e| {
-            warn!("Failed to sign the fsverity digest, returning empty signature.  Error: {}", e);
-            Vec::new()
-        })
+        self.new_signer()?
+            .sign(&formatted_digest[..])
+            .map_err(|e| new_binder_exception(ExceptionCode::SERVICE_SPECIFIC, e.to_string()))
+    }
+
+    fn new_signer(&self) -> BinderResult<Signer> {
+        let key = &*self.key_blob.read().unwrap();
+        if key.is_empty() {
+            Err(new_binder_exception(ExceptionCode::ILLEGAL_STATE, "Key is not initialized"))
+        } else {
+            Ok(self.key_service.new_signer(key))
+        }
     }
 }
 
@@ -106,42 +113,31 @@
         &self,
         system_dir_fd: i32,
         output_dir_fd: i32,
+        staging_dir_fd: i32,
+        target_dir_name: &str,
         zygote_arch: &str,
-    ) -> BinderResult<CompilationResult> {
-        if system_dir_fd < 0 || output_dir_fd < 0 {
-            return Err(new_binder_exception(
-                ExceptionCode::ILLEGAL_ARGUMENT,
-                "The remote FDs are expected to be non-negative",
-            ));
-        }
-        if zygote_arch != "zygote64" && zygote_arch != "zygote64_32" {
-            return Err(new_binder_exception(
-                ExceptionCode::ILLEGAL_ARGUMENT,
-                "Invalid zygote arch",
-            ));
-        }
-
-        let authfs_service = get_authfs_service()?;
-        let output = odrefresh(
-            &self.odrefresh_path,
+    ) -> BinderResult<i8> {
+        let context = OdrefreshContext::new(
             system_dir_fd,
             output_dir_fd,
+            staging_dir_fd,
+            target_dir_name,
             zygote_arch,
-            authfs_service,
         )
-        .map_err(|e| {
-            warn!("odrefresh failed: {}", e);
-            new_binder_exception(
-                ExceptionCode::SERVICE_SPECIFIC,
-                format!("odrefresh failed: {}", e),
-            )
-        })?;
-        match output {
-            CompilerOutput::ExitCode(exit_code) => {
-                Ok(CompilationResult { exitCode: exit_code, ..Default::default() })
-            }
-            _ => Err(new_binder_exception(ExceptionCode::SERVICE_SPECIFIC, "odrefresh failed")),
-        }
+        .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, e.to_string()))?;
+
+        let authfs_service = get_authfs_service()?;
+        let exit_code =
+            odrefresh(&self.odrefresh_path, context, authfs_service, self.new_signer()?).map_err(
+                |e| {
+                    warn!("odrefresh failed: {:?}", e);
+                    new_binder_exception(
+                        ExceptionCode::SERVICE_SPECIFIC,
+                        format!("odrefresh failed: {}", e),
+                    )
+                },
+            )?;
+        Ok(exit_code as i8)
     }
 
     fn compile_cmd(
@@ -159,23 +155,15 @@
             })?;
         match output {
             CompilerOutput::Digests { oat, vdex, image } => {
-                let key = &*self.key_blob.read().unwrap();
-                if key.is_empty() {
-                    Err(new_binder_exception(
-                        ExceptionCode::ILLEGAL_STATE,
-                        "Key is not initialized",
-                    ))
-                } else {
-                    let oat_signature = self.generate_raw_fsverity_signature(key, &oat);
-                    let vdex_signature = self.generate_raw_fsverity_signature(key, &vdex);
-                    let image_signature = self.generate_raw_fsverity_signature(key, &image);
-                    Ok(CompilationResult {
-                        exitCode: 0,
-                        oatSignature: oat_signature,
-                        vdexSignature: vdex_signature,
-                        imageSignature: image_signature,
-                    })
-                }
+                let oat_signature = self.generate_raw_fsverity_signature(&oat)?;
+                let vdex_signature = self.generate_raw_fsverity_signature(&vdex)?;
+                let image_signature = self.generate_raw_fsverity_signature(&image)?;
+                Ok(CompilationResult {
+                    exitCode: 0,
+                    oatSignature: oat_signature,
+                    vdexSignature: vdex_signature,
+                    imageSignature: image_signature,
+                })
             }
             CompilerOutput::ExitCode(exit_code) => {
                 Ok(CompilationResult { exitCode: exit_code, ..Default::default() })
@@ -189,12 +177,12 @@
 
     fn generateSigningKey(&self) -> BinderResult<CompOsKeyData> {
         self.key_service
-            .do_generate()
+            .generate()
             .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
     }
 
     fn verifySigningKey(&self, key_blob: &[u8], public_key: &[u8]) -> BinderResult<bool> {
-        Ok(if let Err(e) = self.key_service.do_verify(key_blob, public_key) {
+        Ok(if let Err(e) = self.key_service.verify(key_blob, public_key) {
             warn!("Signing key verification failed: {}", e.to_string());
             false
         } else {
@@ -203,14 +191,9 @@
     }
 
     fn sign(&self, data: &[u8]) -> BinderResult<Vec<u8>> {
-        let key = &*self.key_blob.read().unwrap();
-        if key.is_empty() {
-            Err(new_binder_exception(ExceptionCode::ILLEGAL_STATE, "Key is not initialized"))
-        } else {
-            self.key_service
-                .do_sign(key, data)
-                .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
-        }
+        self.new_signer()?
+            .sign(data)
+            .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
     }
 }
 
diff --git a/compos/src/compsvc_main.rs b/compos/src/compsvc_main.rs
index fc00039..9347905 100644
--- a/compos/src/compsvc_main.rs
+++ b/compos/src/compsvc_main.rs
@@ -16,11 +16,11 @@
 
 //! A tool to start a standalone compsvc server that serves over RPC binder.
 
+mod artifact_signer;
 mod compilation;
 mod compos_key_service;
 mod compsvc;
 mod fsverity;
-mod signer;
 
 use android_system_virtualmachineservice::{
     aidl::android::system::virtualmachineservice::IVirtualMachineService::{
diff --git a/compos/src/signer.rs b/compos/src/signer.rs
deleted file mode 100644
index 9ff1477..0000000
--- a/compos/src/signer.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use anyhow::Result;
-
-/// Provides the ability to cryptographically sign messages.
-pub trait Signer: Send + Sync {
-    /// Sign the supplied data. The result is a raw signature over the input data.
-    fn sign(&self, data: &[u8]) -> Result<Vec<u8>>;
-}
diff --git a/compos/tests/java/android/compos/test/ComposKeyTestCase.java b/compos/tests/java/android/compos/test/ComposKeyTestCase.java
index 140f74b..eacf3fb 100644
--- a/compos/tests/java/android/compos/test/ComposKeyTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposKeyTestCase.java
@@ -141,12 +141,13 @@
         android.run(
                 COMPOS_KEY_CMD_BIN,
                 "--cid " + mCid,
-                "sign",
+                "sign-info",
                 TEST_ROOT + "test_key3.blob",
+                TEST_ROOT + "test.info",
                 "/data/local/tmp/something.txt");
 
         // Check existence of the output signature - should succeed
-        android.run("test -f /data/local/tmp/something.txt.signature");
+        android.run("test -f " + TEST_ROOT + "test.info.signature");
     }
 
     private void startVm() throws Exception {
diff --git a/compos/tests/java/android/compos/test/ComposTestCase.java b/compos/tests/java/android/compos/test/ComposTestCase.java
index 5f4bd00..5ef6649 100644
--- a/compos/tests/java/android/compos/test/ComposTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposTestCase.java
@@ -87,6 +87,56 @@
         }
 
         // Save the expected checksum for the output directory.
+        String expectedChecksumSnapshot = checksumDirectoryContentPartial(android,
+                ODREFRESH_OUTPUT_DIR);
+
+        // --check may delete the output.
+        CommandResult result = runOdrefresh(android, "--check");
+        assertThat(result.getExitCode()).isEqualTo(OKAY);
+
+        // Make sure we generate a fresh instance.
+        android.tryRun("rm", "-rf", COMPOS_TEST_ROOT);
+        // TODO: remove once composd starts to clean up the directory.
+        android.tryRun("rm", "-rf", ODREFRESH_OUTPUT_DIR);
+
+        // Expect the compilation in Compilation OS to finish successfully.
+        {
+            long start = System.currentTimeMillis();
+            result =
+                    android.runForResultWithTimeout(
+                            ODREFRESH_TIMEOUT_MS, COMPOSD_CMD_BIN, "async-odrefresh");
+            long elapsed = System.currentTimeMillis() - start;
+            assertThat(result.getExitCode()).isEqualTo(0);
+            CLog.i("Comp OS compilation took " + elapsed + "ms");
+        }
+        killVmAndReconnectAdb();
+
+        // Save the actual checksum for the output directory.
+        String actualChecksumSnapshot = checksumDirectoryContentPartial(android,
+                ODREFRESH_OUTPUT_DIR);
+
+        // Expect the output of Comp OS to be the same as compiled on Android.
+        assertThat(actualChecksumSnapshot).isEqualTo(expectedChecksumSnapshot);
+
+        // Expect the extra files generated by CompOS to exist.
+        android.assumeSuccess("test -f " + ODREFRESH_OUTPUT_DIR + "/compos.info");
+        android.assumeSuccess("test -f " + ODREFRESH_OUTPUT_DIR + "/compos.info.signature");
+    }
+
+    @Test
+    public void testOdrefreshDeprecated() throws Exception {
+        CommandRunner android = new CommandRunner(getDevice());
+
+        // Prepare the ground truth. The compilation on Android should finish successfully.
+        {
+            long start = System.currentTimeMillis();
+            CommandResult result = runOdrefresh(android, "--force-compile");
+            long elapsed = System.currentTimeMillis() - start;
+            assertThat(result.getExitCode()).isEqualTo(COMPILATION_SUCCESS);
+            CLog.i("Local compilation took " + elapsed + "ms");
+        }
+
+        // Save the expected checksum for the output directory.
         String expectedChecksumSnapshot = checksumDirectoryContent(android, ODREFRESH_OUTPUT_DIR);
 
         // Let --check clean up the output.
@@ -151,4 +201,15 @@
         // Sort by filename (second column) to make comparison easier.
         return runner.run("find " + path + " -type f -exec sha256sum {} \\; | sort -k2");
     }
+
+    private String checksumDirectoryContentPartial(CommandRunner runner, String path)
+            throws Exception {
+        // Sort by filename (second column) to make comparison easier. Filter out compos.info and
+        // compos.info.signature since they are only generated by CompOS.
+        // TODO(b/210473615): Remove irrelevant APEXes (i.e. those that don't contribute to the
+        // classpaths, and thus aren't in the VM) from cache-info.xml.
+        return runner.run("cd " + path + "; find -type f -exec sha256sum {} \\;"
+                + "| grep -v cache-info.xml | grep -v compos.info"
+                + "| sort -k2");
+    }
 }
diff --git a/compos/verify_key/verify_key.rs b/compos/verify_key/verify_key.rs
index 945acb4..e0ed5e5 100644
--- a/compos/verify_key/verify_key.rs
+++ b/compos/verify_key/verify_key.rs
@@ -105,8 +105,11 @@
     let instance_image = File::open(instance_image).context("Failed to open instance image")?;
 
     let virtualization_service = VmInstance::connect_to_virtualization_service()?;
-    let vm_instance =
-        VmInstance::start(&*virtualization_service, instance_image, &VmParameters { debug_mode })?;
+    let vm_instance = VmInstance::start(
+        &*virtualization_service,
+        instance_image,
+        &VmParameters { debug_mode, ..Default::default() },
+    )?;
     let service = vm_instance.get_service()?;
 
     let result = service.verifySigningKey(&blob, &public_key).context("Verifying signing key")?;
diff --git a/docs/getting_started/index.md b/docs/getting_started/index.md
index f82f982..6d3b208 100644
--- a/docs/getting_started/index.md
+++ b/docs/getting_started/index.md
@@ -36,9 +36,9 @@
 all can run via `atest`.
 
 ```shell
-atest VirtualizationTestCasea
-atest MicrodroidHostTestHostCases
-atest MicrodroidDemoTestApp
+atest VirtualizationTestCases
+atest MicrodroidHostTestCases
+atest MicrodroidTestApp
 ```
 
 If you run into problems, inspect the logs produced by `atest`. Their location is printed at the
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineCallback.java b/javalib/src/android/system/virtualmachine/VirtualMachineCallback.java
index 9dbed64..2ddaf30 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineCallback.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineCallback.java
@@ -33,7 +33,12 @@
 public interface VirtualMachineCallback {
     /** @hide */
     @Retention(RetentionPolicy.SOURCE)
-    @IntDef({ERROR_UNKNOWN, ERROR_PAYLOAD_VERIFICATION_FAILED, ERROR_PAYLOAD_CHANGED})
+    @IntDef({
+        ERROR_UNKNOWN,
+        ERROR_PAYLOAD_VERIFICATION_FAILED,
+        ERROR_PAYLOAD_CHANGED,
+        ERROR_PAYLOAD_INVALID_CONFIG
+    })
     @interface ErrorCode {}
 
     /** Error code for all other errors not listed below. */
@@ -48,6 +53,9 @@
     /** Error code indicating that the payload is verified, but has changed since the last boot. */
     int ERROR_PAYLOAD_CHANGED = 2;
 
+    /** Error code indicating that the payload config is invalid. */
+    int ERROR_PAYLOAD_INVALID_CONFIG = 3;
+
     /** Called when the payload starts in the VM. */
     void onPayloadStarted(@NonNull VirtualMachine vm, @Nullable ParcelFileDescriptor stream);
 
diff --git a/microdroid/init.rc b/microdroid/init.rc
index 664402f..86f6d0a 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -18,9 +18,13 @@
     start ueventd
 
     mkdir /mnt/apk 0755 system system
+    mkdir /mnt/extra-apk 0755 root root
     # Microdroid_manager starts apkdmverity/zipfuse/apexd
     start microdroid_manager
 
+    # restorecon so microdroid_manager can create subdirectories
+    restorecon /mnt/extra-apk
+
     # Wait for apexd to finish activating APEXes before starting more processes.
     wait_for_prop apexd.status activated
     perform_apex_config
diff --git a/microdroid/keymint/Android.bp b/microdroid/keymint/Android.bp
index a0bbaf4..7915ada 100644
--- a/microdroid/keymint/Android.bp
+++ b/microdroid/keymint/Android.bp
@@ -14,8 +14,10 @@
         "-Wall",
         "-Wextra",
     ],
+    defaults: [
+        "keymint_use_latest_hal_aidl_ndk_shared",
+    ],
     shared_libs: [
-        "android.hardware.security.keymint-V1-ndk",
         "lib_android_keymaster_keymint_utils",
         "libbase",
         "libbinder_ndk",
diff --git a/microdroid_manager/Android.bp b/microdroid_manager/Android.bp
index f427966..23a61d9 100644
--- a/microdroid_manager/Android.bp
+++ b/microdroid_manager/Android.bp
@@ -17,13 +17,16 @@
         "libbinder_rpc_unstable_bindgen",
         "libbinder_rs",
         "libbyteorder",
+        "libglob",
         "libidsig",
+        "libitertools",
         "libkernlog",
         "liblibc",
         "liblog_rust",
         "libmicrodroid_metadata",
         "libmicrodroid_payload_config",
         "libnix",
+        "libonce_cell",
         "libprotobuf",
         "libring",
         "librustutils",
diff --git a/microdroid_manager/src/instance.rs b/microdroid_manager/src/instance.rs
index 47230e3..aadb71f 100644
--- a/microdroid_manager/src/instance.rs
+++ b/microdroid_manager/src/instance.rs
@@ -314,7 +314,9 @@
 #[derive(Debug, Serialize, Deserialize, PartialEq)]
 pub struct MicrodroidData {
     pub apk_data: ApkData,
+    pub extra_apks_data: Vec<ApkData>,
     pub apex_data: Vec<ApexData>,
+    pub bootconfig: Box<[u8]>,
 }
 
 #[derive(Debug, Serialize, Deserialize, PartialEq)]
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index 99ebc51..fccf031 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -23,14 +23,17 @@
 use apkverify::{get_public_key_der, verify};
 use binder::unstable_api::{new_spibinder, AIBinder};
 use binder::{FromIBinder, Strong};
+use glob::glob;
 use idsig::V4Signature;
+use itertools::sorted;
 use log::{error, info, warn};
 use microdroid_metadata::{write_metadata, Metadata};
 use microdroid_payload_config::{Task, TaskType, VmPayloadConfig};
+use once_cell::sync::OnceCell;
 use payload::{get_apex_data_from_payload, load_metadata, to_metadata};
 use rustutils::system_properties;
 use rustutils::system_properties::PropertyWatcher;
-use std::fs::{self, File, OpenOptions};
+use std::fs::{self, create_dir, File, OpenOptions};
 use std::os::unix::io::{FromRawFd, IntoRawFd};
 use std::path::Path;
 use std::process::{Child, Command, Stdio};
@@ -39,17 +42,15 @@
 use vsock::VsockStream;
 
 use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::IVirtualMachineService::{
-    ERROR_PAYLOAD_CHANGED, ERROR_PAYLOAD_VERIFICATION_FAILED, ERROR_UNKNOWN, VM_BINDER_SERVICE_PORT, VM_STREAM_SERVICE_PORT, IVirtualMachineService,
+    ERROR_PAYLOAD_CHANGED, ERROR_PAYLOAD_VERIFICATION_FAILED, ERROR_PAYLOAD_INVALID_CONFIG, ERROR_UNKNOWN, VM_BINDER_SERVICE_PORT, VM_STREAM_SERVICE_PORT, IVirtualMachineService,
 };
 
 const WAIT_TIMEOUT: Duration = Duration::from_secs(10);
-const APK_DM_VERITY_ARGUMENT: ApkDmverityArgument = {
-    ApkDmverityArgument {
-        apk: "/dev/block/by-name/microdroid-apk",
-        idsig: "/dev/block/by-name/microdroid-apk-idsig",
-        name: "microdroid-apk",
-    }
-};
+const MAIN_APK_PATH: &str = "/dev/block/by-name/microdroid-apk";
+const MAIN_APK_IDSIG_PATH: &str = "/dev/block/by-name/microdroid-apk-idsig";
+const MAIN_APK_DEVICE_NAME: &str = "microdroid-apk";
+const EXTRA_APK_PATH_PATTERN: &str = "/dev/block/by-name/extra-apk-*";
+const EXTRA_IDSIG_PATH_PATTERN: &str = "/dev/block/by-name/extra-idsig-*";
 const DM_MOUNTED_APK_PATH: &str = "/dev/block/mapper/microdroid-apk";
 const APKDMVERITY_BIN: &str = "/system/bin/apkdmverity";
 const ZIPFUSE_BIN: &str = "/system/bin/zipfuse";
@@ -66,6 +67,8 @@
     PayloadChanged(String),
     #[error("Payload verification has failed: {0}")]
     PayloadVerificationFailed(String),
+    #[error("Payload config is invalid: {0}")]
+    InvalidConfig(String),
 }
 
 fn translate_error(err: &Error) -> (i32, String) {
@@ -75,6 +78,7 @@
             MicrodroidError::PayloadVerificationFailed(msg) => {
                 (ERROR_PAYLOAD_VERIFICATION_FAILED, msg.to_string())
             }
+            MicrodroidError::InvalidConfig(msg) => (ERROR_PAYLOAD_INVALID_CONFIG, msg.to_string()),
         }
     } else {
         (ERROR_UNKNOWN, err.to_string())
@@ -112,16 +116,27 @@
     info!("started.");
 
     let service = get_vms_rpc_binder().context("cannot connect to VirtualMachineService")?;
-    if let Err(err) = try_start_payload(&service) {
-        let (error_code, message) = translate_error(&err);
-        service.notifyError(error_code, &message)?;
-        Err(err)
-    } else {
-        Ok(())
+    match try_run_payload(&service) {
+        Ok(code) => {
+            info!("notifying payload finished");
+            service.notifyPayloadFinished(code)?;
+            if code == 0 {
+                info!("task successfully finished");
+            } else {
+                error!("task exited with exit code: {}", code);
+            }
+            Ok(())
+        }
+        Err(err) => {
+            error!("task terminated: {:?}", err);
+            let (error_code, message) = translate_error(&err);
+            service.notifyError(error_code, &message)?;
+            Err(err)
+        }
     }
 }
 
-fn try_start_payload(service: &Strong<dyn IVirtualMachineService>) -> Result<()> {
+fn try_run_payload(service: &Strong<dyn IVirtualMachineService>) -> Result<i32> {
     let metadata = load_metadata().context("Failed to load payload metadata")?;
 
     let mut instance = InstanceDisk::new().context("Failed to load instance.img")?;
@@ -151,33 +166,42 @@
     )
     .context("Failed to run zipfuse")?;
 
-    if !metadata.payload_config_path.is_empty() {
-        let config = load_config(Path::new(&metadata.payload_config_path))?;
+    ensure!(
+        !metadata.payload_config_path.is_empty(),
+        MicrodroidError::InvalidConfig("No payload_config_path in metadata".to_string())
+    );
 
-        let fake_secret = "This is a placeholder for a value that is derived from the images that are loaded in the VM.";
-        if let Err(err) = rustutils::system_properties::write("ro.vmsecret.keymint", fake_secret) {
-            warn!("failed to set ro.vmsecret.keymint: {}", err);
-        }
+    let config = load_config(Path::new(&metadata.payload_config_path))?;
+    if config.extra_apks.len() != verified_data.extra_apks_data.len() {
+        return Err(anyhow!(
+            "config expects {} extra apks, but found only {}",
+            config.extra_apks.len(),
+            verified_data.extra_apks_data.len()
+        ));
+    }
+    mount_extra_apks(&config)?;
 
-        // Wait until apex config is done. (e.g. linker configuration for apexes)
-        // TODO(jooyung): wait until sys.boot_completed?
-        wait_for_apex_config_done()?;
-
-        if let Some(main_task) = &config.task {
-            exec_task(main_task, service).map_err(|e| {
-                error!("failed to execute task: {}", e);
-                e
-            })?;
-        }
+    let fake_secret = "This is a placeholder for a value that is derived from the images that are loaded in the VM.";
+    if let Err(err) = rustutils::system_properties::write("ro.vmsecret.keymint", fake_secret) {
+        warn!("failed to set ro.vmsecret.keymint: {}", err);
     }
 
-    Ok(())
+    // Wait until apex config is done. (e.g. linker configuration for apexes)
+    // TODO(jooyung): wait until sys.boot_completed?
+    wait_for_apex_config_done()?;
+
+    ensure!(
+        config.task.is_some(),
+        MicrodroidError::InvalidConfig("No task in VM config".to_string())
+    );
+    exec_task(&config.task.unwrap(), service)
 }
 
 struct ApkDmverityArgument<'a> {
     apk: &'a str,
     idsig: &'a str,
     name: &'a str,
+    saved_root_hash: Option<&'a RootHash>,
 }
 
 fn run_apkdmverity(args: &[ApkDmverityArgument]) -> Result<Child> {
@@ -187,6 +211,11 @@
 
     for argument in args {
         cmd.arg("--apk").arg(argument.apk).arg(argument.idsig).arg(argument.name);
+        if let Some(root_hash) = argument.saved_root_hash {
+            cmd.arg(&to_hex_string(root_hash));
+        } else {
+            cmd.arg("none");
+        }
     }
 
     cmd.spawn().context("Spawn apkdmverity")
@@ -215,19 +244,93 @@
 ) -> Result<MicrodroidData> {
     let start_time = SystemTime::now();
 
+    if let Some(saved_bootconfig) = saved_data.map(|d| &d.bootconfig) {
+        ensure!(
+            saved_bootconfig.as_ref() == get_bootconfig()?.as_slice(),
+            MicrodroidError::PayloadChanged(String::from("Bootconfig has changed."))
+        );
+    }
+
+    // Verify main APK
     let root_hash = saved_data.map(|d| &d.apk_data.root_hash);
-    let root_hash_from_idsig = get_apk_root_hash_from_idsig()?;
+    let root_hash_from_idsig = get_apk_root_hash_from_idsig(MAIN_APK_IDSIG_PATH)?;
     let root_hash_trustful = root_hash == Some(&root_hash_from_idsig);
 
     // If root_hash can be trusted, pass it to apkdmverity so that it uses the passed root_hash
     // instead of the value read from the idsig file.
-    if root_hash_trustful {
-        let root_hash = to_hex_string(root_hash.unwrap());
-        system_properties::write("microdroid_manager.apk_root_hash", &root_hash)?;
+    let main_apk_argument = {
+        ApkDmverityArgument {
+            apk: MAIN_APK_PATH,
+            idsig: MAIN_APK_IDSIG_PATH,
+            name: MAIN_APK_DEVICE_NAME,
+            saved_root_hash: if root_hash_trustful {
+                Some(root_hash_from_idsig.as_ref())
+            } else {
+                None
+            },
+        }
+    };
+    let mut apkdmverity_arguments = vec![main_apk_argument];
+
+    // Verify extra APKs
+    // For now, we can't read the payload config, so glob APKs and idsigs.
+    // Later, we'll check whether they match the payload config.
+
+    // sort globbed paths to match apks (extra-apk-{idx}) and idsigs (extra-idsig-{idx})
+    // e.g. "extra-apk-0" corresponds to "extra-idsig-0"
+    let extra_apks =
+        sorted(glob(EXTRA_APK_PATH_PATTERN)?.collect::<Result<Vec<_>, _>>()?).collect::<Vec<_>>();
+    let extra_idsigs =
+        sorted(glob(EXTRA_IDSIG_PATH_PATTERN)?.collect::<Result<Vec<_>, _>>()?).collect::<Vec<_>>();
+    if extra_apks.len() != extra_idsigs.len() {
+        return Err(anyhow!(
+            "Extra apks/idsigs mismatch: {} apks but {} idsigs",
+            extra_apks.len(),
+            extra_idsigs.len()
+        ));
+    }
+    let extra_apks_count = extra_apks.len();
+
+    let (extra_apk_names, extra_root_hashes_from_idsig): (Vec<_>, Vec<_>) = extra_idsigs
+        .iter()
+        .enumerate()
+        .map(|(i, extra_idsig)| {
+            (
+                format!("extra-apk-{}", i),
+                get_apk_root_hash_from_idsig(extra_idsig.to_str().unwrap())
+                    .expect("Can't find root hash from extra idsig"),
+            )
+        })
+        .unzip();
+
+    let saved_extra_root_hashes: Vec<_> = saved_data
+        .map(|d| d.extra_apks_data.iter().map(|apk_data| &apk_data.root_hash).collect())
+        .unwrap_or_else(Vec::new);
+    let extra_root_hashes_trustful: Vec<_> = extra_root_hashes_from_idsig
+        .iter()
+        .enumerate()
+        .map(|(i, root_hash_from_idsig)| {
+            saved_extra_root_hashes.get(i).copied() == Some(root_hash_from_idsig)
+        })
+        .collect();
+
+    for i in 0..extra_apks_count {
+        apkdmverity_arguments.push({
+            ApkDmverityArgument {
+                apk: extra_apks[i].to_str().unwrap(),
+                idsig: extra_idsigs[i].to_str().unwrap(),
+                name: &extra_apk_names[i],
+                saved_root_hash: if extra_root_hashes_trustful[i] {
+                    Some(&extra_root_hashes_from_idsig[i])
+                } else {
+                    None
+                },
+            }
+        });
     }
 
     // Start apkdmverity and wait for the dm-verity block
-    let mut apkdmverity_child = run_apkdmverity(&[APK_DM_VERITY_ARGUMENT])?;
+    let mut apkdmverity_child = run_apkdmverity(&apkdmverity_arguments)?;
 
     // While waiting for apkdmverity to mount APK, gathers public keys and root digests from
     // APEX payload.
@@ -259,25 +362,47 @@
     // taken only when the root_hash is un-trustful which can be either when this is the first boot
     // of the VM or APK was updated in the host.
     // TODO(jooyung): consider multithreading to make this faster
-    let apk_pubkey = if !root_hash_trustful {
-        verify(DM_MOUNTED_APK_PATH).context(MicrodroidError::PayloadVerificationFailed(format!(
-            "failed to verify {}",
-            DM_MOUNTED_APK_PATH
-        )))?
-    } else {
-        get_public_key_der(DM_MOUNTED_APK_PATH)?
-    };
+    let main_apk_pubkey = get_public_key_from_apk(DM_MOUNTED_APK_PATH, root_hash_trustful)?;
+    let extra_apks_data = extra_root_hashes_from_idsig
+        .into_iter()
+        .enumerate()
+        .map(|(i, extra_root_hash)| {
+            let mount_path = format!("/dev/block/mapper/{}", &extra_apk_names[i]);
+            let apk_pubkey = get_public_key_from_apk(&mount_path, extra_root_hashes_trustful[i])?;
+            Ok(ApkData { root_hash: extra_root_hash, pubkey: apk_pubkey })
+        })
+        .collect::<Result<Vec<_>>>()?;
 
     info!("payload verification successful. took {:#?}", start_time.elapsed().unwrap());
 
     // At this point, we can ensure that the root_hash from the idsig file is trusted, either by
     // fully verifying the APK or by comparing it with the saved root_hash.
     Ok(MicrodroidData {
-        apk_data: ApkData { root_hash: root_hash_from_idsig, pubkey: apk_pubkey },
+        apk_data: ApkData { root_hash: root_hash_from_idsig, pubkey: main_apk_pubkey },
+        extra_apks_data,
         apex_data: apex_data_from_payload,
+        bootconfig: get_bootconfig()?.clone().into_boxed_slice(),
     })
 }
 
+fn mount_extra_apks(config: &VmPayloadConfig) -> Result<()> {
+    // For now, only the number of APKs matters, as the mount points and dm-verity names are fixed.
+    for i in 0..config.extra_apks.len() {
+        let mount_dir = format!("/mnt/extra-apk/{}", i);
+        create_dir(Path::new(&mount_dir)).context("Failed to create mount dir for extra apks")?;
+
+        // don't wait, just detach
+        run_zipfuse(
+            "fscontext=u:object_r:zipfusefs:s0,context=u:object_r:extra_apk_file:s0",
+            Path::new(&format!("/dev/block/mapper/extra-apk-{}", i)),
+            Path::new(&mount_dir),
+        )
+        .context("Failed to zipfuse extra apks")?;
+    }
+
+    Ok(())
+}
+
 // Waits until linker config is generated
 fn wait_for_apex_config_done() -> Result<()> {
     let mut prop = PropertyWatcher::new(APEX_CONFIG_DONE_PROP)?;
@@ -291,12 +416,28 @@
     Ok(())
 }
 
-fn get_apk_root_hash_from_idsig() -> Result<Box<RootHash>> {
-    let mut idsig = File::open("/dev/block/by-name/microdroid-apk-idsig")?;
+fn get_apk_root_hash_from_idsig(path: &str) -> Result<Box<RootHash>> {
+    let mut idsig = File::open(path)?;
     let idsig = V4Signature::from(&mut idsig)?;
     Ok(idsig.hashing_info.raw_root_hash)
 }
 
+fn get_public_key_from_apk(apk: &str, root_hash_trustful: bool) -> Result<Box<[u8]>> {
+    if !root_hash_trustful {
+        verify(apk).context(MicrodroidError::PayloadVerificationFailed(format!(
+            "failed to verify {}",
+            apk
+        )))
+    } else {
+        get_public_key_der(apk)
+    }
+}
+
+fn get_bootconfig() -> Result<&'static Vec<u8>> {
+    static VAL: OnceCell<Vec<u8>> = OnceCell::new();
+    VAL.get_or_try_init(|| fs::read("/proc/bootconfig").context("Failed to read bootconfig"))
+}
+
 fn load_config(path: &Path) -> Result<VmPayloadConfig> {
     info!("loading config from {:?}...", path);
     let file = ioutil::wait_for_file(path, WAIT_TIMEOUT)?;
@@ -305,7 +446,7 @@
 
 /// Executes the given task. Stdout of the task is piped into the vsock stream to the
 /// virtualizationservice in the host side.
-fn exec_task(task: &Task, service: &Strong<dyn IVirtualMachineService>) -> Result<()> {
+fn exec_task(task: &Task, service: &Strong<dyn IVirtualMachineService>) -> Result<i32> {
     info!("executing main task {:?}...", task);
     let mut command = build_command(task)?;
 
@@ -319,19 +460,7 @@
     }
 
     let exit_status = command.spawn()?.wait()?;
-    if let Some(code) = exit_status.code() {
-        info!("notifying payload finished");
-        service.notifyPayloadFinished(code)?;
-
-        if code == 0 {
-            info!("task successfully finished");
-        } else {
-            error!("task exited with exit code: {}", code);
-        }
-    } else {
-        error!("task terminated: {}", exit_status);
-    }
-    Ok(())
+    exit_status.code().ok_or_else(|| anyhow!("Failed to get exit_code from the payload."))
 }
 
 fn build_command(task: &Task) -> Result<Command> {
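
A minimal, standalone sketch of two techniques used in the microdroid_manager/src/main.rs change above, assuming the same glob, itertools, once_cell, and anyhow crates it already depends on: pairing the globbed extra-apk and extra-idsig paths by sorted index, and caching the bootconfig read for later comparison against the saved instance data. The glob patterns are passed in as parameters here instead of the fixed /dev/block/by-name paths; this is an illustration only, not part of the change.

```rust
use anyhow::{anyhow, Context, Result};
use glob::glob;
use itertools::sorted;
use once_cell::sync::OnceCell;
use std::fs;
use std::path::PathBuf;

/// Pairs extra APKs with their idsig files by sorting both globbed listings,
/// so that "extra-apk-0" lines up with "extra-idsig-0", and so on.
fn pair_extra_apks(apk_pattern: &str, idsig_pattern: &str) -> Result<Vec<(PathBuf, PathBuf)>> {
    let apks: Vec<PathBuf> =
        sorted(glob(apk_pattern)?.collect::<Result<Vec<_>, _>>()?).collect();
    let idsigs: Vec<PathBuf> =
        sorted(glob(idsig_pattern)?.collect::<Result<Vec<_>, _>>()?).collect();
    if apks.len() != idsigs.len() {
        return Err(anyhow!("{} apks but {} idsigs", apks.len(), idsigs.len()));
    }
    Ok(apks.into_iter().zip(idsigs).collect())
}

/// Reads /proc/bootconfig once and caches the bytes; later callers get the same slice,
/// which the change above compares against the copy saved in the instance disk.
fn get_bootconfig() -> Result<&'static Vec<u8>> {
    static VAL: OnceCell<Vec<u8>> = OnceCell::new();
    VAL.get_or_try_init(|| fs::read("/proc/bootconfig").context("Failed to read bootconfig"))
}

fn main() -> Result<()> {
    // Hypothetical patterns; on a real microdroid these would be the by-name block devices.
    for (apk, idsig) in pair_extra_apks("extra-apk-*", "extra-idsig-*")? {
        println!("{} <-> {}", apk.display(), idsig.display());
    }
    println!("bootconfig is {} bytes", get_bootconfig()?.len());
    Ok(())
}
```
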
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index 61c3edc..0e99745 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -15,14 +15,21 @@
  */
 package com.android.microdroid.test;
 
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsNot.not;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeNoException;
+import static org.junit.Assume.assumeThat;
+
+import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
 
 import android.content.Context;
 import android.os.ParcelFileDescriptor;
 import android.system.virtualmachine.VirtualMachine;
 import android.system.virtualmachine.VirtualMachineCallback;
 import android.system.virtualmachine.VirtualMachineConfig;
+import android.system.virtualmachine.VirtualMachineConfig.DebugLevel;
 import android.system.virtualmachine.VirtualMachineException;
 import android.system.virtualmachine.VirtualMachineManager;
 
@@ -36,6 +43,9 @@
 import org.junit.runner.RunWith;
 import org.junit.runners.JUnit4;
 
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -148,4 +158,65 @@
                 };
         listener.runToFinish(mInner.mVm);
     }
+
+    @Test
+    public void changingDebugLevelInvalidatesVmIdentity()
+            throws VirtualMachineException, InterruptedException, IOException {
+        assumeThat("Skip on Cuttlefish. b/195765441",
+                android.os.Build.DEVICE, is(not("vsoc_x86_64")));
+
+        VirtualMachineConfig.Builder builder =
+                new VirtualMachineConfig.Builder(mInner.mContext, "assets/vm_config.json");
+        VirtualMachineConfig normalConfig = builder.debugLevel(DebugLevel.NONE).build();
+        mInner.mVm = mInner.mVmm.getOrCreate("test_vm", normalConfig);
+        VmEventListener listener =
+                new VmEventListener() {
+                    @Override
+                    public void onPayloadReady(VirtualMachine vm) {
+                        // TODO(b/208639280): remove this sleep. For now, we need to wait for a few
+                        // seconds so that crosvm can actually persist instance.img.
+                        try {
+                            Thread.sleep(30 * 1000);
+                        } catch (InterruptedException e) { }
+                        forceStop(vm);
+                    }
+                };
+        listener.runToFinish(mInner.mVm);
+
+        // Launch the same VM with a different debug level. The Java API prohibits this (thankfully).
+        // For testing, we do that by creating another VM with the new debug level and copying its
+        // config file from the new VM directory to the old VM directory.
+        VirtualMachineConfig debugConfig = builder.debugLevel(DebugLevel.FULL).build();
+        VirtualMachine newVm = mInner.mVmm.getOrCreate("test_debug_vm", debugConfig);
+        File vmRoot = new File(mInner.mContext.getFilesDir(), "vm");
+        File newVmConfig = new File(new File(vmRoot, "test_debug_vm"), "config.xml");
+        File oldVmConfig = new File(new File(vmRoot, "test_vm"), "config.xml");
+        Files.copy(newVmConfig.toPath(), oldVmConfig.toPath(), REPLACE_EXISTING);
+        newVm.delete();
+        mInner.mVm = mInner.mVmm.get("test_vm"); // re-load with the copied-in config file.
+        listener =
+                new VmEventListener() {
+                    private boolean mPayloadStarted = false;
+                    private boolean mErrorOccurred = false;
+
+                    @Override
+                    public void onPayloadStarted(VirtualMachine vm, ParcelFileDescriptor stream) {
+                        mPayloadStarted = true;
+                        forceStop(vm);
+                    }
+
+                    @Override
+                    public void onError(VirtualMachine vm, int errorCode, String message) {
+                        mErrorOccurred = true;
+                        forceStop(vm);
+                    }
+
+                    @Override
+                    public void onDied(VirtualMachine vm) {
+                        assertFalse(mPayloadStarted);
+                        assertTrue(mErrorOccurred);
+                    }
+                };
+        listener.runToFinish(mInner.mVm);
+    }
 }
diff --git a/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl b/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
index 97f6ca3..1a16f2a 100644
--- a/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
+++ b/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
@@ -64,4 +64,9 @@
      * Error code indicating that the payload is verified, but has changed since the last boot.
      */
     const int ERROR_PAYLOAD_CHANGED = 2;
+
+    /**
+     * Error code indicating that the payload config is invalid.
+     */
+    const int ERROR_PAYLOAD_INVALID_CONFIG = 3;
 }