Merge "Fix build error."
diff --git a/apkdmverity/Android.bp b/apkdmverity/Android.bp
index 9b53a47..df46324 100644
--- a/apkdmverity/Android.bp
+++ b/apkdmverity/Android.bp
@@ -13,6 +13,7 @@
         "libbitflags",
         "libclap",
         "libdata_model",
+        "libidsig",
         "liblibc",
         "libnix",
         "libnum_traits",
diff --git a/apkdmverity/src/main.rs b/apkdmverity/src/main.rs
index ff3944e..9d1ef1c 100644
--- a/apkdmverity/src/main.rs
+++ b/apkdmverity/src/main.rs
@@ -21,15 +21,13 @@
 //! system managed by the host Android which is assumed to be compromisable, it is important to
 //! keep the integrity of the file "inside" Microdroid.
 
-mod apksigv4;
 mod dm;
 mod loopdevice;
 mod util;
 
-use crate::apksigv4::*;
-
 use anyhow::{bail, Context, Result};
 use clap::{App, Arg};
+use idsig::{HashAlgorithm, V4Signature};
 use std::fmt::Debug;
 use std::fs;
 use std::fs::File;
@@ -112,7 +110,7 @@
         .hash_device(&hash_device)
         .root_digest(&sig.hashing_info.raw_root_hash)
         .hash_algorithm(match sig.hashing_info.hash_algorithm {
-            apksigv4::HashAlgorithm::SHA256 => dm::DmVerityHashAlgorithm::SHA256,
+            HashAlgorithm::SHA256 => dm::DmVerityHashAlgorithm::SHA256,
         })
         .salt(&sig.hashing_info.salt)
         .build()
diff --git a/apkverify/Android.bp b/apkverify/Android.bp
index d2dbf41..df1cac6 100644
--- a/apkverify/Android.bp
+++ b/apkverify/Android.bp
@@ -4,7 +4,6 @@
 
 rust_defaults {
     name: "libapkverify.defaults",
-    host_supported: true,
     crate_name: "apkverify",
     srcs: ["src/lib.rs"],
     prefer_rlib: true,
@@ -33,12 +32,14 @@
 
 rust_test {
     name: "libapkverify.integration_test",
-    host_supported: true,
     crate_name: "apkverify_test",
     srcs: ["tests/*_test.rs"],
     prefer_rlib: true,
     edition: "2018",
     test_suites: ["general-tests"],
-    rustlibs: ["libapkverify"],
+    rustlibs: [
+        "libapkverify",
+        "libzip",
+    ],
     data: ["tests/data/*"],
 }
diff --git a/apkverify/src/lib.rs b/apkverify/src/lib.rs
index 9930099..869431e 100644
--- a/apkverify/src/lib.rs
+++ b/apkverify/src/lib.rs
@@ -18,6 +18,7 @@
 
 mod bytes_ext;
 mod sigutil;
+mod testing;
 mod v3;
 mod ziputil;
 
diff --git a/apkverify/src/testing.rs b/apkverify/src/testing.rs
new file mode 100644
index 0000000..777afb8
--- /dev/null
+++ b/apkverify/src/testing.rs
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! A collection of utilities for testing
+
+/// Asserts that `haystack.contains(needle)`
+#[macro_export]
+macro_rules! assert_contains {
+    ($haystack:expr,$needle:expr $(,)?) => {
+        match (&$haystack, &$needle) {
+            (haystack_value, needle_value) => {
+                assert!(
+                    haystack_value.contains(needle_value),
+                    "{} is not found in {}",
+                    needle_value,
+                    haystack_value
+                );
+            }
+        }
+    };
+}
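
Illustration (not part of the patch): a minimal use of the exported `assert_contains!` macro from a crate that depends on `libapkverify`, matching how the integration test below consumes it.

```rust
// Minimal sketch: exercising apkverify::assert_contains! from a dependent test crate.
use apkverify::assert_contains;

#[test]
fn assert_contains_demo() {
    let message = String::from("Public key mismatch in signer block");
    // Passes because the needle is a substring of the haystack.
    assert_contains!(message, "Public key mismatch");
    // A failing call panics with "<needle> is not found in <haystack>".
}
```
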
diff --git a/apkverify/src/ziputil.rs b/apkverify/src/ziputil.rs
index dbf5131..bfb1c01 100644
--- a/apkverify/src/ziputil.rs
+++ b/apkverify/src/ziputil.rs
@@ -22,8 +22,10 @@
 use zip::ZipArchive;
 
 const EOCD_MIN_SIZE: usize = 22;
+const EOCD_CENTRAL_DIRECTORY_SIZE_FIELD_OFFSET: usize = 12;
 const EOCD_CENTRAL_DIRECTORY_OFFSET_FIELD_OFFSET: usize = 16;
 const EOCD_MAGIC: u32 = 0x06054b50;
+const ZIP64_MARK: u32 = 0xffffffff;
 
 #[derive(Debug, PartialEq)]
 pub struct ZipSections {
@@ -44,30 +46,39 @@
     // retrieve reader back
     reader = archive.into_inner();
     // the current position should point EOCD offset
-    let eocd_offset = reader.seek(SeekFrom::Current(0))?;
+    let eocd_offset = reader.seek(SeekFrom::Current(0))? as u32;
     let mut eocd = vec![0u8; eocd_size as usize];
     reader.read_exact(&mut eocd)?;
     if (&eocd[0..]).get_u32_le() != EOCD_MAGIC {
         bail!("Invalid ZIP: ZipArchive::new() should point EOCD after reading.");
     }
-    let central_directory_offset = get_central_directory_offset(&eocd)?;
-    let central_directory_size = eocd_offset as u32 - central_directory_offset;
+    let (central_directory_size, central_directory_offset) = get_central_directory(&eocd)?;
+    if central_directory_offset == ZIP64_MARK || central_directory_size == ZIP64_MARK {
+        bail!("Unsupported ZIP: ZIP64 is not supported.");
+    }
+    if central_directory_offset + central_directory_size != eocd_offset {
+        bail!("Invalid ZIP: EOCD should follow CD with no extra data or overlap.");
+    }
+
     Ok((
         reader,
         ZipSections {
             central_directory_offset,
             central_directory_size,
-            eocd_offset: eocd_offset as u32,
+            eocd_offset,
             eocd_size: eocd_size as u32,
         },
     ))
 }
 
-fn get_central_directory_offset(buf: &[u8]) -> Result<u32> {
+fn get_central_directory(buf: &[u8]) -> Result<(u32, u32)> {
     if buf.len() < EOCD_MIN_SIZE {
         bail!("Invalid EOCD size: {}", buf.len());
     }
-    Ok((&buf[EOCD_CENTRAL_DIRECTORY_OFFSET_FIELD_OFFSET..]).get_u32_le())
+    let mut buf = &buf[EOCD_CENTRAL_DIRECTORY_SIZE_FIELD_OFFSET..];
+    let size = buf.get_u32_le();
+    let offset = buf.get_u32_le();
+    Ok((size, offset))
 }
 
 /// Update EOCD's central_directory_offset field.
@@ -78,3 +89,42 @@
     (&mut buf[EOCD_CENTRAL_DIRECTORY_OFFSET_FIELD_OFFSET..]).put_u32_le(value);
     Ok(())
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::assert_contains;
+    use std::io::{Cursor, Write};
+    use zip::{write::FileOptions, ZipWriter};
+
+    fn create_test_zip() -> Cursor<Vec<u8>> {
+        let mut writer = ZipWriter::new(Cursor::new(Vec::new()));
+        writer.start_file("testfile", FileOptions::default()).unwrap();
+        writer.write_all(b"testcontent").unwrap();
+        writer.finish().unwrap()
+    }
+
+    #[test]
+    fn test_zip_sections() {
+        let (cursor, sections) = zip_sections(create_test_zip()).unwrap();
+        assert_eq!(sections.eocd_offset, (cursor.get_ref().len() - EOCD_MIN_SIZE) as u32);
+    }
+
+    #[test]
+    fn test_reject_if_extra_data_between_cd_and_eocd() {
+        // prepare normal zip
+        let buf = create_test_zip().into_inner();
+
+        // insert garbage between CD and EOCD.
+        // Use a copy of the CD itself as the garbage, to keep zip-rs happy: as an implementation
+        // detail, zip-rs reads the CD at (eocd_offset - cd_size) rather than at the cd_offset
+        // recorded in the EOCD.
+        let (pre_eocd, eocd) = buf.split_at(buf.len() - EOCD_MIN_SIZE);
+        let (_, cd_offset) = get_central_directory(eocd).unwrap();
+        let cd = &pre_eocd[cd_offset as usize..];
+
+        // ZipArchive::new() succeeds, but we should reject
+        let res = zip_sections(Cursor::new([pre_eocd, cd, eocd].concat()));
+        assert!(res.is_err());
+        assert_contains!(res.err().unwrap().to_string(), "Invalid ZIP: EOCD should follow CD");
+    }
+}
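
Illustration (not part of the patch): the EOCD field layout that the new checks rely on, as a standalone sketch using only std. The central directory size sits at byte 12 and its offset at byte 16 of the 22-byte EOCD record; 0xffffffff in either field marks a ZIP64 archive.

```rust
use std::convert::TryInto;

// Standalone sketch of the EOCD fields read by get_central_directory() above.
fn read_cd_size_and_offset(eocd: &[u8]) -> Option<(u32, u32)> {
    const EOCD_MIN_SIZE: usize = 22;
    const SIZE_FIELD: usize = 12; // central directory size, little-endian u32
    const OFFSET_FIELD: usize = 16; // central directory offset, little-endian u32
    const ZIP64_MARK: u32 = 0xffffffff;

    if eocd.len() < EOCD_MIN_SIZE {
        return None;
    }
    let size = u32::from_le_bytes(eocd[SIZE_FIELD..SIZE_FIELD + 4].try_into().ok()?);
    let offset = u32::from_le_bytes(eocd[OFFSET_FIELD..OFFSET_FIELD + 4].try_into().ok()?);
    if size == ZIP64_MARK || offset == ZIP64_MARK {
        return None; // ZIP64: the real values live in the ZIP64 EOCD record.
    }
    Some((size, offset))
}
```
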
diff --git a/apkverify/tests/apkverify_test.rs b/apkverify/tests/apkverify_test.rs
index cad5ef2..3366524 100644
--- a/apkverify/tests/apkverify_test.rs
+++ b/apkverify/tests/apkverify_test.rs
@@ -14,22 +14,8 @@
  * limitations under the License.
  */
 
-use apkverify::verify;
-
-macro_rules! assert_contains {
-    ($haystack:expr,$needle:expr $(,)?) => {
-        match (&$haystack, &$needle) {
-            (haystack_value, needle_value) => {
-                assert!(
-                    haystack_value.contains(needle_value),
-                    "{} is not found in {}",
-                    needle_value,
-                    haystack_value
-                );
-            }
-        }
-    };
-}
+use apkverify::{assert_contains, verify};
+use std::matches;
 
 #[test]
 fn test_verify_v3() {
@@ -49,3 +35,14 @@
     assert!(res.is_err());
     assert_contains!(res.err().unwrap().to_string(), "Public key mismatch");
 }
+
+#[test]
+fn test_verify_truncated_cd() {
+    use zip::result::ZipError;
+    let res = verify("tests/data/v2-only-truncated-cd.apk");
+    // TODO(jooyung): consider making a helper for err assertion
+    assert!(matches!(
+        res.err().unwrap().root_cause().downcast_ref::<ZipError>().unwrap(),
+        ZipError::InvalidArchive(_),
+    ));
+}
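
Illustration (not part of the patch): the TODO above asks for an error-assertion helper; one hypothetical shape for it, using only `anyhow` and `std`.

```rust
// Hypothetical helper sketched from the TODO; the name and shape are illustrative only.
fn assert_root_cause_matches<E>(err: &anyhow::Error, pred: impl Fn(&E) -> bool)
where
    E: std::error::Error + 'static,
{
    let cause = err
        .root_cause()
        .downcast_ref::<E>()
        .expect("root cause has an unexpected type");
    assert!(pred(cause), "root cause did not match the expected pattern");
}

// The new test could then read:
// assert_root_cause_matches::<zip::result::ZipError>(
//     &res.err().unwrap(),
//     |e| matches!(e, zip::result::ZipError::InvalidArchive(_)),
// );
```
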
diff --git a/apkverify/tests/data/README.md b/apkverify/tests/data/README.md
index 953ecdb..7556921 100644
--- a/apkverify/tests/data/README.md
+++ b/apkverify/tests/data/README.md
@@ -11,6 +11,4 @@
 Number of signers: 1
 ```
 
-Some test APKs are copied from tools/apksig/src/test/resources/com/android/apksig/.
-- v3-only-cert-and-public-key-mismatch.apk
-- v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk
+APK files are copied from tools/apksig/src/test/resources/com/android/apksig/.
diff --git a/apkverify/tests/data/v2-only-truncated-cd.apk b/apkverify/tests/data/v2-only-truncated-cd.apk
new file mode 100644
index 0000000..d2e3e8d
--- /dev/null
+++ b/apkverify/tests/data/v2-only-truncated-cd.apk
Binary files differ
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index 86ff4d6..f1e7529 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -88,8 +88,12 @@
         Self { file, merkle_tree: Arc::new(RwLock::new(MerkleLeaves::new())) }
     }
 
+    /// Returns the fs-verity digest size in bytes.
+    pub fn get_fsverity_digest_size(&self) -> usize {
+        Sha256Hasher::HASH_SIZE
+    }
+
     /// Calculates the fs-verity digest of the current file.
-    #[allow(dead_code)]
     pub fn calculate_fsverity_digest(&self) -> io::Result<Sha256Hash> {
         let merkle_tree = self.merkle_tree.read().unwrap();
         merkle_tree.calculate_fsverity_digest().map_err(|e| io::Error::new(io::ErrorKind::Other, e))
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index 1b0e935..6bdb498 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -28,8 +28,8 @@
 use std::time::Duration;
 
 use fuse::filesystem::{
-    Context, DirEntry, DirectoryIterator, Entry, FileSystem, FsOptions, SetattrValid,
-    ZeroCopyReader, ZeroCopyWriter,
+    Context, DirEntry, DirectoryIterator, Entry, FileSystem, FsOptions, GetxattrReply,
+    SetattrValid, ZeroCopyReader, ZeroCopyWriter,
 };
 use fuse::mount::MountOption;
 
@@ -374,6 +374,38 @@
             _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
         }
     }
+
+    fn getxattr(
+        &self,
+        _ctx: Context,
+        inode: Self::Inode,
+        name: &CStr,
+        size: u32,
+    ) -> io::Result<GetxattrReply> {
+        match self.get_file_config(&inode)? {
+            FileConfig::RemoteVerifiedNew { editor } => {
+                // FUSE ioctl is limited, thus we can't implement fs-verity ioctls without a kernel
+                // change (see b/196635431). Until it's possible, use xattr to expose what we need
+                // as an authfs specific API.
+                if name != CStr::from_bytes_with_nul(b"authfs.fsverity.digest\0").unwrap() {
+                    return Err(io::Error::from_raw_os_error(libc::ENODATA));
+                }
+
+                if size == 0 {
+                    // Per protocol, when size is 0, return the value size.
+                    Ok(GetxattrReply::Count(editor.get_fsverity_digest_size() as u32))
+                } else {
+                    let digest = editor.calculate_fsverity_digest()?;
+                    if digest.len() > size as usize {
+                        Err(io::Error::from_raw_os_error(libc::ERANGE))
+                    } else {
+                        Ok(GetxattrReply::Value(digest.to_vec()))
+                    }
+                }
+            }
+            _ => Err(io::Error::from_raw_os_error(libc::ENODATA)),
+        }
+    }
 }
 
 /// Mount and start the FUSE instance. This requires CAP_SYS_ADMIN.
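
Illustration (not part of the patch): from the guest side, the xattr above is read with plain getxattr(2). A hedged client sketch of the size-query-then-read protocol that the GetxattrReply::Count / GetxattrReply::Value branches implement:

```rust
// Sketch of a client reading the "authfs.fsverity.digest" xattr exposed above.
use std::ffi::CString;
use std::io;

fn read_fsverity_digest_xattr(path: &str) -> io::Result<Vec<u8>> {
    let path = CString::new(path).unwrap();
    let name = CString::new("authfs.fsverity.digest").unwrap();
    // Size query: with a zero-sized buffer the call returns the value size (GetxattrReply::Count).
    let size = unsafe { libc::getxattr(path.as_ptr(), name.as_ptr(), std::ptr::null_mut(), 0) };
    if size < 0 {
        return Err(io::Error::last_os_error());
    }
    // Actual read with a buffer of that size (GetxattrReply::Value); a too-small buffer => ERANGE.
    let mut buf = vec![0u8; size as usize];
    let read = unsafe {
        libc::getxattr(path.as_ptr(), name.as_ptr(), buf.as_mut_ptr() as *mut libc::c_void, buf.len())
    };
    if read < 0 {
        return Err(io::Error::last_os_error());
    }
    buf.truncate(read as usize);
    Ok(buf)
}
```
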
diff --git a/compos/Android.bp b/compos/Android.bp
index 7f4f55c..faf9576 100644
--- a/compos/Android.bp
+++ b/compos/Android.bp
@@ -30,39 +30,16 @@
     name: "compsvc",
     srcs: ["src/compsvc_main.rs"],
     rustlibs: [
-        "authfs_aidl_interface-rust",
-        "compos_aidl_interface-rust",
-        "libandroid_logger",
-        "libanyhow",
-        "libbinder_rpc_unstable_bindgen",
-        "libbinder_rs",
-        "libclap",
-        "liblog_rust",
-        "libminijail_rust",
-    ],
-    prefer_rlib: true,
-    shared_libs: [
-        "libbinder_rpc_unstable",
-    ],
-    apex_available: [
-        "com.android.compos",
-    ],
-}
-
-rust_binary {
-    name: "compos_key_main",
-    srcs: ["src/compos_key_main.rs"],
-    edition: "2018",
-    rustlibs: [
-        "authfs_aidl_interface-rust",
-        "compos_aidl_interface-rust",
-        "android.system.keystore2-V1-rust",
         "android.hardware.security.keymint-V1-rust",
+        "android.system.keystore2-V1-rust",
+        "authfs_aidl_interface-rust",
+        "compos_aidl_interface-rust",
         "libandroid_logger",
         "libanyhow",
-        "libbinder_rs",
         "libbinder_rpc_unstable_bindgen",
+        "libbinder_rs",
         "libclap",
+        "liblibc",
         "liblog_rust",
         "libminijail_rust",
         "libring",
@@ -72,5 +49,7 @@
     shared_libs: [
         "libbinder_rpc_unstable",
     ],
-    apex_available: ["com.android.compos"],
+    apex_available: [
+        "com.android.compos",
+    ],
 }
diff --git a/compos/aidl/com/android/compos/ICompOsKeyService.aidl b/compos/aidl/com/android/compos/ICompOsService.aidl
similarity index 72%
rename from compos/aidl/com/android/compos/ICompOsKeyService.aidl
rename to compos/aidl/com/android/compos/ICompOsService.aidl
index eb2caa7..ec4f0f6 100644
--- a/compos/aidl/com/android/compos/ICompOsKeyService.aidl
+++ b/compos/aidl/com/android/compos/ICompOsService.aidl
@@ -17,10 +17,23 @@
 package com.android.compos;
 
 import com.android.compos.CompOsKeyData;
-import com.android.compos.ICompService;
+import com.android.compos.Metadata;
 
 /** {@hide} */
-interface ICompOsKeyService {
+interface ICompOsService {
+    /**
+     * Execute a command composed of the args, in a context that may be specified in the
+     * Metadata, e.g. with file descriptors pre-opened. The service is responsible for deciding
+     * which executables it may run.
+     *
+     * @param args The command line arguments to run. The 0-th arg is normally the program name,
+     *             which may not be used by the service. The service may be configured to always
+     *             use a fixed executable, or possibly to use the 0-th arg as an executable lookup
+     *             hint.
+     * @param metadata Additional information about the execution
+     * @return exit code of the program
+     */
+    byte execute(in String[] args, in Metadata metadata);
+
     /**
      * Generate a new public/private key pair suitable for signing CompOs output files.
      *
@@ -49,13 +62,4 @@
      */
     // STOPSHIP(b/193241041): We must not expose this from the PVM.
     byte[] sign(in byte[] keyBlob, in byte[] data);
-
-    /**
-     * Return an instance of ICompService that will sign output files with a given encrypted
-     * private key.
-     *
-     * @param keyBlob The encrypted blob containing the private key, as returned by
-     *                generateSigningKey().
-     */
-    ICompService getCompService(in byte[] keyBlob);
 }
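
Illustration (not part of the patch): a hedged Rust sketch of how a client such as pvm_exec might drive execute(). The annotation field names are taken from their usage elsewhere in this change (fd/file_size), and the sketch assumes Metadata carries only the two annotation lists used in compilation.rs.

```rust
// Illustrative client call; assumes the generated Rust bindings for ICompOsService.
use compos_aidl_interface::aidl::com::android::compos::{
    ICompOsService::ICompOsService, InputFdAnnotation::InputFdAnnotation, Metadata::Metadata,
    OutputFdAnnotation::OutputFdAnnotation,
};
use compos_aidl_interface::binder::Strong;

fn run_compiler(service: &Strong<dyn ICompOsService>) -> anyhow::Result<i8> {
    // The 0-th arg is the program name; the other flags reference pseudo FD numbers that the
    // service re-opens through authfs.
    let args: Vec<String> =
        ["dex2oat64", "--zip-fd=9", "--oat-fd=10", "--output-vdex-fd=11", "--image-fd=12"]
            .iter()
            .map(|s| s.to_string())
            .collect();
    let metadata = Metadata {
        input_fd_annotations: vec![InputFdAnnotation { fd: 9, file_size: 4096 }],
        output_fd_annotations: vec![
            OutputFdAnnotation { fd: 10 },
            OutputFdAnnotation { fd: 11 },
            OutputFdAnnotation { fd: 12 },
        ],
    };
    service.execute(&args, &metadata).map_err(|e| anyhow::anyhow!("execute() failed: {}", e))
}
```
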
diff --git a/compos/aidl/com/android/compos/ICompService.aidl b/compos/aidl/com/android/compos/ICompService.aidl
deleted file mode 100644
index 0e18442..0000000
--- a/compos/aidl/com/android/compos/ICompService.aidl
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.compos;
-
-import com.android.compos.Metadata;
-
-/** {@hide} */
-interface ICompService {
-    /**
-     * Execute a command composed of the args, in a context that may be specified in the Metadata,
-     * e.g. with file descriptors pre-opened. The service is responsible to decide what executables
-     * it may run.
-     *
-     * @param args The command line arguments to run. The 0-th args is normally the program name,
-     *             which may not be used by the service. The service may be configured to always use
-     *             a fixed executable, or possibly use the 0-th args are the executable lookup hint.
-     * @param metadata Additional information of the execution
-     * @return exit code of the program
-     */
-    byte execute(in String[] args, in Metadata metadata);
-}
diff --git a/compos/apex/Android.bp b/compos/apex/Android.bp
index 12d2f06..5b21802 100644
--- a/compos/apex/Android.bp
+++ b/compos/apex/Android.bp
@@ -39,7 +39,6 @@
 
     binaries: [
         "compos_key_cmd",
-        "compos_key_main",
         "compsvc",
         "pvm_exec",
     ],
diff --git a/compos/apk/assets/key_service_vm_config.json b/compos/apk/assets/key_service_vm_config.json
deleted file mode 100644
index 3b6b88c..0000000
--- a/compos/apk/assets/key_service_vm_config.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-    "version": 1,
-    "os": {
-        "name": "microdroid"
-    },
-    "task": {
-        "type": "executable",
-        "command": "/apex/com.android.compos/bin/compos_key_main",
-        "args": [
-            "--rpc-binder"
-        ]
-    },
-    "apexes": [
-        {
-            "name": "com.android.compos"
-        }
-    ]
-}
\ No newline at end of file
diff --git a/compos/apk/assets/vm_config.json b/compos/apk/assets/vm_config.json
index f9f1f90..3be8a8a 100644
--- a/compos/apk/assets/vm_config.json
+++ b/compos/apk/assets/vm_config.json
@@ -7,8 +7,7 @@
     "type": "executable",
     "command": "/apex/com.android.compos/bin/compsvc",
     "args": [
-      "--rpc-binder",
-      "/apex/com.android.art/bin/dex2oat64"
+      "--rpc-binder"
     ]
   },
   "apexes": [
diff --git a/compos/compos_key_cmd/compos_key_cmd.cpp b/compos/compos_key_cmd/compos_key_cmd.cpp
index 84a0a7c..04ba1d0 100644
--- a/compos/compos_key_cmd/compos_key_cmd.cpp
+++ b/compos/compos_key_cmd/compos_key_cmd.cpp
@@ -16,7 +16,7 @@
 
 #include <aidl/android/system/virtualizationservice/BnVirtualMachineCallback.h>
 #include <aidl/android/system/virtualizationservice/IVirtualizationService.h>
-#include <aidl/com/android/compos/ICompOsKeyService.h>
+#include <aidl/com/android/compos/ICompOsService.h>
 #include <android-base/file.h>
 #include <android-base/logging.h>
 #include <android-base/result.h>
@@ -56,7 +56,7 @@
 using aidl::android::system::virtualizationservice::IVirtualMachineCallback;
 using aidl::android::system::virtualizationservice::VirtualMachineConfig;
 using aidl::com::android::compos::CompOsKeyData;
-using aidl::com::android::compos::ICompOsKeyService;
+using aidl::com::android::compos::ICompOsService;
 using android::base::ErrnoError;
 using android::base::Error;
 using android::base::Result;
@@ -66,7 +66,7 @@
 using ndk::ScopedFileDescriptor;
 using ndk::SharedRefBase;
 
-constexpr unsigned int kRpcPort = 3142;
+constexpr unsigned int kRpcPort = 6432;
 
 constexpr const char* kConfigApkPath =
         "/apex/com.android.compos/app/CompOSPayloadApp/CompOSPayloadApp.apk";
@@ -89,11 +89,11 @@
     return std::vector<uint8_t>(str.begin(), str.end());
 }
 
-static std::shared_ptr<ICompOsKeyService> getService(int cid) {
+static std::shared_ptr<ICompOsService> getService(int cid) {
     LOG(INFO) << "Connecting to cid " << cid;
     ndk::SpAIBinder binder(cid == 0 ? AServiceManager_getService("android.system.composkeyservice")
                                     : RpcClient(cid, kRpcPort));
-    return ICompOsKeyService::fromBinder(binder);
+    return ICompOsService::fromBinder(binder);
 }
 
 namespace {
@@ -337,7 +337,7 @@
     return result;
 }
 
-static Result<void> signFile(ICompOsKeyService* service, const std::vector<uint8_t>& key_blob,
+static Result<void> signFile(ICompOsService* service, const std::vector<uint8_t>& key_blob,
                              const std::string& file) {
     unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
     if (!fd.ok()) {
diff --git a/compos/src/compilation.rs b/compos/src/compilation.rs
new file mode 100644
index 0000000..24266e6
--- /dev/null
+++ b/compos/src/compilation.rs
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::{anyhow, bail, Context, Result};
+use libc::getxattr;
+use log::error;
+use minijail::{self, Minijail};
+use std::ffi::CString;
+use std::fs::File;
+use std::io;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::path::Path;
+
+use authfs_aidl_interface::aidl::com::android::virt::fs::{
+    AuthFsConfig::AuthFsConfig, IAuthFs::IAuthFs, IAuthFsService::IAuthFsService,
+    InputFdAnnotation::InputFdAnnotation, OutputFdAnnotation::OutputFdAnnotation,
+};
+use authfs_aidl_interface::binder::{ParcelFileDescriptor, Strong};
+use compos_aidl_interface::aidl::com::android::compos::Metadata::Metadata;
+
+/// The number that represents the file descriptor number expected by the task. The number may be
+/// meaningless in the current process.
+pub type PseudoRawFd = i32;
+
+const SHA256_HASH_SIZE: usize = 32;
+type Sha256Hash = [u8; SHA256_HASH_SIZE];
+
+pub enum CompilerOutput {
+    /// Fs-verity digests of output files, if the compiler finishes successfully.
+    Digests { oat: Sha256Hash, vdex: Sha256Hash, image: Sha256Hash },
+    /// Exit code returned by the compiler, if not 0.
+    ExitCode(i8),
+}
+
+struct CompilerOutputParcelFds {
+    oat: ParcelFileDescriptor,
+    vdex: ParcelFileDescriptor,
+    image: ParcelFileDescriptor,
+}
+
+/// Runs the compiler with the given flags, with file descriptors described in `metadata` retrieved
+/// via `authfs_service`. Returns the fs-verity digests of the output files on success, or the
+/// compiler's exit code on failure.
+pub fn compile(
+    compiler_path: &Path,
+    compiler_args: &[String],
+    authfs_service: Strong<dyn IAuthFsService>,
+    metadata: &Metadata,
+) -> Result<CompilerOutput> {
+    // Mount authfs (via authfs_service). The authfs instance unmounts once the `authfs` variable
+    // is out of scope.
+    let authfs_config = build_authfs_config(metadata);
+    let authfs = authfs_service.mount(&authfs_config)?;
+
+    // The task expects to receive FD numbers that match its flags (e.g. --zip-fd=42) prepared
+    // on the host side. Since the local FD opened from authfs (e.g. /authfs/42) may not match
+    // the task's expectation, prepare a FD mapping and let minijail prepare the correct FD
+    // setup.
+    let fd_mapping =
+        open_authfs_files_for_fd_mapping(&authfs, &authfs_config).context("Open on authfs")?;
+
+    let jail =
+        spawn_jailed_task(compiler_path, compiler_args, fd_mapping).context("Spawn dex2oat")?;
+    let jail_result = jail.wait();
+
+    let parcel_fds = parse_compiler_args(&authfs, compiler_args)?;
+    let oat_file: &File = parcel_fds.oat.as_ref();
+    let vdex_file: &File = parcel_fds.vdex.as_ref();
+    let image_file: &File = parcel_fds.image.as_ref();
+
+    match jail_result {
+        Ok(()) => Ok(CompilerOutput::Digests {
+            oat: fsverity_measure(oat_file.as_raw_fd())?,
+            vdex: fsverity_measure(vdex_file.as_raw_fd())?,
+            image: fsverity_measure(image_file.as_raw_fd())?,
+        }),
+        Err(minijail::Error::ReturnCode(exit_code)) => {
+            error!("dex2oat failed with exit code {}", exit_code);
+            Ok(CompilerOutput::ExitCode(exit_code as i8))
+        }
+        Err(e) => {
+            bail!("Unexpected minijail error: {}", e)
+        }
+    }
+}
+
+fn parse_compiler_args(
+    authfs: &Strong<dyn IAuthFs>,
+    args: &[String],
+) -> Result<CompilerOutputParcelFds> {
+    const OAT_FD_PREFIX: &str = "--oat-fd=";
+    const VDEX_FD_PREFIX: &str = "--output-vdex-fd=";
+    const IMAGE_FD_PREFIX: &str = "--image-fd=";
+    const APP_IMAGE_FD_PREFIX: &str = "--app-image-fd=";
+
+    let mut oat = None;
+    let mut vdex = None;
+    let mut image = None;
+
+    for arg in args {
+        if let Some(value) = arg.strip_prefix(OAT_FD_PREFIX) {
+            let fd = value.parse::<RawFd>().context("Invalid --oat-fd flag")?;
+            debug_assert!(oat.is_none());
+            oat = Some(authfs.openFile(fd, false)?);
+        } else if let Some(value) = arg.strip_prefix(VDEX_FD_PREFIX) {
+            let fd = value.parse::<RawFd>().context("Invalid --output-vdex-fd flag")?;
+            debug_assert!(vdex.is_none());
+            vdex = Some(authfs.openFile(fd, false)?);
+        } else if let Some(value) = arg.strip_prefix(IMAGE_FD_PREFIX) {
+            let fd = value.parse::<RawFd>().context("Invalid --image-fd flag")?;
+            debug_assert!(image.is_none());
+            image = Some(authfs.openFile(fd, false)?);
+        } else if let Some(value) = arg.strip_prefix(APP_IMAGE_FD_PREFIX) {
+            let fd = value.parse::<RawFd>().context("Invalid --app-image-fd flag")?;
+            debug_assert!(image.is_none());
+            image = Some(authfs.openFile(fd, false)?);
+        }
+    }
+
+    Ok(CompilerOutputParcelFds {
+        oat: oat.ok_or_else(|| anyhow!("Missing --oat-fd"))?,
+        vdex: vdex.ok_or_else(|| anyhow!("Missing --vdex-fd"))?,
+        image: image.ok_or_else(|| anyhow!("Missing --image-fd or --app-image-fd"))?,
+    })
+}
+
+fn build_authfs_config(metadata: &Metadata) -> AuthFsConfig {
+    AuthFsConfig {
+        port: 3264, // TODO: support dynamic port
+        inputFdAnnotations: metadata
+            .input_fd_annotations
+            .iter()
+            .map(|x| InputFdAnnotation { fd: x.fd, fileSize: x.file_size })
+            .collect(),
+        outputFdAnnotations: metadata
+            .output_fd_annotations
+            .iter()
+            .map(|x| OutputFdAnnotation { fd: x.fd })
+            .collect(),
+    }
+}
+
+fn open_authfs_files_for_fd_mapping(
+    authfs: &Strong<dyn IAuthFs>,
+    config: &AuthFsConfig,
+) -> Result<Vec<(ParcelFileDescriptor, PseudoRawFd)>> {
+    let mut fd_mapping = Vec::new();
+
+    let results: Result<Vec<_>> = config
+        .inputFdAnnotations
+        .iter()
+        .map(|annotation| Ok((authfs.openFile(annotation.fd, false)?, annotation.fd)))
+        .collect();
+    fd_mapping.append(&mut results?);
+
+    let results: Result<Vec<_>> = config
+        .outputFdAnnotations
+        .iter()
+        .map(|annotation| Ok((authfs.openFile(annotation.fd, true)?, annotation.fd)))
+        .collect();
+    fd_mapping.append(&mut results?);
+
+    Ok(fd_mapping)
+}
+
+fn spawn_jailed_task(
+    executable: &Path,
+    args: &[String],
+    fd_mapping: Vec<(ParcelFileDescriptor, PseudoRawFd)>,
+) -> Result<Minijail> {
+    // TODO(b/185175567): Run in a more restricted sandbox.
+    let jail = Minijail::new()?;
+    let preserve_fds: Vec<_> = fd_mapping.iter().map(|(f, id)| (f.as_raw_fd(), *id)).collect();
+    let _pid = jail.run_remap(executable, preserve_fds.as_slice(), args)?;
+    Ok(jail)
+}
+
+fn fsverity_measure(fd: RawFd) -> Result<Sha256Hash> {
+    // TODO(b/196635431): Unfortunately, the FUSE API doesn't allow authfs to implement the standard
+    // fs-verity ioctls. Until the kernel allows, use the alternative xattr that authfs provides.
+    let path = CString::new(format!("/proc/self/fd/{}", fd).as_str()).unwrap();
+    let name = CString::new("authfs.fsverity.digest").unwrap();
+    let mut buf = [0u8; SHA256_HASH_SIZE];
+    // SAFETY: getxattr should not write beyond the given buffer size.
+    let size = unsafe {
+        getxattr(path.as_ptr(), name.as_ptr(), buf.as_mut_ptr() as *mut libc::c_void, buf.len())
+    };
+    if size < 0 {
+        bail!("Failed to getxattr: {}", io::Error::last_os_error());
+    } else if size != SHA256_HASH_SIZE as isize {
+        bail!("Unexpected hash size: {}", size);
+    } else {
+        Ok(buf)
+    }
+}
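
Illustration (not part of the patch): a standalone sketch of the `--*-fd=N` convention that parse_compiler_args() relies on, showing how dex2oat-style flags map to pseudo FD numbers.

```rust
// Standalone sketch of the "--oat-fd=N" style parsing used by parse_compiler_args().
fn pseudo_fds(args: &[&str]) -> Vec<(String, i32)> {
    const PREFIXES: [&str; 4] =
        ["--oat-fd=", "--output-vdex-fd=", "--image-fd=", "--app-image-fd="];
    let mut found = Vec::new();
    for arg in args {
        for &prefix in PREFIXES.iter() {
            if let Some(value) = arg.strip_prefix(prefix) {
                if let Ok(fd) = value.parse::<i32>() {
                    found.push((prefix.trim_end_matches('=').to_string(), fd));
                }
            }
        }
    }
    found
}

// pseudo_fds(&["--oat-fd=10", "--output-vdex-fd=11", "--image-fd=12"])
// => [("--oat-fd", 10), ("--output-vdex-fd", 11), ("--image-fd", 12)]
```
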
diff --git a/compos/src/compos_key_main.rs b/compos/src/compos_key_main.rs
deleted file mode 100644
index 9d57e4d..0000000
--- a/compos/src/compos_key_main.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Run the CompOS key management service, either in the host using normal Binder or in the
-//! VM using RPC Binder.
-
-mod compos_key_service;
-mod compsvc;
-mod signer;
-
-use crate::compos_key_service::KeystoreNamespace;
-use anyhow::{bail, Context, Result};
-use binder::unstable_api::AsNative;
-use compos_aidl_interface::binder::{add_service, ProcessState};
-use log::{info, Level};
-
-const LOG_TAG: &str = "CompOsKeyService";
-const OUR_SERVICE_NAME: &str = "android.system.composkeyservice";
-const OUR_VSOCK_PORT: u32 = 3142;
-
-fn main() -> Result<()> {
-    android_logger::init_once(
-        android_logger::Config::default().with_tag(LOG_TAG).with_min_level(Level::Info),
-    );
-
-    let matches = clap::App::new("compos_key_main")
-        .arg(clap::Arg::with_name("rpc_binder").long("rpc-binder"))
-        .get_matches();
-
-    let rpc_binder = matches.is_present("rpc_binder");
-
-    let key_namespace =
-        if rpc_binder { KeystoreNamespace::VmPayload } else { KeystoreNamespace::Odsign };
-    let mut service = compos_key_service::new(key_namespace)?.as_binder();
-
-    if rpc_binder {
-        info!("Starting RPC service");
-        // SAFETY: Service ownership is transferring to the server and won't be valid afterward.
-        // Plus the binder objects are threadsafe.
-        let retval = unsafe {
-            binder_rpc_unstable_bindgen::RunRpcServer(
-                service.as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder,
-                OUR_VSOCK_PORT,
-            )
-        };
-        if retval {
-            info!("RPC server has shut down gracefully");
-        } else {
-            bail!("Premature termination of RPC server");
-        }
-    } else {
-        info!("Starting binder service");
-        add_service(OUR_SERVICE_NAME, service).context("Adding service failed")?;
-        info!("It's alive!");
-
-        ProcessState::join_thread_pool();
-    }
-
-    Ok(())
-}
diff --git a/compos/src/compos_key_service.rs b/compos/src/compos_key_service.rs
index 779b798..92b04f2 100644
--- a/compos/src/compos_key_service.rs
+++ b/compos/src/compos_key_service.rs
@@ -16,8 +16,6 @@
 //! access to Keystore in the VM, but not persistent storage; instead the host stores the key
 //! on our behalf via this service.
 
-use crate::compsvc;
-use crate::signer::Signer;
 use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
     Algorithm::Algorithm, Digest::Digest, KeyParameter::KeyParameter,
     KeyParameterValue::KeyParameterValue, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
@@ -27,20 +25,12 @@
     Domain::Domain, IKeystoreSecurityLevel::IKeystoreSecurityLevel,
     IKeystoreService::IKeystoreService, KeyDescriptor::KeyDescriptor,
 };
+use android_system_keystore2::binder::{wait_for_interface, Strong};
 use anyhow::{anyhow, Context, Result};
-use compos_aidl_interface::aidl::com::android::compos::{
-    CompOsKeyData::CompOsKeyData,
-    ICompOsKeyService::{BnCompOsKeyService, ICompOsKeyService},
-    ICompService::ICompService,
-};
-use compos_aidl_interface::binder::{
-    self, wait_for_interface, BinderFeatures, ExceptionCode, Interface, Status, Strong,
-};
-use log::warn;
+use compos_aidl_interface::aidl::com::android::compos::CompOsKeyData::CompOsKeyData;
 use ring::rand::{SecureRandom, SystemRandom};
 use ring::signature;
 use scopeguard::ScopeGuard;
-use std::ffi::CString;
 
 /// Keystore2 namespace IDs, used for access control to keys.
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
@@ -52,23 +42,6 @@
     VmPayload = 140,
 }
 
-/// Constructs a binder object that implements ICompOsKeyService. namespace is the Keystore2 namespace to
-/// use for the keys.
-pub fn new(namespace: KeystoreNamespace) -> Result<Strong<dyn ICompOsKeyService>> {
-    let keystore_service = wait_for_interface::<dyn IKeystoreService>(KEYSTORE_SERVICE_NAME)
-        .context("No Keystore service")?;
-
-    let service = CompOsKeyService {
-        namespace,
-        random: SystemRandom::new(),
-        security_level: keystore_service
-            .getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT)
-            .context("Getting SecurityLevel failed")?,
-    };
-
-    Ok(BnCompOsKeyService::new_binder(service, BinderFeatures::default()))
-}
-
 const KEYSTORE_SERVICE_NAME: &str = "android.system.keystore2.IKeystoreService/default";
 const PURPOSE_SIGN: KeyParameter =
     KeyParameter { tag: Tag::PURPOSE, value: KeyParameterValue::KeyPurpose(KeyPurpose::SIGN) };
@@ -90,65 +63,31 @@
 const BLOB_KEY_DESCRIPTOR: KeyDescriptor =
     KeyDescriptor { domain: Domain::BLOB, nspace: 0, alias: None, blob: None };
 
+/// An internal service for CompOS key management.
 #[derive(Clone)]
-struct CompOsKeyService {
+pub struct CompOsKeyService {
     namespace: KeystoreNamespace,
     random: SystemRandom,
     security_level: Strong<dyn IKeystoreSecurityLevel>,
 }
 
-impl Interface for CompOsKeyService {}
+impl CompOsKeyService {
+    pub fn new(rpc_binder: bool) -> Result<Self> {
+        let keystore_service = wait_for_interface::<dyn IKeystoreService>(KEYSTORE_SERVICE_NAME)
+            .context("No Keystore service")?;
 
-impl ICompOsKeyService for CompOsKeyService {
-    fn generateSigningKey(&self) -> binder::Result<CompOsKeyData> {
-        self.do_generate()
-            .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
-    }
-
-    fn verifySigningKey(&self, key_blob: &[u8], public_key: &[u8]) -> binder::Result<bool> {
-        Ok(if let Err(e) = self.do_verify(key_blob, public_key) {
-            warn!("Signing key verification failed: {}", e.to_string());
-            false
-        } else {
-            true
+        let namespace =
+            if rpc_binder { KeystoreNamespace::VmPayload } else { KeystoreNamespace::Odsign };
+        Ok(CompOsKeyService {
+            namespace,
+            random: SystemRandom::new(),
+            security_level: keystore_service
+                .getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT)
+                .context("Getting SecurityLevel failed")?,
         })
     }
 
-    fn sign(&self, key_blob: &[u8], data: &[u8]) -> binder::Result<Vec<u8>> {
-        self.do_sign(key_blob, data)
-            .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
-    }
-
-    fn getCompService(&self, key_blob: &[u8]) -> binder::Result<Strong<dyn ICompService>> {
-        let signer =
-            Box::new(CompOsSigner { key_blob: key_blob.to_owned(), key_service: self.clone() });
-        let debuggable = true;
-        Ok(compsvc::new_binder(
-            "/apex/com.android.art/bin/dex2oat64".to_owned(),
-            debuggable,
-            Some(signer),
-        ))
-    }
-}
-
-/// Constructs a new Binder error `Status` with the given `ExceptionCode` and message.
-fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
-    Status::new_exception(exception, CString::new(message.as_ref()).ok().as_deref())
-}
-
-struct CompOsSigner {
-    key_blob: Vec<u8>,
-    key_service: CompOsKeyService,
-}
-
-impl Signer for CompOsSigner {
-    fn sign(&self, data: &[u8]) -> Result<Vec<u8>> {
-        self.key_service.do_sign(&self.key_blob, data)
-    }
-}
-
-impl CompOsKeyService {
-    fn do_generate(&self) -> Result<CompOsKeyData> {
+    pub fn do_generate(&self) -> Result<CompOsKeyData> {
         let key_descriptor = KeyDescriptor { nspace: self.namespace as i64, ..BLOB_KEY_DESCRIPTOR };
         let key_parameters =
             [PURPOSE_SIGN, ALGORITHM, PADDING, DIGEST, KEY_SIZE, EXPONENT, NO_AUTH_REQUIRED];
@@ -168,7 +107,7 @@
         }
     }
 
-    fn do_verify(&self, key_blob: &[u8], public_key: &[u8]) -> Result<()> {
+    pub fn do_verify(&self, key_blob: &[u8], public_key: &[u8]) -> Result<()> {
         let mut data = [0u8; 32];
         self.random.fill(&mut data).context("No random data")?;
 
@@ -181,7 +120,7 @@
         Ok(())
     }
 
-    fn do_sign(&self, key_blob: &[u8], data: &[u8]) -> Result<Vec<u8>> {
+    pub fn do_sign(&self, key_blob: &[u8], data: &[u8]) -> Result<Vec<u8>> {
         let key_descriptor = KeyDescriptor {
             nspace: self.namespace as i64,
             blob: Some(key_blob.to_vec()),
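
Illustration (not part of the patch): how the refactored key service is meant to be driven. This needs a live Keystore2 instance, and the CompOsKeyData field names below (keyBlob, publicKey) are assumptions, not verified against the AIDL.

```rust
// Hedged sketch; requires Keystore2 and assumes CompOsKeyData exposes keyBlob/publicKey fields.
fn key_round_trip() -> anyhow::Result<()> {
    let service = CompOsKeyService::new(/* rpc_binder= */ true)?;
    let key_data = service.do_generate()?;
    // Prove the blob and public key belong together before trusting them.
    service.do_verify(&key_data.keyBlob, &key_data.publicKey)?;
    // Sign arbitrary data with the encrypted private key blob.
    let _signature = service.do_sign(&key_data.keyBlob, b"hello compos")?;
    Ok(())
}
```
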
diff --git a/compos/src/compsvc.rs b/compos/src/compsvc.rs
index 14b520e..8fe4795 100644
--- a/compos/src/compsvc.rs
+++ b/compos/src/compsvc.rs
@@ -14,167 +14,93 @@
  * limitations under the License.
  */
 
-//! compsvc is a service to run computational tasks in a PVM upon request. It is able to set up
+//! compsvc is a service to run compilation tasks in a PVM upon request. It is able to set up
 //! file descriptors backed by authfs (via authfs_service) and pass the file descriptors to the
-//! actual tasks.
+//! actual compiler.
 
 use anyhow::Result;
-use log::error;
-use minijail::{self, Minijail};
+use log::{debug, warn};
 use std::ffi::CString;
-use std::os::unix::io::AsRawFd;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
 
-use crate::signer::Signer;
-use authfs_aidl_interface::aidl::com::android::virt::fs::{
-    AuthFsConfig::AuthFsConfig, IAuthFs::IAuthFs, IAuthFsService::IAuthFsService,
-    InputFdAnnotation::InputFdAnnotation, OutputFdAnnotation::OutputFdAnnotation,
+use crate::compilation::{compile, CompilerOutput};
+use crate::compos_key_service::CompOsKeyService;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFsService::IAuthFsService;
+use compos_aidl_interface::aidl::com::android::compos::{
+    CompOsKeyData::CompOsKeyData,
+    ICompOsService::{BnCompOsService, ICompOsService},
+    Metadata::Metadata,
 };
-use authfs_aidl_interface::binder::ParcelFileDescriptor;
-use compos_aidl_interface::aidl::com::android::compos::ICompService::{
-    BnCompService, ICompService,
-};
-use compos_aidl_interface::aidl::com::android::compos::Metadata::Metadata;
 use compos_aidl_interface::binder::{
-    BinderFeatures, ExceptionCode, Interface, Result as BinderResult, Status, StatusCode, Strong,
+    BinderFeatures, ExceptionCode, Interface, Result as BinderResult, Status, Strong,
 };
 
 const AUTHFS_SERVICE_NAME: &str = "authfs_service";
+const DEX2OAT_PATH: &str = "/apex/com.android.art/bin/dex2oat64";
 
-/// The number that represents the file descriptor number expecting by the task. The number may be
-/// meaningless in the current process.
-pub type PseudoRawFd = i32;
-
-/// Constructs a binder object that implements ICompService. task_bin is the path to the binary that will
-/// be run when execute() is called. If debuggable is true then stdout/stderr from the binary will be
-/// available for debugging.
-pub fn new_binder(
-    task_bin: String,
-    debuggable: bool,
-    signer: Option<Box<dyn Signer>>,
-) -> Strong<dyn ICompService> {
-    let service = CompService { task_bin: PathBuf::from(task_bin), debuggable, signer };
-    BnCompService::new_binder(service, BinderFeatures::default())
+/// Constructs a binder object that implements ICompOsService.
+pub fn new_binder(rpc_binder: bool) -> Result<Strong<dyn ICompOsService>> {
+    let service = CompOsService {
+        dex2oat_path: PathBuf::from(DEX2OAT_PATH),
+        key_service: CompOsKeyService::new(rpc_binder)?,
+    };
+    Ok(BnCompOsService::new_binder(service, BinderFeatures::default()))
 }
 
-struct CompService {
-    task_bin: PathBuf,
-    debuggable: bool,
-    #[allow(dead_code)] // TODO: Make use of this
-    signer: Option<Box<dyn Signer>>,
+struct CompOsService {
+    dex2oat_path: PathBuf,
+    key_service: CompOsKeyService,
 }
 
-impl Interface for CompService {}
+impl Interface for CompOsService {}
 
-impl ICompService for CompService {
+impl ICompOsService for CompOsService {
     fn execute(&self, args: &[String], metadata: &Metadata) -> BinderResult<i8> {
-        // Mount authfs (via authfs_service).
-        let authfs_config = build_authfs_config(metadata);
-        let authfs = get_authfs_service()?.mount(&authfs_config)?;
-
-        // The task expects to receive FD numbers that match its flags (e.g. --zip-fd=42) prepared
-        // on the host side. Since the local FD opened from authfs (e.g. /authfs/42) may not match
-        // the task's expectation, prepare a FD mapping and let minijail prepare the correct FD
-        // setup.
-        let fd_mapping =
-            open_authfs_files_for_fd_mapping(&authfs, &authfs_config).map_err(|e| {
-                new_binder_exception(
-                    ExceptionCode::SERVICE_SPECIFIC,
-                    format!("Failed to create FDs on authfs: {:?}", e),
-                )
-            })?;
-
-        let jail =
-            spawn_jailed_task(&self.task_bin, args, fd_mapping, self.debuggable).map_err(|e| {
-                new_binder_exception(
-                    ExceptionCode::SERVICE_SPECIFIC,
-                    format!("Failed to spawn the task: {:?}", e),
-                )
-            })?;
-        let jail_result = jail.wait();
-
-        // Be explicit about the lifetime, which should last at least until the task is finished.
-        drop(authfs);
-
-        match jail_result {
-            Ok(_) => Ok(0), // TODO(b/161471326): Sign the output on succeed.
-            Err(minijail::Error::ReturnCode(exit_code)) => {
-                error!("Task failed with exit code {}", exit_code);
-                Err(Status::from(StatusCode::FAILED_TRANSACTION))
+        let authfs_service = get_authfs_service()?;
+        let output = compile(&self.dex2oat_path, args, authfs_service, metadata).map_err(|e| {
+            new_binder_exception(
+                ExceptionCode::SERVICE_SPECIFIC,
+                format!("Compilation failed: {}", e),
+            )
+        })?;
+        match output {
+            CompilerOutput::Digests { oat, vdex, image } => {
+                // TODO(b/161471326): Sign the output on succeed.
+                debug!("oat fs-verity digest: {:02x?}", oat);
+                debug!("vdex fs-verity digest: {:02x?}", vdex);
+                debug!("image fs-verity digest: {:02x?}", image);
+                Ok(0)
             }
-            Err(e) => {
-                error!("Unexpected minijail error: {}", e);
-                Err(Status::from(StatusCode::UNKNOWN_ERROR))
-            }
+            CompilerOutput::ExitCode(exit_code) => Ok(exit_code),
         }
     }
+
+    fn generateSigningKey(&self) -> BinderResult<CompOsKeyData> {
+        self.key_service
+            .do_generate()
+            .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
+    }
+
+    fn verifySigningKey(&self, key_blob: &[u8], public_key: &[u8]) -> BinderResult<bool> {
+        Ok(if let Err(e) = self.key_service.do_verify(key_blob, public_key) {
+            warn!("Signing key verification failed: {}", e.to_string());
+            false
+        } else {
+            true
+        })
+    }
+
+    fn sign(&self, key_blob: &[u8], data: &[u8]) -> BinderResult<Vec<u8>> {
+        self.key_service
+            .do_sign(key_blob, data)
+            .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
+    }
 }
 
 fn get_authfs_service() -> BinderResult<Strong<dyn IAuthFsService>> {
     Ok(authfs_aidl_interface::binder::get_interface(AUTHFS_SERVICE_NAME)?)
 }
 
-fn build_authfs_config(metadata: &Metadata) -> AuthFsConfig {
-    AuthFsConfig {
-        port: 3264, // TODO: support dynamic port
-        inputFdAnnotations: metadata
-            .input_fd_annotations
-            .iter()
-            .map(|x| InputFdAnnotation { fd: x.fd, fileSize: x.file_size })
-            .collect(),
-        outputFdAnnotations: metadata
-            .output_fd_annotations
-            .iter()
-            .map(|x| OutputFdAnnotation { fd: x.fd })
-            .collect(),
-    }
-}
-
-fn open_authfs_files_for_fd_mapping(
-    authfs: &Strong<dyn IAuthFs>,
-    config: &AuthFsConfig,
-) -> Result<Vec<(ParcelFileDescriptor, PseudoRawFd)>> {
-    let mut fd_mapping = Vec::new();
-
-    let results: Result<Vec<_>> = config
-        .inputFdAnnotations
-        .iter()
-        .map(|annotation| Ok((authfs.openFile(annotation.fd, false)?, annotation.fd)))
-        .collect();
-    fd_mapping.append(&mut results?);
-
-    let results: Result<Vec<_>> = config
-        .outputFdAnnotations
-        .iter()
-        .map(|annotation| Ok((authfs.openFile(annotation.fd, true)?, annotation.fd)))
-        .collect();
-    fd_mapping.append(&mut results?);
-
-    Ok(fd_mapping)
-}
-
-fn spawn_jailed_task(
-    executable: &Path,
-    args: &[String],
-    fd_mapping: Vec<(ParcelFileDescriptor, PseudoRawFd)>,
-    debuggable: bool,
-) -> Result<Minijail> {
-    // TODO(b/185175567): Run in a more restricted sandbox.
-    let jail = Minijail::new()?;
-
-    let mut preserve_fds = if debuggable {
-        // Inherit/redirect stdout/stderr for debugging, assuming no conflict
-        vec![(1, 1), (2, 2)]
-    } else {
-        vec![]
-    };
-
-    preserve_fds.extend(fd_mapping.iter().map(|(f, id)| (f.as_raw_fd(), *id)));
-
-    let _pid = jail.run_remap(executable, preserve_fds.as_slice(), args)?;
-    Ok(jail)
-}
-
 fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
     Status::new_exception(exception, CString::new(message.as_ref()).as_deref().ok())
 }
diff --git a/compos/src/compsvc_main.rs b/compos/src/compsvc_main.rs
index 9f12132..48e37b6 100644
--- a/compos/src/compsvc_main.rs
+++ b/compos/src/compsvc_main.rs
@@ -16,11 +16,10 @@
 
 //! A tool to start a standalone compsvc server, either in the host using Binder or in a VM using
 //! RPC binder over vsock.
-//!
-//! Example:
-//! $ compsvc /system/bin/sleep
 
 mod common;
+mod compilation;
+mod compos_key_service;
 mod compsvc;
 mod signer;
 
@@ -31,27 +30,17 @@
 use log::debug;
 
 struct Config {
-    task_bin: String,
     rpc_binder: bool,
-    debuggable: bool,
 }
 
 fn parse_args() -> Result<Config> {
     #[rustfmt::skip]
     let matches = clap::App::new("compsvc")
-        .arg(clap::Arg::with_name("debug")
-             .long("debug"))
-        .arg(clap::Arg::with_name("task_bin")
-             .required(true))
         .arg(clap::Arg::with_name("rpc_binder")
              .long("rpc-binder"))
         .get_matches();
 
-    Ok(Config {
-        task_bin: matches.value_of("task_bin").unwrap().to_string(),
-        rpc_binder: matches.is_present("rpc_binder"),
-        debuggable: matches.is_present("debug"),
-    })
+    Ok(Config { rpc_binder: matches.is_present("rpc_binder") })
 }
 
 fn main() -> Result<()> {
@@ -60,7 +49,7 @@
     );
 
     let config = parse_args()?;
-    let mut service = compsvc::new_binder(config.task_bin, config.debuggable, None).as_binder();
+    let mut service = compsvc::new_binder(config.rpc_binder)?.as_binder();
     if config.rpc_binder {
         debug!("compsvc is starting as a rpc service.");
         // SAFETY: Service ownership is transferring to the server and won't be valid afterward.
diff --git a/compos/src/pvm_exec.rs b/compos/src/pvm_exec.rs
index 03fbf72..2218d10 100644
--- a/compos/src/pvm_exec.rs
+++ b/compos/src/pvm_exec.rs
@@ -36,7 +36,7 @@
 use std::process::exit;
 
 use compos_aidl_interface::aidl::com::android::compos::{
-    ICompService::ICompService, InputFdAnnotation::InputFdAnnotation, Metadata::Metadata,
+    ICompOsService::ICompOsService, InputFdAnnotation::InputFdAnnotation, Metadata::Metadata,
     OutputFdAnnotation::OutputFdAnnotation,
 };
 use compos_aidl_interface::binder::Strong;
@@ -46,18 +46,18 @@
 
 const FD_SERVER_BIN: &str = "/apex/com.android.virt/bin/fd_server";
 
-fn get_local_service() -> Result<Strong<dyn ICompService>> {
+fn get_local_service() -> Result<Strong<dyn ICompOsService>> {
     compos_aidl_interface::binder::get_interface(SERVICE_NAME).context("get local binder")
 }
 
-fn get_rpc_binder(cid: u32) -> Result<Strong<dyn ICompService>> {
+fn get_rpc_binder(cid: u32) -> Result<Strong<dyn ICompOsService>> {
     // SAFETY: AIBinder returned by RpcClient has correct reference count, and the ownership can be
     // safely taken by new_spibinder.
     let ibinder = unsafe {
         new_spibinder(binder_rpc_unstable_bindgen::RpcClient(cid, VSOCK_PORT) as *mut AIBinder)
     };
     if let Some(ibinder) = ibinder {
-        <dyn ICompService>::try_from(ibinder).context("Cannot connect to RPC service")
+        <dyn ICompOsService>::try_from(ibinder).context("Cannot connect to RPC service")
     } else {
         bail!("Invalid raw AIBinder")
     }
diff --git a/compos/tests/java/android/compos/test/ComposKeyTestCase.java b/compos/tests/java/android/compos/test/ComposKeyTestCase.java
index 654dc0b..6ef82f7 100644
--- a/compos/tests/java/android/compos/test/ComposKeyTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposKeyTestCase.java
@@ -131,7 +131,7 @@
                         getBuild(),
                         apkName,
                         packageName,
-                        "assets/key_service_vm_config.json",
+                        "assets/vm_config.json",
                         /* debug */ true);
         adbConnectToMicrodroid(getDevice(), mCid);
     }
@@ -145,6 +145,6 @@
     }
 
     private boolean isServiceRunning() {
-        return tryRunOnMicrodroid("pidof compos_key_main") != null;
+        return tryRunOnMicrodroid("pidof compsvc") != null;
     }
 }
diff --git a/compos/tests/java/android/compos/test/ComposTestCase.java b/compos/tests/java/android/compos/test/ComposTestCase.java
index 4471e63..f69b7b7 100644
--- a/compos/tests/java/android/compos/test/ComposTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposTestCase.java
@@ -138,7 +138,7 @@
                         apkName,
                         packageName,
                         "assets/vm_config.json",
-                        /* debug */ true);
+                        /* debug */ false);
         adbConnectToMicrodroid(getDevice(), mCid);
     }
 
diff --git a/idsig/Android.bp b/idsig/Android.bp
new file mode 100644
index 0000000..90525ff
--- /dev/null
+++ b/idsig/Android.bp
@@ -0,0 +1,34 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+    name: "libidsig.defaults",
+    crate_name: "idsig",
+    srcs: ["src/lib.rs"],
+    edition: "2018",
+    prefer_rlib: true,
+    rustlibs: [
+        "libanyhow",
+        "libring",
+        "libnum_traits",
+    ],
+    proc_macros: ["libnum_derive"],
+    multilib: {
+        lib32: {
+            enabled: false,
+        },
+    },
+}
+
+rust_library {
+    name: "libidsig",
+    defaults: ["libidsig.defaults"],
+}
+
+rust_test {
+    name: "libidsig.test",
+    defaults: ["libidsig.defaults"],
+    test_suites: ["general-tests"],
+    compile_multilib: "first",
+}
diff --git a/idsig/Cargo.toml b/idsig/Cargo.toml
new file mode 100644
index 0000000..91b2842
--- /dev/null
+++ b/idsig/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "idsig"
+version = "0.1.0"
+authors = ["Jiyong Park <jiyong@google.com>"]
+edition = "2018"
+
+[dependencies]
+anyhow = "1.0"
+ring = "0.16"
+num-derive = "0.3"
+num-traits = "0.2"
diff --git a/apkdmverity/src/apksigv4.rs b/idsig/src/apksigv4.rs
similarity index 75%
rename from apkdmverity/src/apksigv4.rs
rename to idsig/src/apksigv4.rs
index fef21a5..6f4603d 100644
--- a/apkdmverity/src/apksigv4.rs
+++ b/idsig/src/apksigv4.rs
@@ -22,36 +22,56 @@
 // `apksigv4` module provides routines to decode the idsig file as defined in [APK signature
 // scheme v4] (https://source.android.com/security/apksigning/v4).
 
+/// `V4Signature` provides access to the various fields in an idsig file.
 #[derive(Debug)]
 pub struct V4Signature {
+    /// Version of the header. Should be 2.
     pub version: Version,
+    /// Provides access to the information about how the APK is hashed.
     pub hashing_info: HashingInfo,
+    /// Provides access to the information that can be used to verify this file
     pub signing_info: SigningInfo,
+    /// Total size of the merkle tree
     pub merkle_tree_size: u32,
+    /// Offset of the merkle tree in the idsig file
     pub merkle_tree_offset: u64,
 }
 
+/// `HashingInfo` provides information about how the APK is hashed.
 #[derive(Debug)]
 pub struct HashingInfo {
+    /// Hash algorithm used when creating the merkle tree for the APK.
     pub hash_algorithm: HashAlgorithm,
+    /// Log (base 2) of the block size used when creating the merkle tree; 12 if a 4K block was used.
     pub log2_blocksize: u8,
+    /// The salt used when creating the merkle tree. 32 bytes max.
     pub salt: Box<[u8]>,
+    /// The root hash of the merkle tree created.
     pub raw_root_hash: Box<[u8]>,
 }
 
+/// `SigningInfo` provides information that can be used to verify the idsig file.
 #[derive(Debug)]
 pub struct SigningInfo {
+    /// Digest of the APK that this idsig file is for.
     pub apk_digest: Box<[u8]>,
+    /// Certificate of the signer that signed this idsig file. ASN.1 DER form.
     pub x509_certificate: Box<[u8]>,
+    /// Free-form binary data.
     pub additional_data: Box<[u8]>,
+    /// Public key of the signer in ASN.1 DER form. This must match the `x509_certificate` field.
     pub public_key: Box<[u8]>,
+    /// Signature algorithm used to sign this file.
     pub signature_algorithm_id: SignatureAlgorithmId,
+    /// The signature of this file.
     pub signature: Box<[u8]>,
 }
 
+/// Version of the idsig file format
 #[derive(Debug, PartialEq, FromPrimitive)]
 #[repr(u32)]
 pub enum Version {
+    /// Version 2, the only supported version.
     V2 = 2,
 }
 
@@ -61,9 +81,11 @@
     }
 }
 
+/// Hash algorithm that can be used for an idsig file.
 #[derive(Debug, PartialEq, FromPrimitive)]
 #[repr(u32)]
 pub enum HashAlgorithm {
+    /// SHA2-256
     SHA256 = 1,
 }
 
@@ -73,16 +95,24 @@
     }
 }
 
+/// Signature algorithm that can be used for an idsig file
 #[derive(Debug, PartialEq, FromPrimitive)]
 #[allow(non_camel_case_types)]
 #[repr(u32)]
 pub enum SignatureAlgorithmId {
+    /// RSASSA-PSS with SHA2-256 digest, SHA2-256 MGF1, 32 bytes of salt, trailer: 0xbc
     RSASSA_PSS_SHA2_256 = 0x0101,
+    /// RSASSA-PSS with SHA2-512 digest, SHA2-512 MGF1, 64 bytes of salt, trailer: 0xbc
     RSASSA_PSS_SHA2_512 = 0x0102,
+    /// RSASSA-PKCS1-v1_5 with SHA2-256 digest.
     RSASSA_PKCS1_SHA2_256 = 0x0103,
+    /// RSASSA-PKCS1-v1_5 with SHA2-512 digest.
     RSASSA_PKCS1_SHA2_512 = 0x0104,
+    /// ECDSA with SHA2-256 digest.
     ECDSA_SHA2_256 = 0x0201,
+    /// ECDSA with SHA2-512 digest.
     ECDSA_SHA2_512 = 0x0202,
+    /// DSA with SHA2-256 digest
     DSA_SHA2_256 = 0x0301,
 }
 
@@ -153,10 +183,13 @@
 
 #[cfg(test)]
 mod tests {
-    use crate::util::hexstring_from;
-    use crate::*;
+    use crate::apksigv4::*;
     use std::io::Cursor;
 
+    fn hexstring_from(s: &[u8]) -> String {
+        s.iter().map(|byte| format!("{:02x}", byte)).reduce(|i, j| i + &j).unwrap_or_default()
+    }
+
     #[test]
     fn parse_idsig_file() {
         let idsig = Cursor::new(include_bytes!("../testdata/test.apk.idsig"));
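Not part of the change itself, but as orientation for readers of the new crate: the `merkle_tree_offset` and `merkle_tree_size` fields documented above are enough to locate the merkle tree inside an idsig file. A minimal sketch, assuming the caller already holds a parsed `V4Signature` (the helper name is illustrative):

```rust
use idsig::V4Signature;
use std::fs::File;
use std::io::{Read, Result, Seek, SeekFrom};

/// Reads the raw merkle tree bytes out of an idsig file, using the offset and
/// size recorded in an already-parsed `V4Signature`.
fn read_merkle_tree(idsig: &mut File, sig: &V4Signature) -> Result<Vec<u8>> {
    idsig.seek(SeekFrom::Start(sig.merkle_tree_offset))?;
    let mut tree = vec![0u8; sig.merkle_tree_size as usize];
    idsig.read_exact(&mut tree)?;
    Ok(tree)
}
```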
diff --git a/idsig/src/hashtree.rs b/idsig/src/hashtree.rs
new file mode 100644
index 0000000..a4727a9
--- /dev/null
+++ b/idsig/src/hashtree.rs
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use ring::digest::{self, Algorithm, Digest};
+use std::io::{Cursor, Read, Result, Write};
+
+/// `HashTree` is a merkle tree (and its root hash) that is compatible with fs-verity.
+pub struct HashTree {
+    /// Binary representation of the merkle tree
+    pub tree: Vec<u8>,
+    /// Root hash
+    pub root_hash: Vec<u8>,
+}
+
+impl HashTree {
+    /// Creates a merkle tree from `input`, using the given `salt` and hashing `algorithm`.
+    /// `input` is divided into `block_size`-byte chunks.
+    pub fn from<R: Read>(
+        input: &mut R,
+        input_size: usize,
+        salt: &[u8],
+        block_size: usize,
+        algorithm: &'static Algorithm,
+    ) -> Result<Self> {
+        let salt = zero_pad_salt(salt, algorithm);
+        let tree = generate_hash_tree(input, input_size, &salt, block_size, algorithm)?;
+
+        // The root hash is computed from the first block of the hash tree, or directly from the
+        // input data if no hash tree was generated (i.e. when the input fits in a single block).
+        let root_hash = if tree.is_empty() {
+            let mut data = Vec::new();
+            input.read_to_end(&mut data)?;
+            hash_one_block(&data, &salt, block_size, algorithm).as_ref().to_vec()
+        } else {
+            let first_block = &tree[0..block_size];
+            hash_one_block(first_block, &salt, block_size, algorithm).as_ref().to_vec()
+        };
+        Ok(HashTree { tree, root_hash })
+    }
+}
+
+/// Calculate hash tree for the blocks in `input`.
+///
+/// This function implements: https://www.kernel.org/doc/html/latest/filesystems/fsverity.html#merkle-tree
+///
+/// The file contents are divided into blocks, where the block size is configurable but is usually
+/// 4096 bytes. The end of the last block is zero-padded if needed. Each block is then hashed,
+/// producing the first level of hashes. Then, the hashes in this first level are grouped into
+/// blocksize-byte blocks (zero-padding the ends as needed) and these blocks are hashed,
+/// producing the second level of hashes. This proceeds up the tree until only a single block
+/// remains.
+fn generate_hash_tree<R: Read>(
+    input: &mut R,
+    input_size: usize,
+    salt: &[u8],
+    block_size: usize,
+    algorithm: &'static Algorithm,
+) -> Result<Vec<u8>> {
+    let digest_size = algorithm.output_len;
+    let levels = calc_hash_levels(input_size, block_size, digest_size);
+    let tree_size = levels.iter().map(|r| r.len()).sum();
+
+    // The contiguous memory that holds the entire merkle tree
+    let mut hash_tree = vec![0; tree_size];
+
+    for (n, cur) in levels.iter().enumerate() {
+        if n == 0 {
+            // Level 0: the (zero-padded) input stream is hashed into level 0
+            let pad_size = round_to_multiple(input_size, block_size) - input_size;
+            let mut input = input.chain(Cursor::new(vec![0; pad_size]));
+            let mut level0 = Cursor::new(&mut hash_tree[cur.start..cur.end]);
+
+            let mut a_block = vec![0; block_size];
+            let mut num_blocks = (input_size + block_size - 1) / block_size;
+            while num_blocks > 0 {
+                input.read_exact(&mut a_block)?;
+                let h = hash_one_block(&a_block, salt, block_size, algorithm);
+                level0.write_all(h.as_ref()).unwrap();
+                num_blocks -= 1;
+            }
+        } else {
+            // Intermediate levels: level n - 1 is hashed into level n
+            // Both levels belong to the same `hash_tree`. In order to have a mutable slice for
+            // level n while having a slice for level n - 1, take the mutable slice for both levels
+            // and split it.
+            let prev = &levels[n - 1];
+            let cur_and_prev = &mut hash_tree[cur.start..prev.end];
+            let (cur, prev) = cur_and_prev.split_at_mut(prev.start);
+            let mut cur = Cursor::new(cur);
+            prev.chunks(block_size).for_each(|data| {
+                let h = hash_one_block(data, salt, block_size, algorithm);
+                cur.write_all(h.as_ref()).unwrap();
+            });
+        }
+    }
+    Ok(hash_tree)
+}
+
+/// Hash one block of input using the given hash algorithm and the salt. Input might be smaller
+/// than a block, in which case it is zero-padded up to the block size.
+fn hash_one_block(
+    input: &[u8],
+    salt: &[u8],
+    block_size: usize,
+    algorithm: &'static Algorithm,
+) -> Digest {
+    let mut ctx = digest::Context::new(algorithm);
+    ctx.update(salt);
+    ctx.update(input);
+    let pad_size = block_size - input.len();
+    ctx.update(&vec![0; pad_size]);
+    ctx.finish()
+}
+
+type Range = std::ops::Range<usize>;
+
+/// Calculate the byte ranges of the hashes at each level
+fn calc_hash_levels(input_size: usize, block_size: usize, digest_size: usize) -> Vec<Range> {
+    // The input is split into multiple blocks and each block is hashed; the concatenated hashes
+    // become the input for the next level. The size of a single hash is `digest_size`.
+    let mut level_sizes = Vec::new();
+    loop {
+        // Input for this level is from either the last level (if it exists), or the input parameter.
+        let input_size = *level_sizes.last().unwrap_or(&input_size);
+        if input_size <= block_size {
+            break;
+        }
+        let num_blocks = (input_size + block_size - 1) / block_size;
+        let hashes_size = round_to_multiple(num_blocks * digest_size, block_size);
+        level_sizes.push(hashes_size);
+    }
+
+    // The hash tree is stored upside down. The top level is at offset 0. The second level comes
+    // next, and so on. Level 0 is located at the end.
+    //
+    // Given level_sizes [10, 3, 1], the offsets for each level are ...
+    //
+    // Level 2 is at offset 0
+    // Level 1 is at offset 1 (because Level 2 is of size 1)
+    // Level 0 is at offset 4 (because Level 1 is of size 3)
+    //
+    // This is done by scanning the sizes in reverse order
+    let mut ranges = level_sizes
+        .iter()
+        .rev()
+        .scan(0, |prev_end, size| {
+            let range = *prev_end..*prev_end + size;
+            *prev_end = range.end;
+            Some(range)
+        })
+        .collect::<Vec<_>>();
+    ranges.reverse(); // reverse again so that index N is for level N
+    ranges
+}
+
+/// Round `n` up to the nearest multiple of `unit`. `unit` must be a power of two.
+fn round_to_multiple(n: usize, unit: usize) -> usize {
+    (n + unit - 1) & !(unit - 1)
+}
+
+/// Zero-pad the salt if necessary.
+///
+/// According to https://www.kernel.org/doc/html/latest/filesystems/fsverity.html:
+///
+/// If a salt was specified, then it’s zero-padded to the closest multiple of the input size of the
+/// hash algorithm’s compression function, e.g. 64 bytes for SHA-256 or 128 bytes for SHA-512. The
+/// padded salt is prepended to every data or Merkle tree block that is hashed.
+fn zero_pad_salt(salt: &[u8], algorithm: &Algorithm) -> Vec<u8> {
+    if salt.is_empty() {
+        salt.to_vec()
+    } else {
+        let padded_len = round_to_multiple(salt.len(), algorithm.block_len);
+        let mut salt = salt.to_vec();
+        salt.resize(padded_len, 0);
+        salt
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::hashtree::*;
+    use ring::digest;
+    use std::fs::{self, File};
+
+    #[test]
+    fn compare_with_golden_output() -> Result<()> {
+        // The golden outputs are generated by using the `fsverity` utility.
+        let sizes = ["512", "4K", "1M", "10000000"];
+        for size in sizes.iter() {
+            let input_name = format!("testdata/input.{}", size);
+            let mut input = File::open(&input_name)?;
+            let golden_hash_tree = fs::read(format!("testdata/input.{}.hash", size))?;
+            let golden_descriptor = fs::read(format!("testdata/input.{}.descriptor", size))?;
+            let golden_root_hash = &golden_descriptor[16..16 + 32];
+
+            let size = std::fs::metadata(&input_name)?.len() as usize;
+            let salt = vec![1, 2, 3, 4, 5, 6];
+            let ht = HashTree::from(&mut input, size, &salt, 4096, &digest::SHA256)?;
+
+            assert_eq!(golden_hash_tree.as_slice(), ht.tree.as_slice());
+            assert_eq!(golden_root_hash, ht.root_hash.as_slice());
+        }
+        Ok(())
+    }
+}
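Not part of the change: a small, hedged usage sketch of the new `HashTree` API, assuming it is consumed through the `idsig` crate re-exports added in `lib.rs` below. It mirrors the parameters used by the golden-output test (6-byte salt, 4K blocks, SHA-256):

```rust
use idsig::HashTree;
use ring::digest;
use std::io::Cursor;

fn main() -> std::io::Result<()> {
    // Arbitrary input; anything implementing `Read` works. 8000 bytes forces the
    // last 4096-byte block to be zero-padded, as described in generate_hash_tree.
    let data = vec![0xabu8; 8000];
    let mut input = Cursor::new(&data);

    let ht =
        HashTree::from(&mut input, data.len(), &[1, 2, 3, 4, 5, 6], 4096, &digest::SHA256)?;

    assert_eq!(ht.root_hash.len(), 32); // SHA-256 digest size
    println!("hash tree is {} bytes", ht.tree.len());
    Ok(())
}
```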
diff --git a/idsig/src/lib.rs b/idsig/src/lib.rs
new file mode 100644
index 0000000..7937d71
--- /dev/null
+++ b/idsig/src/lib.rs
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! `idsig` provides routines for creating and parsing idsig files as defined by the APK
+//! signature scheme v4.
+
+mod apksigv4;
+mod hashtree;
+
+pub use crate::apksigv4::*;
+pub use crate::hashtree::*;
diff --git a/idsig/testdata/create.sh b/idsig/testdata/create.sh
new file mode 100755
index 0000000..eadfdb2
--- /dev/null
+++ b/idsig/testdata/create.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+sizes="512 4K 1M 10000000"
+for size in $sizes; do
+  echo $size
+  dd if=/dev/random of=input.$size bs=$size count=1
+  fsverity digest input.$size \
+    --hash-alg=sha256 \
+    --salt=010203040506 \
+    --block-size=4096 \
+    --out-merkle-tree input.$size.hash \
+    --out-descriptor input.$size.descriptor
+done
diff --git a/idsig/testdata/input.10000000 b/idsig/testdata/input.10000000
new file mode 100644
index 0000000..6bc5a4b
--- /dev/null
+++ b/idsig/testdata/input.10000000
Binary files differ
diff --git a/idsig/testdata/input.10000000.descriptor b/idsig/testdata/input.10000000.descriptor
new file mode 100644
index 0000000..dc0d096
--- /dev/null
+++ b/idsig/testdata/input.10000000.descriptor
Binary files differ
diff --git a/idsig/testdata/input.10000000.hash b/idsig/testdata/input.10000000.hash
new file mode 100644
index 0000000..354c5c2
--- /dev/null
+++ b/idsig/testdata/input.10000000.hash
Binary files differ
diff --git a/idsig/testdata/input.1M b/idsig/testdata/input.1M
new file mode 100644
index 0000000..7040ec3
--- /dev/null
+++ b/idsig/testdata/input.1M
Binary files differ
diff --git a/idsig/testdata/input.1M.descriptor b/idsig/testdata/input.1M.descriptor
new file mode 100644
index 0000000..f11753d
--- /dev/null
+++ b/idsig/testdata/input.1M.descriptor
Binary files differ
diff --git a/idsig/testdata/input.1M.hash b/idsig/testdata/input.1M.hash
new file mode 100644
index 0000000..689790c
--- /dev/null
+++ b/idsig/testdata/input.1M.hash
Binary files differ
diff --git a/idsig/testdata/input.4K b/idsig/testdata/input.4K
new file mode 100644
index 0000000..99db32a
--- /dev/null
+++ b/idsig/testdata/input.4K
Binary files differ
diff --git a/idsig/testdata/input.4K.descriptor b/idsig/testdata/input.4K.descriptor
new file mode 100644
index 0000000..b120e2f
--- /dev/null
+++ b/idsig/testdata/input.4K.descriptor
Binary files differ
diff --git a/idsig/testdata/input.4K.hash b/idsig/testdata/input.4K.hash
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/idsig/testdata/input.4K.hash
diff --git a/idsig/testdata/input.512 b/idsig/testdata/input.512
new file mode 100644
index 0000000..a57797f
--- /dev/null
+++ b/idsig/testdata/input.512
Binary files differ
diff --git a/idsig/testdata/input.512.descriptor b/idsig/testdata/input.512.descriptor
new file mode 100644
index 0000000..805019b
--- /dev/null
+++ b/idsig/testdata/input.512.descriptor
Binary files differ
diff --git a/idsig/testdata/input.512.hash b/idsig/testdata/input.512.hash
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/idsig/testdata/input.512.hash
diff --git a/idsig/testdata/test.apk.idsig b/idsig/testdata/test.apk.idsig
new file mode 100644
index 0000000..8c112de
--- /dev/null
+++ b/idsig/testdata/test.apk.idsig
Binary files differ
diff --git a/microdroid/README.md b/microdroid/README.md
index 0578921..196c543 100644
--- a/microdroid/README.md
+++ b/microdroid/README.md
@@ -7,7 +7,7 @@
 
 ## Prerequisites
 
-Any 64-bit target (either x86\_64 or arm64) is supported. 32-bit target is not
+Any 64-bit target (either x86_64 or arm64) is supported. A 32-bit target is not
 supported. Note that we currently don't support user builds; only userdebug
 builds are supported.
 
@@ -39,7 +39,7 @@
 adb reboot
 ```
 
-If your target is x86\_64 (e.g. `aosp_cf_x86_64_phone`), replace `aosp_arm64`
+If your target is x86_64 (e.g. `aosp_cf_x86_64_phone`), replace `aosp_arm64`
 with `aosp_x86_64`.
 
 ## Building an app
@@ -69,7 +69,7 @@
 
 ```json
 {
-  "os": {"name": "microdroid"},
+  "os": { "name": "microdroid" },
   "task": {
     "type": "microdroid_launcher",
     "command": "MyMicrodroidApp.so"
@@ -78,7 +78,7 @@
 ```
 
 The value of `task.command` should match with the name of the shared library
-defined above. If your app rquires APEXes to be imported, you can declare the
+defined above. If your app requires APEXes to be imported, you can declare the
 list in `apexes` key like following.
 
 ```json
@@ -134,6 +134,7 @@
 
 `ALL_CAP`s below are placeholders. They need to be replaced with correct
 values:
+
 * `VM_CONFIG_FILE`: the name of the VM config file that you embedded in the APK.
   (e.g. `vm_config.json`)
 * `PACKAGE_NAME_OF_YOUR_APP`: package name of your app (e.g. `com.acme.app`).
@@ -174,10 +175,10 @@
 Stopping the VM can be done as follows:
 
 ```sh
-adb shell /apex/com.android.virt/bin/vm stop CID
+adb shell /apex/com.android.virt/bin/vm stop $CID
 ```
 
-, where `CID` is the reported CID value. This works only when the `vm` was
+Here, `$CID` is the reported CID value. This works only when the `vm` was
 invoked with the `--daemonize` flag. If the flag was not used, press Ctrl+C on
 the console where the `vm run-app` command was invoked.
 
@@ -190,10 +191,10 @@
 adb connect localhost:8000
 ```
 
-`CID` should be the CID that `vm` reported upon execution of the `vm run`
-command in the above. You can also check it with `adb shell
-"/apex/com.android.virt/bin/vm list"`. `5555` must be
-the value. `8000` however can be any port in the development machine.
+`$CID` should be the CID that `vm` reported when the `vm run` command above was
+executed. You can also check it with
+`adb shell "/apex/com.android.virt/bin/vm list"`. The value `5555` must be kept as-is;
+`8000`, however, can be any port on the development machine.
 
 Done. Now you can log into microdroid. Have fun!
 
diff --git a/microdroid/keymint/MicrodroidKeymasterContext.cpp b/microdroid/keymint/MicrodroidKeymasterContext.cpp
index b5440f3..1d1346b 100644
--- a/microdroid/keymint/MicrodroidKeymasterContext.cpp
+++ b/microdroid/keymint/MicrodroidKeymasterContext.cpp
@@ -55,11 +55,14 @@
     // doesn't pose a problem for the current applications but may be a
     // candidate for hardening.
     auto encrypted_key = EncryptKey(key_material, AES_GCM_WITH_SW_ENFORCED, *hw_enforced,
-                                    *sw_enforced, hidden, root_key_, random_, &error);
-    if (error != KM_ERROR_OK) return error;
+                                    *sw_enforced, hidden, SecureDeletionData{}, root_key_, random_);
+    if (!encrypted_key) return encrypted_key.error();
 
-    *blob = SerializeAuthEncryptedBlob(encrypted_key, *hw_enforced, *sw_enforced, &error);
-    return error;
+    auto serialized = SerializeAuthEncryptedBlob(*encrypted_key, *hw_enforced, *sw_enforced,
+                                                 0 /* key_slot */);
+    if (!serialized) return serialized.error();
+    *blob = *serialized;
+    return KM_ERROR_OK;
 }
 
 keymaster_error_t MicrodroidKeymasterContext::ParseKeyBlob(
@@ -71,21 +74,21 @@
     error = BuildHiddenAuthorizations(additional_params, &hidden, microdroidSoftwareRootOfTrust);
     if (error != KM_ERROR_OK) return error;
 
-    auto deserialized_key = DeserializeAuthEncryptedBlob(blob, &error);
-    if (error != KM_ERROR_OK) return error;
+    auto deserialized_key = DeserializeAuthEncryptedBlob(blob);
+    if (!deserialized_key) return deserialized_key.error();
 
     keymaster_algorithm_t algorithm;
-    if (!deserialized_key.sw_enforced.GetTagValue(TAG_ALGORITHM, &algorithm)) {
+    if (!deserialized_key->sw_enforced.GetTagValue(TAG_ALGORITHM, &algorithm)) {
         return KM_ERROR_INVALID_ARGUMENT;
     }
 
-    auto key_material = DecryptKey(deserialized_key, hidden, root_key_, &error);
-    if (error != KM_ERROR_OK) return error;
+    auto key_material = DecryptKey(*deserialized_key, hidden, SecureDeletionData{}, root_key_);
+    if (!key_material) return key_material.error();
 
     auto factory = GetKeyFactory(algorithm);
-    return factory->LoadKey(move(key_material), additional_params,
-                            move(deserialized_key.hw_enforced), move(deserialized_key.sw_enforced),
-                            key);
+    return factory->LoadKey(move(*key_material), additional_params,
+                            move(deserialized_key->hw_enforced),
+                            move(deserialized_key->sw_enforced), key);
 }
 
 static bool UpgradeIntegerTag(keymaster_tag_t tag, uint32_t value, AuthorizationSet* set) {
@@ -137,10 +140,13 @@
 
     auto encrypted_key =
             EncryptKey(key->key_material(), AES_GCM_WITH_SW_ENFORCED, key->hw_enforced(),
-                       key->sw_enforced(), hidden, root_key_, random_, &error);
-    if (error != KM_ERROR_OK) return error;
+                       key->sw_enforced(), hidden, SecureDeletionData{}, root_key_, random_);
+    if (!encrypted_key) return encrypted_key.error();
 
-    *upgraded_key = SerializeAuthEncryptedBlob(encrypted_key, key->hw_enforced(),
-                                               key->sw_enforced(), &error);
+    auto serialized = SerializeAuthEncryptedBlob(*encrypted_key, key->hw_enforced(),
+                                                 key->sw_enforced(), 0 /* key_slot */);
+    if (!serialized) return serialized.error();
+
+    *upgraded_key = std::move(*serialized);
     return error;
 }
diff --git a/microdroid/payload/Android.bp b/microdroid/payload/Android.bp
index 72711c3..f77c037 100644
--- a/microdroid/payload/Android.bp
+++ b/microdroid/payload/Android.bp
@@ -25,19 +25,6 @@
     defaults: ["microdroid_metadata_default"],
 }
 
-cc_library_static {
-    name: "lib_microdroid_metadata_proto_lite",
-    recovery_available: true,
-    proto: {
-        export_proto_headers: true,
-        type: "lite",
-    },
-    defaults: ["microdroid_metadata_default"],
-    apex_available: [
-        "com.android.virt",
-    ],
-}
-
 rust_protobuf {
     name: "libmicrodroid_metadata_proto_rust",
     crate_name: "microdroid_metadata",
diff --git a/microdroid/payload/README.md b/microdroid/payload/README.md
index bf05c49..c2f624a 100644
--- a/microdroid/payload/README.md
+++ b/microdroid/payload/README.md
@@ -28,22 +28,20 @@
 
 The partition is a protobuf message prefixed with the size of the message.
 
-| offset | size | description                                                    |
-|--------|------|----------------------------------------------------------------|
-| 0      | 4    | Header. unsigned int32: body length(L) in big endian           |
-| 4      | L    | Body. A protobuf message. [schema](metadata.proto) |
+| offset | size | description                                          |
+| ------ | ---- | ---------------------------------------------------- |
+| 0      | 4    | Header. unsigned int32: body length(L) in big endian |
+| 4      | L    | Body. A protobuf message. [schema](metadata.proto)   |
 
 ### Payload partitions
 
 Each payload partition presents APEX or APK passed from the host.
 
-Note that each payload passed to the Guest is read by a block device. If a payload is not sized to a
-multiples of 4k, reading it would fail. To prevent that, "zero fillers" are added for those files.
-For example, if an APK is 8000 byte big, the APK partition would be padded with 192 bytes of zeros.
+The size of a payload partition must be a multiple of 4096 bytes.
 
 # `mk_payload`
 
-`mk_payload` is a small utility to create a payload disk image.
+`mk_payload` is a small utility to create a payload disk image. It is used by ARCVM.
 
 ```
 $ cat payload_config.json
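Not part of the change: a hedged sketch of consuming the metadata partition layout described in the table above (a 4-byte big-endian body length followed by a protobuf message). Decoding the body with the generated `microdroid_metadata` types is omitted:

```rust
use std::io::{Read, Result};

/// Reads the size-prefixed metadata partition: a 4-byte big-endian body length
/// followed by the protobuf-encoded body (see metadata.proto).
fn read_metadata_bytes<R: Read>(partition: &mut R) -> Result<Vec<u8>> {
    let mut header = [0u8; 4];
    partition.read_exact(&mut header)?;
    let body_len = u32::from_be_bytes(header) as usize;

    let mut body = vec![0u8; body_len];
    partition.read_exact(&mut body)?;
    Ok(body)
}
```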
diff --git a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
index aa7c9ab..a7b855a 100644
--- a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
+++ b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
@@ -44,6 +44,11 @@
                         /* debug */ false);
         adbConnectToMicrodroid(getDevice(), cid);
 
+        // Wait until logd-reinit starts. The service is one of the last services that are started in
+        // the microdroid boot procedure. Therefore, waiting for the service means that we wait for
+        // the boot to complete. TODO: we need a better marker eventually.
+        tryRunOnMicrodroid("watch -e \"getprop init.svc.logd-reinit | grep '^$'\"");
+
         // Test writing to /data partition
         runOnMicrodroid("echo MicrodroidTest > /data/local/tmp/test.txt");
         assertThat(runOnMicrodroid("cat /data/local/tmp/test.txt"), is("MicrodroidTest"));
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachine.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachine.aidl
index 33c9716..081580c 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachine.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachine.aidl
@@ -32,4 +32,7 @@
      * we might miss some events that happen before the registration is done.
      */
     void registerCallback(IVirtualMachineCallback callback);
+
+    /** Open a vsock connection to the CID of the VM on the given port. */
+    ParcelFileDescriptor connectVsock(int port);
 }
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index dc38075..96e3c44 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -517,6 +517,23 @@
         self.instance.callbacks.add(callback.clone());
         Ok(())
     }
+
+    fn connectVsock(&self, port: i32) -> binder::Result<ParcelFileDescriptor> {
+        if !self.instance.running() {
+            return Err(new_binder_exception(
+                ExceptionCode::SERVICE_SPECIFIC,
+                "VM is no longer running",
+            ));
+        }
+        let stream =
+            VsockStream::connect_with_cid_port(self.instance.cid, port as u32).map_err(|e| {
+                new_binder_exception(
+                    ExceptionCode::SERVICE_SPECIFIC,
+                    format!("Failed to connect: {}", e),
+                )
+            })?;
+        Ok(vsock_stream_to_pfd(stream))
+    }
 }
 
 impl Drop for VirtualMachine {
@@ -535,9 +552,7 @@
     /// Call all registered callbacks to notify that the payload has started.
     pub fn notify_payload_started(&self, cid: Cid, stream: VsockStream) {
         let callbacks = &*self.0.lock().unwrap();
-        // SAFETY: ownership is transferred from stream to f
-        let f = unsafe { File::from_raw_fd(stream.into_raw_fd()) };
-        let pfd = ParcelFileDescriptor::new(f);
+        let pfd = vsock_stream_to_pfd(stream);
         for callback in callbacks {
             if let Err(e) = callback.onPayloadStarted(cid as i32, &pfd) {
                 error!("Error notifying payload start event from VM CID {}: {}", cid, e);
@@ -641,6 +656,13 @@
     })
 }
 
+/// Converts a `VsockStream` to a `ParcelFileDescriptor`.
+fn vsock_stream_to_pfd(stream: VsockStream) -> ParcelFileDescriptor {
+    // SAFETY: ownership is transferred from stream to f
+    let f = unsafe { File::from_raw_fd(stream.into_raw_fd()) };
+    ParcelFileDescriptor::new(f)
+}
+
 /// Constructs a new Binder error `Status` with the given `ExceptionCode` and message.
 fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
     Status::new_exception(exception, CString::new(message.as_ref()).ok().as_deref())