Merge "Add method to open vsock connection to VM."
diff --git a/TEST_MAPPING b/TEST_MAPPING
index d2a4821..5218abb 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -20,6 +20,9 @@
       "path": "packages/modules/Virtualization/apkdmverity"
     },
     {
+      "path": "packages/modules/Virtualization/apkverify"
+    },
+    {
       "path": "packages/modules/Virtualization/authfs"
     },
     {
diff --git a/apkdmverity/Android.bp b/apkdmverity/Android.bp
index 9b53a47..df46324 100644
--- a/apkdmverity/Android.bp
+++ b/apkdmverity/Android.bp
@@ -13,6 +13,7 @@
         "libbitflags",
         "libclap",
         "libdata_model",
+        "libidsig",
         "liblibc",
         "libnix",
         "libnum_traits",
diff --git a/apkdmverity/src/main.rs b/apkdmverity/src/main.rs
index ff3944e..9d1ef1c 100644
--- a/apkdmverity/src/main.rs
+++ b/apkdmverity/src/main.rs
@@ -21,15 +21,13 @@
 //! system managed by the host Android which is assumed to be compromisable, it is important to
 //! keep the integrity of the file "inside" Microdroid.
 
-mod apksigv4;
 mod dm;
 mod loopdevice;
 mod util;
 
-use crate::apksigv4::*;
-
 use anyhow::{bail, Context, Result};
 use clap::{App, Arg};
+use idsig::{HashAlgorithm, V4Signature};
 use std::fmt::Debug;
 use std::fs;
 use std::fs::File;
@@ -112,7 +110,7 @@
         .hash_device(&hash_device)
         .root_digest(&sig.hashing_info.raw_root_hash)
         .hash_algorithm(match sig.hashing_info.hash_algorithm {
-            apksigv4::HashAlgorithm::SHA256 => dm::DmVerityHashAlgorithm::SHA256,
+            HashAlgorithm::SHA256 => dm::DmVerityHashAlgorithm::SHA256,
         })
         .salt(&sig.hashing_info.salt)
         .build()
diff --git a/apkverify/Android.bp b/apkverify/Android.bp
new file mode 100644
index 0000000..df1cac6
--- /dev/null
+++ b/apkverify/Android.bp
@@ -0,0 +1,45 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+    name: "libapkverify.defaults",
+    crate_name: "apkverify",
+    srcs: ["src/lib.rs"],
+    prefer_rlib: true,
+    edition: "2018",
+    rustlibs: [
+        "libanyhow",
+        "libbyteorder",
+        "libbytes",
+        "liblog_rust",
+        "libring",
+        "libx509_parser",
+        "libzip",
+    ],
+}
+
+rust_library {
+    name: "libapkverify",
+    defaults: ["libapkverify.defaults"],
+}
+
+rust_test {
+    name: "libapkverify.test",
+    defaults: ["libapkverify.defaults"],
+    test_suites: ["general-tests"],
+}
+
+rust_test {
+    name: "libapkverify.integration_test",
+    crate_name: "apkverify_test",
+    srcs: ["tests/*_test.rs"],
+    prefer_rlib: true,
+    edition: "2018",
+    test_suites: ["general-tests"],
+    rustlibs: [
+        "libapkverify",
+        "libzip",
+    ],
+    data: ["tests/data/*"],
+}
diff --git a/apkverify/TEST_MAPPING b/apkverify/TEST_MAPPING
new file mode 100644
index 0000000..9248716
--- /dev/null
+++ b/apkverify/TEST_MAPPING
@@ -0,0 +1,10 @@
+{
+  "presubmit" : [
+    {
+      "name" : "libapkverify.test"
+    },
+    {
+      "name" : "libapkverify.integration_test"
+    }
+  ]
+}
diff --git a/apkverify/src/bytes_ext.rs b/apkverify/src/bytes_ext.rs
new file mode 100644
index 0000000..1b8d6b6
--- /dev/null
+++ b/apkverify/src/bytes_ext.rs
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Provides the extension method Bytes::read<T>(), which calls back ReadFromBytes::read_from_bytes()
+
+use anyhow::{bail, Result};
+use bytes::{Buf, Bytes};
+use std::ops::Deref;
+
+#[derive(Clone, Debug)]
+pub struct LengthPrefixed<T> {
+    inner: T,
+}
+
+impl<T> Deref for LengthPrefixed<T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+pub trait BytesExt {
+    fn read<T: ReadFromBytes>(&mut self) -> Result<T>;
+}
+
+impl BytesExt for Bytes {
+    fn read<T: ReadFromBytes>(&mut self) -> Result<T> {
+        T::read_from_bytes(self)
+    }
+}
+
+pub trait ReadFromBytes {
+    fn read_from_bytes(buf: &mut Bytes) -> Result<Self>
+    where
+        Self: Sized;
+}
+
+impl ReadFromBytes for u32 {
+    fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+        Ok(buf.get_u32_le())
+    }
+}
+
+impl<T: ReadFromBytes> ReadFromBytes for Vec<T> {
+    fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+        let mut result = vec![];
+        while buf.has_remaining() {
+            result.push(buf.read()?);
+        }
+        Ok(result)
+    }
+}
+
+impl<T: ReadFromBytes> ReadFromBytes for LengthPrefixed<T> {
+    fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+        let mut inner = read_length_prefixed_slice(buf)?;
+        let inner = inner.read()?;
+        Ok(LengthPrefixed { inner })
+    }
+}
+
+impl ReadFromBytes for Bytes {
+    fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+        Ok(buf.slice(..))
+    }
+}
+
+fn read_length_prefixed_slice(buf: &mut Bytes) -> Result<Bytes> {
+    if buf.remaining() < 4 {
+        bail!(
+            "Remaining buffer too short to contain length of length-prefixed field. Remaining: {}",
+            buf.remaining()
+        );
+    }
+    let len = buf.get_u32_le() as usize;
+    if len > buf.remaining() {
+        bail!(
+            "length-prefixed field longer than remaining buffer. Field length: {}, remaining: {}",
+            len,
+            buf.remaining()
+        );
+    }
+    Ok(buf.split_to(len))
+}
+
+#[cfg(test)]
+mod tests {
+    use bytes::{BufMut, BytesMut};
+    #[test]
+    fn test_read_length_prefixed_slice() {
+        let data = b"hello world";
+        let mut b = BytesMut::new();
+        b.put_u32_le(data.len() as u32);
+        b.put_slice(data);
+        let mut slice = b.freeze();
+        let res = super::read_length_prefixed_slice(&mut slice);
+        assert!(res.is_ok());
+        assert_eq!(data, res.ok().unwrap().as_ref());
+    }
+}
diff --git a/apkverify/src/lib.rs b/apkverify/src/lib.rs
new file mode 100644
index 0000000..869431e
--- /dev/null
+++ b/apkverify/src/lib.rs
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Verifies APK/APEX signing with v2/v3 scheme
+
+mod bytes_ext;
+mod sigutil;
+mod testing;
+mod v3;
+mod ziputil;
+
+use anyhow::Result;
+use std::path::Path;
+
+/// Verifies APK/APEX signing with v2/v3 scheme
+pub fn verify<P: AsRef<Path>>(path: P) -> Result<()> {
+    // TODO(jooyung) fallback to v2 when v3 not found
+    v3::verify(path)
+}
diff --git a/apkverify/src/sigutil.rs b/apkverify/src/sigutil.rs
new file mode 100644
index 0000000..06645fe
--- /dev/null
+++ b/apkverify/src/sigutil.rs
@@ -0,0 +1,291 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Utilities for Signature Verification
+
+use anyhow::{anyhow, bail, Result};
+use byteorder::{LittleEndian, ReadBytesExt};
+use bytes::{Buf, BufMut, Bytes};
+use ring::digest;
+use std::cmp::min;
+use std::io::{Cursor, Read, Seek, SeekFrom, Take};
+
+use crate::ziputil::{set_central_directory_offset, zip_sections};
+
+const APK_SIG_BLOCK_MIN_SIZE: u32 = 32;
+const APK_SIG_BLOCK_MAGIC: u128 = 0x3234206b636f6c4220676953204b5041;
+
+// TODO(jooyung): introduce type
+pub const SIGNATURE_RSA_PSS_WITH_SHA256: u32 = 0x0101;
+pub const SIGNATURE_RSA_PSS_WITH_SHA512: u32 = 0x0102;
+pub const SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256: u32 = 0x0103;
+pub const SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512: u32 = 0x0104;
+pub const SIGNATURE_ECDSA_WITH_SHA256: u32 = 0x0201;
+pub const SIGNATURE_ECDSA_WITH_SHA512: u32 = 0x0202;
+pub const SIGNATURE_DSA_WITH_SHA256: u32 = 0x0301;
+pub const SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256: u32 = 0x0421;
+pub const SIGNATURE_VERITY_ECDSA_WITH_SHA256: u32 = 0x0423;
+pub const SIGNATURE_VERITY_DSA_WITH_SHA256: u32 = 0x0425;
+
+// TODO(jooyung): introduce type
+const CONTENT_DIGEST_CHUNKED_SHA256: u32 = 1;
+const CONTENT_DIGEST_CHUNKED_SHA512: u32 = 2;
+const CONTENT_DIGEST_VERITY_CHUNKED_SHA256: u32 = 3;
+#[allow(unused)]
+const CONTENT_DIGEST_SHA256: u32 = 4;
+
+const CHUNK_SIZE_BYTES: u64 = 1024 * 1024;
+
+pub struct ApkSections<R> {
+    inner: R,
+    signing_block_offset: u32,
+    signing_block_size: u32,
+    central_directory_offset: u32,
+    central_directory_size: u32,
+    eocd_offset: u32,
+    eocd_size: u32,
+}
+
+impl<R: Read + Seek> ApkSections<R> {
+    pub fn new(reader: R) -> Result<ApkSections<R>> {
+        let (mut f, zip_sections) = zip_sections(reader)?;
+        let (signing_block_offset, signing_block_size) =
+            find_signing_block(&mut f, zip_sections.central_directory_offset)?;
+        Ok(ApkSections {
+            inner: f,
+            signing_block_offset,
+            signing_block_size,
+            central_directory_offset: zip_sections.central_directory_offset,
+            central_directory_size: zip_sections.central_directory_size,
+            eocd_offset: zip_sections.eocd_offset,
+            eocd_size: zip_sections.eocd_size,
+        })
+    }
+
+    /// Returns the APK Signature Scheme block contained in the provided file for the given ID
+    /// and the additional information relevant for verifying the block against the file.
+    pub fn find_signature(&mut self, block_id: u32) -> Result<Bytes> {
+        let signing_block = self.bytes(self.signing_block_offset, self.signing_block_size)?;
+        // TODO(jooyung): propagate NotFound error so that verification can fallback to V2
+        find_signature_scheme_block(Bytes::from(signing_block), block_id)
+    }
+
+    /// Computes digest with "signature algorithm" over APK contents, central directory, and EOCD.
+    /// 1. The digest of each chunk is computed over the concatenation of byte 0xa5, the chunk’s
+    ///    length in bytes (little-endian uint32), and the chunk’s contents.
+    /// 2. The top-level digest is computed over the concatenation of byte 0x5a, the number of
+    ///    chunks (little-endian uint32), and the concatenation of digests of the chunks in the
+    ///    order the chunks appear in the APK.
+    /// (see https://source.android.com/security/apksigning/v2#integrity-protected-contents)
+    pub fn compute_digest(&mut self, signature_algorithm_id: u32) -> Result<Vec<u8>> {
+        let digester = Digester::new(signature_algorithm_id)?;
+
+        let mut digests_of_chunks = bytes::BytesMut::new();
+        let mut chunk_count = 0u32;
+        let mut chunk = vec![0u8; CHUNK_SIZE_BYTES as usize];
+        for data in &[
+            ApkSections::zip_entries,
+            ApkSections::central_directory,
+            ApkSections::eocd_for_verification,
+        ] {
+            let mut data = data(self)?;
+            while data.limit() > 0 {
+                let chunk_size = min(CHUNK_SIZE_BYTES, data.limit());
+                let mut slice = &mut chunk[..(chunk_size as usize)];
+                data.read_exact(&mut slice)?;
+                digests_of_chunks.put_slice(
+                    digester.digest(slice, CHUNK_HEADER_MID, chunk_size as u32).as_ref(),
+                );
+                chunk_count += 1;
+            }
+        }
+        Ok(digester.digest(&digests_of_chunks, CHUNK_HEADER_TOP, chunk_count).as_ref().into())
+    }
+
+    fn zip_entries(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
+        scoped_read(&mut self.inner, 0, self.signing_block_offset as u64)
+    }
+    fn central_directory(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
+        scoped_read(
+            &mut self.inner,
+            self.central_directory_offset as u64,
+            self.central_directory_size as u64,
+        )
+    }
+    fn eocd_for_verification(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
+        let mut eocd = self.bytes(self.eocd_offset, self.eocd_size)?;
+        // Protection of section 4 (ZIP End of Central Directory) is complicated by the section
+        // containing the offset of ZIP Central Directory. The offset changes when the size of the
+        // APK Signing Block changes, for instance, when a new signature is added. Thus, when
+        // computing digest over the ZIP End of Central Directory, the field containing the offset
+        // of ZIP Central Directory must be treated as containing the offset of the APK Signing
+        // Block.
+        set_central_directory_offset(&mut eocd, self.signing_block_offset)?;
+        Ok(Read::take(Box::new(Cursor::new(eocd)), self.eocd_size as u64))
+    }
+    fn bytes(&mut self, offset: u32, size: u32) -> Result<Vec<u8>> {
+        self.inner.seek(SeekFrom::Start(offset as u64))?;
+        let mut buf = vec![0u8; size as usize];
+        self.inner.read_exact(&mut buf)?;
+        Ok(buf)
+    }
+}
+
+fn scoped_read<'a, R: Read + Seek>(
+    src: &'a mut R,
+    offset: u64,
+    size: u64,
+) -> Result<Take<Box<dyn Read + 'a>>> {
+    src.seek(SeekFrom::Start(offset))?;
+    Ok(Read::take(Box::new(src), size))
+}
+
+struct Digester {
+    algorithm: &'static digest::Algorithm,
+}
+
+const CHUNK_HEADER_TOP: &[u8] = &[0x5a];
+const CHUNK_HEADER_MID: &[u8] = &[0xa5];
+impl Digester {
+    fn new(signature_algorithm_id: u32) -> Result<Digester> {
+        let digest_algorithm_id = to_content_digest_algorithm(signature_algorithm_id)?;
+        let algorithm = match digest_algorithm_id {
+            CONTENT_DIGEST_CHUNKED_SHA256 => &digest::SHA256,
+            CONTENT_DIGEST_CHUNKED_SHA512 => &digest::SHA512,
+            // TODO(jooyung): implement
+            CONTENT_DIGEST_VERITY_CHUNKED_SHA256 => {
+                bail!("TODO(b/190343842): CONTENT_DIGEST_VERITY_CHUNKED_SHA256: not implemented")
+            }
+            _ => bail!("Unknown digest algorithm: {}", digest_algorithm_id),
+        };
+        Ok(Digester { algorithm })
+    }
+    // v2/v3 digests are computed after prepending "header" byte and "size" info.
+    fn digest(&self, data: &[u8], header: &[u8], size: u32) -> digest::Digest {
+        let mut ctx = digest::Context::new(self.algorithm);
+        ctx.update(header);
+        ctx.update(&size.to_le_bytes());
+        ctx.update(data);
+        ctx.finish()
+    }
+}
+
+fn find_signing_block<T: Read + Seek>(
+    reader: &mut T,
+    central_directory_offset: u32,
+) -> Result<(u32, u32)> {
+    // FORMAT:
+    // OFFSET       DATA TYPE  DESCRIPTION
+    // * @+0  bytes uint64:    size in bytes (excluding this field)
+    // * @+8  bytes payload
+    // * @-24 bytes uint64:    size in bytes (same as the one above)
+    // * @-16 bytes uint128:   magic
+    if central_directory_offset < APK_SIG_BLOCK_MIN_SIZE {
+        bail!(
+            "APK too small for APK Signing Block. ZIP Central Directory offset: {}",
+            central_directory_offset
+        );
+    }
+    reader.seek(SeekFrom::Start((central_directory_offset - 24) as u64))?;
+    let size_in_footer = reader.read_u64::<LittleEndian>()? as u32;
+    if reader.read_u128::<LittleEndian>()? != APK_SIG_BLOCK_MAGIC {
+        bail!("No APK Signing Block before ZIP Central Directory")
+    }
+    let total_size = size_in_footer + 8;
+    let signing_block_offset = central_directory_offset
+        .checked_sub(total_size)
+        .ok_or_else(|| anyhow!("APK Signing Block size out of range: {}", size_in_footer))?;
+    reader.seek(SeekFrom::Start(signing_block_offset as u64))?;
+    let size_in_header = reader.read_u64::<LittleEndian>()? as u32;
+    if size_in_header != size_in_footer {
+        bail!(
+            "APK Signing Block sizes in header and footer do not match: {} vs {}",
+            size_in_header,
+            size_in_footer
+        );
+    }
+    Ok((signing_block_offset, total_size))
+}
+
+fn find_signature_scheme_block(buf: Bytes, block_id: u32) -> Result<Bytes> {
+    // FORMAT:
+    // OFFSET       DATA TYPE  DESCRIPTION
+    // * @+0  bytes uint64:    size in bytes (excluding this field)
+    // * @+8  bytes pairs
+    // * @-24 bytes uint64:    size in bytes (same as the one above)
+    // * @-16 bytes uint128:   magic
+    let mut pairs = buf.slice(8..(buf.len() - 24));
+    let mut entry_count = 0;
+    while pairs.has_remaining() {
+        entry_count += 1;
+        if pairs.remaining() < 8 {
+            bail!("Insufficient data to read size of APK Signing Block entry #{}", entry_count);
+        }
+        let length = pairs.get_u64_le();
+        let mut pair = pairs.split_to(length as usize);
+        let id = pair.get_u32_le();
+        if id == block_id {
+            return Ok(pair);
+        }
+    }
+    // TODO(jooyung): return NotFound error
+    bail!("No APK Signature Scheme block in APK Signing Block with ID: {}", block_id)
+}
+
+pub fn is_supported_signature_algorithm(algorithm_id: u32) -> bool {
+    matches!(
+        algorithm_id,
+        SIGNATURE_RSA_PSS_WITH_SHA256
+            | SIGNATURE_RSA_PSS_WITH_SHA512
+            | SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256
+            | SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512
+            | SIGNATURE_ECDSA_WITH_SHA256
+            | SIGNATURE_ECDSA_WITH_SHA512
+            | SIGNATURE_DSA_WITH_SHA256
+            | SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256
+            | SIGNATURE_VERITY_ECDSA_WITH_SHA256
+            | SIGNATURE_VERITY_DSA_WITH_SHA256
+    )
+}
+
+fn to_content_digest_algorithm(algorithm_id: u32) -> Result<u32> {
+    match algorithm_id {
+        SIGNATURE_RSA_PSS_WITH_SHA256
+        | SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256
+        | SIGNATURE_ECDSA_WITH_SHA256
+        | SIGNATURE_DSA_WITH_SHA256 => Ok(CONTENT_DIGEST_CHUNKED_SHA256),
+        SIGNATURE_RSA_PSS_WITH_SHA512
+        | SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512
+        | SIGNATURE_ECDSA_WITH_SHA512 => Ok(CONTENT_DIGEST_CHUNKED_SHA512),
+        SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256
+        | SIGNATURE_VERITY_ECDSA_WITH_SHA256
+        | SIGNATURE_VERITY_DSA_WITH_SHA256 => Ok(CONTENT_DIGEST_VERITY_CHUNKED_SHA256),
+        _ => bail!("Unknown signature algorithm: {}", algorithm_id),
+    }
+}
+
+pub fn rank_signature_algorithm(algo: u32) -> Result<u32> {
+    rank_content_digest_algorithm(to_content_digest_algorithm(algo)?)
+}
+
+fn rank_content_digest_algorithm(id: u32) -> Result<u32> {
+    match id {
+        CONTENT_DIGEST_CHUNKED_SHA256 => Ok(0),
+        CONTENT_DIGEST_VERITY_CHUNKED_SHA256 => Ok(1),
+        CONTENT_DIGEST_CHUNKED_SHA512 => Ok(2),
+        _ => bail!("Unknown digest algorithm: {}", id),
+    }
+}
diff --git a/apkverify/src/testing.rs b/apkverify/src/testing.rs
new file mode 100644
index 0000000..777afb8
--- /dev/null
+++ b/apkverify/src/testing.rs
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! A collection of utilities for testing
+
+/// Asserts that `haystack.contains(needle)`
+#[macro_export]
+macro_rules! assert_contains {
+    ($haystack:expr,$needle:expr $(,)?) => {
+        match (&$haystack, &$needle) {
+            (haystack_value, needle_value) => {
+                assert!(
+                    haystack_value.contains(needle_value),
+                    "{} is not found in {}",
+                    needle_value,
+                    haystack_value
+                );
+            }
+        }
+    };
+}
diff --git a/apkverify/src/v3.rs b/apkverify/src/v3.rs
new file mode 100644
index 0000000..5ec3d07
--- /dev/null
+++ b/apkverify/src/v3.rs
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Verifies APK Signature Scheme V3
+
+// TODO(jooyung) remove this
+#![allow(dead_code)]
+
+use anyhow::{anyhow, bail, Context, Result};
+use bytes::Bytes;
+use std::fs::File;
+use std::io::{Read, Seek};
+use std::ops::Range;
+use std::path::Path;
+use x509_parser::x509;
+
+use crate::bytes_ext::{BytesExt, LengthPrefixed, ReadFromBytes};
+use crate::sigutil::*;
+
+pub const APK_SIGNATURE_SCHEME_V3_BLOCK_ID: u32 = 0xf05368c0;
+
+// TODO(jooyung): get "ro.build.version.sdk"
+const SDK_INT: u32 = 31;
+
+/// Data model for Signature Scheme V3
+/// https://source.android.com/security/apksigning/v3#verification
+
+type Signers = LengthPrefixed<Vec<LengthPrefixed<Signer>>>;
+
+struct Signer {
+    signed_data: LengthPrefixed<Bytes>, // not verified yet
+    min_sdk: u32,
+    max_sdk: u32,
+    signatures: LengthPrefixed<Vec<LengthPrefixed<Signature>>>,
+    public_key: LengthPrefixed<SubjectPublicKeyInfo>,
+}
+
+impl Signer {
+    fn sdk_range(&self) -> Range<u32> {
+        self.min_sdk..self.max_sdk
+    }
+}
+
+struct SignedData {
+    digests: LengthPrefixed<Vec<LengthPrefixed<Digest>>>,
+    certificates: LengthPrefixed<Vec<LengthPrefixed<X509Certificate>>>,
+    min_sdk: u32,
+    max_sdk: u32,
+    additional_attributes: LengthPrefixed<Vec<LengthPrefixed<AdditionalAttributes>>>,
+}
+
+impl SignedData {
+    fn sdk_range(&self) -> Range<u32> {
+        self.min_sdk..self.max_sdk
+    }
+}
+
+#[derive(Debug)]
+struct Signature {
+    signature_algorithm_id: u32,
+    signature: LengthPrefixed<Bytes>,
+}
+
+struct Digest {
+    signature_algorithm_id: u32,
+    digest: LengthPrefixed<Bytes>,
+}
+
+type SubjectPublicKeyInfo = Bytes;
+type X509Certificate = Bytes;
+type AdditionalAttributes = Bytes;
+
+/// Verifies APK Signature Scheme v3 signatures of the provided APK and returns the certificates
+/// associated with each signer.
+pub fn verify<P: AsRef<Path>>(path: P) -> Result<()> {
+    let f = File::open(path.as_ref())?;
+    let mut sections = ApkSections::new(f)?;
+    verify_signature(&mut sections)?;
+    Ok(())
+}
+
+/// Verifies the contents of the provided APK file against the provided APK Signature Scheme v3
+/// Block.
+fn verify_signature<R: Read + Seek>(sections: &mut ApkSections<R>) -> Result<()> {
+    let mut block = sections.find_signature(APK_SIGNATURE_SCHEME_V3_BLOCK_ID)?;
+
+    // parse v3 scheme block
+    let signers = block.read::<Signers>()?;
+
+    // find supported by platform
+    let mut supported =
+        signers.iter().filter(|s| s.sdk_range().contains(&SDK_INT)).collect::<Vec<_>>();
+
+    // there should be exactly one
+    if supported.len() != 1 {
+        bail!("APK Signature Scheme V3 only supports one signer: {} signers found.", signers.len())
+    }
+
+    // and it should be verified
+    supported.pop().unwrap().verify(sections)?;
+
+    Ok(())
+}
+
+impl Signer {
+    fn verify<R: Read + Seek>(&self, sections: &mut ApkSections<R>) -> Result<()> {
+        // 1. Choose the strongest supported signature algorithm ID from signatures. The strength
+        //    ordering is up to each implementation/platform version.
+        let strongest: &Signature = self
+            .signatures
+            .iter()
+            .filter(|sig| is_supported_signature_algorithm(sig.signature_algorithm_id))
+            .max_by_key(|sig| rank_signature_algorithm(sig.signature_algorithm_id).unwrap())
+            .ok_or_else(|| anyhow!("No supported signatures found"))?;
+
+        // 2. Verify the corresponding signature from signatures against signed data using public key.
+        //    (It is now safe to parse signed data.)
+        let (_, key_info) = x509::SubjectPublicKeyInfo::from_der(self.public_key.as_ref())?;
+        verify_signed_data(&self.signed_data, strongest, &key_info)?;
+
+        // It is now safe to parse signed data.
+        let signed_data: SignedData = self.signed_data.slice(..).read()?;
+
+        // 3. Verify the min and max SDK versions in the signed data match those specified for the
+        //    signer.
+        if self.sdk_range() != signed_data.sdk_range() {
+            bail!("SDK versions mismatch between signed and unsigned in v3 signer block.");
+        }
+
+        // 4. Verify that the ordered list of signature algorithm IDs in digests and signatures is
+        //    identical. (This is to prevent signature stripping/addition.)
+        if !self
+            .signatures
+            .iter()
+            .map(|sig| sig.signature_algorithm_id)
+            .eq(signed_data.digests.iter().map(|dig| dig.signature_algorithm_id))
+        {
+            bail!("Signature algorithms don't match between digests and signatures records");
+        }
+
+        // 5. Compute the digest of APK contents using the same digest algorithm as the digest
+        //    algorithm used by the signature algorithm.
+        let digest = signed_data
+            .digests
+            .iter()
+            .find(|&dig| dig.signature_algorithm_id == strongest.signature_algorithm_id)
+            .unwrap(); // ok to unwrap since we check if two lists are the same above
+        let computed = sections.compute_digest(digest.signature_algorithm_id)?;
+
+        // 6. Verify that the computed digest is identical to the corresponding digest from digests.
+        if computed != digest.digest.as_ref() {
+            bail!(
+                "Digest mismatch: computed={:?} vs expected={:?}",
+                to_hex_string(&computed),
+                to_hex_string(&digest.digest),
+            );
+        }
+
+        // 7. Verify that SubjectPublicKeyInfo of the first certificate of certificates is identical
+        //    to public key.
+        let cert = signed_data.certificates.first().context("No certificates listed")?;
+        let (_, cert) = x509_parser::parse_x509_certificate(cert.as_ref())?;
+        if cert.tbs_certificate.subject_pki != key_info {
+            bail!("Public key mismatch between certificate and signature record");
+        }
+
+        // TODO(jooyung) 8. If the proof-of-rotation attribute exists for the signer verify that the struct is valid and this signer is the last certificate in the list.
+        Ok(())
+    }
+}
+
+fn verify_signed_data(
+    data: &Bytes,
+    signature: &Signature,
+    key_info: &x509::SubjectPublicKeyInfo,
+) -> Result<()> {
+    use ring::signature;
+    let verification_alg: &dyn signature::VerificationAlgorithm =
+        match signature.signature_algorithm_id {
+            SIGNATURE_RSA_PSS_WITH_SHA256 => &signature::RSA_PSS_2048_8192_SHA256,
+            SIGNATURE_RSA_PSS_WITH_SHA512 => &signature::RSA_PSS_2048_8192_SHA512,
+            SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256 | SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256 => {
+                &signature::RSA_PKCS1_2048_8192_SHA256
+            }
+            SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512 => &signature::RSA_PKCS1_2048_8192_SHA512,
+            SIGNATURE_ECDSA_WITH_SHA256 | SIGNATURE_VERITY_ECDSA_WITH_SHA256 => {
+                &signature::ECDSA_P256_SHA256_ASN1
+            }
+            // TODO(b/190343842) not implemented signature algorithm
+            SIGNATURE_ECDSA_WITH_SHA512
+            | SIGNATURE_DSA_WITH_SHA256
+            | SIGNATURE_VERITY_DSA_WITH_SHA256 => {
+                bail!(
+                    "TODO(b/190343842) not implemented signature algorithm: {:#x}",
+                    signature.signature_algorithm_id
+                );
+            }
+            _ => bail!("Unsupported signature algorithm: {:#x}", signature.signature_algorithm_id),
+        };
+    let key = signature::UnparsedPublicKey::new(verification_alg, &key_info.subject_public_key);
+    key.verify(data.as_ref(), signature.signature.as_ref())?;
+    Ok(())
+}
+
+// ReadFromBytes implementations
+// TODO(jooyung): add derive macro: #[derive(ReadFromBytes)]
+
+impl ReadFromBytes for Signer {
+    fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+        Ok(Self {
+            signed_data: buf.read()?,
+            min_sdk: buf.read()?,
+            max_sdk: buf.read()?,
+            signatures: buf.read()?,
+            public_key: buf.read()?,
+        })
+    }
+}
+
+impl ReadFromBytes for SignedData {
+    fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+        Ok(Self {
+            digests: buf.read()?,
+            certificates: buf.read()?,
+            min_sdk: buf.read()?,
+            max_sdk: buf.read()?,
+            additional_attributes: buf.read()?,
+        })
+    }
+}
+
+impl ReadFromBytes for Signature {
+    fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+        Ok(Signature { signature_algorithm_id: buf.read()?, signature: buf.read()? })
+    }
+}
+
+impl ReadFromBytes for Digest {
+    fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+        Ok(Self { signature_algorithm_id: buf.read()?, digest: buf.read()? })
+    }
+}
+
+#[inline]
+fn to_hex_string(buf: &[u8]) -> String {
+    buf.iter().map(|b| format!("{:02X}", b)).collect()
+}
diff --git a/apkverify/src/ziputil.rs b/apkverify/src/ziputil.rs
new file mode 100644
index 0000000..bfb1c01
--- /dev/null
+++ b/apkverify/src/ziputil.rs
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Utilities for zip handling
+
+use anyhow::{bail, Result};
+use bytes::{Buf, BufMut};
+use std::io::{Read, Seek, SeekFrom};
+use zip::ZipArchive;
+
+const EOCD_MIN_SIZE: usize = 22;
+const EOCD_CENTRAL_DIRECTORY_SIZE_FIELD_OFFSET: usize = 12;
+const EOCD_CENTRAL_DIRECTORY_OFFSET_FIELD_OFFSET: usize = 16;
+const EOCD_MAGIC: u32 = 0x06054b50;
+const ZIP64_MARK: u32 = 0xffffffff;
+
+#[derive(Debug, PartialEq)]
+pub struct ZipSections {
+    pub central_directory_offset: u32,
+    pub central_directory_size: u32,
+    pub eocd_offset: u32,
+    pub eocd_size: u32,
+}
+
+/// Discover the layout of a zip file.
+pub fn zip_sections<R: Read + Seek>(mut reader: R) -> Result<(R, ZipSections)> {
+    // open a zip to parse EOCD
+    let archive = ZipArchive::new(reader)?;
+    let eocd_size = archive.comment().len() + EOCD_MIN_SIZE;
+    if archive.offset() != 0 {
+        bail!("Invalid ZIP: offset should be 0, but {}.", archive.offset());
+    }
+    // retrieve reader back
+    reader = archive.into_inner();
+    // the current position should point to the EOCD offset
+    let eocd_offset = reader.seek(SeekFrom::Current(0))? as u32;
+    let mut eocd = vec![0u8; eocd_size as usize];
+    reader.read_exact(&mut eocd)?;
+    if (&eocd[0..]).get_u32_le() != EOCD_MAGIC {
+        bail!("Invalid ZIP: ZipArchive::new() should point EOCD after reading.");
+    }
+    let (central_directory_size, central_directory_offset) = get_central_directory(&eocd)?;
+    if central_directory_offset == ZIP64_MARK || central_directory_size == ZIP64_MARK {
+        bail!("Unsupported ZIP: ZIP64 is not supported.");
+    }
+    if central_directory_offset + central_directory_size != eocd_offset {
+        bail!("Invalid ZIP: EOCD should follow CD with no extra data or overlap.");
+    }
+
+    Ok((
+        reader,
+        ZipSections {
+            central_directory_offset,
+            central_directory_size,
+            eocd_offset,
+            eocd_size: eocd_size as u32,
+        },
+    ))
+}
+
+fn get_central_directory(buf: &[u8]) -> Result<(u32, u32)> {
+    if buf.len() < EOCD_MIN_SIZE {
+        bail!("Invalid EOCD size: {}", buf.len());
+    }
+    let mut buf = &buf[EOCD_CENTRAL_DIRECTORY_SIZE_FIELD_OFFSET..];
+    let size = buf.get_u32_le();
+    let offset = buf.get_u32_le();
+    Ok((size, offset))
+}
+
+/// Update EOCD's central_directory_offset field.
+pub fn set_central_directory_offset(buf: &mut [u8], value: u32) -> Result<()> {
+    if buf.len() < EOCD_MIN_SIZE {
+        bail!("Invalid EOCD size: {}", buf.len());
+    }
+    (&mut buf[EOCD_CENTRAL_DIRECTORY_OFFSET_FIELD_OFFSET..]).put_u32_le(value);
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::assert_contains;
+    use std::io::{Cursor, Write};
+    use zip::{write::FileOptions, ZipWriter};
+
+    fn create_test_zip() -> Cursor<Vec<u8>> {
+        let mut writer = ZipWriter::new(Cursor::new(Vec::new()));
+        writer.start_file("testfile", FileOptions::default()).unwrap();
+        writer.write_all(b"testcontent").unwrap();
+        writer.finish().unwrap()
+    }
+
+    #[test]
+    fn test_zip_sections() {
+        let (cursor, sections) = zip_sections(create_test_zip()).unwrap();
+        assert_eq!(sections.eocd_offset, (cursor.get_ref().len() - EOCD_MIN_SIZE) as u32);
+    }
+
+    #[test]
+    fn test_reject_if_extra_data_between_cd_and_eocd() {
+        // prepare normal zip
+        let buf = create_test_zip().into_inner();
+
+        // insert garbage between CD and EOCD.
+        // by the way, to mock zip-rs, use CD as garbage. This is an implementation detail of zip-rs,
+        // which reads CD at (eocd_offset - cd_size) instead of at cd_offset from EOCD.
+        let (pre_eocd, eocd) = buf.split_at(buf.len() - EOCD_MIN_SIZE);
+        let (_, cd_offset) = get_central_directory(eocd).unwrap();
+        let cd = &pre_eocd[cd_offset as usize..];
+
+        // ZipArchive::new() succeeds, but we should reject
+        let res = zip_sections(Cursor::new([pre_eocd, cd, eocd].concat()));
+        assert!(res.is_err());
+        assert_contains!(res.err().unwrap().to_string(), "Invalid ZIP: offset should be 0");
+    }
+}
diff --git a/apkverify/tests/apkverify_test.rs b/apkverify/tests/apkverify_test.rs
new file mode 100644
index 0000000..3366524
--- /dev/null
+++ b/apkverify/tests/apkverify_test.rs
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use apkverify::{assert_contains, verify};
+use std::matches;
+
+#[test]
+fn test_verify_v3() {
+    assert!(verify("tests/data/test.apex").is_ok());
+}
+
+#[test]
+fn test_verify_v3_digest_mismatch() {
+    let res = verify("tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk");
+    assert!(res.is_err());
+    assert_contains!(res.err().unwrap().to_string(), "Digest mismatch");
+}
+
+#[test]
+fn test_verify_v3_cert_and_publick_key_mismatch() {
+    let res = verify("tests/data/v3-only-cert-and-public-key-mismatch.apk");
+    assert!(res.is_err());
+    assert_contains!(res.err().unwrap().to_string(), "Public key mismatch");
+}
+
+#[test]
+fn test_verify_truncated_cd() {
+    use zip::result::ZipError;
+    let res = verify("tests/data/v2-only-truncated-cd.apk");
+    // TODO(jooyung): consider making a helper for err assertion
+    assert!(matches!(
+        res.err().unwrap().root_cause().downcast_ref::<ZipError>().unwrap(),
+        ZipError::InvalidArchive(_),
+    ));
+}
diff --git a/apkverify/tests/data/README.md b/apkverify/tests/data/README.md
new file mode 100644
index 0000000..7556921
--- /dev/null
+++ b/apkverify/tests/data/README.md
@@ -0,0 +1,14 @@
+test.apex is copied from ADBD apex built in AOSP.
+
+```sh
+$ apksigner verify -v test.apex
+Verifies
+Verified using v1 scheme (JAR signing): false
+Verified using v2 scheme (APK Signature Scheme v2): false
+Verified using v3 scheme (APK Signature Scheme v3): true
+Verified using v4 scheme (APK Signature Scheme v4): false
+Verified for SourceStamp: false
+Number of signers: 1
+```
+
+APK files are copied from tools/apksig/src/test/resources/com/android/apksig/.
diff --git a/apkverify/tests/data/test.apex b/apkverify/tests/data/test.apex
new file mode 100644
index 0000000..0e6a576
--- /dev/null
+++ b/apkverify/tests/data/test.apex
Binary files differ
diff --git a/apkverify/tests/data/v2-only-truncated-cd.apk b/apkverify/tests/data/v2-only-truncated-cd.apk
new file mode 100644
index 0000000..d2e3e8d
--- /dev/null
+++ b/apkverify/tests/data/v2-only-truncated-cd.apk
Binary files differ
diff --git a/apkverify/tests/data/v3-only-cert-and-public-key-mismatch.apk b/apkverify/tests/data/v3-only-cert-and-public-key-mismatch.apk
new file mode 100644
index 0000000..2291e7e
--- /dev/null
+++ b/apkverify/tests/data/v3-only-cert-and-public-key-mismatch.apk
Binary files differ
diff --git a/apkverify/tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk b/apkverify/tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk
new file mode 100644
index 0000000..2800929
--- /dev/null
+++ b/apkverify/tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk
Binary files differ
diff --git a/authfs/aidl/Android.bp b/authfs/aidl/Android.bp
index 35a3c4a..9504037 100644
--- a/authfs/aidl/Android.bp
+++ b/authfs/aidl/Android.bp
@@ -9,7 +9,10 @@
     backend: {
         rust: {
             enabled: true,
-            apex_available: ["com.android.virt"],
+            apex_available: [
+                "com.android.compos",
+                "com.android.virt",
+            ],
         },
     },
 }
diff --git a/authfs/aidl/com/android/virt/fs/AuthFsConfig.aidl b/authfs/aidl/com/android/virt/fs/AuthFsConfig.aidl
new file mode 100644
index 0000000..dfccee5
--- /dev/null
+++ b/authfs/aidl/com/android/virt/fs/AuthFsConfig.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.virt.fs;
+
+import com.android.virt.fs.InputFdAnnotation;
+import com.android.virt.fs.OutputFdAnnotation;
+
+/** @hide */
+parcelable AuthFsConfig {
+    /** Port of the filesystem backend. */
+    int port;
+
+    /** Annotation for the remote input file descriptors. */
+    InputFdAnnotation[] inputFdAnnotations;
+
+    /** Annotation for the remote output file descriptors. */
+    OutputFdAnnotation[] outputFdAnnotations;
+}
diff --git a/authfs/aidl/com/android/virt/fs/IAuthFs.aidl b/authfs/aidl/com/android/virt/fs/IAuthFs.aidl
new file mode 100644
index 0000000..064b6f3
--- /dev/null
+++ b/authfs/aidl/com/android/virt/fs/IAuthFs.aidl
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.virt.fs;
+
+import com.android.virt.fs.AuthFsConfig;
+
+/** @hide */
+interface IAuthFs {
+    /** Returns a file descriptor given the name of a remote file descriptor. */
+    ParcelFileDescriptor openFile(int remoteFdName, boolean writable);
+}
diff --git a/authfs/aidl/com/android/virt/fs/IAuthFsService.aidl b/authfs/aidl/com/android/virt/fs/IAuthFsService.aidl
new file mode 100644
index 0000000..b349db2
--- /dev/null
+++ b/authfs/aidl/com/android/virt/fs/IAuthFsService.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.virt.fs;
+
+import com.android.virt.fs.AuthFsConfig;
+import com.android.virt.fs.IAuthFs;
+
+/** @hide */
+interface IAuthFsService {
+    /**
+     * Creates an AuthFS mount given the config. Returns the binder object that represent the AuthFS
+     * instance. The AuthFS setup is deleted once the lifetime of the returned binder object ends.
+     */
+    IAuthFs mount(in AuthFsConfig config);
+}
diff --git a/authfs/aidl/com/android/virt/fs/InputFdAnnotation.aidl b/authfs/aidl/com/android/virt/fs/InputFdAnnotation.aidl
new file mode 100644
index 0000000..dafb137
--- /dev/null
+++ b/authfs/aidl/com/android/virt/fs/InputFdAnnotation.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.virt.fs;
+
+/** @hide */
+parcelable InputFdAnnotation {
+    /**
+     * File descriptor number to be passed to the program.  This is also the same file descriptor
+     * number used in the backend server.
+     */
+    int fd;
+
+    /** The actual file size in bytes of the backing file to be read. */
+    long fileSize;
+}
diff --git a/authfs/aidl/com/android/virt/fs/OutputFdAnnotation.aidl b/authfs/aidl/com/android/virt/fs/OutputFdAnnotation.aidl
new file mode 100644
index 0000000..4e4e621
--- /dev/null
+++ b/authfs/aidl/com/android/virt/fs/OutputFdAnnotation.aidl
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.virt.fs;
+
+/** @hide */
+parcelable OutputFdAnnotation {
+    /**
+     * File descriptor number to be passed to the program.  This is currently assumed to be the same as
+     * the file descriptor number used in the backend server.
+     */
+    int fd;
+}
diff --git a/authfs/service/Android.bp b/authfs/service/Android.bp
new file mode 100644
index 0000000..943db35
--- /dev/null
+++ b/authfs/service/Android.bp
@@ -0,0 +1,23 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_binary {
+    name: "authfs_service",
+    srcs: [
+        "src/main.rs",
+    ],
+    edition: "2018",
+    rustlibs: [
+        "authfs_aidl_interface-rust",
+        "libandroid_logger",
+        "libanyhow",
+        "libbinder_rs",
+        "liblibc",
+        "liblog_rust",
+        "libnix",
+        "libshared_child",
+    ],
+    prefer_rlib: true,
+    init_rc: ["authfs_service.rc"],
+}
diff --git a/authfs/service/authfs_service.rc b/authfs/service/authfs_service.rc
new file mode 100644
index 0000000..9ad0ce6
--- /dev/null
+++ b/authfs/service/authfs_service.rc
@@ -0,0 +1,2 @@
+service authfs_service /system/bin/authfs_service
+    disabled
diff --git a/authfs/service/src/authfs.rs b/authfs/service/src/authfs.rs
new file mode 100644
index 0000000..7a466d3
--- /dev/null
+++ b/authfs/service/src/authfs.rs
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::{bail, Context, Result};
+use log::{debug, error, warn};
+use nix::mount::{umount2, MntFlags};
+use nix::sys::statfs::{statfs, FsType};
+use shared_child::SharedChild;
+use std::ffi::{OsStr, OsString};
+use std::fs::{remove_dir, OpenOptions};
+use std::path::PathBuf;
+use std::process::Command;
+use std::thread::sleep;
+use std::time::{Duration, Instant};
+
+use crate::common::new_binder_exception;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFs::{BnAuthFs, IAuthFs};
+use authfs_aidl_interface::aidl::com::android::virt::fs::{
+    AuthFsConfig::AuthFsConfig, InputFdAnnotation::InputFdAnnotation,
+    OutputFdAnnotation::OutputFdAnnotation,
+};
+use authfs_aidl_interface::binder::{
+    self, BinderFeatures, ExceptionCode, Interface, ParcelFileDescriptor, Strong,
+};
+
+const AUTHFS_BIN: &str = "/system/bin/authfs";
+const AUTHFS_SETUP_POLL_INTERVAL_MS: Duration = Duration::from_millis(50);
+const AUTHFS_SETUP_TIMEOUT_SEC: Duration = Duration::from_secs(10);
+const FUSE_SUPER_MAGIC: FsType = FsType(0x65735546);
+
+/// An `AuthFs` instance is supposed to be backed by an `authfs` process. When the lifetime of the
+/// instance is over, it should leave no trace on the system: the process should be terminated, the
+/// FUSE should be unmounted, and the mount directory should be deleted.
+pub struct AuthFs {
+    mountpoint: OsString,
+    process: SharedChild,
+}
+
+impl Interface for AuthFs {}
+
+impl IAuthFs for AuthFs {
+    fn openFile(
+        &self,
+        remote_fd_name: i32,
+        writable: bool,
+    ) -> binder::Result<ParcelFileDescriptor> {
+        let mut path = PathBuf::from(&self.mountpoint);
+        path.push(remote_fd_name.to_string());
+        let file = OpenOptions::new().read(true).write(writable).open(&path).map_err(|e| {
+            new_binder_exception(
+                ExceptionCode::SERVICE_SPECIFIC,
+                format!("failed to open {:?} on authfs: {}", &path, e),
+            )
+        })?;
+        Ok(ParcelFileDescriptor::new(file))
+    }
+}
+
+impl AuthFs {
+    /// Mount an authfs at `mountpoint` with specified FD annotations.
+    pub fn mount_and_wait(
+        mountpoint: OsString,
+        config: &AuthFsConfig,
+        debuggable: bool,
+    ) -> Result<Strong<dyn IAuthFs>> {
+        let child = run_authfs(
+            &mountpoint,
+            &config.inputFdAnnotations,
+            &config.outputFdAnnotations,
+            debuggable,
+        )?;
+        wait_until_authfs_ready(&mountpoint).map_err(|e| {
+            debug!("Wait for authfs: {:?}", child.wait());
+            e
+        })?;
+
+        let authfs = AuthFs { mountpoint, process: child };
+        Ok(BnAuthFs::new_binder(authfs, BinderFeatures::default()))
+    }
+}
+
+impl Drop for AuthFs {
+    /// On drop, try to erase all the traces for this authfs mount.
+    fn drop(&mut self) {
+        debug!("Dropping AuthFs instance at mountpoint {:?}", &self.mountpoint);
+        if let Err(e) = self.process.kill() {
+            error!("Failed to kill authfs: {}", e);
+        }
+        match self.process.wait() {
+            Ok(status) => debug!("authfs exit code: {}", status),
+            Err(e) => warn!("Failed to wait for authfs: {}", e),
+        }
+        // The client may still hold the file descriptors that refer to this filesystem. Use
+        // MNT_DETACH to detach the mountpoint, and automatically unmount when there is no more
+        // reference.
+        if let Err(e) = umount2(self.mountpoint.as_os_str(), MntFlags::MNT_DETACH) {
+            error!("Failed to umount authfs at {:?}: {}", &self.mountpoint, e)
+        }
+
+        if let Err(e) = remove_dir(&self.mountpoint) {
+            error!("Failed to clean up mount directory {:?}: {}", &self.mountpoint, e)
+        }
+    }
+}
+
+fn run_authfs(
+    mountpoint: &OsStr,
+    in_fds: &[InputFdAnnotation],
+    out_fds: &[OutputFdAnnotation],
+    debuggable: bool,
+) -> Result<SharedChild> {
+    let mut args = vec![mountpoint.to_owned(), OsString::from("--cid=2")];
+    args.push(OsString::from("-o"));
+    args.push(OsString::from("fscontext=u:object_r:authfs_fuse:s0"));
+    for conf in in_fds {
+        // TODO(b/185178698): Many input files need to be signed and verified.
+        // or can we use debug cert for now, which is better than nothing?
+        args.push(OsString::from("--remote-ro-file-unverified"));
+        args.push(OsString::from(format!("{}:{}:{}", conf.fd, conf.fd, conf.fileSize)));
+    }
+    for conf in out_fds {
+        args.push(OsString::from("--remote-new-rw-file"));
+        args.push(OsString::from(format!("{}:{}", conf.fd, conf.fd)));
+    }
+    if debuggable {
+        args.push(OsString::from("--debug"));
+    }
+
+    let mut command = Command::new(AUTHFS_BIN);
+    command.args(&args);
+    SharedChild::spawn(&mut command).context("Spawn authfs")
+}
+
+fn wait_until_authfs_ready(mountpoint: &OsStr) -> Result<()> {
+    let start_time = Instant::now();
+    loop {
+        if is_fuse(mountpoint)? {
+            break;
+        }
+        if start_time.elapsed() > AUTHFS_SETUP_TIMEOUT_SEC {
+            bail!("Time out mounting authfs");
+        }
+        sleep(AUTHFS_SETUP_POLL_INTERVAL_MS);
+    }
+    Ok(())
+}
+
+fn is_fuse(path: &OsStr) -> Result<bool> {
+    Ok(statfs(path)?.filesystem_type() == FUSE_SUPER_MAGIC)
+}
diff --git a/authfs/service/src/common.rs b/authfs/service/src/common.rs
new file mode 100644
index 0000000..00efe9e
--- /dev/null
+++ b/authfs/service/src/common.rs
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::ffi::CString;
+
+use authfs_aidl_interface::binder::{ExceptionCode, Status};
+
+/// Helper function to create a binder exception.
+pub fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
+    Status::new_exception(exception, CString::new(message.as_ref()).as_deref().ok())
+}
diff --git a/authfs/service/src/main.rs b/authfs/service/src/main.rs
new file mode 100644
index 0000000..e426734
--- /dev/null
+++ b/authfs/service/src/main.rs
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! AuthFsService facilitates authfs mounting (which is a privileged operation) for the client. The
+//! client will provide an `AuthFsConfig` which includes the backend address (only port for now) and
+//! the filesystem configuration. It is up to the client to ensure the backend server is running. On
+//! a successful mount, the client receives an `IAuthFs`, and through the binder object, the client
+//! is able to retrieve "remote file descriptors".
+
+mod authfs;
+mod common;
+
+use anyhow::{bail, Context, Result};
+use log::*;
+use std::ffi::OsString;
+use std::fs::{create_dir, read_dir, remove_dir_all, remove_file};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use crate::common::new_binder_exception;
+use authfs_aidl_interface::aidl::com::android::virt::fs::AuthFsConfig::AuthFsConfig;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFs::IAuthFs;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFsService::{
+    BnAuthFsService, IAuthFsService,
+};
+use authfs_aidl_interface::binder::{
+    self, add_service, BinderFeatures, ExceptionCode, Interface, ProcessState, Strong,
+};
+
+const SERVICE_NAME: &str = "authfs_service";
+const SERVICE_ROOT: &str = "/data/misc/authfs";
+
+/// Implementation of `IAuthFsService`.
+pub struct AuthFsService {
+    serial_number: AtomicUsize,
+    debuggable: bool,
+}
+
+impl Interface for AuthFsService {}
+
+impl IAuthFsService for AuthFsService {
+    fn mount(&self, config: &AuthFsConfig) -> binder::Result<Strong<dyn IAuthFs>> {
+        self.validate(config)?;
+
+        let mountpoint = self.get_next_mount_point();
+
+        // The directory is supposed to be deleted when `AuthFs` is dropped.
+        create_dir(&mountpoint).map_err(|e| {
+            new_binder_exception(
+                ExceptionCode::SERVICE_SPECIFIC,
+                format!("Cannot create mount directory {:?}: {}", &mountpoint, e),
+            )
+        })?;
+
+        authfs::AuthFs::mount_and_wait(mountpoint, config, self.debuggable).map_err(|e| {
+            new_binder_exception(
+                ExceptionCode::SERVICE_SPECIFIC,
+                format!("mount_and_wait failed: {:?}", e),
+            )
+        })
+    }
+}
+
+impl AuthFsService {
+    fn new_binder(debuggable: bool) -> Strong<dyn IAuthFsService> {
+        let service = AuthFsService { serial_number: AtomicUsize::new(1), debuggable };
+        BnAuthFsService::new_binder(service, BinderFeatures::default())
+    }
+
+    fn validate(&self, config: &AuthFsConfig) -> binder::Result<()> {
+        if config.port < 0 {
+            return Err(new_binder_exception(
+                ExceptionCode::ILLEGAL_ARGUMENT,
+                format!("Invalid port: {}", config.port),
+            ));
+        }
+        Ok(())
+    }
+
+    fn get_next_mount_point(&self) -> OsString {
+        let previous = self.serial_number.fetch_add(1, Ordering::Relaxed);
+        OsString::from(format!("{}/{}", SERVICE_ROOT, previous))
+    }
+}
+
+fn clean_up_working_directory() -> Result<()> {
+    for entry in read_dir(SERVICE_ROOT)? {
+        let entry = entry?;
+        let path = entry.path();
+        if path.is_dir() {
+            remove_dir_all(path)?;
+        } else if path.is_file() {
+            remove_file(path)?;
+        } else {
+            bail!("Unrecognized path type: {:?}", path);
+        }
+    }
+    Ok(())
+}
+
+fn main() -> Result<()> {
+    let debuggable = env!("TARGET_BUILD_VARIANT") != "user";
+    let log_level = if debuggable { log::Level::Trace } else { log::Level::Info };
+    android_logger::init_once(
+        android_logger::Config::default().with_tag("authfs_service").with_min_level(log_level),
+    );
+
+    clean_up_working_directory()?;
+
+    ProcessState::start_thread_pool();
+
+    let service = AuthFsService::new_binder(debuggable).as_binder();
+    add_service(SERVICE_NAME, service)
+        .with_context(|| format!("Failed to register service {}", SERVICE_NAME))?;
+    debug!("{} is running", SERVICE_NAME);
+
+    ProcessState::join_thread_pool();
+    bail!("Unexpected exit after join_thread_pool")
+}
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index 77743bd..1b0e935 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -380,6 +380,7 @@
 pub fn loop_forever(
     file_pool: BTreeMap<Inode, FileConfig>,
     mountpoint: &Path,
+    extra_options: &Option<String>,
 ) -> Result<(), fuse::Error> {
     let max_read: u32 = 65536;
     let max_write: u32 = 65536;
@@ -389,20 +390,20 @@
         .open("/dev/fuse")
         .expect("Failed to open /dev/fuse");
 
-    fuse::mount(
-        mountpoint,
-        "authfs",
-        libc::MS_NOSUID | libc::MS_NODEV,
-        &[
-            MountOption::FD(dev_fuse.as_raw_fd()),
-            MountOption::RootMode(libc::S_IFDIR | libc::S_IXUSR | libc::S_IXGRP | libc::S_IXOTH),
-            MountOption::AllowOther,
-            MountOption::UserId(0),
-            MountOption::GroupId(0),
-            MountOption::MaxRead(max_read),
-        ],
-    )
-    .expect("Failed to mount fuse");
+    let mut mount_options = vec![
+        MountOption::FD(dev_fuse.as_raw_fd()),
+        MountOption::RootMode(libc::S_IFDIR | libc::S_IXUSR | libc::S_IXGRP | libc::S_IXOTH),
+        MountOption::AllowOther,
+        MountOption::UserId(0),
+        MountOption::GroupId(0),
+        MountOption::MaxRead(max_read),
+    ];
+    if let Some(value) = extra_options {
+        mount_options.push(MountOption::Extra(value));
+    }
+
+    fuse::mount(mountpoint, "authfs", libc::MS_NOSUID | libc::MS_NODEV, &mount_options)
+        .expect("Failed to mount fuse");
 
     fuse::worker::start_message_loop(
         dev_fuse,
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index d583f92..32ea3de 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -56,6 +56,10 @@
     #[structopt(long)]
     cid: Option<u32>,
 
+    /// Extra options to FUSE
+    #[structopt(short = "o")]
+    extra_options: Option<String>,
+
     /// A read-only remote file with integrity check. Can be multiple.
     ///
     /// For example, `--remote-verified-file 5:10:1234:/path/to/cert` tells the filesystem to
@@ -339,6 +343,6 @@
     );
 
     let file_pool = prepare_file_pool(&args)?;
-    fusefs::loop_forever(file_pool, &args.mount_point)?;
+    fusefs::loop_forever(file_pool, &args.mount_point, &args.extra_options)?;
     bail!("Unexpected exit after the handler loop")
 }
diff --git a/compos/Android.bp b/compos/Android.bp
index ec3f67f..e29387d 100644
--- a/compos/Android.bp
+++ b/compos/Android.bp
@@ -30,53 +30,14 @@
     name: "compsvc",
     srcs: ["src/compsvc_main.rs"],
     rustlibs: [
-        "compos_aidl_interface-rust",
-        "libandroid_logger",
-        "libanyhow",
-        "libbinder_rpc_unstable_bindgen",
-        "libbinder_rs",
-        "libclap",
-        "liblog_rust",
-        "libminijail_rust",
-    ],
-    prefer_rlib: true,
-    shared_libs: [
-        "libbinder_rpc_unstable",
-    ],
-    apex_available: [
-        "com.android.compos",
-    ],
-}
-
-rust_binary {
-    name: "compsvc_worker",
-    srcs: ["src/compsvc_worker.rs"],
-    rustlibs: [
-        "libandroid_logger",
-        "libanyhow",
-        "libclap",
-        "liblog_rust",
-        "libminijail_rust",
-        "libnix",
-    ],
-    prefer_rlib: true,
-    apex_available: [
-        "com.android.compos",
-    ],
-}
-
-rust_binary {
-    name: "compos_key_main",
-    srcs: ["src/compos_key_main.rs"],
-    edition: "2018",
-    rustlibs: [
-        "compos_aidl_interface-rust",
-        "android.system.keystore2-V1-rust",
         "android.hardware.security.keymint-V1-rust",
+        "android.system.keystore2-V1-rust",
+        "authfs_aidl_interface-rust",
+        "compos_aidl_interface-rust",
         "libandroid_logger",
         "libanyhow",
-        "libbinder_rs",
         "libbinder_rpc_unstable_bindgen",
+        "libbinder_rs",
         "libclap",
         "liblog_rust",
         "libminijail_rust",
@@ -87,5 +48,7 @@
     shared_libs: [
         "libbinder_rpc_unstable",
     ],
-    apex_available: ["com.android.compos"],
+    apex_available: [
+        "com.android.compos",
+    ],
 }
diff --git a/compos/aidl/com/android/compos/ICompOsKeyService.aidl b/compos/aidl/com/android/compos/ICompOsService.aidl
similarity index 72%
rename from compos/aidl/com/android/compos/ICompOsKeyService.aidl
rename to compos/aidl/com/android/compos/ICompOsService.aidl
index eb2caa7..ec4f0f6 100644
--- a/compos/aidl/com/android/compos/ICompOsKeyService.aidl
+++ b/compos/aidl/com/android/compos/ICompOsService.aidl
@@ -17,10 +17,23 @@
 package com.android.compos;
 
 import com.android.compos.CompOsKeyData;
-import com.android.compos.ICompService;
+import com.android.compos.Metadata;
 
 /** {@hide} */
-interface ICompOsKeyService {
+interface ICompOsService {
+    /**
+     * Execute a command composed of the args, in a context that may be specified in the Metadata,
+     * e.g. with file descriptors pre-opened. The service is responsible to decide what executables
+     * it may run.
+     *
+     * @param args The command line arguments to run. The 0-th arg is normally the program name,
+     *             which may not be used by the service. The service may be configured to always use
+     *             a fixed executable, or possibly use the 0-th arg as the executable lookup hint.
+     * @param metadata Additional information of the execution
+     * @return exit code of the program
+     */
+    byte execute(in String[] args, in Metadata metadata);
+
     /**
      * Generate a new public/private key pair suitable for signing CompOs output files.
      *
@@ -49,13 +62,4 @@
      */
     // STOPSHIP(b/193241041): We must not expose this from the PVM.
     byte[] sign(in byte[] keyBlob, in byte[] data);
-
-    /**
-     * Return an instance of ICompService that will sign output files with a given encrypted
-     * private key.
-     *
-     * @param keyBlob The encrypted blob containing the private key, as returned by
-     *                generateSigningKey().
-     */
-    ICompService getCompService(in byte[] keyBlob);
 }
diff --git a/compos/aidl/com/android/compos/ICompService.aidl b/compos/aidl/com/android/compos/ICompService.aidl
deleted file mode 100644
index 0e18442..0000000
--- a/compos/aidl/com/android/compos/ICompService.aidl
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.compos;
-
-import com.android.compos.Metadata;
-
-/** {@hide} */
-interface ICompService {
-    /**
-     * Execute a command composed of the args, in a context that may be specified in the Metadata,
-     * e.g. with file descriptors pre-opened. The service is responsible to decide what executables
-     * it may run.
-     *
-     * @param args The command line arguments to run. The 0-th args is normally the program name,
-     *             which may not be used by the service. The service may be configured to always use
-     *             a fixed executable, or possibly use the 0-th args are the executable lookup hint.
-     * @param metadata Additional information of the execution
-     * @return exit code of the program
-     */
-    byte execute(in String[] args, in Metadata metadata);
-}
diff --git a/compos/apex/Android.bp b/compos/apex/Android.bp
index 061c362..5b21802 100644
--- a/compos/apex/Android.bp
+++ b/compos/apex/Android.bp
@@ -39,9 +39,7 @@
 
     binaries: [
         "compos_key_cmd",
-        "compos_key_main",
         "compsvc",
-        "compsvc_worker",
         "pvm_exec",
     ],
 
diff --git a/compos/apk/assets/key_service_vm_config.json b/compos/apk/assets/key_service_vm_config.json
deleted file mode 100644
index 3b6b88c..0000000
--- a/compos/apk/assets/key_service_vm_config.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-    "version": 1,
-    "os": {
-        "name": "microdroid"
-    },
-    "task": {
-        "type": "executable",
-        "command": "/apex/com.android.compos/bin/compos_key_main",
-        "args": [
-            "--rpc-binder"
-        ]
-    },
-    "apexes": [
-        {
-            "name": "com.android.compos"
-        }
-    ]
-}
\ No newline at end of file
diff --git a/compos/apk/assets/vm_config.json b/compos/apk/assets/vm_config.json
index f9f1f90..3be8a8a 100644
--- a/compos/apk/assets/vm_config.json
+++ b/compos/apk/assets/vm_config.json
@@ -7,8 +7,7 @@
     "type": "executable",
     "command": "/apex/com.android.compos/bin/compsvc",
     "args": [
-      "--rpc-binder",
-      "/apex/com.android.art/bin/dex2oat64"
+      "--rpc-binder"
     ]
   },
   "apexes": [
diff --git a/compos/compos_key_cmd/compos_key_cmd.cpp b/compos/compos_key_cmd/compos_key_cmd.cpp
index 84a0a7c..04ba1d0 100644
--- a/compos/compos_key_cmd/compos_key_cmd.cpp
+++ b/compos/compos_key_cmd/compos_key_cmd.cpp
@@ -16,7 +16,7 @@
 
 #include <aidl/android/system/virtualizationservice/BnVirtualMachineCallback.h>
 #include <aidl/android/system/virtualizationservice/IVirtualizationService.h>
-#include <aidl/com/android/compos/ICompOsKeyService.h>
+#include <aidl/com/android/compos/ICompOsService.h>
 #include <android-base/file.h>
 #include <android-base/logging.h>
 #include <android-base/result.h>
@@ -56,7 +56,7 @@
 using aidl::android::system::virtualizationservice::IVirtualMachineCallback;
 using aidl::android::system::virtualizationservice::VirtualMachineConfig;
 using aidl::com::android::compos::CompOsKeyData;
-using aidl::com::android::compos::ICompOsKeyService;
+using aidl::com::android::compos::ICompOsService;
 using android::base::ErrnoError;
 using android::base::Error;
 using android::base::Result;
@@ -66,7 +66,7 @@
 using ndk::ScopedFileDescriptor;
 using ndk::SharedRefBase;
 
-constexpr unsigned int kRpcPort = 3142;
+constexpr unsigned int kRpcPort = 6432;
 
 constexpr const char* kConfigApkPath =
         "/apex/com.android.compos/app/CompOSPayloadApp/CompOSPayloadApp.apk";
@@ -89,11 +89,11 @@
     return std::vector<uint8_t>(str.begin(), str.end());
 }
 
-static std::shared_ptr<ICompOsKeyService> getService(int cid) {
+static std::shared_ptr<ICompOsService> getService(int cid) {
     LOG(INFO) << "Connecting to cid " << cid;
     ndk::SpAIBinder binder(cid == 0 ? AServiceManager_getService("android.system.composkeyservice")
                                     : RpcClient(cid, kRpcPort));
-    return ICompOsKeyService::fromBinder(binder);
+    return ICompOsService::fromBinder(binder);
 }
 
 namespace {
@@ -337,7 +337,7 @@
     return result;
 }
 
-static Result<void> signFile(ICompOsKeyService* service, const std::vector<uint8_t>& key_blob,
+static Result<void> signFile(ICompOsService* service, const std::vector<uint8_t>& key_blob,
                              const std::string& file) {
     unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
     if (!fd.ok()) {
diff --git a/compos/src/authfs.rs b/compos/src/authfs.rs
deleted file mode 100644
index ce9aaf8..0000000
--- a/compos/src/authfs.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use anyhow::{bail, Context, Result};
-use log::warn;
-use minijail::Minijail;
-use nix::sys::statfs::{statfs, FsType};
-use std::fs::{File, OpenOptions};
-use std::path::Path;
-use std::thread::sleep;
-use std::time::{Duration, Instant};
-
-const AUTHFS_BIN: &str = "/system/bin/authfs";
-const AUTHFS_SETUP_POLL_INTERVAL_MS: Duration = Duration::from_millis(50);
-const AUTHFS_SETUP_TIMEOUT_SEC: Duration = Duration::from_secs(10);
-const FUSE_SUPER_MAGIC: FsType = FsType(0x65735546);
-
-/// The number that hints the future file descriptor. These are not really file descriptor, but
-/// represents the file descriptor number to pass to the task.
-pub type PseudoRawFd = i32;
-
-/// Annotation of input file descriptor.
-#[derive(Debug)]
-pub struct InFdAnnotation {
-    /// A number/file descriptor that is supposed to represent a remote file.
-    pub fd: PseudoRawFd,
-
-    /// The file size of the remote file. Remote input files are supposed to be immutable and
-    /// to be verified with fs-verity by authfs.
-    pub file_size: u64,
-}
-
-/// Annotation of output file descriptor.
-#[derive(Debug)]
-pub struct OutFdAnnotation {
-    /// A number/file descriptor that is supposed to represent a remote file.
-    pub fd: PseudoRawFd,
-}
-
-/// An `AuthFs` instance is supposed to be backed by the `authfs` process. When the lifetime of the
-/// instance is over, the process is terminated and the FUSE is unmounted.
-pub struct AuthFs {
-    mountpoint: String,
-    jail: Minijail,
-}
-
-impl AuthFs {
-    /// Mount an authfs at `mountpoint` with specified FD annotations.
-    pub fn mount_and_wait(
-        mountpoint: &str,
-        in_fds: &[InFdAnnotation],
-        out_fds: &[OutFdAnnotation],
-        debuggable: bool,
-    ) -> Result<AuthFs> {
-        let jail = jail_authfs(mountpoint, in_fds, out_fds, debuggable)?;
-        wait_until_authfs_ready(mountpoint)?;
-        Ok(AuthFs { mountpoint: mountpoint.to_string(), jail })
-    }
-
-    /// Open a file at authfs' root directory.
-    pub fn open_file(&self, basename: PseudoRawFd, writable: bool) -> Result<File> {
-        OpenOptions::new()
-            .read(true)
-            .write(writable)
-            .open(format!("{}/{}", self.mountpoint, basename))
-            .with_context(|| format!("open authfs file {}", basename))
-    }
-}
-
-impl Drop for AuthFs {
-    fn drop(&mut self) {
-        if let Err(e) = self.jail.kill() {
-            if !matches!(e, minijail::Error::Killed(_)) {
-                warn!("Failed to kill authfs: {}", e);
-            }
-        }
-    }
-}
-
-fn jail_authfs(
-    mountpoint: &str,
-    in_fds: &[InFdAnnotation],
-    out_fds: &[OutFdAnnotation],
-    debuggable: bool,
-) -> Result<Minijail> {
-    // TODO(b/185175567): Run in a more restricted sandbox.
-    let jail = Minijail::new()?;
-
-    let mut args = vec![
-        AUTHFS_BIN.to_string(),
-        mountpoint.to_string(),
-        "--cid=2".to_string(), // Always use host unless we need to support other cases
-    ];
-    for conf in in_fds {
-        // TODO(b/185178698): Many input files need to be signed and verified.
-        // or can we use debug cert for now, which is better than nothing?
-        args.push("--remote-ro-file-unverified".to_string());
-        args.push(format!("{}:{}:{}", conf.fd, conf.fd, conf.file_size));
-    }
-    for conf in out_fds {
-        args.push("--remote-new-rw-file".to_string());
-        args.push(format!("{}:{}", conf.fd, conf.fd));
-    }
-
-    let preserve_fds = if debuggable {
-        vec![1, 2] // inherit/redirect stdout/stderr for debugging
-    } else {
-        vec![]
-    };
-
-    let _pid = jail.run(Path::new(AUTHFS_BIN), &preserve_fds, &args)?;
-    Ok(jail)
-}
-
-fn wait_until_authfs_ready(mountpoint: &str) -> Result<()> {
-    let start_time = Instant::now();
-    loop {
-        if is_fuse(mountpoint)? {
-            break;
-        }
-        if start_time.elapsed() > AUTHFS_SETUP_TIMEOUT_SEC {
-            bail!("Time out mounting authfs");
-        }
-        sleep(AUTHFS_SETUP_POLL_INTERVAL_MS);
-    }
-    Ok(())
-}
-
-fn is_fuse(path: &str) -> Result<bool> {
-    Ok(statfs(path)?.filesystem_type() == FUSE_SUPER_MAGIC)
-}
diff --git a/compos/src/compilation.rs b/compos/src/compilation.rs
new file mode 100644
index 0000000..53302e8
--- /dev/null
+++ b/compos/src/compilation.rs
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::{bail, Context, Result};
+use log::error;
+use minijail::{self, Minijail};
+use std::os::unix::io::AsRawFd;
+use std::path::Path;
+
+use authfs_aidl_interface::aidl::com::android::virt::fs::{
+    AuthFsConfig::AuthFsConfig, IAuthFs::IAuthFs, IAuthFsService::IAuthFsService,
+    InputFdAnnotation::InputFdAnnotation, OutputFdAnnotation::OutputFdAnnotation,
+};
+use authfs_aidl_interface::binder::{ParcelFileDescriptor, Strong};
+use compos_aidl_interface::aidl::com::android::compos::Metadata::Metadata;
+
+/// The number that represents the file descriptor number expected by the task. The number may be
+/// meaningless in the current process.
+pub type PseudoRawFd = i32;
+
+/// Runs the compiler with given flags with file descriptors described in `metadata` retrieved via
+/// `authfs_service`. Returns exit code of the compiler process.
+pub fn compile(
+    compiler_path: &Path,
+    compiler_args: &[String],
+    authfs_service: Strong<dyn IAuthFsService>,
+    metadata: &Metadata,
+) -> Result<i8> {
+    // Mount authfs (via authfs_service).
+    let authfs_config = build_authfs_config(metadata);
+    let authfs = authfs_service.mount(&authfs_config)?;
+
+    // The task expects to receive FD numbers that match its flags (e.g. --zip-fd=42) prepared
+    // on the host side. Since the local FD opened from authfs (e.g. /authfs/42) may not match
+    // the task's expectation, prepare a FD mapping and let minijail prepare the correct FD
+    // setup.
+    let fd_mapping =
+        open_authfs_files_for_fd_mapping(&authfs, &authfs_config).context("Open on authfs")?;
+
+    let jail =
+        spawn_jailed_task(compiler_path, compiler_args, fd_mapping).context("Spawn dex2oat")?;
+    let jail_result = jail.wait();
+
+    // Be explicit about the lifetime, which should last at least until the task is finished.
+    drop(authfs);
+
+    match jail_result {
+        Ok(()) => Ok(0), // TODO(b/161471326): Sign the output on success.
+        Err(minijail::Error::ReturnCode(exit_code)) => {
+            error!("Task failed with exit code {}", exit_code);
+            Ok(exit_code as i8)
+        }
+        Err(e) => {
+            bail!("Unexpected minijail error: {}", e)
+        }
+    }
+}
+
+fn build_authfs_config(metadata: &Metadata) -> AuthFsConfig {
+    AuthFsConfig {
+        port: 3264, // TODO: support dynamic port
+        inputFdAnnotations: metadata
+            .input_fd_annotations
+            .iter()
+            .map(|x| InputFdAnnotation { fd: x.fd, fileSize: x.file_size })
+            .collect(),
+        outputFdAnnotations: metadata
+            .output_fd_annotations
+            .iter()
+            .map(|x| OutputFdAnnotation { fd: x.fd })
+            .collect(),
+    }
+}
+
+fn open_authfs_files_for_fd_mapping(
+    authfs: &Strong<dyn IAuthFs>,
+    config: &AuthFsConfig,
+) -> Result<Vec<(ParcelFileDescriptor, PseudoRawFd)>> {
+    let mut fd_mapping = Vec::new();
+
+    let results: Result<Vec<_>> = config
+        .inputFdAnnotations
+        .iter()
+        .map(|annotation| Ok((authfs.openFile(annotation.fd, false)?, annotation.fd)))
+        .collect();
+    fd_mapping.append(&mut results?);
+
+    let results: Result<Vec<_>> = config
+        .outputFdAnnotations
+        .iter()
+        .map(|annotation| Ok((authfs.openFile(annotation.fd, true)?, annotation.fd)))
+        .collect();
+    fd_mapping.append(&mut results?);
+
+    Ok(fd_mapping)
+}
+
+fn spawn_jailed_task(
+    executable: &Path,
+    args: &[String],
+    fd_mapping: Vec<(ParcelFileDescriptor, PseudoRawFd)>,
+) -> Result<Minijail> {
+    // TODO(b/185175567): Run in a more restricted sandbox.
+    let jail = Minijail::new()?;
+    let preserve_fds: Vec<_> = fd_mapping.iter().map(|(f, id)| (f.as_raw_fd(), *id)).collect();
+    let _pid = jail.run_remap(executable, preserve_fds.as_slice(), args)?;
+    Ok(jail)
+}
diff --git a/compos/src/compos_key_main.rs b/compos/src/compos_key_main.rs
deleted file mode 100644
index 9d57e4d..0000000
--- a/compos/src/compos_key_main.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Run the CompOS key management service, either in the host using normal Binder or in the
-//! VM using RPC Binder.
-
-mod compos_key_service;
-mod compsvc;
-mod signer;
-
-use crate::compos_key_service::KeystoreNamespace;
-use anyhow::{bail, Context, Result};
-use binder::unstable_api::AsNative;
-use compos_aidl_interface::binder::{add_service, ProcessState};
-use log::{info, Level};
-
-const LOG_TAG: &str = "CompOsKeyService";
-const OUR_SERVICE_NAME: &str = "android.system.composkeyservice";
-const OUR_VSOCK_PORT: u32 = 3142;
-
-fn main() -> Result<()> {
-    android_logger::init_once(
-        android_logger::Config::default().with_tag(LOG_TAG).with_min_level(Level::Info),
-    );
-
-    let matches = clap::App::new("compos_key_main")
-        .arg(clap::Arg::with_name("rpc_binder").long("rpc-binder"))
-        .get_matches();
-
-    let rpc_binder = matches.is_present("rpc_binder");
-
-    let key_namespace =
-        if rpc_binder { KeystoreNamespace::VmPayload } else { KeystoreNamespace::Odsign };
-    let mut service = compos_key_service::new(key_namespace)?.as_binder();
-
-    if rpc_binder {
-        info!("Starting RPC service");
-        // SAFETY: Service ownership is transferring to the server and won't be valid afterward.
-        // Plus the binder objects are threadsafe.
-        let retval = unsafe {
-            binder_rpc_unstable_bindgen::RunRpcServer(
-                service.as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder,
-                OUR_VSOCK_PORT,
-            )
-        };
-        if retval {
-            info!("RPC server has shut down gracefully");
-        } else {
-            bail!("Premature termination of RPC server");
-        }
-    } else {
-        info!("Starting binder service");
-        add_service(OUR_SERVICE_NAME, service).context("Adding service failed")?;
-        info!("It's alive!");
-
-        ProcessState::join_thread_pool();
-    }
-
-    Ok(())
-}
diff --git a/compos/src/compos_key_service.rs b/compos/src/compos_key_service.rs
index 779b798..92b04f2 100644
--- a/compos/src/compos_key_service.rs
+++ b/compos/src/compos_key_service.rs
@@ -16,8 +16,6 @@
 //! access to Keystore in the VM, but not persistent storage; instead the host stores the key
 //! on our behalf via this service.
 
-use crate::compsvc;
-use crate::signer::Signer;
 use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
     Algorithm::Algorithm, Digest::Digest, KeyParameter::KeyParameter,
     KeyParameterValue::KeyParameterValue, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
@@ -27,20 +25,12 @@
     Domain::Domain, IKeystoreSecurityLevel::IKeystoreSecurityLevel,
     IKeystoreService::IKeystoreService, KeyDescriptor::KeyDescriptor,
 };
+use android_system_keystore2::binder::{wait_for_interface, Strong};
 use anyhow::{anyhow, Context, Result};
-use compos_aidl_interface::aidl::com::android::compos::{
-    CompOsKeyData::CompOsKeyData,
-    ICompOsKeyService::{BnCompOsKeyService, ICompOsKeyService},
-    ICompService::ICompService,
-};
-use compos_aidl_interface::binder::{
-    self, wait_for_interface, BinderFeatures, ExceptionCode, Interface, Status, Strong,
-};
-use log::warn;
+use compos_aidl_interface::aidl::com::android::compos::CompOsKeyData::CompOsKeyData;
 use ring::rand::{SecureRandom, SystemRandom};
 use ring::signature;
 use scopeguard::ScopeGuard;
-use std::ffi::CString;
 
 /// Keystore2 namespace IDs, used for access control to keys.
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
@@ -52,23 +42,6 @@
     VmPayload = 140,
 }
 
-/// Constructs a binder object that implements ICompOsKeyService. namespace is the Keystore2 namespace to
-/// use for the keys.
-pub fn new(namespace: KeystoreNamespace) -> Result<Strong<dyn ICompOsKeyService>> {
-    let keystore_service = wait_for_interface::<dyn IKeystoreService>(KEYSTORE_SERVICE_NAME)
-        .context("No Keystore service")?;
-
-    let service = CompOsKeyService {
-        namespace,
-        random: SystemRandom::new(),
-        security_level: keystore_service
-            .getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT)
-            .context("Getting SecurityLevel failed")?,
-    };
-
-    Ok(BnCompOsKeyService::new_binder(service, BinderFeatures::default()))
-}
-
 const KEYSTORE_SERVICE_NAME: &str = "android.system.keystore2.IKeystoreService/default";
 const PURPOSE_SIGN: KeyParameter =
     KeyParameter { tag: Tag::PURPOSE, value: KeyParameterValue::KeyPurpose(KeyPurpose::SIGN) };
@@ -90,65 +63,31 @@
 const BLOB_KEY_DESCRIPTOR: KeyDescriptor =
     KeyDescriptor { domain: Domain::BLOB, nspace: 0, alias: None, blob: None };
 
+/// An internal service for CompOS key management.
 #[derive(Clone)]
-struct CompOsKeyService {
+pub struct CompOsKeyService {
     namespace: KeystoreNamespace,
     random: SystemRandom,
     security_level: Strong<dyn IKeystoreSecurityLevel>,
 }
 
-impl Interface for CompOsKeyService {}
+impl CompOsKeyService {
+    pub fn new(rpc_binder: bool) -> Result<Self> {
+        let keystore_service = wait_for_interface::<dyn IKeystoreService>(KEYSTORE_SERVICE_NAME)
+            .context("No Keystore service")?;
 
-impl ICompOsKeyService for CompOsKeyService {
-    fn generateSigningKey(&self) -> binder::Result<CompOsKeyData> {
-        self.do_generate()
-            .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
-    }
-
-    fn verifySigningKey(&self, key_blob: &[u8], public_key: &[u8]) -> binder::Result<bool> {
-        Ok(if let Err(e) = self.do_verify(key_blob, public_key) {
-            warn!("Signing key verification failed: {}", e.to_string());
-            false
-        } else {
-            true
+        let namespace =
+            if rpc_binder { KeystoreNamespace::VmPayload } else { KeystoreNamespace::Odsign };
+        Ok(CompOsKeyService {
+            namespace,
+            random: SystemRandom::new(),
+            security_level: keystore_service
+                .getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT)
+                .context("Getting SecurityLevel failed")?,
         })
     }
 
-    fn sign(&self, key_blob: &[u8], data: &[u8]) -> binder::Result<Vec<u8>> {
-        self.do_sign(key_blob, data)
-            .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
-    }
-
-    fn getCompService(&self, key_blob: &[u8]) -> binder::Result<Strong<dyn ICompService>> {
-        let signer =
-            Box::new(CompOsSigner { key_blob: key_blob.to_owned(), key_service: self.clone() });
-        let debuggable = true;
-        Ok(compsvc::new_binder(
-            "/apex/com.android.art/bin/dex2oat64".to_owned(),
-            debuggable,
-            Some(signer),
-        ))
-    }
-}
-
-/// Constructs a new Binder error `Status` with the given `ExceptionCode` and message.
-fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
-    Status::new_exception(exception, CString::new(message.as_ref()).ok().as_deref())
-}
-
-struct CompOsSigner {
-    key_blob: Vec<u8>,
-    key_service: CompOsKeyService,
-}
-
-impl Signer for CompOsSigner {
-    fn sign(&self, data: &[u8]) -> Result<Vec<u8>> {
-        self.key_service.do_sign(&self.key_blob, data)
-    }
-}
-
-impl CompOsKeyService {
-    fn do_generate(&self) -> Result<CompOsKeyData> {
+    pub fn do_generate(&self) -> Result<CompOsKeyData> {
         let key_descriptor = KeyDescriptor { nspace: self.namespace as i64, ..BLOB_KEY_DESCRIPTOR };
         let key_parameters =
             [PURPOSE_SIGN, ALGORITHM, PADDING, DIGEST, KEY_SIZE, EXPONENT, NO_AUTH_REQUIRED];
@@ -168,7 +107,7 @@
         }
     }
 
-    fn do_verify(&self, key_blob: &[u8], public_key: &[u8]) -> Result<()> {
+    pub fn do_verify(&self, key_blob: &[u8], public_key: &[u8]) -> Result<()> {
         let mut data = [0u8; 32];
         self.random.fill(&mut data).context("No random data")?;
 
@@ -181,7 +120,7 @@
         Ok(())
     }
 
-    fn do_sign(&self, key_blob: &[u8], data: &[u8]) -> Result<Vec<u8>> {
+    pub fn do_sign(&self, key_blob: &[u8], data: &[u8]) -> Result<Vec<u8>> {
         let key_descriptor = KeyDescriptor {
             nspace: self.namespace as i64,
             blob: Some(key_blob.to_vec()),
diff --git a/compos/src/compsvc.rs b/compos/src/compsvc.rs
index ae242de..b5edd98 100644
--- a/compos/src/compsvc.rs
+++ b/compos/src/compsvc.rs
@@ -14,121 +14,83 @@
  * limitations under the License.
  */
 
-//! compsvc is a service to run computational tasks in a PVM upon request. It is able to set up
-//! file descriptors backed by fd_server and pass the file descriptors to the actual tasks for
-//! read/write. The service also attempts to sandbox the execution so that one task cannot leak or
-//! impact future tasks.
-//!
-//! The current architecture / process hierarchy looks like:
-//! - compsvc (handle requests)
-//!   - compsvc_worker (for environment setup)
-//!     - authfs (fd translation)
-//!     - actual task
+//! compsvc is a service to run compilation tasks in a PVM upon request. It is able to set up
+//! file descriptors backed by authfs (via authfs_service) and pass the file descriptors to the
+//! actual compiler.
 
 use anyhow::Result;
-use log::error;
-use minijail::{self, Minijail};
+use log::warn;
+use std::ffi::CString;
 use std::path::PathBuf;
 
-use crate::signer::Signer;
-use compos_aidl_interface::aidl::com::android::compos::ICompService::{
-    BnCompService, ICompService,
+use crate::compilation::compile;
+use crate::compos_key_service::CompOsKeyService;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFsService::IAuthFsService;
+use compos_aidl_interface::aidl::com::android::compos::{
+    CompOsKeyData::CompOsKeyData,
+    ICompOsService::{BnCompOsService, ICompOsService},
+    Metadata::Metadata,
 };
-use compos_aidl_interface::aidl::com::android::compos::Metadata::Metadata;
 use compos_aidl_interface::binder::{
-    BinderFeatures, Interface, Result as BinderResult, Status, StatusCode, Strong,
+    BinderFeatures, ExceptionCode, Interface, Result as BinderResult, Status, Strong,
 };
 
-const WORKER_BIN: &str = "/apex/com.android.compos/bin/compsvc_worker";
+const AUTHFS_SERVICE_NAME: &str = "authfs_service";
+const DEX2OAT_PATH: &str = "/apex/com.android.art/bin/dex2oat64";
 
-// TODO: Replace with a valid directory setup in the VM.
-const AUTHFS_MOUNTPOINT: &str = "/data/local/tmp";
-
-/// Constructs a binder object that implements ICompService. task_bin is the path to the binary that will
-/// be run when execute() is called. If debuggable is true then stdout/stderr from the binary will be
-/// available for debugging.
-pub fn new_binder(
-    task_bin: String,
-    debuggable: bool,
-    signer: Option<Box<dyn Signer>>,
-) -> Strong<dyn ICompService> {
-    let service = CompService {
-        worker_bin: PathBuf::from(WORKER_BIN.to_owned()),
-        task_bin,
-        debuggable,
-        signer,
+/// Constructs a binder object that implements ICompOsService.
+pub fn new_binder(rpc_binder: bool) -> Result<Strong<dyn ICompOsService>> {
+    let service = CompOsService {
+        dex2oat_path: PathBuf::from(DEX2OAT_PATH),
+        key_service: CompOsKeyService::new(rpc_binder)?,
     };
-    BnCompService::new_binder(service, BinderFeatures::default())
+    Ok(BnCompOsService::new_binder(service, BinderFeatures::default()))
 }
 
-struct CompService {
-    task_bin: String,
-    worker_bin: PathBuf,
-    debuggable: bool,
-    #[allow(dead_code)] // TODO: Make use of this
-    signer: Option<Box<dyn Signer>>,
+struct CompOsService {
+    dex2oat_path: PathBuf,
+    key_service: CompOsKeyService,
 }
 
-impl CompService {
-    fn run_worker_in_jail_and_wait(&self, args: &[String]) -> Result<(), minijail::Error> {
-        let mut jail = Minijail::new()?;
+impl Interface for CompOsService {}
 
-        // TODO(b/185175567): New user and uid namespace when supported. Run as nobody.
-        // New mount namespace to isolate the FUSE mount.
-        jail.namespace_vfs();
-
-        let inheritable_fds = if self.debuggable {
-            vec![1, 2] // inherit/redirect stdout/stderr for debugging
-        } else {
-            vec![]
-        };
-        let _pid = jail.run(&self.worker_bin, &inheritable_fds, args)?;
-        jail.wait()
-    }
-
-    fn build_worker_args(&self, args: &[String], metadata: &Metadata) -> Vec<String> {
-        let mut worker_args = vec![
-            WORKER_BIN.to_string(),
-            "--authfs-root".to_string(),
-            AUTHFS_MOUNTPOINT.to_string(),
-        ];
-        for annotation in &metadata.input_fd_annotations {
-            worker_args.push("--in-fd".to_string());
-            worker_args.push(format!("{}:{}", annotation.fd, annotation.file_size));
-        }
-        for annotation in &metadata.output_fd_annotations {
-            worker_args.push("--out-fd".to_string());
-            worker_args.push(annotation.fd.to_string());
-        }
-        if self.debuggable {
-            worker_args.push("--debug".to_string());
-        }
-        worker_args.push("--".to_string());
-
-        // Do not accept arbitrary code execution. We want to execute some specific task of this
-        // service. Use the associated executable.
-        worker_args.push(self.task_bin.clone());
-        worker_args.extend_from_slice(&args[1..]);
-        worker_args
-    }
-}
-
-impl Interface for CompService {}
-
-impl ICompService for CompService {
+impl ICompOsService for CompOsService {
     fn execute(&self, args: &[String], metadata: &Metadata) -> BinderResult<i8> {
-        let worker_args = self.build_worker_args(args, metadata);
-
-        match self.run_worker_in_jail_and_wait(&worker_args) {
-            Ok(_) => Ok(0), // TODO(b/161471326): Sign the output on succeed.
-            Err(minijail::Error::ReturnCode(exit_code)) => {
-                error!("Task failed with exit code {}", exit_code);
-                Err(Status::from(StatusCode::FAILED_TRANSACTION))
-            }
-            Err(e) => {
-                error!("Unexpected error: {}", e);
-                Err(Status::from(StatusCode::UNKNOWN_ERROR))
-            }
-        }
+        let authfs_service = get_authfs_service()?;
+        compile(&self.dex2oat_path, args, authfs_service, metadata).map_err(|e| {
+            new_binder_exception(
+                ExceptionCode::SERVICE_SPECIFIC,
+                format!("Compilation failed: {}", e),
+            )
+        })
     }
+
+    fn generateSigningKey(&self) -> BinderResult<CompOsKeyData> {
+        self.key_service
+            .do_generate()
+            .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
+    }
+
+    fn verifySigningKey(&self, key_blob: &[u8], public_key: &[u8]) -> BinderResult<bool> {
+        Ok(if let Err(e) = self.key_service.do_verify(key_blob, public_key) {
+            warn!("Signing key verification failed: {}", e.to_string());
+            false
+        } else {
+            true
+        })
+    }
+
+    fn sign(&self, key_blob: &[u8], data: &[u8]) -> BinderResult<Vec<u8>> {
+        self.key_service
+            .do_sign(key_blob, data)
+            .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
+    }
+}
+
+fn get_authfs_service() -> BinderResult<Strong<dyn IAuthFsService>> {
+    Ok(authfs_aidl_interface::binder::get_interface(AUTHFS_SERVICE_NAME)?)
+}
+
+fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
+    Status::new_exception(exception, CString::new(message.as_ref()).as_deref().ok())
 }
diff --git a/compos/src/compsvc_main.rs b/compos/src/compsvc_main.rs
index 9f12132..48e37b6 100644
--- a/compos/src/compsvc_main.rs
+++ b/compos/src/compsvc_main.rs
@@ -16,11 +16,10 @@
 
 //! A tool to start a standalone compsvc server, either in the host using Binder or in a VM using
 //! RPC binder over vsock.
-//!
-//! Example:
-//! $ compsvc /system/bin/sleep
 
 mod common;
+mod compilation;
+mod compos_key_service;
 mod compsvc;
 mod signer;
 
@@ -31,27 +30,17 @@
 use log::debug;
 
 struct Config {
-    task_bin: String,
     rpc_binder: bool,
-    debuggable: bool,
 }
 
 fn parse_args() -> Result<Config> {
     #[rustfmt::skip]
     let matches = clap::App::new("compsvc")
-        .arg(clap::Arg::with_name("debug")
-             .long("debug"))
-        .arg(clap::Arg::with_name("task_bin")
-             .required(true))
         .arg(clap::Arg::with_name("rpc_binder")
              .long("rpc-binder"))
         .get_matches();
 
-    Ok(Config {
-        task_bin: matches.value_of("task_bin").unwrap().to_string(),
-        rpc_binder: matches.is_present("rpc_binder"),
-        debuggable: matches.is_present("debug"),
-    })
+    Ok(Config { rpc_binder: matches.is_present("rpc_binder") })
 }
 
 fn main() -> Result<()> {
@@ -60,7 +49,7 @@
     );
 
     let config = parse_args()?;
-    let mut service = compsvc::new_binder(config.task_bin, config.debuggable, None).as_binder();
+    let mut service = compsvc::new_binder(config.rpc_binder)?.as_binder();
     if config.rpc_binder {
         debug!("compsvc is starting as a rpc service.");
         // SAFETY: Service ownership is transferring to the server and won't be valid afterward.
diff --git a/compos/src/compsvc_worker.rs b/compos/src/compsvc_worker.rs
deleted file mode 100644
index f33659e..0000000
--- a/compos/src/compsvc_worker.rs
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! This executable works as a child/worker for the main compsvc service. This worker is mainly
-//! responsible for setting up the execution environment, e.g. to create file descriptors for
-//! remote file access via an authfs mount.
-
-mod authfs;
-
-use anyhow::{bail, Result};
-use minijail::Minijail;
-use std::fs::File;
-use std::os::unix::io::AsRawFd;
-use std::path::Path;
-use std::process::exit;
-
-use crate::authfs::{AuthFs, InFdAnnotation, OutFdAnnotation, PseudoRawFd};
-
-fn open_authfs_files_for_mapping(
-    authfs: &AuthFs,
-    config: &Config,
-) -> Result<Vec<(File, PseudoRawFd)>> {
-    let mut fd_mapping = Vec::with_capacity(config.in_fds.len() + config.out_fds.len());
-
-    let results: Result<Vec<_>> =
-        config.in_fds.iter().map(|conf| Ok((authfs.open_file(conf.fd, false)?, conf.fd))).collect();
-    fd_mapping.append(&mut results?);
-
-    let results: Result<Vec<_>> =
-        config.out_fds.iter().map(|conf| Ok((authfs.open_file(conf.fd, true)?, conf.fd))).collect();
-    fd_mapping.append(&mut results?);
-
-    Ok(fd_mapping)
-}
-
-fn spawn_jailed_task(config: &Config, fd_mapping: Vec<(File, PseudoRawFd)>) -> Result<Minijail> {
-    // TODO(b/185175567): Run in a more restricted sandbox.
-    let jail = Minijail::new()?;
-    let mut preserve_fds: Vec<_> = fd_mapping.iter().map(|(f, id)| (f.as_raw_fd(), *id)).collect();
-    if config.debuggable {
-        // inherit/redirect stdout/stderr for debugging
-        preserve_fds.push((1, 1));
-        preserve_fds.push((2, 2));
-    }
-    let _pid =
-        jail.run_remap(&Path::new(&config.args[0]), preserve_fds.as_slice(), &config.args)?;
-    Ok(jail)
-}
-
-struct Config {
-    authfs_root: String,
-    in_fds: Vec<InFdAnnotation>,
-    out_fds: Vec<OutFdAnnotation>,
-    args: Vec<String>,
-    debuggable: bool,
-}
-
-fn parse_args() -> Result<Config> {
-    #[rustfmt::skip]
-    let matches = clap::App::new("compsvc_worker")
-        .arg(clap::Arg::with_name("authfs-root")
-             .long("authfs-root")
-             .value_name("DIR")
-             .required(true)
-             .takes_value(true))
-        .arg(clap::Arg::with_name("in-fd")
-             .long("in-fd")
-             .multiple(true)
-             .takes_value(true)
-             .requires("authfs-root"))
-        .arg(clap::Arg::with_name("out-fd")
-             .long("out-fd")
-             .multiple(true)
-             .takes_value(true)
-             .requires("authfs-root"))
-        .arg(clap::Arg::with_name("debug")
-             .long("debug"))
-        .arg(clap::Arg::with_name("args")
-             .last(true)
-             .required(true)
-             .multiple(true))
-        .get_matches();
-
-    // Safe to unwrap since the arg is required by the clap rule
-    let authfs_root = matches.value_of("authfs-root").unwrap().to_string();
-
-    let results: Result<Vec<_>> = matches
-        .values_of("in-fd")
-        .unwrap_or_default()
-        .into_iter()
-        .map(|arg| {
-            if let Some(index) = arg.find(':') {
-                let (fd, size) = arg.split_at(index);
-                Ok(InFdAnnotation { fd: fd.parse()?, file_size: size[1..].parse()? })
-            } else {
-                bail!("Invalid argument: {}", arg);
-            }
-        })
-        .collect();
-    let in_fds = results?;
-
-    let results: Result<Vec<_>> = matches
-        .values_of("out-fd")
-        .unwrap_or_default()
-        .into_iter()
-        .map(|arg| Ok(OutFdAnnotation { fd: arg.parse()? }))
-        .collect();
-    let out_fds = results?;
-
-    let args: Vec<_> = matches.values_of("args").unwrap().map(|s| s.to_string()).collect();
-    let debuggable = matches.is_present("debug");
-
-    Ok(Config { authfs_root, in_fds, out_fds, args, debuggable })
-}
-
-fn main() -> Result<()> {
-    let log_level =
-        if env!("TARGET_BUILD_VARIANT") == "eng" { log::Level::Trace } else { log::Level::Info };
-    android_logger::init_once(
-        android_logger::Config::default().with_tag("compsvc_worker").with_min_level(log_level),
-    );
-
-    let config = parse_args()?;
-
-    let authfs = AuthFs::mount_and_wait(
-        &config.authfs_root,
-        &config.in_fds,
-        &config.out_fds,
-        config.debuggable,
-    )?;
-    let fd_mapping = open_authfs_files_for_mapping(&authfs, &config)?;
-
-    let jail = spawn_jailed_task(&config, fd_mapping)?;
-    let jail_result = jail.wait();
-
-    // Be explicit about the lifetime, which should last at least until the task is finished.
-    drop(authfs);
-
-    match jail_result {
-        Ok(_) => Ok(()),
-        Err(minijail::Error::ReturnCode(exit_code)) => {
-            exit(exit_code as i32);
-        }
-        Err(e) => {
-            bail!("Unexpected minijail error: {}", e);
-        }
-    }
-}
diff --git a/compos/src/pvm_exec.rs b/compos/src/pvm_exec.rs
index 03fbf72..2218d10 100644
--- a/compos/src/pvm_exec.rs
+++ b/compos/src/pvm_exec.rs
@@ -36,7 +36,7 @@
 use std::process::exit;
 
 use compos_aidl_interface::aidl::com::android::compos::{
-    ICompService::ICompService, InputFdAnnotation::InputFdAnnotation, Metadata::Metadata,
+    ICompOsService::ICompOsService, InputFdAnnotation::InputFdAnnotation, Metadata::Metadata,
     OutputFdAnnotation::OutputFdAnnotation,
 };
 use compos_aidl_interface::binder::Strong;
@@ -46,18 +46,18 @@
 
 const FD_SERVER_BIN: &str = "/apex/com.android.virt/bin/fd_server";
 
-fn get_local_service() -> Result<Strong<dyn ICompService>> {
+fn get_local_service() -> Result<Strong<dyn ICompOsService>> {
     compos_aidl_interface::binder::get_interface(SERVICE_NAME).context("get local binder")
 }
 
-fn get_rpc_binder(cid: u32) -> Result<Strong<dyn ICompService>> {
+fn get_rpc_binder(cid: u32) -> Result<Strong<dyn ICompOsService>> {
     // SAFETY: AIBinder returned by RpcClient has correct reference count, and the ownership can be
     // safely taken by new_spibinder.
     let ibinder = unsafe {
         new_spibinder(binder_rpc_unstable_bindgen::RpcClient(cid, VSOCK_PORT) as *mut AIBinder)
     };
     if let Some(ibinder) = ibinder {
-        <dyn ICompService>::try_from(ibinder).context("Cannot connect to RPC service")
+        <dyn ICompOsService>::try_from(ibinder).context("Cannot connect to RPC service")
     } else {
         bail!("Invalid raw AIBinder")
     }
diff --git a/compos/tests/java/android/compos/test/ComposKeyTestCase.java b/compos/tests/java/android/compos/test/ComposKeyTestCase.java
index 654dc0b..6ef82f7 100644
--- a/compos/tests/java/android/compos/test/ComposKeyTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposKeyTestCase.java
@@ -131,7 +131,7 @@
                         getBuild(),
                         apkName,
                         packageName,
-                        "assets/key_service_vm_config.json",
+                        "assets/vm_config.json",
                         /* debug */ true);
         adbConnectToMicrodroid(getDevice(), mCid);
     }
@@ -145,6 +145,6 @@
     }
 
     private boolean isServiceRunning() {
-        return tryRunOnMicrodroid("pidof compos_key_main") != null;
+        return tryRunOnMicrodroid("pidof compsvc") != null;
     }
 }
diff --git a/compos/tests/java/android/compos/test/ComposTestCase.java b/compos/tests/java/android/compos/test/ComposTestCase.java
index 4471e63..f69b7b7 100644
--- a/compos/tests/java/android/compos/test/ComposTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposTestCase.java
@@ -138,7 +138,7 @@
                         apkName,
                         packageName,
                         "assets/vm_config.json",
-                        /* debug */ true);
+                        /* debug */ false);
         adbConnectToMicrodroid(getDevice(), mCid);
     }
 
diff --git a/idsig/Android.bp b/idsig/Android.bp
new file mode 100644
index 0000000..90525ff
--- /dev/null
+++ b/idsig/Android.bp
@@ -0,0 +1,34 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+    name: "libidsig.defaults",
+    crate_name: "idsig",
+    srcs: ["src/lib.rs"],
+    edition: "2018",
+    prefer_rlib: true,
+    rustlibs: [
+        "libanyhow",
+        "libring",
+        "libnum_traits",
+    ],
+    proc_macros: ["libnum_derive"],
+    multilib: {
+        lib32: {
+            enabled: false,
+        },
+    },
+}
+
+rust_library {
+    name: "libidsig",
+    defaults: ["libidsig.defaults"],
+}
+
+rust_test {
+    name: "libidsig.test",
+    defaults: ["libidsig.defaults"],
+    test_suites: ["general-tests"],
+    compile_multilib: "first",
+}
diff --git a/idsig/Cargo.toml b/idsig/Cargo.toml
new file mode 100644
index 0000000..91b2842
--- /dev/null
+++ b/idsig/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "idsig"
+version = "0.1.0"
+authors = ["Jiyong Park <jiyong@google.com>"]
+edition = "2018"
+
+[dependencies]
+anyhow = "1.0"
+ring = "0.16"
+num-derive = "0.3"
+num-traits = "0.2"
diff --git a/apkdmverity/src/apksigv4.rs b/idsig/src/apksigv4.rs
similarity index 75%
rename from apkdmverity/src/apksigv4.rs
rename to idsig/src/apksigv4.rs
index fef21a5..6f4603d 100644
--- a/apkdmverity/src/apksigv4.rs
+++ b/idsig/src/apksigv4.rs
@@ -22,36 +22,56 @@
 // `apksigv4` module provides routines to decode the idsig file as defined in [APK signature
 // scheme v4] (https://source.android.com/security/apksigning/v4).
 
+/// `V4Signature` provides access to the various fields in an idsig file.
 #[derive(Debug)]
 pub struct V4Signature {
+    /// Version of the header. Should be 2.
     pub version: Version,
+    /// Provides access to the information about how the APK is hashed.
     pub hashing_info: HashingInfo,
+    /// Provides access to the information that can be used to verify this file
     pub signing_info: SigningInfo,
+    /// Total size of the merkle tree
     pub merkle_tree_size: u32,
+    /// Offset of the merkle tree in the idsig file
     pub merkle_tree_offset: u64,
 }
 
+/// `HashingInfo` provides information about how the APK is hashed.
 #[derive(Debug)]
 pub struct HashingInfo {
+    /// Hash algorithm used when creating the merkle tree for the APK.
     pub hash_algorithm: HashAlgorithm,
+    /// The log size of a block used when creating the merkle tree. 12 if 4k block was used.
     pub log2_blocksize: u8,
+    /// The salt used when creating the merkle tree. 32 bytes max.
     pub salt: Box<[u8]>,
+    /// The root hash of the merkle tree created.
     pub raw_root_hash: Box<[u8]>,
 }
 
+/// `SigningInfo` provides information that can be used to verify the idsig file.
 #[derive(Debug)]
 pub struct SigningInfo {
+    /// Digest of the APK that this idsig file is for.
     pub apk_digest: Box<[u8]>,
+    /// Certificate of the signer that signed this idsig file. ASN.1 DER form.
     pub x509_certificate: Box<[u8]>,
+    /// A free-form binary data
     pub additional_data: Box<[u8]>,
+    /// Public key of the signer in ASN.1 DER form. This must match the `x509_certificate` field.
     pub public_key: Box<[u8]>,
+    /// Signature algorithm used to sign this file.
     pub signature_algorithm_id: SignatureAlgorithmId,
+    /// The signature of this file.
     pub signature: Box<[u8]>,
 }
 
+/// Version of the idsig file format
 #[derive(Debug, PartialEq, FromPrimitive)]
 #[repr(u32)]
 pub enum Version {
+    /// Version 2, the only supported version.
     V2 = 2,
 }
 
@@ -61,9 +81,11 @@
     }
 }
 
+/// Hash algorithm that can be used for idsig file.
 #[derive(Debug, PartialEq, FromPrimitive)]
 #[repr(u32)]
 pub enum HashAlgorithm {
+    /// SHA2-256
     SHA256 = 1,
 }
 
@@ -73,16 +95,24 @@
     }
 }
 
+/// Signature algorithm that can be used for idsig file
 #[derive(Debug, PartialEq, FromPrimitive)]
 #[allow(non_camel_case_types)]
 #[repr(u32)]
 pub enum SignatureAlgorithmId {
+    /// RSASSA-PSS with SHA2-256 digest, SHA2-256 MGF1, 32 bytes of salt, trailer: 0xbc
     RSASSA_PSS_SHA2_256 = 0x0101,
+    /// RSASSA-PSS with SHA2-512 digest, SHA2-512 MGF1, 64 bytes of salt, trailer: 0xbc
     RSASSA_PSS_SHA2_512 = 0x0102,
+    /// RSASSA-PKCS1-v1_5 with SHA2-256 digest.
     RSASSA_PKCS1_SHA2_256 = 0x0103,
+    /// RSASSA-PKCS1-v1_5 with SHA2-512 digest.
     RSASSA_PKCS1_SHA2_512 = 0x0104,
+    /// ECDSA with SHA2-256 digest.
     ECDSA_SHA2_256 = 0x0201,
+    /// ECDSA with SHA2-512 digest.
     ECDSA_SHA2_512 = 0x0202,
+    /// DSA with SHA2-256 digest
     DSA_SHA2_256 = 0x0301,
 }
 
@@ -153,10 +183,13 @@
 
 #[cfg(test)]
 mod tests {
-    use crate::util::hexstring_from;
-    use crate::*;
+    use crate::apksigv4::*;
     use std::io::Cursor;
 
+    fn hexstring_from(s: &[u8]) -> String {
+        s.iter().map(|byte| format!("{:02x}", byte)).reduce(|i, j| i + &j).unwrap_or_default()
+    }
+
     #[test]
     fn parse_idsig_file() {
         let idsig = Cursor::new(include_bytes!("../testdata/test.apk.idsig"));
diff --git a/idsig/src/hashtree.rs b/idsig/src/hashtree.rs
new file mode 100644
index 0000000..a4727a9
--- /dev/null
+++ b/idsig/src/hashtree.rs
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use ring::digest::{self, Algorithm, Digest};
+use std::io::{Cursor, Read, Result, Write};
+
+/// `HashTree` is a merkle tree (and its root hash) that is compatible with fs-verity.
+pub struct HashTree {
+    /// Binary presentation of the merkle tree
+    pub tree: Vec<u8>,
+    /// Root hash
+    pub root_hash: Vec<u8>,
+}
+
+impl HashTree {
+    /// Creates merkle tree from `input`, using the given `salt` and hashing `algorithm`. `input`
+    /// is divided into `block_size` chunks.
+    pub fn from<R: Read>(
+        input: &mut R,
+        input_size: usize,
+        salt: &[u8],
+        block_size: usize,
+        algorithm: &'static Algorithm,
+    ) -> Result<Self> {
+        let salt = zero_pad_salt(salt, algorithm);
+        let tree = generate_hash_tree(input, input_size, &salt, block_size, algorithm)?;
+
+        // Root hash is from the first block of the hash or the input data if there is no hash tree
+        // generated which can happen when input data is smaller than block size
+        let root_hash = if tree.is_empty() {
+            let mut data = Vec::new();
+            input.read_to_end(&mut data)?;
+            hash_one_block(&data, &salt, block_size, algorithm).as_ref().to_vec()
+        } else {
+            let first_block = &tree[0..block_size];
+            hash_one_block(first_block, &salt, block_size, algorithm).as_ref().to_vec()
+        };
+        Ok(HashTree { tree, root_hash })
+    }
+}
+
+/// Calculate hash tree for the blocks in `input`.
+///
+/// This function implements: https://www.kernel.org/doc/html/latest/filesystems/fsverity.html#merkle-tree
+///
+/// The file contents is divided into blocks, where the block size is configurable but is usually
+/// 4096 bytes. The end of the last block is zero-padded if needed. Each block is then hashed,
+/// producing the first level of hashes. Then, the hashes in this first level are grouped into
+/// blocksize-byte blocks (zero-padding the ends as needed) and these blocks are hashed,
+/// producing the second level of hashes. This proceeds up the tree until only a single block
+/// remains.
+fn generate_hash_tree<R: Read>(
+    input: &mut R,
+    input_size: usize,
+    salt: &[u8],
+    block_size: usize,
+    algorithm: &'static Algorithm,
+) -> Result<Vec<u8>> {
+    let digest_size = algorithm.output_len;
+    let levels = calc_hash_levels(input_size, block_size, digest_size);
+    let tree_size = levels.iter().map(|r| r.len()).sum();
+
+    // The contiguous memory that holds the entire merkle tree
+    let mut hash_tree = vec![0; tree_size];
+
+    for (n, cur) in levels.iter().enumerate() {
+        if n == 0 {
+            // Level 0: the (zero-padded) input stream is hashed into level 0
+            let pad_size = round_to_multiple(input_size, block_size) - input_size;
+            let mut input = input.chain(Cursor::new(vec![0; pad_size]));
+            let mut level0 = Cursor::new(&mut hash_tree[cur.start..cur.end]);
+
+            let mut a_block = vec![0; block_size];
+            let mut num_blocks = (input_size + block_size - 1) / block_size;
+            while num_blocks > 0 {
+                input.read_exact(&mut a_block)?;
+                let h = hash_one_block(&a_block, salt, block_size, algorithm);
+                level0.write_all(h.as_ref()).unwrap();
+                num_blocks -= 1;
+            }
+        } else {
+            // Intermediate levels: level n - 1 is hashed into level n
+            // Both levels belong to the same `hash_tree`. In order to have a mutable slice for
+            // level n while having a slice for level n - 1, take the mutable slice for both levels
+            // and split it.
+            let prev = &levels[n - 1];
+            let cur_and_prev = &mut hash_tree[cur.start..prev.end];
+            let (cur, prev) = cur_and_prev.split_at_mut(prev.start);
+            let mut cur = Cursor::new(cur);
+            prev.chunks(block_size).for_each(|data| {
+                let h = hash_one_block(data, salt, block_size, algorithm);
+                cur.write_all(h.as_ref()).unwrap();
+            });
+        }
+    }
+    Ok(hash_tree)
+}
+
+/// Hash one block of input using the given hash algorithm and the salt. Input might be smaller
+/// than a block, in which case zero is padded.
+fn hash_one_block(
+    input: &[u8],
+    salt: &[u8],
+    block_size: usize,
+    algorithm: &'static Algorithm,
+) -> Digest {
+    let mut ctx = digest::Context::new(algorithm);
+    ctx.update(salt);
+    ctx.update(input);
+    let pad_size = block_size - input.len();
+    ctx.update(&vec![0; pad_size]);
+    ctx.finish()
+}
+
+type Range = std::ops::Range<usize>;
+
+/// Calculate the ranges of hash for each level
+fn calc_hash_levels(input_size: usize, block_size: usize, digest_size: usize) -> Vec<Range> {
+    // The input is split into multiple blocks and each block is hashed, which becomes the input
+    // for the next level. Size of a single hash is `digest_size`.
+    let mut level_sizes = Vec::new();
+    loop {
+        // Input for this level is from either the last level (if exists), or the input parameter.
+        let input_size = *level_sizes.last().unwrap_or(&input_size);
+        if input_size <= block_size {
+            break;
+        }
+        let num_blocks = (input_size + block_size - 1) / block_size;
+        let hashes_size = round_to_multiple(num_blocks * digest_size, block_size);
+        level_sizes.push(hashes_size);
+    }
+
+    // The hash tree is stored upside down. The top level is at offset 0. The second level comes
+    // next, and so on. Level 0 is located at the end.
+    //
+    // Given level_sizes [10, 3, 1], the offsets for each label are ...
+    //
+    // Level 2 is at offset 0
+    // Level 1 is at offset 1 (because Level 2 is of size 1)
+    // Level 0 is at offset 4 (because Level 1 is of size 3)
+    //
+    // This is done by scanning the sizes in reverse order
+    let mut ranges = level_sizes
+        .iter()
+        .rev()
+        .scan(0, |prev_end, size| {
+            let range = *prev_end..*prev_end + size;
+            *prev_end = range.end;
+            Some(range)
+        })
+        .collect::<Vec<_>>();
+    ranges.reverse(); // reverse again so that index N is for level N
+    ranges
+}
+
+/// Round `n` up to the nearest multiple of `unit`
+fn round_to_multiple(n: usize, unit: usize) -> usize {
+    (n + unit - 1) & !(unit - 1)
+}
+
+/// Pad zero to salt if necessary.
+///
+/// According to https://www.kernel.org/doc/html/latest/filesystems/fsverity.html:
+///
+/// If a salt was specified, then it’s zero-padded to the closest multiple of the input size of the
+/// hash algorithm’s compression function, e.g. 64 bytes for SHA-256 or 128 bytes for SHA-512. The
+/// padded salt is prepended to every data or Merkle tree block that is hashed.
+fn zero_pad_salt(salt: &[u8], algorithm: &Algorithm) -> Vec<u8> {
+    if salt.is_empty() {
+        salt.to_vec()
+    } else {
+        let padded_len = round_to_multiple(salt.len(), algorithm.block_len);
+        let mut salt = salt.to_vec();
+        salt.resize(padded_len, 0);
+        salt
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::hashtree::*;
+    use ring::digest;
+    use std::fs::{self, File};
+
+    #[test]
+    fn compare_with_golden_output() -> Result<()> {
+        // The golden outputs are generated by using the `fsverity` utility.
+        let sizes = ["512", "4K", "1M", "10000000"];
+        for size in sizes.iter() {
+            let input_name = format!("testdata/input.{}", size);
+            let mut input = File::open(&input_name)?;
+            let golden_hash_tree = fs::read(format!("testdata/input.{}.hash", size))?;
+            let golden_descriptor = fs::read(format!("testdata/input.{}.descriptor", size))?;
+            let golden_root_hash = &golden_descriptor[16..16 + 32];
+
+            let size = std::fs::metadata(&input_name)?.len() as usize;
+            let salt = vec![1, 2, 3, 4, 5, 6];
+            let ht = HashTree::from(&mut input, size, &salt, 4096, &digest::SHA256)?;
+
+            assert_eq!(golden_hash_tree.as_slice(), ht.tree.as_slice());
+            assert_eq!(golden_root_hash, ht.root_hash.as_slice());
+        }
+        Ok(())
+    }
+}
diff --git a/idsig/src/lib.rs b/idsig/src/lib.rs
new file mode 100644
index 0000000..7937d71
--- /dev/null
+++ b/idsig/src/lib.rs
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! `idsig` provides routines for creating the idsig file that is defined for the APK signature
+//! scheme v4 and for parsing the file.
+
+mod apksigv4;
+mod hashtree;
+
+pub use crate::apksigv4::*;
+pub use crate::hashtree::*;
diff --git a/idsig/testdata/create.sh b/idsig/testdata/create.sh
new file mode 100755
index 0000000..eadfdb2
--- /dev/null
+++ b/idsig/testdata/create.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+sizes="512 4K 1M 10000000"
+for size in $sizes; do
+  echo $size
+  dd if=/dev/random of=input.$size bs=$size count=1
+  fsverity digest input.$size \
+    --hash-alg=sha256 \
+    --salt=010203040506 \
+    --block-size=4096 \
+    --out-merkle-tree input.$size.hash \
+    --out-descriptor input.$size.descriptor
+done
diff --git a/idsig/testdata/input.10000000 b/idsig/testdata/input.10000000
new file mode 100644
index 0000000..6bc5a4b
--- /dev/null
+++ b/idsig/testdata/input.10000000
Binary files differ
diff --git a/idsig/testdata/input.10000000.descriptor b/idsig/testdata/input.10000000.descriptor
new file mode 100644
index 0000000..dc0d096
--- /dev/null
+++ b/idsig/testdata/input.10000000.descriptor
Binary files differ
diff --git a/idsig/testdata/input.10000000.hash b/idsig/testdata/input.10000000.hash
new file mode 100644
index 0000000..354c5c2
--- /dev/null
+++ b/idsig/testdata/input.10000000.hash
Binary files differ
diff --git a/idsig/testdata/input.1M b/idsig/testdata/input.1M
new file mode 100644
index 0000000..7040ec3
--- /dev/null
+++ b/idsig/testdata/input.1M
Binary files differ
diff --git a/idsig/testdata/input.1M.descriptor b/idsig/testdata/input.1M.descriptor
new file mode 100644
index 0000000..f11753d
--- /dev/null
+++ b/idsig/testdata/input.1M.descriptor
Binary files differ
diff --git a/idsig/testdata/input.1M.hash b/idsig/testdata/input.1M.hash
new file mode 100644
index 0000000..689790c
--- /dev/null
+++ b/idsig/testdata/input.1M.hash
Binary files differ
diff --git a/idsig/testdata/input.4K b/idsig/testdata/input.4K
new file mode 100644
index 0000000..99db32a
--- /dev/null
+++ b/idsig/testdata/input.4K
Binary files differ
diff --git a/idsig/testdata/input.4K.descriptor b/idsig/testdata/input.4K.descriptor
new file mode 100644
index 0000000..b120e2f
--- /dev/null
+++ b/idsig/testdata/input.4K.descriptor
Binary files differ
diff --git a/idsig/testdata/input.4K.hash b/idsig/testdata/input.4K.hash
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/idsig/testdata/input.4K.hash
diff --git a/idsig/testdata/input.512 b/idsig/testdata/input.512
new file mode 100644
index 0000000..a57797f
--- /dev/null
+++ b/idsig/testdata/input.512
Binary files differ
diff --git a/idsig/testdata/input.512.descriptor b/idsig/testdata/input.512.descriptor
new file mode 100644
index 0000000..805019b
--- /dev/null
+++ b/idsig/testdata/input.512.descriptor
Binary files differ
diff --git a/idsig/testdata/input.512.hash b/idsig/testdata/input.512.hash
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/idsig/testdata/input.512.hash
diff --git a/idsig/testdata/test.apk.idsig b/idsig/testdata/test.apk.idsig
new file mode 100644
index 0000000..8c112de
--- /dev/null
+++ b/idsig/testdata/test.apk.idsig
Binary files differ
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index a0215c3..4926e2c 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -111,6 +111,7 @@
             deps: [
                 "apkdmverity",
                 "authfs",
+                "authfs_service",
                 "zipfuse",
 
                 // TODO(b/184872979): Needed by authfs. Remove once the Rust API is created.
diff --git a/microdroid/init.rc b/microdroid/init.rc
index d43ab22..f9cd915 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -136,6 +136,9 @@
 
     start keystore2
 
+    mkdir /data/misc/authfs 0700 root root
+    start authfs_service
+
 on late-fs
     start vendor.keymint-microdroid
 
diff --git a/microdroid/keymint/MicrodroidKeymasterContext.cpp b/microdroid/keymint/MicrodroidKeymasterContext.cpp
index b5440f3..1d1346b 100644
--- a/microdroid/keymint/MicrodroidKeymasterContext.cpp
+++ b/microdroid/keymint/MicrodroidKeymasterContext.cpp
@@ -55,11 +55,14 @@
     // doesn't pose a problem for the current applications but may be a
     // candidate for hardening.
     auto encrypted_key = EncryptKey(key_material, AES_GCM_WITH_SW_ENFORCED, *hw_enforced,
-                                    *sw_enforced, hidden, root_key_, random_, &error);
-    if (error != KM_ERROR_OK) return error;
+                                    *sw_enforced, hidden, SecureDeletionData{}, root_key_, random_);
+    if (!encrypted_key) return encrypted_key.error();
 
-    *blob = SerializeAuthEncryptedBlob(encrypted_key, *hw_enforced, *sw_enforced, &error);
-    return error;
+    auto serialized = SerializeAuthEncryptedBlob(*encrypted_key, *hw_enforced, *sw_enforced,
+                                                 0 /* key_slot */);
+    if (!serialized) return serialized.error();
+    *blob = *serialized;
+    return KM_ERROR_OK;
 }
 
 keymaster_error_t MicrodroidKeymasterContext::ParseKeyBlob(
@@ -71,21 +74,21 @@
     error = BuildHiddenAuthorizations(additional_params, &hidden, microdroidSoftwareRootOfTrust);
     if (error != KM_ERROR_OK) return error;
 
-    auto deserialized_key = DeserializeAuthEncryptedBlob(blob, &error);
-    if (error != KM_ERROR_OK) return error;
+    auto deserialized_key = DeserializeAuthEncryptedBlob(blob);
+    if (!deserialized_key) return deserialized_key.error();
 
     keymaster_algorithm_t algorithm;
-    if (!deserialized_key.sw_enforced.GetTagValue(TAG_ALGORITHM, &algorithm)) {
+    if (!deserialized_key->sw_enforced.GetTagValue(TAG_ALGORITHM, &algorithm)) {
         return KM_ERROR_INVALID_ARGUMENT;
     }
 
-    auto key_material = DecryptKey(deserialized_key, hidden, root_key_, &error);
-    if (error != KM_ERROR_OK) return error;
+    auto key_material = DecryptKey(*deserialized_key, hidden, SecureDeletionData{}, root_key_);
+    if (!key_material) return key_material.error();
 
     auto factory = GetKeyFactory(algorithm);
-    return factory->LoadKey(move(key_material), additional_params,
-                            move(deserialized_key.hw_enforced), move(deserialized_key.sw_enforced),
-                            key);
+    return factory->LoadKey(move(*key_material), additional_params,
+                            move(deserialized_key->hw_enforced),
+                            move(deserialized_key->sw_enforced), key);
 }
 
 static bool UpgradeIntegerTag(keymaster_tag_t tag, uint32_t value, AuthorizationSet* set) {
@@ -137,10 +140,13 @@
 
     auto encrypted_key =
             EncryptKey(key->key_material(), AES_GCM_WITH_SW_ENFORCED, key->hw_enforced(),
-                       key->sw_enforced(), hidden, root_key_, random_, &error);
-    if (error != KM_ERROR_OK) return error;
+                       key->sw_enforced(), hidden, SecureDeletionData{}, root_key_, random_);
+    if (!encrypted_key) return encrypted_key.error();
 
-    *upgraded_key = SerializeAuthEncryptedBlob(encrypted_key, key->hw_enforced(),
-                                               key->sw_enforced(), &error);
+    auto serialized = SerializeAuthEncryptedBlob(*encrypted_key, key->hw_enforced(),
+                                                 key->sw_enforced(), 0 /* key_slot */);
+    if (!serialized) return serialized.error();
+
+    *upgraded_key = std::move(*serialized);
     return error;
 }
diff --git a/microdroid/payload/Android.bp b/microdroid/payload/Android.bp
index c7bc415..72711c3 100644
--- a/microdroid/payload/Android.bp
+++ b/microdroid/payload/Android.bp
@@ -49,30 +49,26 @@
     ],
 }
 
-cc_binary {
+cc_binary_host {
     name: "mk_payload",
     srcs: [
         "mk_payload.cc",
     ],
-    shared_libs: [
+    static_libs: [
+        "lib_microdroid_metadata_proto",
         "libbase",
+        "libcdisk_spec",
         "libcuttlefish_fs",
         "libcuttlefish_utils",
-        "liblog",
-        "libz",
-    ],
-    static_libs: [
-        "lib_microdroid_metadata_proto_lite",
-        "libcdisk_spec",
         "libext2_uuid",
         "libimage_aggregator",
         "libjsoncpp",
+        "liblog",
+        "libprotobuf-cpp-full",
         "libprotobuf-cpp-lite",
         "libsparse",
         "libxml2",
+        "libz",
     ],
-    generated_sources: ["apex-info-list"],
-    apex_available: [
-        "com.android.virt",
-    ],
+    static_executable: true,
 }
diff --git a/microdroid/payload/README.md b/microdroid/payload/README.md
index 35502c1..bf05c49 100644
--- a/microdroid/payload/README.md
+++ b/microdroid/payload/README.md
@@ -3,6 +3,9 @@
 Payload disk is a composite disk image referencing host APEXes and an APK so that microdroid
 mounts/activates APK/APEXes and executes a binary within the APK.
 
+Payload disk is created by [VirtualizationService](../../virtualizationservice) when
+starting a VM.
+
 ## Partitions
 
 Payload disk has 1 + N(number of APEX/APK payloads) partitions.
@@ -14,7 +17,7 @@
 
 * partition 1: Metadata partition
 * partition 2 ~ n: APEX payloads
-* partition n + 1: APK payload
+* partition n+1, n+2: APK payload and its idsig
 
 It's subject to change in the future, though.
 
@@ -34,52 +37,37 @@
 
 Each payload partition presents APEX or APK passed from the host.
 
-At the end of each payload partition the size of the original payload file (APEX or APK) is stored
-in 4-byte big endian.
+Note that each payload passed to the guest is read via a block device. If a payload is not sized
+to a multiple of 4k, reading it would fail. To prevent that, "zero fillers" are added for those
+files. For example, if an APK is 8000 bytes big, the APK partition would be padded with 192 bytes of zeros.
 
-For example, the following code shows how to get the original size of host apex file
-when the apex is read in microdroid as /dev/block/vdc2,
+# `mk_payload`
 
-    int fd = open("/dev/block/vdc2", O_RDONLY | O_BINARY | O_CLOEXEC);
-    uint32_t size;
-    lseek(fd, -sizeof(size), SEEK_END);
-    read(fd, &size, sizeof(size));
-    size = betoh32(size);
-
-## How to Create
-
-### `mk_payload`
-
-`mk_payload` creates a payload composite disk image as described in a JSON which is intentionlly
-similar to the schema of VM payload config.
+`mk_payload` is a small utility to create a payload disk image.
 
 ```
 $ cat payload_config.json
 {
-  "system_apexes": [
-    "com.android.adbd",
-  ],
   "apexes": [
     {
       "name": "com.my.hello",
-      "path": "hello.apex"
+      "path": "hello.apex"
     }
   ],
   "apk": {
     "name": "com.my.world",
-    "path": "/path/to/world.apk"
+    "path": "/path/to/world.apk",
+    "idsigPath": "/path/to/world.apk.idsig"
   }
 }
-$ adb push payload_config.json hello.apex /data/local/tmp/
-$ adb shell 'cd /data/local/tmp; /apex/com.android.virt/bin/mk_payload payload_config.json payload.img
-$ adb shell ls /data/local/tmp/*.img
+$ m mk_payload
+$ mk_payload payload_config.json payload.img
+$ ls
 payload.img
 payload-footer.img
 payload-header.img
 payload-metadata.img
-payload.img.0          # fillers
-payload.img.1
+payload-filler-0.img
+payload-filler-1.img
 ...
 ```
-
-In the future, [VirtualizationService](../../virtualizationservice) will handle this.
diff --git a/microdroid/payload/mk_payload.cc b/microdroid/payload/mk_payload.cc
index b27683c..33e91b9 100644
--- a/microdroid/payload/mk_payload.cc
+++ b/microdroid/payload/mk_payload.cc
@@ -26,7 +26,6 @@
 
 #include <android-base/file.h>
 #include <android-base/result.h>
-#include <com_android_apex.h>
 #include <image_aggregator.h>
 #include <json/json.h>
 
@@ -42,9 +41,6 @@
 using android::microdroid::Metadata;
 using android::microdroid::WriteMetadata;
 
-using com::android::apex::ApexInfoList;
-using com::android::apex::readApexInfoList;
-
 using cuttlefish::AlignToPartitionSize;
 using cuttlefish::CreateCompositeDisk;
 using cuttlefish::kLinuxFilesystem;
@@ -58,9 +54,9 @@
     return static_cast<uint32_t>(st.st_size);
 }
 
-std::string ToAbsolute(const std::string& path, const std::string& dirname) {
+std::string RelativeTo(const std::string& path, const std::string& dirname) {
     bool is_absolute = !path.empty() && path[0] == '/';
-    if (is_absolute) {
+    if (is_absolute || dirname == ".") {
         return path;
     } else {
         return dirname + "/" + path;
@@ -81,25 +77,20 @@
     std::string name; // the apex name
     std::string path; // the path to the apex file
                       // absolute or relative to the config file
-    std::optional<std::string> public_key;
-    std::optional<std::string> root_digest;
 };
 
 struct ApkConfig {
     std::string name;
     std::string path;
-    // TODO(jooyung) make this required?
-    std::optional<std::string> idsig_path;
+    std::string idsig_path;
 };
 
 struct Config {
     std::string dirname; // config file's direname to resolve relative paths in the config
 
-    // TODO(b/185956069) remove this when VirtualizationService can provide apex paths
-    std::vector<std::string> system_apexes;
-
     std::vector<ApexConfig> apexes;
     std::optional<ApkConfig> apk;
+    // This is a path on the guest side
     std::optional<std::string> payload_config_path;
 };
 
@@ -137,8 +128,6 @@
 Result<void> ParseJson(const Json::Value& value, ApexConfig& apex_config) {
     DO(ParseJson(value["name"], apex_config.name));
     DO(ParseJson(value["path"], apex_config.path));
-    DO(ParseJson(value["publicKey"], apex_config.public_key));
-    DO(ParseJson(value["rootDigest"], apex_config.root_digest));
     return {};
 }
 
@@ -150,7 +139,6 @@
 }
 
 Result<void> ParseJson(const Json::Value& value, Config& config) {
-    DO(ParseJson(value["system_apexes"], config.system_apexes));
     DO(ParseJson(value["apexes"], config.apexes));
     DO(ParseJson(value["apk"], config.apk));
     DO(ParseJson(value["payload_config_path"], config.payload_config_path));
@@ -163,7 +151,7 @@
     Json::Value root;
     Json::String errs;
     if (!parseFromStream(builder, in, &root, &errs)) {
-        return Error() << "bad config: " << errs;
+        return Error() << errs;
     }
 
     Config config;
@@ -174,63 +162,22 @@
 
 #undef DO
 
-Result<void> LoadSystemApexes(Config& config) {
-    static const char* kApexInfoListFile = "/apex/apex-info-list.xml";
-    std::optional<ApexInfoList> apex_info_list = readApexInfoList(kApexInfoListFile);
-    if (!apex_info_list.has_value()) {
-        return Error() << "Failed to read " << kApexInfoListFile;
-    }
-    auto get_apex_path = [&](const std::string& apex_name) -> std::optional<std::string> {
-        for (const auto& apex_info : apex_info_list->getApexInfo()) {
-            if (apex_info.getIsActive() && apex_info.getModuleName() == apex_name) {
-                return apex_info.getModulePath();
-            }
-        }
-        return std::nullopt;
-    };
-    for (const auto& apex_name : config.system_apexes) {
-        const auto& apex_path = get_apex_path(apex_name);
-        if (!apex_path.has_value()) {
-            return Error() << "Can't find the system apex: " << apex_name;
-        }
-        config.apexes.push_back(ApexConfig{
-                .name = apex_name,
-                .path = *apex_path,
-                .public_key = std::nullopt,
-                .root_digest = std::nullopt,
-        });
-    }
-    return {};
-}
-
 Result<void> MakeMetadata(const Config& config, const std::string& filename) {
     Metadata metadata;
     metadata.set_version(1);
 
+    int apex_index = 0;
     for (const auto& apex_config : config.apexes) {
         auto* apex = metadata.add_apexes();
-
-        // name
         apex->set_name(apex_config.name);
-
-        // publicKey
-        if (apex_config.public_key.has_value()) {
-            apex->set_publickey(apex_config.public_key.value());
-        }
-
-        // rootDigest
-        if (apex_config.root_digest.has_value()) {
-            apex->set_rootdigest(apex_config.root_digest.value());
-        }
+        apex->set_partition_name("microdroid-apex-" + std::to_string(apex_index++));
     }
 
     if (config.apk.has_value()) {
         auto* apk = metadata.mutable_apk();
         apk->set_name(config.apk->name);
         apk->set_payload_partition_name("microdroid-apk");
-        if (config.apk->idsig_path.has_value()) {
-            apk->set_idsig_partition_name("microdroid-apk-idsig");
-        }
+        apk->set_idsig_partition_name("microdroid-apk-idsig");
     }
 
     if (config.payload_config_path.has_value()) {
@@ -241,34 +188,8 @@
     return WriteMetadata(metadata, out);
 }
 
-// fill (zeros + original file's size) with aligning BLOCK_SIZE(4096) boundary
-// return true when the filler is generated.
-Result<bool> SizeFiller(const std::string& file_path, const std::string& filler_path) {
-    auto file_size = GetFileSize(file_path);
-    if (!file_size.ok()) {
-        return file_size.error();
-    }
-    auto disk_size = AlignToPartitionSize(*file_size + sizeof(uint32_t));
-
-    unique_fd fd(TEMP_FAILURE_RETRY(open(filler_path.c_str(), O_CREAT | O_WRONLY | O_TRUNC, 0600)));
-    if (fd.get() == -1) {
-        return ErrnoError() << "open(" << filler_path << ") failed.";
-    }
-    uint32_t size = htobe32(static_cast<uint32_t>(*file_size));
-    if (ftruncate(fd.get(), disk_size - *file_size) == -1) {
-        return ErrnoError() << "ftruncate(" << filler_path << ") failed.";
-    }
-    if (lseek(fd.get(), -sizeof(size), SEEK_END) == -1) {
-        return ErrnoError() << "lseek(" << filler_path << ") failed.";
-    }
-    if (write(fd.get(), &size, sizeof(size)) <= 0) {
-        return ErrnoError() << "write(" << filler_path << ") failed.";
-    }
-    return true;
-}
-
 // fill zeros to align |file_path|'s size to BLOCK_SIZE(4096) boundary.
-// return true when the filler is generated.
+// return true when the filler is needed.
 Result<bool> ZeroFiller(const std::string& file_path, const std::string& filler_path) {
     auto file_size = GetFileSize(file_path);
     if (!file_size.ok()) {
@@ -288,32 +209,17 @@
     return true;
 }
 
-// Do not generate any fillers
-// Note that CreateCompositeDisk() handles gaps between partitions.
-Result<bool> NoFiller(const std::string& file_path, const std::string& filler_path) {
-    (void)file_path;
-    (void)filler_path;
-    return false;
-}
-
 Result<void> MakePayload(const Config& config, const std::string& metadata_file,
                          const std::string& output_file) {
     std::vector<MultipleImagePartition> partitions;
 
-    // put metadata at the first partition
-    partitions.push_back(MultipleImagePartition{
-            .label = "payload-metadata",
-            .image_file_paths = {metadata_file},
-            .type = kLinuxFilesystem,
-            .read_only = true,
-    });
-
     int filler_count = 0;
-    auto add_partition = [&](auto partition_name, auto file_path, auto filler) -> Result<void> {
+    auto add_partition = [&](auto partition_name, auto file_path) -> Result<void> {
         std::vector<std::string> image_files{file_path};
 
-        std::string filler_path = output_file + "." + std::to_string(filler_count++);
-        if (auto ret = filler(file_path, filler_path); !ret.ok()) {
+        std::string filler_path =
+                AppendFileName(output_file, "-filler-" + std::to_string(filler_count++));
+        if (auto ret = ZeroFiller(file_path, filler_path); !ret.ok()) {
             return ret.error();
         } else if (*ret) {
             image_files.push_back(filler_path);
@@ -327,27 +233,31 @@
         return {};
     };
 
-    // put apexes at the subsequent partitions with "size" filler
+    // put metadata at the first partition
+    partitions.push_back(MultipleImagePartition{
+            .label = "payload-metadata",
+            .image_file_paths = {metadata_file},
+            .type = kLinuxFilesystem,
+            .read_only = true,
+    });
+    // put apexes at the subsequent partitions
     for (size_t i = 0; i < config.apexes.size(); i++) {
         const auto& apex_config = config.apexes[i];
-        std::string apex_path = ToAbsolute(apex_config.path, config.dirname);
-        if (auto ret = add_partition("microdroid-apex-" + std::to_string(i), apex_path, SizeFiller);
+        std::string apex_path = RelativeTo(apex_config.path, config.dirname);
+        if (auto ret = add_partition("microdroid-apex-" + std::to_string(i), apex_path);
             !ret.ok()) {
             return ret.error();
         }
     }
-    // put apk with "zero" filler.
-    // TODO(jooyung): partition name("microdroid-apk") is TBD
+    // put apk and its idsig
     if (config.apk.has_value()) {
-        std::string apk_path = ToAbsolute(config.apk->path, config.dirname);
-        if (auto ret = add_partition("microdroid-apk", apk_path, ZeroFiller); !ret.ok()) {
+        std::string apk_path = RelativeTo(config.apk->path, config.dirname);
+        if (auto ret = add_partition("microdroid-apk", apk_path); !ret.ok()) {
             return ret.error();
         }
-        if (config.apk->idsig_path.has_value()) {
-            std::string idsig_path = ToAbsolute(config.apk->idsig_path.value(), config.dirname);
-            if (auto ret = add_partition("microdroid-apk-idsig", idsig_path, NoFiller); !ret.ok()) {
-                return ret.error();
-            }
+        std::string idsig_path = RelativeTo(config.apk->idsig_path, config.dirname);
+        if (auto ret = add_partition("microdroid-apk-idsig", idsig_path); !ret.ok()) {
+            return ret.error();
         }
     }
 
@@ -365,12 +275,7 @@
 
     auto config = LoadConfig(argv[1]);
     if (!config.ok()) {
-        std::cerr << config.error() << '\n';
-        return 1;
-    }
-
-    if (const auto res = LoadSystemApexes(*config); !res.ok()) {
-        std::cerr << res.error() << '\n';
+        std::cerr << "bad config: " << config.error() << '\n';
         return 1;
     }
 
diff --git a/microdroid_manager/Android.bp b/microdroid_manager/Android.bp
index 0ea5d87..a082beb 100644
--- a/microdroid_manager/Android.bp
+++ b/microdroid_manager/Android.bp
@@ -10,6 +10,7 @@
     prefer_rlib: true,
     rustlibs: [
         "libanyhow",
+        "libapkverify",
         "libkernlog",
         "liblibc",
         "liblog_rust",
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index 2586737..fa456e8 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -17,7 +17,8 @@
 mod ioutil;
 mod metadata;
 
-use anyhow::{anyhow, bail, Result};
+use anyhow::{anyhow, bail, Context, Result};
+use apkverify::verify;
 use log::{error, info, warn};
 use microdroid_payload_config::{Task, TaskType, VmPayloadConfig};
 use rustutils::system_properties::PropertyWatcher;
@@ -30,12 +31,19 @@
 use vsock::VsockStream;
 
 const WAIT_TIMEOUT: Duration = Duration::from_secs(10);
+const DM_MOUNTED_APK_PATH: &str = "/dev/block/mapper/microdroid-apk";
 
 fn main() -> Result<()> {
     kernlog::init()?;
     info!("started.");
 
     let metadata = metadata::load()?;
+
+    if let Err(err) = verify_payloads() {
+        error!("failed to verify payload: {}", err);
+        // TODO(jooyung): should stop the boot process if verification fails
+    }
+
     if !metadata.payload_config_path.is_empty() {
         let config = load_config(Path::new(&metadata.payload_config_path))?;
 
@@ -56,6 +64,19 @@
     Ok(())
 }
 
+// TODO(jooyung): v2/v3 full verification can be slow. Consider multithreading.
+fn verify_payloads() -> Result<()> {
+    // We don't verify APEXes since apexd does.
+
+    // Wait for the APK to be dm-verity mounted by apkdmverity
+    ioutil::wait_for_file(DM_MOUNTED_APK_PATH, WAIT_TIMEOUT)?;
+    verify(DM_MOUNTED_APK_PATH).context(format!("failed to verify {}", DM_MOUNTED_APK_PATH))?;
+
+    info!("payload verification succeeded.");
+    // TODO(jooyung): collect public keys and store them in instance.img
+    Ok(())
+}
+
 fn load_config(path: &Path) -> Result<VmPayloadConfig> {
     info!("loading config from {:?}...", path);
     let file = ioutil::wait_for_file(path, WAIT_TIMEOUT)?;
diff --git a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
index aa7c9ab..a7b855a 100644
--- a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
+++ b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
@@ -44,6 +44,11 @@
                         /* debug */ false);
         adbConnectToMicrodroid(getDevice(), cid);
 
+        // Wait until logd-reinit starts. The service is one of the last services that are started in
+        // the microdroid boot procedure. Therefore, waiting for the service means that we wait for
+        // the boot to complete. TODO: we need a better marker eventually.
+        tryRunOnMicrodroid("watch -e \"getprop init.svc.logd-reinit | grep '^$'\"");
+
         // Test writing to /data partition
         runOnMicrodroid("echo MicrodroidTest > /data/local/tmp/test.txt");
         assertThat(runOnMicrodroid("cat /data/local/tmp/test.txt"), is("MicrodroidTest"));
diff --git a/virtualizationservice/Android.bp b/virtualizationservice/Android.bp
index 239d729..cf92d5a 100644
--- a/virtualizationservice/Android.bp
+++ b/virtualizationservice/Android.bp
@@ -25,19 +25,15 @@
         "libandroid_logger",
         "libanyhow",
         "libcommand_fds",
-        "libcrc32fast",
         "libdisk",
         "liblog_rust",
         "libmicrodroid_metadata",
         "libmicrodroid_payload_config",
         "libonce_cell",
-        "libprotobuf",
-        "libprotos",
         "libserde_json",
         "libserde_xml_rs",
         "libserde",
         "libshared_child",
-        "libuuid",
         "libvmconfig",
         "libzip",
         "libvsock",
diff --git a/virtualizationservice/src/composite.rs b/virtualizationservice/src/composite.rs
index ded0053..40c7e5e 100644
--- a/virtualizationservice/src/composite.rs
+++ b/virtualizationservice/src/composite.rs
@@ -14,279 +14,12 @@
 
 //! Functions for creating a composite disk image.
 
-use crate::gpt::{
-    write_gpt_header, write_protective_mbr, GptPartitionEntry, GPT_BEGINNING_SIZE, GPT_END_SIZE,
-    GPT_HEADER_SIZE, GPT_NUM_PARTITIONS, GPT_PARTITION_ENTRY_SIZE, SECTOR_SIZE,
-};
 use android_system_virtualizationservice::aidl::android::system::virtualizationservice::Partition::Partition;
-use anyhow::{anyhow, bail, Context, Error};
-use crc32fast::Hasher;
-use disk::create_disk_file;
-use log::{trace, warn};
-use protobuf::Message;
-use protos::cdisk_spec::{ComponentDisk, CompositeDisk, ReadWriteCapability};
-use std::convert::TryInto;
+use anyhow::{anyhow, Context, Error};
+use disk::{create_composite_disk, create_disk_file, ImagePartitionType, PartitionInfo};
 use std::fs::{File, OpenOptions};
-use std::io::Write;
 use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
-use uuid::Uuid;
-
-/// A magic string placed at the beginning of a composite disk file to identify it.
-const CDISK_MAGIC: &str = "composite_disk\x1d";
-/// The version of the composite disk format supported by this implementation.
-const COMPOSITE_DISK_VERSION: u64 = 1;
-/// The amount of padding needed between the last partition entry and the first partition, to align
-/// the partition appropriately. The two sectors are for the MBR and the GPT header.
-const PARTITION_ALIGNMENT_SIZE: usize = GPT_BEGINNING_SIZE as usize
-    - 2 * SECTOR_SIZE as usize
-    - GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize;
-const HEADER_PADDING_LENGTH: usize = SECTOR_SIZE as usize - GPT_HEADER_SIZE as usize;
-// Keep all partitions 4k aligned for performance.
-const PARTITION_SIZE_SHIFT: u8 = 12;
-// Keep the disk size a multiple of 64k for crosvm's virtio_blk driver.
-const DISK_SIZE_SHIFT: u8 = 16;
-
-const LINUX_FILESYSTEM_GUID: Uuid = Uuid::from_u128(0x0FC63DAF_8483_4772_8E79_3D69D8477DE4);
-const EFI_SYSTEM_PARTITION_GUID: Uuid = Uuid::from_u128(0xC12A7328_F81F_11D2_BA4B_00A0C93EC93B);
-
-/// Information about a partition to create.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct PartitionInfo {
-    label: String,
-    path: PathBuf,
-    partition_type: ImagePartitionType,
-    writable: bool,
-    size: u64,
-}
-
-/// Round `val` up to the next multiple of 2**`align_log`.
-fn align_to_power_of_2(val: u64, align_log: u8) -> u64 {
-    let align = 1 << align_log;
-    ((val + (align - 1)) / align) * align
-}
-
-/// Round `val` to partition size(4K)
-fn align_to_partition_size(val: u64) -> u64 {
-    align_to_power_of_2(val, PARTITION_SIZE_SHIFT)
-}
-
-impl PartitionInfo {
-    fn aligned_size(&self) -> u64 {
-        align_to_partition_size(self.size)
-    }
-}
-
-/// The type of partition.
-#[allow(dead_code)]
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum ImagePartitionType {
-    LinuxFilesystem,
-    EfiSystemPartition,
-}
-
-impl ImagePartitionType {
-    fn guid(self) -> Uuid {
-        match self {
-            Self::LinuxFilesystem => LINUX_FILESYSTEM_GUID,
-            Self::EfiSystemPartition => EFI_SYSTEM_PARTITION_GUID,
-        }
-    }
-}
-
-/// Write protective MBR and primary GPT table.
-fn write_beginning(
-    file: &mut impl Write,
-    disk_guid: Uuid,
-    partitions: &[u8],
-    partition_entries_crc32: u32,
-    secondary_table_offset: u64,
-    disk_size: u64,
-) -> Result<(), Error> {
-    // Write the protective MBR to the first sector.
-    write_protective_mbr(file, disk_size)?;
-
-    // Write the GPT header, and pad out to the end of the sector.
-    write_gpt_header(file, disk_guid, partition_entries_crc32, secondary_table_offset, false)?;
-    file.write_all(&[0; HEADER_PADDING_LENGTH])?;
-
-    // Write partition entries, including unused ones.
-    file.write_all(partitions)?;
-
-    // Write zeroes to align the first partition appropriately.
-    file.write_all(&[0; PARTITION_ALIGNMENT_SIZE])?;
-
-    Ok(())
-}
-
-/// Write secondary GPT table.
-fn write_end(
-    file: &mut impl Write,
-    disk_guid: Uuid,
-    partitions: &[u8],
-    partition_entries_crc32: u32,
-    secondary_table_offset: u64,
-    disk_size: u64,
-) -> Result<(), Error> {
-    // Write partition entries, including unused ones.
-    file.write_all(partitions)?;
-
-    // Write the GPT header, and pad out to the end of the sector.
-    write_gpt_header(file, disk_guid, partition_entries_crc32, secondary_table_offset, true)?;
-    file.write_all(&[0; HEADER_PADDING_LENGTH])?;
-
-    // Pad out to the aligned disk size.
-    let used_disk_size = secondary_table_offset + GPT_END_SIZE;
-    let padding = disk_size - used_disk_size;
-    file.write_all(&vec![0; padding as usize])?;
-
-    Ok(())
-}
-
-/// Create the `GptPartitionEntry` for the given partition.
-fn create_gpt_entry(partition: &PartitionInfo, offset: u64) -> GptPartitionEntry {
-    let mut partition_name: Vec<u16> = partition.label.encode_utf16().collect();
-    partition_name.resize(36, 0);
-
-    GptPartitionEntry {
-        partition_type_guid: partition.partition_type.guid(),
-        unique_partition_guid: Uuid::new_v4(),
-        first_lba: offset / SECTOR_SIZE,
-        last_lba: (offset + partition.aligned_size()) / SECTOR_SIZE - 1,
-        attributes: 0,
-        partition_name: partition_name.try_into().unwrap(),
-    }
-}
-
-/// Create one or more `ComponentDisk` proto messages for the given partition.
-fn create_component_disks(
-    partition: &PartitionInfo,
-    offset: u64,
-    zero_filler_path: &str,
-) -> Result<Vec<ComponentDisk>, Error> {
-    let aligned_size = partition.aligned_size();
-
-    let mut component_disks = vec![ComponentDisk {
-        offset,
-        file_path: partition.path.to_str().context("Invalid partition path")?.to_string(),
-        read_write_capability: if partition.writable {
-            ReadWriteCapability::READ_WRITE
-        } else {
-            ReadWriteCapability::READ_ONLY
-        },
-        ..ComponentDisk::new()
-    }];
-
-    if partition.size != aligned_size {
-        if partition.writable {
-            bail!(
-                "Read-write partition {:?} size is not a multiple of {}.",
-                partition,
-                1 << PARTITION_SIZE_SHIFT
-            );
-        } else {
-            // Fill in the gap by reusing the header file, because we know it is always bigger
-            // than the alignment size (i.e. GPT_BEGINNING_SIZE > 1 << PARTITION_SIZE_SHIFT).
-            warn!(
-                "Read-only partition {:?} size is not a multiple of {}, filling gap.",
-                partition,
-                1 << PARTITION_SIZE_SHIFT
-            );
-            component_disks.push(ComponentDisk {
-                offset: offset + partition.size,
-                file_path: zero_filler_path.to_owned(),
-                read_write_capability: ReadWriteCapability::READ_ONLY,
-                ..ComponentDisk::new()
-            });
-        }
-    }
-
-    Ok(component_disks)
-}
-
-/// Create a new composite disk containing the given partitions, and write it out to the given
-/// files.
-pub fn create_composite_disk(
-    partitions: &[PartitionInfo],
-    zero_filler_path: &Path,
-    header_path: &Path,
-    header_file: &mut File,
-    footer_path: &Path,
-    footer_file: &mut File,
-    output_composite: &mut File,
-) -> Result<(), Error> {
-    let zero_filler_path =
-        zero_filler_path.to_str().context("Invalid zero filler path")?.to_string();
-    let header_path = header_path.to_str().context("Invalid header path")?.to_string();
-    let footer_path = footer_path.to_str().context("Invalid footer path")?.to_string();
-
-    let mut composite_proto = CompositeDisk::new();
-    composite_proto.version = COMPOSITE_DISK_VERSION;
-    composite_proto.component_disks.push(ComponentDisk {
-        file_path: header_path,
-        offset: 0,
-        read_write_capability: ReadWriteCapability::READ_ONLY,
-        ..ComponentDisk::new()
-    });
-
-    // Write partitions to a temporary buffer so that we can calculate the CRC, and construct the
-    // ComponentDisk proto messages at the same time.
-    let mut partitions_buffer =
-        [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
-    let mut writer: &mut [u8] = &mut partitions_buffer;
-    let mut next_disk_offset = GPT_BEGINNING_SIZE;
-    for partition in partitions {
-        create_gpt_entry(partition, next_disk_offset).write_bytes(&mut writer)?;
-
-        for component_disk in
-            create_component_disks(partition, next_disk_offset, &zero_filler_path)?
-        {
-            composite_proto.component_disks.push(component_disk);
-        }
-
-        next_disk_offset += partition.aligned_size();
-    }
-    let secondary_table_offset = next_disk_offset;
-    let disk_size = align_to_power_of_2(secondary_table_offset + GPT_END_SIZE, DISK_SIZE_SHIFT);
-    trace!("Partitions: {:#?}", partitions);
-    trace!("Secondary table offset: {} disk size: {}", secondary_table_offset, disk_size);
-
-    composite_proto.component_disks.push(ComponentDisk {
-        file_path: footer_path,
-        offset: secondary_table_offset,
-        read_write_capability: ReadWriteCapability::READ_ONLY,
-        ..ComponentDisk::new()
-    });
-
-    // Calculate CRC32 of partition entries.
-    let mut hasher = Hasher::new();
-    hasher.update(&partitions_buffer);
-    let partition_entries_crc32 = hasher.finalize();
-
-    let disk_guid = Uuid::new_v4();
-    write_beginning(
-        header_file,
-        disk_guid,
-        &partitions_buffer,
-        partition_entries_crc32,
-        secondary_table_offset,
-        disk_size,
-    )?;
-    write_end(
-        footer_file,
-        disk_guid,
-        &partitions_buffer,
-        partition_entries_crc32,
-        secondary_table_offset,
-        disk_size,
-    )?;
-
-    composite_proto.length = disk_size;
-    output_composite.write_all(CDISK_MAGIC.as_bytes())?;
-    composite_proto.write_to_writer(output_composite)?;
-
-    Ok(())
-}
 
 /// Constructs a composite disk image for the given list of partitions, and opens it ready to use.
 ///
@@ -390,63 +123,3 @@
         .map_err(|e| anyhow!("Failed to open partition image: {}", e))?
         .get_len()?)
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn beginning_size() {
-        let mut buffer = vec![];
-        let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
-        let disk_size = 1000 * SECTOR_SIZE;
-        write_beginning(
-            &mut buffer,
-            Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
-            &partitions,
-            42,
-            disk_size - GPT_END_SIZE,
-            disk_size,
-        )
-        .unwrap();
-
-        assert_eq!(buffer.len(), GPT_BEGINNING_SIZE as usize);
-    }
-
-    #[test]
-    fn end_size() {
-        let mut buffer = vec![];
-        let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
-        let disk_size = 1000 * SECTOR_SIZE;
-        write_end(
-            &mut buffer,
-            Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
-            &partitions,
-            42,
-            disk_size - GPT_END_SIZE,
-            disk_size,
-        )
-        .unwrap();
-
-        assert_eq!(buffer.len(), GPT_END_SIZE as usize);
-    }
-
-    #[test]
-    fn end_size_with_padding() {
-        let mut buffer = vec![];
-        let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
-        let disk_size = 1000 * SECTOR_SIZE;
-        let padding = 3 * SECTOR_SIZE;
-        write_end(
-            &mut buffer,
-            Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
-            &partitions,
-            42,
-            disk_size - GPT_END_SIZE - padding,
-            disk_size,
-        )
-        .unwrap();
-
-        assert_eq!(buffer.len(), GPT_END_SIZE as usize + padding as usize);
-    }
-}
diff --git a/virtualizationservice/src/gpt.rs b/virtualizationservice/src/gpt.rs
deleted file mode 100644
index 346a40a..0000000
--- a/virtualizationservice/src/gpt.rs
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Functions for writing GUID Partition Tables for use in a composite disk image.
-
-use anyhow::Error;
-use crc32fast::Hasher;
-use std::convert::TryInto;
-use std::io::Write;
-use uuid::Uuid;
-
-/// The size in bytes of a disk sector (also called a block).
-pub const SECTOR_SIZE: u64 = 1 << 9;
-/// The size in bytes on an MBR partition entry.
-const MBR_PARTITION_ENTRY_SIZE: usize = 16;
-/// The size in bytes of a GPT header.
-pub const GPT_HEADER_SIZE: u32 = 92;
-/// The number of partition entries in the GPT, which is the maximum number of partitions which are
-/// supported.
-pub const GPT_NUM_PARTITIONS: u32 = 128;
-/// The size in bytes of a single GPT partition entry.
-pub const GPT_PARTITION_ENTRY_SIZE: u32 = 128;
-/// The size in bytes of everything before the first partition: i.e. the MBR, GPT header and GPT
-/// partition entries.
-pub const GPT_BEGINNING_SIZE: u64 = SECTOR_SIZE * 40;
-/// The size in bytes of everything after the last partition: i.e. the GPT partition entries and GPT
-/// footer.
-pub const GPT_END_SIZE: u64 = SECTOR_SIZE * 33;
-
-/// Write a protective MBR for a disk of the given total size (in bytes).
-///
-/// This should be written at the start of the disk, before the GPT header. It
-/// is one `SECTOR_SIZE` long.
-pub fn write_protective_mbr(file: &mut impl Write, disk_size: u64) -> Result<(), Error> {
-    // Bootstrap code
-    file.write_all(&[0; 446])?;
-
-    // Partition status
-    file.write_all(&[0x00])?;
-    // Begin CHS
-    file.write_all(&[0; 3])?;
-    // Partition type
-    file.write_all(&[0xEE])?;
-    // End CHS
-    file.write_all(&[0; 3])?;
-    let first_lba: u32 = 1;
-    file.write_all(&first_lba.to_le_bytes())?;
-    let number_of_sectors: u32 = (disk_size / SECTOR_SIZE).try_into()?;
-    file.write_all(&number_of_sectors.to_le_bytes())?;
-
-    // Three more empty partitions
-    file.write_all(&[0; MBR_PARTITION_ENTRY_SIZE * 3])?;
-
-    // Boot signature
-    file.write_all(&[0x55, 0xAA])?;
-
-    Ok(())
-}
-
-#[derive(Clone, Debug, Default, Eq, PartialEq)]
-struct GptHeader {
-    signature: [u8; 8],
-    revision: [u8; 4],
-    header_size: u32,
-    header_crc32: u32,
-    current_lba: u64,
-    backup_lba: u64,
-    first_usable_lba: u64,
-    last_usable_lba: u64,
-    disk_guid: Uuid,
-    partition_entries_lba: u64,
-    num_partition_entries: u32,
-    partition_entry_size: u32,
-    partition_entries_crc32: u32,
-}
-
-impl GptHeader {
-    fn write_bytes(&self, out: &mut impl Write) -> Result<(), Error> {
-        out.write_all(&self.signature)?;
-        out.write_all(&self.revision)?;
-        out.write_all(&self.header_size.to_le_bytes())?;
-        out.write_all(&self.header_crc32.to_le_bytes())?;
-        // Reserved
-        out.write_all(&[0; 4])?;
-        out.write_all(&self.current_lba.to_le_bytes())?;
-        out.write_all(&self.backup_lba.to_le_bytes())?;
-        out.write_all(&self.first_usable_lba.to_le_bytes())?;
-        out.write_all(&self.last_usable_lba.to_le_bytes())?;
-
-        // GUID is mixed-endian for some reason, so we can't just use `Uuid::as_bytes()`.
-        write_guid(out, self.disk_guid)?;
-
-        out.write_all(&self.partition_entries_lba.to_le_bytes())?;
-        out.write_all(&self.num_partition_entries.to_le_bytes())?;
-        out.write_all(&self.partition_entry_size.to_le_bytes())?;
-        out.write_all(&self.partition_entries_crc32.to_le_bytes())?;
-        Ok(())
-    }
-}
-
-/// Write a GPT header for the disk.
-///
-/// It may either be a primary header (which should go at LBA 1) or a secondary header (which should
-/// go at the end of the disk).
-pub fn write_gpt_header(
-    out: &mut impl Write,
-    disk_guid: Uuid,
-    partition_entries_crc32: u32,
-    secondary_table_offset: u64,
-    secondary: bool,
-) -> Result<(), Error> {
-    let primary_header_lba = 1;
-    let secondary_header_lba = (secondary_table_offset + GPT_END_SIZE) / SECTOR_SIZE - 1;
-    let mut gpt_header = GptHeader {
-        signature: *b"EFI PART",
-        revision: [0, 0, 1, 0],
-        header_size: GPT_HEADER_SIZE,
-        current_lba: if secondary { secondary_header_lba } else { primary_header_lba },
-        backup_lba: if secondary { primary_header_lba } else { secondary_header_lba },
-        first_usable_lba: GPT_BEGINNING_SIZE / SECTOR_SIZE,
-        last_usable_lba: secondary_table_offset / SECTOR_SIZE - 1,
-        disk_guid,
-        partition_entries_lba: 2,
-        num_partition_entries: GPT_NUM_PARTITIONS,
-        partition_entry_size: GPT_PARTITION_ENTRY_SIZE,
-        partition_entries_crc32,
-        header_crc32: 0,
-    };
-
-    // Write once to a temporary buffer to calculate the CRC.
-    let mut header_without_crc = [0u8; GPT_HEADER_SIZE as usize];
-    gpt_header.write_bytes(&mut &mut header_without_crc[..])?;
-    let mut hasher = Hasher::new();
-    hasher.update(&header_without_crc);
-    gpt_header.header_crc32 = hasher.finalize();
-
-    gpt_header.write_bytes(out)?;
-
-    Ok(())
-}
-
-/// A GPT entry for a particular partition.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct GptPartitionEntry {
-    pub partition_type_guid: Uuid,
-    pub unique_partition_guid: Uuid,
-    pub first_lba: u64,
-    pub last_lba: u64,
-    pub attributes: u64,
-    /// UTF-16LE
-    pub partition_name: [u16; 36],
-}
-
-// TODO: Derive this once arrays of more than 32 elements have default values.
-impl Default for GptPartitionEntry {
-    fn default() -> Self {
-        Self {
-            partition_type_guid: Default::default(),
-            unique_partition_guid: Default::default(),
-            first_lba: 0,
-            last_lba: 0,
-            attributes: 0,
-            partition_name: [0; 36],
-        }
-    }
-}
-
-impl GptPartitionEntry {
-    /// Write out the partition table entry. It will take
-    /// `GPT_PARTITION_ENTRY_SIZE` bytes.
-    pub fn write_bytes(&self, out: &mut impl Write) -> Result<(), Error> {
-        write_guid(out, self.partition_type_guid)?;
-        write_guid(out, self.unique_partition_guid)?;
-        out.write_all(&self.first_lba.to_le_bytes())?;
-        out.write_all(&self.last_lba.to_le_bytes())?;
-        out.write_all(&self.attributes.to_le_bytes())?;
-        for code_unit in &self.partition_name {
-            out.write_all(&code_unit.to_le_bytes())?;
-        }
-        Ok(())
-    }
-}
-
-/// Write a UUID in the mixed-endian format which GPT uses for GUIDs.
-fn write_guid(out: &mut impl Write, guid: Uuid) -> Result<(), Error> {
-    let guid_fields = guid.as_fields();
-    out.write_all(&guid_fields.0.to_le_bytes())?;
-    out.write_all(&guid_fields.1.to_le_bytes())?;
-    out.write_all(&guid_fields.2.to_le_bytes())?;
-    out.write_all(guid_fields.3)?;
-
-    Ok(())
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn protective_mbr_size() {
-        let mut buffer = vec![];
-        write_protective_mbr(&mut buffer, 1000 * SECTOR_SIZE).unwrap();
-
-        assert_eq!(buffer.len(), SECTOR_SIZE as usize);
-    }
-
-    #[test]
-    fn header_size() {
-        let mut buffer = vec![];
-        write_gpt_header(
-            &mut buffer,
-            Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
-            42,
-            1000 * SECTOR_SIZE,
-            false,
-        )
-        .unwrap();
-
-        assert_eq!(buffer.len(), GPT_HEADER_SIZE as usize);
-    }
-
-    #[test]
-    fn partition_entry_size() {
-        let mut buffer = vec![];
-        GptPartitionEntry::default().write_bytes(&mut buffer).unwrap();
-
-        assert_eq!(buffer.len(), GPT_PARTITION_ENTRY_SIZE as usize);
-    }
-}
diff --git a/virtualizationservice/src/main.rs b/virtualizationservice/src/main.rs
index c9cc029..018be7b 100644
--- a/virtualizationservice/src/main.rs
+++ b/virtualizationservice/src/main.rs
@@ -17,7 +17,6 @@
 mod aidl;
 mod composite;
 mod crosvm;
-mod gpt;
 mod payload;
 
 use crate::aidl::{VirtualizationService, BINDER_SERVICE_IDENTIFIER, TEMPORARY_DIRECTORY};
diff --git a/virtualizationservice/src/payload.rs b/virtualizationservice/src/payload.rs
index 338e9a2..75ba6c7 100644
--- a/virtualizationservice/src/payload.rs
+++ b/virtualizationservice/src/payload.rs
@@ -85,7 +85,12 @@
         version: 1,
         apexes: apexes
             .iter()
-            .map(|apex| ApexPayload { name: apex.name.clone(), ..Default::default() })
+            .enumerate()
+            .map(|(i, apex)| ApexPayload {
+                name: apex.name.clone(),
+                partition_name: format!("microdroid-apex-{}", i),
+                ..Default::default()
+            })
             .collect(),
         apk: Some(ApkPayload {
             name: "apk".to_owned(),