Merge "Fix build error after crosvm upstream merge"
diff --git a/TEST_MAPPING b/TEST_MAPPING
index b805d03..b07dc3b 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -8,6 +8,9 @@
     },
     {
       "name": "VirtualizationTestCases"
+    },
+    {
+      "name": "MicrodroidTestApp"
     }
   ],
   "imports": [
diff --git a/apex/Android.bp b/apex/Android.bp
index af65e79..93d9f36 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -27,6 +27,7 @@
                 "microdroid_boot-5.10",
                 "microdroid_vendor_boot-5.10",
                 "microdroid_vbmeta",
+                "microdroid_vbmeta_bootconfig",
             ],
         },
         x86_64: {
@@ -39,6 +40,7 @@
                 "microdroid_boot-5.10",
                 "microdroid_vendor_boot-5.10",
                 "microdroid_vbmeta",
+                "microdroid_vbmeta_bootconfig",
             ],
         },
     },
@@ -108,3 +110,55 @@
         "simg2img",
     ],
 }
+
+sh_test_host {
+    name: "sign_virt_apex_test",
+    src: "sign_virt_apex_test.sh",
+    test_config: "sign_virt_apex_test.xml",
+    data_bins: [
+        // deapexer
+        "deapexer",
+        "debugfs_static",
+
+        // sign_virt_apex
+        "avbtool",
+        "img2simg",
+        "lpmake",
+        "lpunpack",
+        "sign_virt_apex",
+        "simg2img",
+    ],
+    data_libs: [
+        "libbase",
+        "libc++",
+        "libcrypto_utils",
+        "libcrypto",
+        "libext4_utils",
+        "liblog",
+        "liblp",
+        "libsparse",
+        "libz",
+    ],
+    data: [
+        ":com.android.virt",
+        "test.com.android.virt.pem",
+    ],
+    test_suites: ["general-tests"],
+}
+
+// custom tool to replace bytes in a file
+python_binary_host {
+    name: "replace_bytes",
+    srcs: [
+        "replace_bytes.py",
+    ],
+    version: {
+        py2: {
+            enabled: false,
+        },
+        py3: {
+            enabled: true,
+            embedded_launcher: true,
+        },
+    },
+}
diff --git a/apex/replace_bytes.py b/apex/replace_bytes.py
new file mode 100644
index 0000000..b22f132
--- /dev/null
+++ b/apex/replace_bytes.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""replace_bytes is a command line tool to replace bytes in a file.
+
+Typical usage: replace_bytes target_file old_file new_file
+
+  Replaces the bytes of old_file with the bytes of new_file in target_file. old_file and new_file
+  must be the same size.
+
+"""
+import argparse
+import sys
+
+
+def ParseArgs(argv):
+    parser = argparse.ArgumentParser(description='Replace bytes')
+    parser.add_argument(
+        'target_file',
+        help='path to the target file.')
+    parser.add_argument(
+        'old_file',
+        help='path to the file containing old bytes')
+    parser.add_argument(
+        'new_file',
+        help='path to the file containing new bytes')
+    return parser.parse_args(argv)
+
+
+def ReplaceBytes(target_file, old_file, new_file):
+    # read old bytes
+    with open(old_file, 'rb') as f:
+        old_bytes = f.read()
+
+    # read new bytes
+    with open(new_file, 'rb') as f:
+        new_bytes = f.read()
+
+    assert len(old_bytes) == len(new_bytes), 'Pubkeys should be the same size. (%d != %d)' % (
+        len(old_bytes), len(new_bytes))
+
+    # replace bytes in target_file
+    with open(target_file, 'r+b') as f:
+        pos = f.read().find(old_bytes)
+        assert pos != -1, 'Pubkey not found'
+        f.seek(pos)
+        f.write(new_bytes)
+
+
+def main(argv):
+    try:
+        args = ParseArgs(argv)
+        ReplaceBytes(args.target_file, args.old_file, args.new_file)
+    except Exception as e:
+        print(e)
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    main(sys.argv[1:])
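
For reference, replace_bytes.py amounts to a find-and-overwrite of the old blob inside the target file. Below is a minimal, self-contained sketch of the same idea; the temp-file contents are made up for illustration and are not the actual APEX artifacts.

```python
import os
import tempfile


def replace_bytes(target_path, old_bytes, new_bytes):
    # Mirrors replace_bytes.py: locate old_bytes and overwrite it in place.
    assert len(old_bytes) == len(new_bytes), 'blobs must be the same size'
    with open(target_path, 'r+b') as f:
        pos = f.read().find(old_bytes)
        assert pos != -1, 'old bytes not found'
        f.seek(pos)
        f.write(new_bytes)


# Hypothetical usage: swap a placeholder pubkey embedded in a bootloader-like blob.
with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, 'bootloader.bin')
    with open(target, 'wb') as f:
        f.write(b'HEADER' + b'OLDKEY' + b'FOOTER')
    replace_bytes(target, b'OLDKEY', b'NEWKEY')
    with open(target, 'rb') as f:
        assert f.read() == b'HEADERNEWKEYFOOTER'
```
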
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
index 77f54c4..153b5dd 100644
--- a/apex/sign_virt_apex.py
+++ b/apex/sign_virt_apex.py
@@ -22,6 +22,8 @@
 - lpmake, lpunpack, simg2img, img2simg
 """
 import argparse
+import glob
+import hashlib
 import os
 import re
 import shutil
@@ -32,6 +34,8 @@
 
 def ParseArgs(argv):
     parser = argparse.ArgumentParser(description='Sign the Virt APEX')
+    parser.add_argument('--verify', action='store_true',
+                        help='Verify the Virt APEX')
     parser.add_argument(
         '-v', '--verbose',
         action='store_true',
@@ -76,7 +80,12 @@
     return int(value.removesuffix(' bytes'))
 
 
-def AvbInfo(args, image_path, descriptor_name=None):
+def ExtractAvbPubkey(args, key, output):
+    RunCommand(args, ['avbtool', 'extract_public_key',
+               '--key', key, '--output', output])
+
+
+def AvbInfo(args, image_path):
     """Parses avbtool --info image output
 
     Args:
@@ -87,7 +96,7 @@
     Returns:
       A pair of
         - a dict that contains VBMeta info. None if there's no VBMeta info.
-        - a dict that contains target descriptor info. None if name is not specified or not found.
+        - a list of (descriptor name, descriptor) pairs.
     """
     if not os.path.exists(image_path):
         raise ValueError('Failed to find image: {}'.format(image_path))
@@ -97,7 +106,7 @@
     if ret_code == 1:
         return None, None
 
-    info, descriptor = {}, None
+    info, descriptors = {}, []
 
     # Read `avbtool info_image` output as "key:value" lines
     matcher = re.compile(r'^(\s*)([^:]+):\s*(.*)$')
@@ -110,30 +119,39 @@
             yield line_info.group(1), line_info.group(2), line_info.group(3)
 
     gen = IterateLine(output)
+
+    def ReadDescriptors(cur_indent, cur_name, cur_value):
+        descriptor = cur_value if cur_name == 'Prop' else {}
+        descriptors.append((cur_name, descriptor))
+        for indent, key, value in gen:
+            if indent <= cur_indent:
+                # This line starts the next descriptor; recurse so its key becomes the name
+                ReadDescriptors(indent, key, value)
+                break
+            descriptor[key] = value
+
     # Read VBMeta info
     for _, key, value in gen:
         if key == 'Descriptors':
+            ReadDescriptors(*next(gen))
             break
         info[key] = value
 
-    if descriptor_name:
-        for indent, key, _ in gen:
-            # Read a target descriptor
-            if key == descriptor_name:
-                cur_indent = indent
-                descriptor = {}
-                for indent, key, value in gen:
-                    if indent == cur_indent:
-                        break
-                    descriptor[key] = value
-                break
+    return info, descriptors
 
-    return info, descriptor
+
+# Looks up `key` in a list of (key, value) pairs and returns the value of the first match, or None.
+def LookUp(pairs, key):
+    for k, v in pairs:
+        if key == k:
+            return v
+    return None
 
 
 def AddHashFooter(args, key, image_path):
-    info, descriptor = AvbInfo(args, image_path, 'Hash descriptor')
+    info, descriptors = AvbInfo(args, image_path)
     if info:
+        descriptor = LookUp(descriptors, 'Hash descriptor')
         image_size = ReadBytesSize(info['Image size'])
         algorithm = info['Algorithm']
         partition_name = descriptor['Partition Name']
@@ -149,8 +167,9 @@
 
 
 def AddHashTreeFooter(args, key, image_path):
-    info, descriptor = AvbInfo(args, image_path, 'Hashtree descriptor')
+    info, descriptors = AvbInfo(args, image_path)
     if info:
+        descriptor = LookUp(descriptors, 'Hashtree descriptor')
         image_size = ReadBytesSize(info['Image size'])
         algorithm = info['Algorithm']
         partition_name = descriptor['Partition Name']
@@ -166,9 +185,12 @@
         RunCommand(args, cmd)
 
 
-def MakeVbmetaImage(args, key, vbmeta_img, images):
-    info, _ = AvbInfo(args, vbmeta_img)
-    if info:
+def MakeVbmetaImage(args, key, vbmeta_img, images=None, chained_partitions=None):
+    info, descriptors = AvbInfo(args, vbmeta_img)
+    if info is None:
+        return
+
+    with TempDirectory() as work_dir:
         algorithm = info['Algorithm']
         rollback_index = info['Rollback Index']
         rollback_index_location = info['Rollback Index Location']
@@ -179,8 +201,21 @@
                '--rollback_index', rollback_index,
                '--rollback_index_location', rollback_index_location,
                '--output', vbmeta_img]
-        for img in images:
-            cmd.extend(['--include_descriptors_from_image', img])
+        if images:
+            for img in images:
+                cmd.extend(['--include_descriptors_from_image', img])
+
+        # replace pubkeys of chained_partitions as well
+        for name, descriptor in descriptors:
+            if name == 'Chain Partition descriptor':
+                part_name = descriptor['Partition Name']
+                ril = descriptor['Rollback Index Location']
+                part_key = chained_partitions[part_name]
+                avbpubkey = os.path.join(work_dir, part_name + '.avbpubkey')
+                ExtractAvbPubkey(args, part_key, avbpubkey)
+                cmd.extend(['--chain_partition', '%s:%s:%s' %
+                           (part_name, ril, avbpubkey)])
+
         RunCommand(args, cmd)
         # libavb expects to be able to read the maximum vbmeta size, so we must provide a partition
         # which matches this or the read will fail.
@@ -219,8 +254,8 @@
     with open(bootloader_pubkey, 'rb') as f:
         old_pubkey = f.read()
 
-    # replace bootloader pubkey
-    RunCommand(args, ['avbtool', 'extract_public_key', '--key', key, '--output', bootloader_pubkey])
+    # replace bootloader pubkey (overwrite the old one with the new one)
+    ExtractAvbPubkey(args, key, bootloader_pubkey)
 
     # read new pubkey
     with open(bootloader_pubkey, 'rb') as f:
@@ -241,13 +276,22 @@
     input_dir = args.input_dir
 
     # target files in the Virt APEX
-    bootloader_pubkey = os.path.join(input_dir, 'etc', 'microdroid_bootloader.avbpubkey')
+    bootloader_pubkey = os.path.join(
+        input_dir, 'etc', 'microdroid_bootloader.avbpubkey')
     bootloader = os.path.join(input_dir, 'etc', 'microdroid_bootloader')
     boot_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_boot-5.10.img')
     vendor_boot_img = os.path.join(
         input_dir, 'etc', 'fs', 'microdroid_vendor_boot-5.10.img')
     super_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_super.img')
     vbmeta_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_vbmeta.img')
+    vbmeta_bootconfig_img = os.path.join(
+        input_dir, 'etc', 'fs', 'microdroid_vbmeta_bootconfig.img')
+    bootconfig_normal = os.path.join(
+        input_dir, 'etc', 'microdroid_bootconfig.normal')
+    bootconfig_app_debuggable = os.path.join(
+        input_dir, 'etc', 'microdroid_bootconfig.app_debuggable')
+    bootconfig_full_debuggable = os.path.join(
+        input_dir, 'etc', 'microdroid_bootconfig.full_debuggable')
 
     # Key(pubkey) for bootloader should match with the one used to make VBmeta below
     # while it's okay to use different keys for other image files.
@@ -280,14 +324,81 @@
         # Ideally, making VBmeta should be done out of TempDirectory block. But doing it here
         # to avoid unpacking re-signed super.img for system/vendor images which are available
         # in this block.
-        MakeVbmetaImage(args, key, vbmeta_img, [
+        MakeVbmetaImage(args, key, vbmeta_img, images=[
                         boot_img, vendor_boot_img, system_a_img, vendor_a_img])
 
+    # Re-sign bootconfigs with the same key
+    bootconfig_sign_key = key
+    AddHashFooter(args, bootconfig_sign_key, bootconfig_normal)
+    AddHashFooter(args, bootconfig_sign_key, bootconfig_app_debuggable)
+    AddHashFooter(args, bootconfig_sign_key, bootconfig_full_debuggable)
+
+    # Re-sign vbmeta_bootconfig with a chained_partition to "bootconfig"
+    # Note that, for now, `key` and `bootconfig_sign_key` are the same, but technically they
+    # can be different. The vbmeta image records the pubkeys that signed the chained partitions.
+    MakeVbmetaImage(args, key, vbmeta_bootconfig_img, chained_partitions={
+                    'bootconfig': bootconfig_sign_key})
+
+
+def VerifyVirtApex(args):
+    # Generator that yields avbtool-signed items along with their pubkey digests.
+    # This supports lpmake-packed images as well.
+    def Recur(target_dir):
+        for file in glob.glob(os.path.join(target_dir, 'etc', '**', '*'), recursive=True):
+            cur_item = os.path.relpath(file, target_dir)
+
+            if not os.path.isfile(file):
+                continue
+
+            # avbpubkey
+            if cur_item == 'etc/microdroid_bootloader.avbpubkey':
+                with open(file, 'rb') as f:
+                    yield (cur_item, hashlib.sha1(f.read()).hexdigest())
+                continue
+
+            # avbtool signed
+            info, _ = AvbInfo(args, file)
+            if info:
+                yield (cur_item, info['Public key (sha1)'])
+                continue
+
+            # logical partition
+            with TempDirectory() as tmp_dir:
+                unsparsed = os.path.join(tmp_dir, os.path.basename(file))
+                _, rc = RunCommand(
+                    # simg2img exits with 255 if the input is not a sparse image
+                    args, ['simg2img', file, unsparsed], expected_return_values={0, 255})
+                if rc == 0:
+                    with TempDirectory() as unpack_dir:
+                        # exit with 64 if it's not a logical partition.
+                        _, rc = RunCommand(
+                            args, ['lpunpack', unsparsed, unpack_dir], expected_return_values={0, 64})
+                        if rc == 0:
+                            nested_items = list(Recur(unpack_dir))
+                            if len(nested_items) > 0:
+                                for (item, key) in nested_items:
+                                    yield ('%s!/%s' % (cur_item, item), key)
+                                continue
+    # Read pubkey digest
+    with TempDirectory() as tmp_dir:
+        pubkey_file = os.path.join(tmp_dir, 'avbpubkey')
+        ExtractAvbPubkey(args, args.key, pubkey_file)
+        with open(pubkey_file, 'rb') as f:
+            pubkey_digest = hashlib.sha1(f.read()).hexdigest()
+
+    # Check every avbtool-signed item against the input key
+    for (item, pubkey) in Recur(args.input_dir):
+        assert pubkey == pubkey_digest, '%s: key mismatch: %s != %s' % (
+            item, pubkey, pubkey_digest)
+
 
 def main(argv):
     try:
         args = ParseArgs(argv)
-        SignVirtApex(args)
+        if args.verify:
+            VerifyVirtApex(args)
+        else:
+            SignVirtApex(args)
     except Exception as e:
         print(e)
         sys.exit(1)
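
The reworked AvbInfo now returns the descriptors as an ordered list of (name, value) pairs instead of a single named descriptor, which is what lets AddHashFooter/AddHashTreeFooter use LookUp and lets MakeVbmetaImage re-chain every 'Chain Partition descriptor'. Below is a rough standalone sketch of that indentation-based parsing. The sample text is made up in the spirit of `avbtool info_image` output, not its exact format, and Prop descriptors are left out for brevity.

```python
import re

SAMPLE = """\
Algorithm:                SHA256_RSA4096
Rollback Index:           0
Descriptors:
    Chain Partition descriptor:
      Partition Name:          bootconfig
      Rollback Index Location: 1
    Hash descriptor:
      Partition Name:          boot
      Salt:                    deadbeef
"""


def parse(text):
    matcher = re.compile(r'^(\s*)([^:]+):\s*(.*)$')
    info, descriptors = {}, []
    lines = iter(text.splitlines())

    def iterate():
        for line in lines:
            m = matcher.match(line)
            if m:
                yield len(m.group(1)), m.group(2), m.group(3)

    gen = iterate()

    def read_descriptors(cur_indent, cur_name, _cur_value):
        descriptor = {}
        descriptors.append((cur_name, descriptor))
        for indent, key, value in gen:
            if indent <= cur_indent:
                # A sibling descriptor starts here; recurse so its key becomes the name.
                read_descriptors(indent, key, value)
                break
            descriptor[key] = value

    for _, key, value in gen:
        if key == 'Descriptors':
            read_descriptors(*next(gen))
            break
        info[key] = value
    return info, descriptors


def look_up(pairs, key):
    for k, v in pairs:
        if k == key:
            return v
    return None


info, descriptors = parse(SAMPLE)
assert info['Algorithm'] == 'SHA256_RSA4096'
assert look_up(descriptors, 'Hash descriptor')['Partition Name'] == 'boot'
assert look_up(descriptors, 'Chain Partition descriptor')['Rollback Index Location'] == '1'
```
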
diff --git a/apex/sign_virt_apex_test.sh b/apex/sign_virt_apex_test.sh
new file mode 100644
index 0000000..640a3d4
--- /dev/null
+++ b/apex/sign_virt_apex_test.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+shopt -s extglob
+
+TMP_ROOT=$(mktemp -d -t sign_virt_apex-XXXXXXXX)
+TEST_DIR=$(dirname $0)
+
+# To access host tools
+PATH=$TEST_DIR:$PATH
+DEBUGFS=$TEST_DIR/debugfs_static
+
+deapexer --debugfs_path $DEBUGFS extract $TEST_DIR/com.android.virt.apex $TMP_ROOT
+
+if [ "$(ls -A $TMP_ROOT/etc/fs/)" ]; then
+  sign_virt_apex $TEST_DIR/test.com.android.virt.pem $TMP_ROOT
+  sign_virt_apex --verify $TEST_DIR/test.com.android.virt.pem $TMP_ROOT
+else
+  echo "No filesystem images. Skip."
+fi
+
diff --git a/apex/sign_virt_apex_test.xml b/apex/sign_virt_apex_test.xml
new file mode 100644
index 0000000..5ea84a1
--- /dev/null
+++ b/apex/sign_virt_apex_test.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Runs sign_virt_apex test">
+    <test class="com.android.tradefed.testtype.binary.ExecutableHostTest" >
+      <option name="binary" value="sign_virt_apex_test"/>
+    </test>
+</configuration>
diff --git a/apex/test.com.android.virt.pem b/apex/test.com.android.virt.pem
new file mode 100644
index 0000000..b0cfff4
--- /dev/null
+++ b/apex/test.com.android.virt.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEAw91a1/DFwSu1FbX92SxGshBGPvHW+4JpvVCw10rhx39pPynI
+ePOf94c94f+pZr6QsT94sQ93Ubjhzf29E9wb5QVT98VJycyYviClyFRl8a1KQQQh
+JGR9v4KEceEWYeJe3nbYDzPwvzdJXy0DbLNUWZBXfDEZGyQHnwb124OIkmBFWz+h
+QsRGFMP+9FgATKH2jnsrNEB2yMQqw7+gpBMJ4q2bqOGE48EjERQG7oFQYfzDsyDd
+5xDhvXFVQmIcrwHRc8DSVaXdlZwQQLCKc6wXg1XLY6/7KQr+XCuz0ptCQ0OW3MAB
+OySsxnl82R+zlb9j05uZf0Z7yUW5hyZylZshK8rAVUYbYLFUmsD3X43qx42GzNfj
+FHhZn6k8CnnyGYvjY3/Lp3JY+EEbvzmVAJrDmMmUMIpML06D7Hu509yBOSAoE8qy
+rcccglHs3rHQ93lSslU02JWYcJ193KThQIcmc1OXoT+NPZf4NKemVE2uCX+mPRNR
+M4ACofXbVg/b5NcEelgIzL0UOZDQMj+WdyGpJ3d8YmE+1WWQ8nqbbCy0vQc+8jc0
+ZzZ/RF4WlBOj/or1TkWyGvGVXYhnU8raF1MnDRbOixZpPhSfdC7vtzktcYxHXt5X
+Ect42/ynX4Q5Gz3VMeg3WcIMNMSGFo7B3UEhde5MVxgEf1AQelm8/LaqWncCAwEA
+AQKCAgAKIyrQgmW52clFlmXNF72Q+cao+1tlKRrP8Z01h2qoKLAJ1N/LYtCkvxs0
+10atSq+yfNaCU4qZcDg/sSJYJMxMzjnKWSu4hh5huM7bz4J3P8DYHJ6ag5j+kILK
+YhwGdPD0ErKcFtQfEX16r5m9xopXGGFuzBvAi9zZHkMbWXN4IAN29ZQjIIWADaTk
+gKmDTd61ASr7SVrciUqtVv25gELCuLmVxBZcs2JdP+wb7BV8/NgkLU9O5lDIvVTs
+WqehZzawBwrb4/nUBH/S2VBRLFcLNSWRw0n8ldUUcC6ed+q2EIl+Y3Gs3fkTTLZp
+hnqFBaLlEOig7cT6ZeF0XUkQ9TaCNoEXEistwT6hlWSoAwUPi2q5AeJc9TFCrw8i
+mJWY+9UZH/lOBM8jYoGPW2b7drbNn/8DuPu1N9miP12oaL5KjjGlXvN4RmdkaGjU
+/zUNceQm/Q8hPudCaZLR9trMAOjmHl9GqnGxR35pRWMRJ/N11X1KLVdSlVpUFtHB
+yhvAAhsFLAZxaiQmAANmwz9cnxJTk6+2JTyX6EZOdAFqDibjvTQIqERoSBtKDVTa
+5n02MC3MHSeouhMyQscLvxTa9RtqxQHHnnQBDplkQGErmz5VhD4CYMDKgjhGbH71
+tg0LHujMieldWnpKPZWlztmGaDaPksJAAUKA8RBKrJ2RgXAyAQKCAQEA712eJxuh
+KzoOe0rIHwC4De5vO7ZyleLGOVvaX9jcm3xxQg1feC5r03xcxqkDvuio94Y4s/Sx
+ZubieWY60pPY3d5w160EKRIwAUKtPR2Uy/QLvC3jMnmIy29KP0F6oQXxMurQ16IS
+Aul5aSHIB33UlEd9v9HenTc9hPvYMUALe0HmisXYTRR0p9DMlqt+goaiynD3U2gh
+09x640EtCvDJiM2pAaVw2z9J/eFHypy6AERaGbX3vYjlbch1oqH5+67i0Nl/FZLx
+wL2q5fUsGx8DNQmHu0kjlLLIbGAx/1dtXWOhH0q4SWrGFJXgsYu5f6AzIHz6XKDi
+cITb8P8JUoZgiwKCAQEA0XnXeppR6DASAZSi7e19WWLmUafjur/qUYy+Aolr7Oyc
+H18JU71AOohM8TxgDTGNfvzII6ryxK5j5VpBnL4IX44ymjQ2J7nOtRl7t5Ceh9Cy
+lPFZwxUlV7Mikow8kAVpbY0JonUnRCzcxNT1tO8qlWYEj8L1vZf2d61VIACE/fJU
+ekWQKr/CLlNp/PvjAQaLd6oSh5gf4Ymx+5bFM86tJbR3YAtMWvr8I+nPDT8Q0G2c
+Zt62ZKiE76duma7ndS1Od7ohuLbwW4vV1KUcSzFkfGjP/Cx6D+wQydWAdi7fsQ2u
+xNstQbbP535x5uwVIlZovflq9Sl8AA5bBRnduvSfRQKCAQAiLN6gvMwlDNP2fHXY
+H1UoAAv3nZP8nHUqyVeDacYNmRXelWQ1F4OjnVTttEHppvRA6vP7lYsiowJgzNzH
+Jf7HprO7x2MZrhQWifuMB0YwXHa0dmTC1yFV0lzqbSHiDaQjXe1VbDlgGw+PmBgk
+Ia4RQafNlFxRXAq3ivGSDo/VGFKfK6I3Vx1UvHYJaRDV9/0UJE7bpLl3szoEalDR
+CBHuK1be+k0DsKSSz/BdGEViNmAa3aUydXI0W3OYNcIoUg7mPLdtUB6eIzZcQMX8
+VVAy6VpsvgOLfn8pIg7hYw0lUU0214c6TDldxQxgrQ9eDnReRhnE0d+iqwVwAinF
+k5QDAoIBAHA/Z/Xsp6NRzvRF36C7OAYj9uMeoes6V6dnUZIubUTB7U7qMCdNLBOx
+YfmKrrWjLf00G1LxkbFO+Xy3Bp2lPvtlSTxUagiTim6Ev0S4HBsO/ALP6Zedxyrd
+dNMujm1mWP45K0aAnI/tskdPDnLsDdeMmTkn8WKtAYdTvF+vp5QkvJvglsYxhy4n
+yI2ltBiily2CVveNzteeX18/hWCjiSjBMY6nvzypbV8ZNLgWaT4m3j5JbVc27jU1
+dRCpJqIlqvyBIvzGGroTjnuqFiU8zGnWCE1K0AWkK8Lbw0CREZDgkhwujmu+OF4F
+5acmLpT91JaoBmZk2mt1RdTP7X73AjkCggEBAIwQSTzvVIqJoD4Vc9wqbCGyPr2s
+/g0GkEmcJJpe6E8AxZNzmBKV3bDzvC+thhEVQeCmVDZsOZjO/4TumskHntxMHIpp
+DHRgYiERCM2oIPMEkruqCQ+0BlH/8CtglyrPmsLgSU6L1CBQNMt39KFtcscMMwkk
+Coo/qN0DarQGkrjc+UN4Q0lJDBVB5nQj+1uCVEBnV/IC+08kr9uXIJGAllh3Wfgq
+jOdL2j1knpYD9Wi1TCZwDobCqDWwYMVQZNbGu6de3lWtuBYKCd154QUVm11Kkv3P
+Gz/yGM1v6IttZ0osMujVLADyZMLYKSt8ypRlB3TUD/4P3bUryorV/bu/ew8=
+-----END RSA PRIVATE KEY-----
diff --git a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
index a565a6f..58ccfc3 100644
--- a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
+++ b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
@@ -16,47 +16,59 @@
 
 package com.android.virt.fs;
 
-/** {@hide} */
+/**
+ * A service that works like a file server, where files and directories are identified by a
+ * "remote FD" that may be pre-exchanged or created on request.
+ *
+ * When a binder error is returned as a service-specific error, the error code is an errno value
+ * (an int).
+ *
+ * {@hide}
+ */
 interface IVirtFdService {
-    /** Error when the requesting FD is unknown. */
-    const int ERROR_UNKNOWN_FD = 1;
-
-    /**
-     * Error when I/O fails. This can happen when actual I/O error happens to the backing file,
-     * when the given offset or size are invalid, or any problems that can fail a read/write
-     * request.
-     */
-    const int ERROR_IO = 2;
-
-    /** Error when the file is too large to handle correctly. */
-    const int ERROR_FILE_TOO_LARGE = 3;
-
     /** Maximum content size that the service allows the client to request. */
     const int MAX_REQUESTING_DATA = 16384;
 
     /**
-     * Returns the content of the given file ID, from the offset, for the amount of requested size
+     * Returns the content of the given remote FD, from the offset, for the amount of requested size
      * or until EOF.
      */
-    byte[] readFile(int id, long offset, int size);
+    byte[] readFile(int fd, long offset, int size);
 
     /**
-     * Returns the content of fs-verity compatible Merkle tree of the given file ID, from the
+     * Returns the content of fs-verity compatible Merkle tree of the given remote FD, from the
      * offset, for the amount of requested size or until EOF.
      */
-    byte[] readFsverityMerkleTree(int id, long offset, int size);
+    byte[] readFsverityMerkleTree(int fd, long offset, int size);
 
-    /** Returns the fs-verity signature of the given file ID. */
-    byte[] readFsveritySignature(int id);
+    /** Returns the fs-verity signature of the given remote FD. */
+    byte[] readFsveritySignature(int fd);
 
     /**
-     * Writes the buffer to the given file ID from the file's offset. Returns the number of bytes
+     * Writes the buffer to the given remote FD from the file's offset. Returns the number of bytes
      * written.
      */
-    int writeFile(int id, in byte[] buf, long offset);
+    int writeFile(int fd, in byte[] buf, long offset);
 
-    /** Resizes the file backed by the given file ID to the new size. */
-    void resize(int id, long size);
+    /** Resizes the file backed by the given remote FD to the new size. */
+    void resize(int fd, long size);
 
-    long getFileSize(int id);
+    /** Returns the file size. */
+    long getFileSize(int fd);
+
+    /**
+     * Creates a file in the directory referred to by the given remote FD.
+     *
+     * @param basename The file name to create. Must not contain a directory separator.
+     * @return A remote FD that represents the newly created file.
+     */
+    int createFileInDirectory(int fd, String basename);
+
+    /**
+     * Creates a directory inside the directory referred to by the given remote FD.
+     *
+     * @param basename The directory name to create. Must not contain a directory separator.
+     * @return A remote FD that represents the newly created directory.
+     */
+    int createDirectoryInDirectory(int fd, String basename);
 }
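
Since the interface now reports failures as service-specific errors whose code is an errno value, a client can translate the returned code straight into a local I/O error. A minimal sketch of that mapping in plain Python, independent of any binder bindings; EBADF is used only as an example code.

```python
import errno
import os


def service_specific_to_oserror(code):
    # The AIDL contract above says service-specific errors carry an errno value (an int).
    if code > 0:
        return OSError(code, os.strerror(code))
    # Fall back to a generic I/O error if the code is not a valid errno.
    return OSError(errno.EIO, 'unknown service error')


err = service_specific_to_oserror(errno.EBADF)
assert err.errno == errno.EBADF
print(errno.errorcode[err.errno], err.strerror)  # e.g. "EBADF Bad file descriptor"
```
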
diff --git a/authfs/fd_server/src/aidl.rs b/authfs/fd_server/src/aidl.rs
index b235025..0c41eac 100644
--- a/authfs/fd_server/src/aidl.rs
+++ b/authfs/fd_server/src/aidl.rs
@@ -16,40 +16,37 @@
 
 use anyhow::Result;
 use log::error;
+use nix::{
+    dir::Dir, errno::Errno, fcntl::openat, fcntl::OFlag, sys::stat::mkdirat, sys::stat::Mode,
+};
 use std::cmp::min;
-use std::collections::BTreeMap;
+use std::collections::{btree_map, BTreeMap};
 use std::convert::TryInto;
 use std::fs::File;
 use std::io;
 use std::os::unix::fs::FileExt;
-use std::os::unix::io::AsRawFd;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::path::MAIN_SEPARATOR;
+use std::sync::{Arc, Mutex};
 
 use crate::fsverity;
 use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::{
-    BnVirtFdService, IVirtFdService, ERROR_FILE_TOO_LARGE, ERROR_IO, ERROR_UNKNOWN_FD,
-    MAX_REQUESTING_DATA,
+    BnVirtFdService, IVirtFdService, MAX_REQUESTING_DATA,
 };
 use authfs_aidl_interface::binder::{
     BinderFeatures, ExceptionCode, Interface, Result as BinderResult, Status, StatusCode, Strong,
 };
-use binder_common::new_binder_exception;
+use binder_common::{new_binder_exception, new_binder_service_specific_error};
 
 fn validate_and_cast_offset(offset: i64) -> Result<u64, Status> {
-    offset.try_into().map_err(|_| {
-        new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, format!("Invalid offset: {}", offset))
-    })
+    offset.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
 }
 
 fn validate_and_cast_size(size: i32) -> Result<usize, Status> {
     if size > MAX_REQUESTING_DATA {
-        Err(new_binder_exception(
-            ExceptionCode::ILLEGAL_ARGUMENT,
-            format!("Unexpectedly large size: {}", size),
-        ))
+        Err(new_errno_error(Errno::EFBIG))
     } else {
-        size.try_into().map_err(|_| {
-            new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, format!("Invalid size: {}", size))
-        })
+        size.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
     }
 }
 
@@ -71,26 +68,53 @@
     /// A readable/writable file to serve by this server. This backing file should just be a
     /// regular file and does not have any specific property.
     ReadWrite(File),
+
+    /// A writable directory to serve by this server.
+    OutputDir(Dir),
 }
 
 pub struct FdService {
-    /// A pool of opened files, may be readonly or read-writable.
-    fd_pool: BTreeMap<i32, FdConfig>,
+    /// A pool of opened files and directories, which can be looked up by the FD number.
+    fd_pool: Arc<Mutex<BTreeMap<i32, FdConfig>>>,
 }
 
 impl FdService {
     pub fn new_binder(fd_pool: BTreeMap<i32, FdConfig>) -> Strong<dyn IVirtFdService> {
-        BnVirtFdService::new_binder(FdService { fd_pool }, BinderFeatures::default())
+        BnVirtFdService::new_binder(
+            FdService { fd_pool: Arc::new(Mutex::new(fd_pool)) },
+            BinderFeatures::default(),
+        )
     }
 
-    /// Handles the requesting file `id` with `handler` if it is in the FD pool. This function
-    /// returns whatever the handler returns.
-    fn handle_fd<F, R>(&self, id: i32, handler: F) -> BinderResult<R>
+    /// Handles the requested file `id` with `handle_fn` if it is in the FD pool. This function
+    /// returns whatever `handle_fn` returns.
+    fn handle_fd<F, R>(&self, id: i32, handle_fn: F) -> BinderResult<R>
     where
         F: FnOnce(&FdConfig) -> BinderResult<R>,
     {
-        let fd_config = self.fd_pool.get(&id).ok_or_else(|| Status::from(ERROR_UNKNOWN_FD))?;
-        handler(fd_config)
+        let fd_pool = self.fd_pool.lock().unwrap();
+        let fd_config = fd_pool.get(&id).ok_or_else(|| new_errno_error(Errno::EBADF))?;
+        handle_fn(fd_config)
+    }
+
+    /// Inserts a new FD and its corresponding `FdConfig`, created by `create_fn`, into the FD pool, then
+    /// returns the new FD number.
+    fn insert_new_fd<F>(&self, fd: i32, create_fn: F) -> BinderResult<i32>
+    where
+        F: FnOnce(&mut FdConfig) -> BinderResult<(i32, FdConfig)>,
+    {
+        let mut fd_pool = self.fd_pool.lock().unwrap();
+        let mut fd_config = fd_pool.get_mut(&fd).ok_or_else(|| new_errno_error(Errno::EBADF))?;
+        let (new_fd, new_fd_config) = create_fn(&mut fd_config)?;
+        if let btree_map::Entry::Vacant(entry) = fd_pool.entry(new_fd) {
+            entry.insert(new_fd_config);
+            Ok(new_fd)
+        } else {
+            Err(new_binder_exception(
+                ExceptionCode::ILLEGAL_STATE,
+                format!("The newly created FD {} is already in the pool unexpectedly", new_fd),
+            ))
+        }
     }
 }
 
@@ -105,9 +129,10 @@
             FdConfig::Readonly { file, .. } | FdConfig::ReadWrite(file) => {
                 read_into_buf(file, size, offset).map_err(|e| {
                     error!("readFile: read error: {}", e);
-                    Status::from(ERROR_IO)
+                    new_errno_error(Errno::EIO)
                 })
             }
+            FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
         })
     }
 
@@ -120,14 +145,14 @@
                 if let Some(tree_file) = &alt_merkle_tree {
                     read_into_buf(tree_file, size, offset).map_err(|e| {
                         error!("readFsverityMerkleTree: read error: {}", e);
-                        Status::from(ERROR_IO)
+                        new_errno_error(Errno::EIO)
                     })
                 } else {
                     let mut buf = vec![0; size];
                     let s = fsverity::read_merkle_tree(file.as_raw_fd(), offset, &mut buf)
                         .map_err(|e| {
                             error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
-                            Status::from(e.raw_os_error().unwrap_or(ERROR_IO))
+                            new_errno_error(Errno::EIO)
                         })?;
                     debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
                     buf.truncate(s);
@@ -138,8 +163,9 @@
                 // For a writable file, Merkle tree is not expected to be served since Auth FS
                 // doesn't trust it anyway. Auth FS may keep the Merkle tree privately for its own
                 // use.
-                Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
+                Err(new_errno_error(Errno::ENOSYS))
             }
+            FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
         })
     }
 
@@ -152,13 +178,13 @@
                     let offset = 0;
                     read_into_buf(sig_file, size, offset).map_err(|e| {
                         error!("readFsveritySignature: read error: {}", e);
-                        Status::from(ERROR_IO)
+                        new_errno_error(Errno::EIO)
                     })
                 } else {
                     let mut buf = vec![0; MAX_REQUESTING_DATA as usize];
                     let s = fsverity::read_signature(file.as_raw_fd(), &mut buf).map_err(|e| {
                         error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
-                        Status::from(e.raw_os_error().unwrap_or(ERROR_IO))
+                        new_errno_error(Errno::EIO)
                     })?;
                     debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
                     buf.truncate(s);
@@ -167,8 +193,9 @@
             }
             FdConfig::ReadWrite(_file) => {
                 // There is no signature for a writable file.
-                Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
+                Err(new_errno_error(Errno::ENOSYS))
             }
+            FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
         })
     }
 
@@ -176,21 +203,17 @@
         self.handle_fd(id, |config| match config {
             FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
             FdConfig::ReadWrite(file) => {
-                let offset: u64 = offset.try_into().map_err(|_| {
-                    new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, "Invalid offset")
-                })?;
+                let offset: u64 = offset.try_into().map_err(|_| new_errno_error(Errno::EINVAL))?;
                 // Check buffer size just to make `as i32` safe below.
                 if buf.len() > i32::MAX as usize {
-                    return Err(new_binder_exception(
-                        ExceptionCode::ILLEGAL_ARGUMENT,
-                        "Buffer size is too big",
-                    ));
+                    return Err(new_errno_error(Errno::EOVERFLOW));
                 }
                 Ok(file.write_at(buf, offset).map_err(|e| {
                     error!("writeFile: write error: {}", e);
-                    Status::from(ERROR_IO)
+                    new_errno_error(Errno::EIO)
                 })? as i32)
             }
+            FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
         })
     }
 
@@ -199,16 +222,14 @@
             FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
             FdConfig::ReadWrite(file) => {
                 if size < 0 {
-                    return Err(new_binder_exception(
-                        ExceptionCode::ILLEGAL_ARGUMENT,
-                        "Invalid size to resize to",
-                    ));
+                    return Err(new_errno_error(Errno::EINVAL));
                 }
                 file.set_len(size as u64).map_err(|e| {
                     error!("resize: set_len error: {}", e);
-                    Status::from(ERROR_IO)
+                    new_errno_error(Errno::EIO)
                 })
             }
+            FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
         })
     }
 
@@ -219,20 +240,64 @@
                     .metadata()
                     .map_err(|e| {
                         error!("getFileSize error: {}", e);
-                        Status::from(ERROR_IO)
+                        new_errno_error(Errno::EIO)
                     })?
                     .len();
                 Ok(size.try_into().map_err(|e| {
                     error!("getFileSize: File too large: {}", e);
-                    Status::from(ERROR_FILE_TOO_LARGE)
+                    new_errno_error(Errno::EFBIG)
                 })?)
             }
             FdConfig::ReadWrite(_file) => {
                 // Content and metadata of a writable file needs to be tracked by authfs, since
                 // fd_server isn't considered trusted. So there is no point to support getFileSize
                 // for a writable file.
-                Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
+                Err(new_errno_error(Errno::ENOSYS))
             }
+            FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+        })
+    }
+
+    fn createFileInDirectory(&self, fd: i32, basename: &str) -> BinderResult<i32> {
+        if basename.contains(MAIN_SEPARATOR) {
+            return Err(new_errno_error(Errno::EINVAL));
+        }
+        self.insert_new_fd(fd, |config| match config {
+            FdConfig::OutputDir(dir) => {
+                let new_fd = openat(
+                    dir.as_raw_fd(),
+                    basename,
+                    // TODO(205172873): handle the case when the file already exists, e.g. truncate
+                    // or fail, and possibly allow the client to specify. For now, always truncate.
+                    OFlag::O_CREAT | OFlag::O_RDWR | OFlag::O_TRUNC,
+                    Mode::S_IRUSR | Mode::S_IWUSR,
+                )
+                .map_err(new_errno_error)?;
+                // SAFETY: new_fd was just created by openat above and is not owned elsewhere.
+                let new_file = unsafe { File::from_raw_fd(new_fd) };
+                Ok((new_fd, FdConfig::ReadWrite(new_file)))
+            }
+            _ => Err(new_errno_error(Errno::ENOTDIR)),
+        })
+    }
+
+    fn createDirectoryInDirectory(&self, dir_fd: i32, basename: &str) -> BinderResult<i32> {
+        if basename.contains(MAIN_SEPARATOR) {
+            return Err(new_errno_error(Errno::EINVAL));
+        }
+        self.insert_new_fd(dir_fd, |config| match config {
+            FdConfig::OutputDir(_) => {
+                mkdirat(dir_fd, basename, Mode::S_IRWXU).map_err(new_errno_error)?;
+                let new_dir = Dir::openat(
+                    dir_fd,
+                    basename,
+                    OFlag::O_DIRECTORY | OFlag::O_RDONLY,
+                    Mode::empty(),
+                )
+                .map_err(new_errno_error)?;
+                Ok((new_dir.as_raw_fd(), FdConfig::OutputDir(new_dir)))
+            }
+            _ => Err(new_errno_error(Errno::ENOTDIR)),
         })
     }
 }
@@ -244,3 +309,7 @@
     file.read_exact_at(&mut buf, offset)?;
     Ok(buf)
 }
+
+fn new_errno_error(errno: Errno) -> Status {
+    new_binder_service_specific_error(errno as i32, errno.desc())
+}
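
The FD pool in fd_server now doubles as an allocator: the kernel-returned FD number becomes the lookup key, and a colliding key is rejected rather than overwritten. A small sketch of that invariant, using a plain dict in place of the mutex-guarded BTreeMap; the names here are illustrative, not the actual service API.

```python
import errno


class FdPool:
    def __init__(self, initial):
        self._pool = dict(initial)  # fd number -> config object

    def insert_new_fd(self, parent_fd, create_fn):
        if parent_fd not in self._pool:
            # Mirrors the EBADF returned for an unknown FD.
            raise OSError(errno.EBADF, 'unknown parent FD')
        new_fd, new_config = create_fn(self._pool[parent_fd])
        if new_fd in self._pool:
            # Never overwrite: better to leak the earlier entry than to alias it.
            raise RuntimeError('FD %d already in the pool' % new_fd)
        self._pool[new_fd] = new_config
        return new_fd


pool = FdPool({3: 'output-dir'})
fd = pool.insert_new_fd(3, lambda parent: (7, 'read-write file'))
assert fd == 7 and 7 in pool._pool
```
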
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index 3413ce6..bbcd49f 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -28,6 +28,7 @@
 use anyhow::{bail, Result};
 use binder_common::rpc_server::run_rpc_server;
 use log::debug;
+use nix::dir::Dir;
 use std::collections::BTreeMap;
 use std::fs::File;
 use std::os::unix::io::FromRawFd;
@@ -77,6 +78,12 @@
     Ok((fd, FdConfig::ReadWrite(file)))
 }
 
+fn parse_arg_rw_dirs(arg: &str) -> Result<(i32, FdConfig)> {
+    let fd = arg.parse::<i32>()?;
+
+    Ok((fd, FdConfig::OutputDir(Dir::from_fd(fd)?)))
+}
+
 struct Args {
     fd_pool: BTreeMap<i32, FdConfig>,
     ready_fd: Option<File>,
@@ -93,6 +100,10 @@
              .long("rw-fds")
              .multiple(true)
              .number_of_values(1))
+        .arg(clap::Arg::with_name("rw-dirs")
+             .long("rw-dirs")
+             .multiple(true)
+             .number_of_values(1))
         .arg(clap::Arg::with_name("ready-fd")
             .long("ready-fd")
             .takes_value(true))
@@ -111,6 +122,12 @@
             fd_pool.insert(fd, config);
         }
     }
+    if let Some(args) = matches.values_of("rw-dirs") {
+        for arg in args {
+            let (fd, config) = parse_arg_rw_dirs(arg)?;
+            fd_pool.insert(fd, config);
+        }
+    }
     let ready_fd = if let Some(arg) = matches.value_of("ready-fd") {
         let fd = arg.parse::<i32>()?;
         Some(fd_to_file(fd)?)
diff --git a/authfs/service/src/authfs.rs b/authfs/service/src/authfs.rs
index 6d87243..1b05749 100644
--- a/authfs/service/src/authfs.rs
+++ b/authfs/service/src/authfs.rs
@@ -132,11 +132,11 @@
         // TODO(b/185178698): Many input files need to be signed and verified.
         // or can we use debug cert for now, which is better than nothing?
         args.push(OsString::from("--remote-ro-file-unverified"));
-        args.push(OsString::from(format!("{}:{}", conf.fd, conf.fd)));
+        args.push(OsString::from(conf.fd.to_string()));
     }
     for conf in out_fds {
         args.push(OsString::from("--remote-new-rw-file"));
-        args.push(OsString::from(format!("{}:{}", conf.fd, conf.fd)));
+        args.push(OsString::from(conf.fd.to_string()));
     }
     if debuggable {
         args.push(OsString::from("--debug"));
diff --git a/authfs/src/file.rs b/authfs/src/file.rs
index 404e3a5..bbe5e6c 100644
--- a/authfs/src/file.rs
+++ b/authfs/src/file.rs
@@ -1,5 +1,7 @@
+mod remote_dir;
 mod remote_file;
 
+pub use remote_dir::RemoteDirEditor;
 pub use remote_file::{RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
 
 use binder::unstable_api::{new_spibinder, AIBinder};
@@ -8,9 +10,10 @@
 
 use crate::common::CHUNK_SIZE;
 use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::IVirtFdService;
-use authfs_aidl_interface::binder::Strong;
+use authfs_aidl_interface::binder::{Status, Strong};
 
 pub type VirtFdService = Strong<dyn IVirtFdService>;
+pub type VirtFdServiceStatus = Status;
 
 pub type ChunkBuffer = [u8; CHUNK_SIZE as usize];
 
diff --git a/authfs/src/file/remote_dir.rs b/authfs/src/file/remote_dir.rs
new file mode 100644
index 0000000..2e1bc33
--- /dev/null
+++ b/authfs/src/file/remote_dir.rs
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::collections::HashMap;
+use std::io;
+use std::path::{Path, PathBuf};
+
+use super::remote_file::RemoteFileEditor;
+use super::{VirtFdService, VirtFdServiceStatus};
+use crate::fsverity::VerifiedFileEditor;
+use crate::fusefs::Inode;
+
+const MAX_ENTRIES: u16 = 100; // Arbitrary limit
+
+/// A remote directory backed by a remote directory FD, where the provider/fd_server is not
+/// trusted.
+///
+/// The directory is initially assumed to be empty, since the storage is not trusted. In practice,
+/// when the backing storage is not clean, the fd_server can fail to create a file or directory due
+/// to a name collision. From RemoteDirEditor's security perspective, such a creation failure is
+/// just one of the possible errors, and what matters is that RemoteDirEditor maintains its own
+/// integrity.
+///
+/// When new files are created through RemoteDirEditor, their integrity is maintained within the
+/// VM. Similarly, the integrity of the directory (namely, its list of entries), and of any new
+/// directories created within it, is also maintained within the VM. A compromised fd_server or a
+/// malicious client can't affect the VM's view of the files and directories within such a
+/// directory.
+pub struct RemoteDirEditor {
+    service: VirtFdService,
+    remote_dir_fd: i32,
+
+    /// Mapping of entry names to the corresponding inode number. The actual file/directory is
+    /// stored in the global pool in fusefs.
+    entries: HashMap<PathBuf, Inode>,
+}
+
+impl RemoteDirEditor {
+    pub fn new(service: VirtFdService, remote_dir_fd: i32) -> Self {
+        RemoteDirEditor { service, remote_dir_fd, entries: HashMap::new() }
+    }
+
+    /// Returns the number of entries created.
+    pub fn number_of_entries(&self) -> u16 {
+        self.entries.len() as u16 // limited to MAX_ENTRIES
+    }
+
+    /// Creates a remote file in the current directory. On success, the returned remote FD is
+    /// stored in `entries` as the inode number.
+    pub fn create_file(
+        &mut self,
+        basename: &Path,
+    ) -> io::Result<(Inode, VerifiedFileEditor<RemoteFileEditor>)> {
+        self.validate_argument(basename)?;
+
+        let basename_str =
+            basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+        let new_fd = self
+            .service
+            .createFileInDirectory(self.remote_dir_fd, basename_str)
+            .map_err(into_io_error)?;
+        let new_inode = new_fd as Inode;
+
+        let new_remote_file =
+            VerifiedFileEditor::new(RemoteFileEditor::new(self.service.clone(), new_fd));
+        self.entries.insert(basename.to_path_buf(), new_inode);
+        Ok((new_inode, new_remote_file))
+    }
+
+    /// Creates a remote directory in the current directory. On success, the returned remote FD is
+    /// stored in `entries` as the inode number.
+    pub fn mkdir(&mut self, basename: &Path) -> io::Result<(Inode, RemoteDirEditor)> {
+        self.validate_argument(basename)?;
+
+        let basename_str =
+            basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+        let new_fd = self
+            .service
+            .createDirectoryInDirectory(self.remote_dir_fd, basename_str)
+            .map_err(into_io_error)?;
+        let new_inode = new_fd as Inode;
+
+        let new_remote_dir = RemoteDirEditor::new(self.service.clone(), new_fd);
+        self.entries.insert(basename.to_path_buf(), new_inode);
+        Ok((new_inode, new_remote_dir))
+    }
+
+    /// Returns the inode number of a file or directory named `name` previously created through
+    /// `RemoteDirEditor`.
+    pub fn find_inode(&self, name: &Path) -> Option<Inode> {
+        self.entries.get(name).copied()
+    }
+
+    fn validate_argument(&self, basename: &Path) -> io::Result<()> {
+        // Kernel should only give us a basename.
+        debug_assert!(basename.parent().is_none());
+        if self.entries.contains_key(basename) {
+            Err(io::Error::from_raw_os_error(libc::EEXIST))
+        } else if self.entries.len() >= MAX_ENTRIES.into() {
+            Err(io::Error::from_raw_os_error(libc::EMLINK))
+        } else {
+            Ok(())
+        }
+    }
+}
+
+fn into_io_error(e: VirtFdServiceStatus) -> io::Error {
+    let maybe_errno = e.service_specific_error();
+    if maybe_errno > 0 {
+        io::Error::from_raw_os_error(maybe_errno)
+    } else {
+        io::Error::new(io::ErrorKind::Other, e.get_description())
+    }
+}
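
The trust model above boils down to this: the in-VM `entries` map, not the untrusted fd_server, is the source of truth for what the directory contains, and the argument checks (EEXIST, EMLINK) are enforced against that map before the backend is even asked. A rough Python sketch of that bookkeeping under the same assumptions; the `FakeBackend` object stands in for the AIDL service and is hypothetical.

```python
import errno

MAX_ENTRIES = 100  # same arbitrary limit as above


class RemoteDirEditor:
    def __init__(self, backend, remote_dir_fd):
        self.backend = backend
        self.remote_dir_fd = remote_dir_fd
        self.entries = {}  # basename -> inode, maintained inside the VM

    def create_file(self, basename):
        self._validate(basename)
        new_fd = self.backend.createFileInDirectory(self.remote_dir_fd, basename)
        self.entries[basename] = new_fd  # the remote FD doubles as the inode number
        return new_fd

    def _validate(self, basename):
        if basename in self.entries:
            raise OSError(errno.EEXIST, 'already created through this editor')
        if len(self.entries) >= MAX_ENTRIES:
            raise OSError(errno.EMLINK, 'too many entries')


class FakeBackend:
    def createFileInDirectory(self, dir_fd, basename):
        return 42  # whatever the (untrusted) server hands back


d = RemoteDirEditor(FakeBackend(), remote_dir_fd=3)
assert d.create_file('hello.txt') == 42
try:
    d.create_file('hello.txt')
except OSError as e:
    assert e.errno == errno.EEXIST  # rejected locally, regardless of the backend
```
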
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index d985581..69a5cb8 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -15,16 +15,17 @@
  */
 
 use anyhow::Result;
-use log::{debug, warn};
-use std::collections::BTreeMap;
+use log::{debug, error, warn};
+use std::collections::{btree_map, BTreeMap, HashMap};
 use std::convert::TryFrom;
-use std::ffi::CStr;
+use std::ffi::{CStr, OsStr};
 use std::fs::OpenOptions;
 use std::io;
 use std::mem::MaybeUninit;
 use std::option::Option;
-use std::os::unix::io::AsRawFd;
-use std::path::Path;
+use std::os::unix::{ffi::OsStrExt, io::AsRawFd};
+use std::path::{Path, PathBuf};
+use std::sync::Mutex;
 use std::time::Duration;
 
 use fuse::filesystem::{
@@ -35,17 +36,27 @@
 
 use crate::common::{divide_roundup, ChunkedSizeIter, CHUNK_SIZE};
 use crate::file::{
-    RandomWrite, ReadByChunk, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader,
+    RandomWrite, ReadByChunk, RemoteDirEditor, RemoteFileEditor, RemoteFileReader,
+    RemoteMerkleTreeReader,
 };
 use crate::fsverity::{VerifiedFileEditor, VerifiedFileReader};
 
-const DEFAULT_METADATA_TIMEOUT: std::time::Duration = Duration::from_secs(5);
-
 pub type Inode = u64;
 type Handle = u64;
 
-/// `FileConfig` defines the file type supported by AuthFS.
-pub enum FileConfig {
+const DEFAULT_METADATA_TIMEOUT: Duration = Duration::from_secs(5);
+const ROOT_INODE: Inode = 1;
+
+/// Maximum bytes in a write transaction to the FUSE device. This limits the maximum buffer size
+/// that the filesystem writes in response to a read request (including FUSE protocol overhead).
+const MAX_WRITE_BYTES: u32 = 65536;
+
+/// Maximum bytes in a read operation.
+/// TODO(victorhsieh): This option is deprecated by FUSE. Figure out if we can remove this.
+const MAX_READ_BYTES: u32 = 65536;
+
+/// `AuthFsEntry` defines the filesystem entry type supported by AuthFS.
+pub enum AuthFsEntry {
     /// A file type that is verified against fs-verity signature (thus read-only). The file is
     /// served from a remote server.
     VerifiedReadonly {
@@ -57,35 +68,70 @@
     /// A file type that is initially empty, and the content is stored on a remote server. File
     /// integrity is guaranteed with private Merkle tree.
     VerifiedNew { editor: VerifiedFileEditor<RemoteFileEditor> },
+    /// A directory type that is initially empty. One can create new file (`VerifiedNew`) and new
+    /// directory (`VerifiedNewDirectory` itself) with integrity guaranteed within the VM.
+    VerifiedNewDirectory { dir: RemoteDirEditor },
 }
 
+// AuthFS needs to be `Sync` to be accepted by fuse::worker::start_message_loop as a `FileSystem`.
 struct AuthFs {
-    /// Store `FileConfig`s using the `Inode` number as the search index.
-    ///
-    /// For further optimization to minimize the search cost, since Inode is integer, we may
-    /// consider storing them in a Vec if we can guarantee that the numbers are small and
-    /// consecutive.
-    file_pool: BTreeMap<Inode, FileConfig>,
+    /// Table for `Inode` to `AuthFsEntry` lookup. This needs to be `Sync` to be used in
+    /// `fuse::worker::start_message_loop`.
+    inode_table: Mutex<BTreeMap<Inode, AuthFsEntry>>,
 
-    /// Maximum bytes in the write transaction to the FUSE device. This limits the maximum size to
-    /// a read request (including FUSE protocol overhead).
-    max_write: u32,
+    /// Root directory entry table for path to `Inode` lookup. The root directory content should
+    /// remain constant throughout the filesystem's lifetime.
+    root_entries: HashMap<PathBuf, Inode>,
 }
 
 impl AuthFs {
-    pub fn new(file_pool: BTreeMap<Inode, FileConfig>, max_write: u32) -> AuthFs {
-        AuthFs { file_pool, max_write }
+    pub fn new(root_entries_by_path: HashMap<PathBuf, AuthFsEntry>) -> AuthFs {
+        let mut next_inode = ROOT_INODE + 1;
+        let mut inode_table = BTreeMap::new();
+        let mut root_entries = HashMap::new();
+
+        root_entries_by_path.into_iter().for_each(|(path_buf, entry)| {
+            next_inode += 1;
+            root_entries.insert(path_buf, next_inode);
+            inode_table.insert(next_inode, entry);
+        });
+
+        AuthFs { inode_table: Mutex::new(inode_table), root_entries }
     }
 
-    /// Handles the file associated with `inode` if found. This function returns whatever the
-    /// handler returns.
-    fn handle_file<F, R>(&self, inode: &Inode, handler: F) -> io::Result<R>
+    /// Handles the file associated with `inode` if found. This function returns whatever
+    /// `handle_fn` returns.
+    fn handle_inode<F, R>(&self, inode: &Inode, handle_fn: F) -> io::Result<R>
     where
-        F: FnOnce(&FileConfig) -> io::Result<R>,
+        F: FnOnce(&AuthFsEntry) -> io::Result<R>,
     {
+        let inode_table = self.inode_table.lock().unwrap();
         let config =
-            self.file_pool.get(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
-        handler(config)
+            inode_table.get(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
+        handle_fn(config)
+    }
+
+    /// Inserts a new inode and corresponding `AuthFsEntry` created by `create_fn` to the inode
+    /// table, then returns the new inode number.
+    fn insert_new_inode<F>(&self, inode: &Inode, create_fn: F) -> io::Result<Inode>
+    where
+        F: FnOnce(&mut AuthFsEntry) -> io::Result<(Inode, AuthFsEntry)>,
+    {
+        let mut inode_table = self.inode_table.lock().unwrap();
+        let mut config =
+            inode_table.get_mut(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
+        let (new_inode, new_file_config) = create_fn(&mut config)?;
+        if let btree_map::Entry::Vacant(entry) = inode_table.entry(new_inode) {
+            entry.insert(new_file_config);
+            Ok(new_inode)
+        } else {
+            // We can't assume fd_server is trusted, so the returned FD may collide with an
+            // existing one, even when we are creating a new file. Do not override an existing FD.
+            // In terms of security, it is better to "leak" the file created earlier than to return
+            // an existing inode as a new file.
+            error!("Inode {} already exists, do not override", new_inode);
+            Err(io::Error::from_raw_os_error(libc::EIO))
+        }
     }
 }
 
@@ -105,25 +151,30 @@
     }
 }
 
-enum FileMode {
+#[allow(clippy::enum_variant_names)]
+enum AccessMode {
     ReadOnly,
     ReadWrite,
 }
 
-fn create_stat(ino: libc::ino_t, file_size: u64, file_mode: FileMode) -> io::Result<libc::stat64> {
+fn create_stat(
+    ino: libc::ino_t,
+    file_size: u64,
+    access_mode: AccessMode,
+) -> io::Result<libc::stat64> {
+    // SAFETY: stat64 is a plain C struct without pointers, so an all-zero value is valid.
     let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
 
     st.st_ino = ino;
-    st.st_mode = match file_mode {
+    st.st_mode = match access_mode {
         // Until needed, let's just grant the owner access.
-        FileMode::ReadOnly => libc::S_IFREG | libc::S_IRUSR,
-        FileMode::ReadWrite => libc::S_IFREG | libc::S_IRUSR | libc::S_IWUSR,
+        // TODO(205169366): Implement mode properly.
+        AccessMode::ReadOnly => libc::S_IFREG | libc::S_IRUSR,
+        AccessMode::ReadWrite => libc::S_IFREG | libc::S_IRUSR | libc::S_IWUSR,
     };
-    st.st_dev = 0;
     st.st_nlink = 1;
     st.st_uid = 0;
     st.st_gid = 0;
-    st.st_rdev = 0;
     st.st_size = libc::off64_t::try_from(file_size)
         .map_err(|_| io::Error::from_raw_os_error(libc::EFBIG))?;
     st.st_blksize = blk_size();
@@ -133,6 +184,30 @@
     Ok(st)
 }
 
+fn create_dir_stat(ino: libc::ino_t, file_number: u16) -> io::Result<libc::stat64> {
+    // SAFETY: stat64 is a plain C struct without pointers, so an all-zero value is valid.
+    let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
+
+    st.st_ino = ino;
+    // TODO(205169366): Implement mode properly.
+    st.st_mode = libc::S_IFDIR
+        | libc::S_IXUSR
+        | libc::S_IWUSR
+        | libc::S_IRUSR
+        | libc::S_IXGRP
+        | libc::S_IXOTH;
+
+    // 2 extra for . and ..
+    st.st_nlink = file_number
+        .checked_add(2)
+        .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?
+        .into();
+
+    st.st_uid = 0;
+    st.st_gid = 0;
+    Ok(st)
+}
+
 fn offset_to_chunk_index(offset: u64) -> u64 {
     offset / CHUNK_SIZE
 }
@@ -187,7 +262,7 @@
     type DirIter = EmptyDirectoryIterator;
 
     fn max_buffer_size(&self) -> u32 {
-        self.max_write
+        MAX_WRITE_BYTES
     }
 
     fn init(&self, _capable: FsOptions) -> io::Result<FsOptions> {
@@ -196,30 +271,59 @@
         Ok(FsOptions::WRITEBACK_CACHE)
     }
 
-    fn lookup(&self, _ctx: Context, _parent: Inode, name: &CStr) -> io::Result<Entry> {
-        // Only accept file name that looks like an integrer. Files in the pool are simply exposed
-        // by their inode number. Also, there is currently no directory structure.
-        let num = name.to_str().map_err(|_| io::Error::from_raw_os_error(libc::EINVAL))?;
-        // Normally, `lookup` is required to increase a reference count for the inode (while
-        // `forget` will decrease it). It is not necessary here since the files are configured to
-        // be static.
-        let inode = num.parse::<Inode>().map_err(|_| io::Error::from_raw_os_error(libc::ENOENT))?;
-        let st = self.handle_file(&inode, |config| match config {
-            FileConfig::UnverifiedReadonly { file_size, .. }
-            | FileConfig::VerifiedReadonly { file_size, .. } => {
-                create_stat(inode, *file_size, FileMode::ReadOnly)
-            }
-            FileConfig::VerifiedNew { editor } => {
-                create_stat(inode, editor.size(), FileMode::ReadWrite)
-            }
-        })?;
-        Ok(Entry {
-            inode,
-            generation: 0,
-            attr: st,
-            entry_timeout: DEFAULT_METADATA_TIMEOUT,
-            attr_timeout: DEFAULT_METADATA_TIMEOUT,
-        })
+    fn lookup(&self, _ctx: Context, parent: Inode, name: &CStr) -> io::Result<Entry> {
+        if parent == ROOT_INODE {
+            let inode = *self
+                .root_entries
+                .get(cstr_to_path(name))
+                .ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
+            // Normally, `lookup` is required to increase a reference count for the inode (while
+            // `forget` will decrease it). This is not necessary yet; it will only matter once we
+            // start to support deletion (and then only for `VerifiedNewDirectory`).
+            let st = self.handle_inode(&inode, |config| match config {
+                AuthFsEntry::UnverifiedReadonly { file_size, .. }
+                | AuthFsEntry::VerifiedReadonly { file_size, .. } => {
+                    create_stat(inode, *file_size, AccessMode::ReadOnly)
+                }
+                AuthFsEntry::VerifiedNew { editor } => {
+                    create_stat(inode, editor.size(), AccessMode::ReadWrite)
+                }
+                AuthFsEntry::VerifiedNewDirectory { dir } => {
+                    create_dir_stat(inode, dir.number_of_entries())
+                }
+            })?;
+            Ok(Entry {
+                inode,
+                generation: 0,
+                attr: st,
+                entry_timeout: DEFAULT_METADATA_TIMEOUT,
+                attr_timeout: DEFAULT_METADATA_TIMEOUT,
+            })
+        } else {
+            let inode = self.handle_inode(&parent, |config| match config {
+                AuthFsEntry::VerifiedNewDirectory { dir } => {
+                    let path: &Path = cstr_to_path(name);
+                    dir.find_inode(path).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+                }
+                _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+            })?;
+            let st = self.handle_inode(&inode, |config| match config {
+                AuthFsEntry::VerifiedNew { editor } => {
+                    create_stat(inode, editor.size(), AccessMode::ReadWrite)
+                }
+                AuthFsEntry::VerifiedNewDirectory { dir } => {
+                    create_dir_stat(inode, dir.number_of_entries())
+                }
+                _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+            })?;
+            Ok(Entry {
+                inode,
+                generation: 0,
+                attr: st,
+                entry_timeout: DEFAULT_METADATA_TIMEOUT,
+                attr_timeout: DEFAULT_METADATA_TIMEOUT,
+            })
+        }
     }
 
     fn getattr(
@@ -228,15 +332,18 @@
         inode: Inode,
         _handle: Option<Handle>,
     ) -> io::Result<(libc::stat64, Duration)> {
-        self.handle_file(&inode, |config| {
+        self.handle_inode(&inode, |config| {
             Ok((
                 match config {
-                    FileConfig::UnverifiedReadonly { file_size, .. }
-                    | FileConfig::VerifiedReadonly { file_size, .. } => {
-                        create_stat(inode, *file_size, FileMode::ReadOnly)?
+                    AuthFsEntry::UnverifiedReadonly { file_size, .. }
+                    | AuthFsEntry::VerifiedReadonly { file_size, .. } => {
+                        create_stat(inode, *file_size, AccessMode::ReadOnly)?
                     }
-                    FileConfig::VerifiedNew { editor } => {
-                        create_stat(inode, editor.size(), FileMode::ReadWrite)?
+                    AuthFsEntry::VerifiedNew { editor } => {
+                        create_stat(inode, editor.size(), AccessMode::ReadWrite)?
+                    }
+                    AuthFsEntry::VerifiedNewDirectory { dir } => {
+                        create_dir_stat(inode, dir.number_of_entries())?
                     }
                 },
                 DEFAULT_METADATA_TIMEOUT,
@@ -252,22 +359,63 @@
     ) -> io::Result<(Option<Self::Handle>, fuse::sys::OpenOptions)> {
         // Since file handle is not really used in later operations (which use Inode directly),
         // return None as the handle.
-        self.handle_file(&inode, |config| {
+        self.handle_inode(&inode, |config| {
             match config {
-                FileConfig::VerifiedReadonly { .. } | FileConfig::UnverifiedReadonly { .. } => {
+                AuthFsEntry::VerifiedReadonly { .. } | AuthFsEntry::UnverifiedReadonly { .. } => {
                     check_access_mode(flags, libc::O_RDONLY)?;
                 }
-                FileConfig::VerifiedNew { .. } => {
+                AuthFsEntry::VerifiedNew { .. } => {
                     // No need to check access modes since all the modes are allowed to the
                     // read-writable file.
                 }
+                AuthFsEntry::VerifiedNewDirectory { .. } => {
+                    // TODO(victorhsieh): implement when needed.
+                    return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+                }
             }
-            // Always cache the file content. There is currently no need to support direct I/O or avoid
-            // the cache buffer. Memory mapping is only possible with cache enabled.
+            // Always cache the file content. There is currently no need to support direct I/O or
+            // avoid the cache buffer. Memory mapping is only possible with cache enabled.
             Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
         })
     }
 
+    fn create(
+        &self,
+        _ctx: Context,
+        parent: Self::Inode,
+        name: &CStr,
+        _mode: u32,
+        _flags: u32,
+        _umask: u32,
+    ) -> io::Result<(Entry, Option<Self::Handle>, fuse::sys::OpenOptions)> {
+        // TODO(205169366): Implement mode properly.
+        // TODO(205172873): handle O_TRUNC and O_EXCL properly.
+        let new_inode = self.insert_new_inode(&parent, |config| match config {
+            AuthFsEntry::VerifiedNewDirectory { dir } => {
+                let basename: &Path = cstr_to_path(name);
+                if dir.find_inode(basename).is_some() {
+                    return Err(io::Error::from_raw_os_error(libc::EEXIST));
+                }
+                let (new_inode, new_file) = dir.create_file(basename)?;
+                Ok((new_inode, AuthFsEntry::VerifiedNew { editor: new_file }))
+            }
+            _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+        })?;
+
+        Ok((
+            Entry {
+                inode: new_inode,
+                generation: 0,
+                attr: create_stat(new_inode, /* file_size */ 0, AccessMode::ReadWrite)?,
+                entry_timeout: DEFAULT_METADATA_TIMEOUT,
+                attr_timeout: DEFAULT_METADATA_TIMEOUT,
+            },
+            // See also `open`.
+            /* handle */ None,
+            fuse::sys::OpenOptions::KEEP_CACHE,
+        ))
+    }
+
     fn read<W: io::Write + ZeroCopyWriter>(
         &self,
         _ctx: Context,
@@ -279,19 +427,20 @@
         _lock_owner: Option<u64>,
         _flags: u32,
     ) -> io::Result<usize> {
-        self.handle_file(&inode, |config| {
+        self.handle_inode(&inode, |config| {
             match config {
-                FileConfig::VerifiedReadonly { reader, file_size } => {
+                AuthFsEntry::VerifiedReadonly { reader, file_size } => {
                     read_chunks(w, reader, *file_size, offset, size)
                 }
-                FileConfig::UnverifiedReadonly { reader, file_size } => {
+                AuthFsEntry::UnverifiedReadonly { reader, file_size } => {
                     read_chunks(w, reader, *file_size, offset, size)
                 }
-                FileConfig::VerifiedNew { editor } => {
+                AuthFsEntry::VerifiedNew { editor } => {
                     // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
                     // request a read even if the file is open with O_WRONLY.
                     read_chunks(w, editor, editor.size(), offset, size)
                 }
+                _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
             }
         })
     }
@@ -308,8 +457,8 @@
         _delayed_write: bool,
         _flags: u32,
     ) -> io::Result<usize> {
-        self.handle_file(&inode, |config| match config {
-            FileConfig::VerifiedNew { editor } => {
+        self.handle_inode(&inode, |config| match config {
+            AuthFsEntry::VerifiedNew { editor } => {
                 let mut buf = vec![0; size as usize];
                 r.read_exact(&mut buf)?;
                 editor.write_at(&buf, offset)
@@ -326,11 +475,11 @@
         _handle: Option<Handle>,
         valid: SetattrValid,
     ) -> io::Result<(libc::stat64, Duration)> {
-        self.handle_file(&inode, |config| {
+        self.handle_inode(&inode, |config| {
             match config {
-                FileConfig::VerifiedNew { editor } => {
+                AuthFsEntry::VerifiedNew { editor } => {
                     // Initialize the default stat.
-                    let mut new_attr = create_stat(inode, editor.size(), FileMode::ReadWrite)?;
+                    let mut new_attr = create_stat(inode, editor.size(), AccessMode::ReadWrite)?;
                     // `valid` indicates what fields in `attr` are valid. Update to return correctly.
                     if valid.contains(SetattrValid::SIZE) {
                         // st_size is i64, but the cast should be safe since kernel should not give a
@@ -381,9 +530,9 @@
         name: &CStr,
         size: u32,
     ) -> io::Result<GetxattrReply> {
-        self.handle_file(&inode, |config| {
+        self.handle_inode(&inode, |config| {
             match config {
-                FileConfig::VerifiedNew { editor } => {
+                AuthFsEntry::VerifiedNew { editor } => {
                     // FUSE ioctl is limited, thus we can't implement fs-verity ioctls without a kernel
                     // change (see b/196635431). Until it's possible, use xattr to expose what we need
                     // as an authfs specific API.
@@ -407,16 +556,44 @@
             }
         })
     }
+
+    fn mkdir(
+        &self,
+        _ctx: Context,
+        parent: Self::Inode,
+        name: &CStr,
+        _mode: u32,
+        _umask: u32,
+    ) -> io::Result<Entry> {
+        // TODO(205169366): Implement mode properly.
+        let new_inode = self.insert_new_inode(&parent, |config| match config {
+            AuthFsEntry::VerifiedNewDirectory { dir } => {
+                let basename: &Path = cstr_to_path(name);
+                if dir.find_inode(basename).is_some() {
+                    return Err(io::Error::from_raw_os_error(libc::EEXIST));
+                }
+                let (new_inode, new_dir) = dir.mkdir(basename)?;
+                Ok((new_inode, AuthFsEntry::VerifiedNewDirectory { dir: new_dir }))
+            }
+            _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+        })?;
+
+        Ok(Entry {
+            inode: new_inode,
+            generation: 0,
+            attr: create_dir_stat(new_inode, /* file_number */ 0)?,
+            entry_timeout: DEFAULT_METADATA_TIMEOUT,
+            attr_timeout: DEFAULT_METADATA_TIMEOUT,
+        })
+    }
 }
 
 /// Mount and start the FUSE instance. This requires CAP_SYS_ADMIN.
 pub fn loop_forever(
-    file_pool: BTreeMap<Inode, FileConfig>,
+    root_entries: HashMap<PathBuf, AuthFsEntry>,
     mountpoint: &Path,
     extra_options: &Option<String>,
 ) -> Result<(), fuse::Error> {
-    let max_read: u32 = 65536;
-    let max_write: u32 = 65536;
     let dev_fuse = OpenOptions::new()
         .read(true)
         .write(true)
@@ -429,7 +606,7 @@
         MountOption::AllowOther,
         MountOption::UserId(0),
         MountOption::GroupId(0),
-        MountOption::MaxRead(max_read),
+        MountOption::MaxRead(MAX_READ_BYTES),
     ];
     if let Some(value) = extra_options {
         mount_options.push(MountOption::Extra(value));
@@ -440,8 +617,12 @@
 
     fuse::worker::start_message_loop(
         dev_fuse,
-        max_write,
-        max_read,
-        AuthFs::new(file_pool, max_write),
+        MAX_WRITE_BYTES,
+        MAX_READ_BYTES,
+        AuthFs::new(root_entries),
     )
 }
+
+fn cstr_to_path(cstr: &CStr) -> &Path {
+    OsStr::from_bytes(cstr.to_bytes()).as_ref()
+}
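
For context on the new lookup flow above, here is a minimal, self-contained sketch (plain std types only; the map and helper names are hypothetical stand-ins, not the real AuthFs/AuthFsEntry types). It shows the two ideas the diff introduces: root entries are resolved by looking the name up as a path in a HashMap instead of parsing it as an inode number, and a directory's link count is its entry count plus two for "." and "..", matching create_dir_stat.

use std::collections::HashMap;
use std::ffi::OsStr;
use std::path::{Path, PathBuf};

type Inode = u64;

// Resolve a root-level name by path lookup rather than by parsing an inode number.
fn lookup_root(entries: &HashMap<PathBuf, Inode>, name: &OsStr) -> Option<Inode> {
    let path: &Path = name.as_ref();
    entries.get(path).copied()
}

// Mirror of the st_nlink computation in create_dir_stat: entries plus "." and "..".
fn dir_nlink(number_of_entries: u16) -> Option<u32> {
    number_of_entries.checked_add(2).map(u32::from)
}

fn main() {
    let mut entries = HashMap::new();
    entries.insert(PathBuf::from("3"), 10u64);
    assert_eq!(lookup_root(&entries, OsStr::new("3")), Some(10));
    assert_eq!(lookup_root(&entries, OsStr::new("4")), None);
    assert_eq!(dir_nlink(5), Some(7));
}
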
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index a6956e2..f6a2a56 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -29,7 +29,7 @@
 
 use anyhow::{bail, Context, Result};
 use log::error;
-use std::collections::BTreeMap;
+use std::collections::HashMap;
 use std::convert::TryInto;
 use std::path::PathBuf;
 use structopt::StructOpt;
@@ -42,9 +42,9 @@
 mod fusefs;
 
 use auth::FakeAuthenticator;
-use file::{RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
+use file::{RemoteDirEditor, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
 use fsverity::{VerifiedFileEditor, VerifiedFileReader};
-use fusefs::{FileConfig, Inode};
+use fusefs::AuthFsEntry;
 
 #[derive(StructOpt)]
 struct Args {
@@ -62,24 +62,33 @@
 
     /// A read-only remote file with integrity check. Can be multiple.
     ///
-    /// For example, `--remote-verified-file 5:10:/path/to/cert` tells the filesystem to associate
-    /// entry 5 with a remote file 10, and need to be verified against the /path/to/cert.
+    /// For example, `--remote-ro-file 5:/path/to/cert` tells the filesystem to associate the
+    /// file $MOUNTPOINT/5 with remote FD 5, which needs to be verified against /path/to/cert.
     #[structopt(long, parse(try_from_str = parse_remote_ro_file_option))]
     remote_ro_file: Vec<OptionRemoteRoFile>,
 
     /// A read-only remote file without integrity check. Can be multiple.
     ///
-    /// For example, `--remote-unverified-file 5:10` tells the filesystem to associate entry 5
-    /// with a remote file 10.
-    #[structopt(long, parse(try_from_str = parse_remote_ro_file_unverified_option))]
-    remote_ro_file_unverified: Vec<OptionRemoteRoFileUnverified>,
+    /// For example, `--remote-ro-file-unverified 5` tells the filesystem to associate the file
+    /// $MOUNTPOINT/5 with remote FD 5.
+    #[structopt(long)]
+    remote_ro_file_unverified: Vec<i32>,
 
     /// A new read-writable remote file with integrity check. Can be multiple.
     ///
-    /// For example, `--remote-new-verified-file 12:34` tells the filesystem to associate entry 12
-    /// with a remote file 34.
-    #[structopt(long, parse(try_from_str = parse_remote_new_rw_file_option))]
-    remote_new_rw_file: Vec<OptionRemoteRwFile>,
+    /// For example, `--remote-new-rw-file 5` tells the filesystem to associate the file
+    /// $MOUNTPOINT/5 with remote FD 5.
+    #[structopt(long)]
+    remote_new_rw_file: Vec<i32>,
+
+    /// A new directory that is assumed empty in the backing filesystem. New files created in this
+    /// directory are integrity-protected in the same way as --remote-new-rw-file. Can be
+    /// multiple.
+    ///
+    /// For example, `--remote-new-rw-dir 5` tells the filesystem to associate $MOUNTPOINT/5
+    /// with remote directory FD 5.
+    #[structopt(long)]
+    remote_new_rw_dir: Vec<i32>,
 
     /// Enable debugging features.
     #[structopt(long)]
@@ -87,136 +96,119 @@
 }
 
 struct OptionRemoteRoFile {
-    ino: Inode,
-
     /// ID to refer to the remote file.
-    remote_id: i32,
+    remote_fd: i32,
 
     /// Certificate to verify the authenticity of the file's fs-verity signature.
     /// TODO(170494765): Implement PKCS#7 signature verification.
     _certificate_path: PathBuf,
 }
 
-struct OptionRemoteRoFileUnverified {
-    ino: Inode,
-
-    /// ID to refer to the remote file.
-    remote_id: i32,
-}
-
-struct OptionRemoteRwFile {
-    ino: Inode,
-
-    /// ID to refer to the remote file.
-    remote_id: i32,
-}
-
 fn parse_remote_ro_file_option(option: &str) -> Result<OptionRemoteRoFile> {
     let strs: Vec<&str> = option.split(':').collect();
-    if strs.len() != 3 {
+    if strs.len() != 2 {
         bail!("Invalid option: {}", option);
     }
     Ok(OptionRemoteRoFile {
-        ino: strs[0].parse::<Inode>()?,
-        remote_id: strs[1].parse::<i32>()?,
-        _certificate_path: PathBuf::from(strs[2]),
+        remote_fd: strs[0].parse::<i32>()?,
+        _certificate_path: PathBuf::from(strs[1]),
     })
 }
 
-fn parse_remote_ro_file_unverified_option(option: &str) -> Result<OptionRemoteRoFileUnverified> {
-    let strs: Vec<&str> = option.split(':').collect();
-    if strs.len() != 2 {
-        bail!("Invalid option: {}", option);
-    }
-    Ok(OptionRemoteRoFileUnverified {
-        ino: strs[0].parse::<Inode>()?,
-        remote_id: strs[1].parse::<i32>()?,
-    })
-}
-
-fn parse_remote_new_rw_file_option(option: &str) -> Result<OptionRemoteRwFile> {
-    let strs: Vec<&str> = option.split(':').collect();
-    if strs.len() != 2 {
-        bail!("Invalid option: {}", option);
-    }
-    Ok(OptionRemoteRwFile {
-        ino: strs[0].parse::<Inode>().unwrap(),
-        remote_id: strs[1].parse::<i32>().unwrap(),
-    })
-}
-
-fn new_config_remote_verified_file(
+fn new_remote_verified_file_entry(
     service: file::VirtFdService,
-    remote_id: i32,
+    remote_fd: i32,
     file_size: u64,
-) -> Result<FileConfig> {
-    let signature = service.readFsveritySignature(remote_id).context("Failed to read signature")?;
+) -> Result<AuthFsEntry> {
+    let signature = service.readFsveritySignature(remote_fd).context("Failed to read signature")?;
 
     let authenticator = FakeAuthenticator::always_succeed();
-    Ok(FileConfig::VerifiedReadonly {
+    Ok(AuthFsEntry::VerifiedReadonly {
         reader: VerifiedFileReader::new(
             &authenticator,
-            RemoteFileReader::new(service.clone(), remote_id),
+            RemoteFileReader::new(service.clone(), remote_fd),
             file_size,
             signature,
-            RemoteMerkleTreeReader::new(service.clone(), remote_id),
+            RemoteMerkleTreeReader::new(service.clone(), remote_fd),
         )?,
         file_size,
     })
 }
 
-fn new_config_remote_unverified_file(
+fn new_remote_unverified_file_entry(
     service: file::VirtFdService,
-    remote_id: i32,
+    remote_fd: i32,
     file_size: u64,
-) -> Result<FileConfig> {
-    let reader = RemoteFileReader::new(service, remote_id);
-    Ok(FileConfig::UnverifiedReadonly { reader, file_size })
+) -> Result<AuthFsEntry> {
+    let reader = RemoteFileReader::new(service, remote_fd);
+    Ok(AuthFsEntry::UnverifiedReadonly { reader, file_size })
 }
 
-fn new_config_remote_new_verified_file(
+fn new_remote_new_verified_file_entry(
     service: file::VirtFdService,
-    remote_id: i32,
-) -> Result<FileConfig> {
-    let remote_file = RemoteFileEditor::new(service, remote_id);
-    Ok(FileConfig::VerifiedNew { editor: VerifiedFileEditor::new(remote_file) })
+    remote_fd: i32,
+) -> Result<AuthFsEntry> {
+    let remote_file = RemoteFileEditor::new(service, remote_fd);
+    Ok(AuthFsEntry::VerifiedNew { editor: VerifiedFileEditor::new(remote_file) })
 }
 
-fn prepare_file_pool(args: &Args) -> Result<BTreeMap<Inode, FileConfig>> {
-    let mut file_pool = BTreeMap::new();
+fn new_remote_new_verified_dir_entry(
+    service: file::VirtFdService,
+    remote_fd: i32,
+) -> Result<AuthFsEntry> {
+    let dir = RemoteDirEditor::new(service, remote_fd);
+    Ok(AuthFsEntry::VerifiedNewDirectory { dir })
+}
+
+fn prepare_root_dir_entries(args: &Args) -> Result<HashMap<PathBuf, AuthFsEntry>> {
+    let mut root_entries = HashMap::new();
 
     let service = file::get_rpc_binder_service(args.cid)?;
 
     for config in &args.remote_ro_file {
-        file_pool.insert(
-            config.ino,
-            new_config_remote_verified_file(
+        root_entries.insert(
+            remote_fd_to_path_buf(config.remote_fd),
+            new_remote_verified_file_entry(
                 service.clone(),
-                config.remote_id,
-                service.getFileSize(config.remote_id)?.try_into()?,
+                config.remote_fd,
+                service.getFileSize(config.remote_fd)?.try_into()?,
             )?,
         );
     }
 
-    for config in &args.remote_ro_file_unverified {
-        file_pool.insert(
-            config.ino,
-            new_config_remote_unverified_file(
+    for remote_fd in &args.remote_ro_file_unverified {
+        let remote_fd = *remote_fd;
+        root_entries.insert(
+            remote_fd_to_path_buf(remote_fd),
+            new_remote_unverified_file_entry(
                 service.clone(),
-                config.remote_id,
-                service.getFileSize(config.remote_id)?.try_into()?,
+                remote_fd,
+                service.getFileSize(remote_fd)?.try_into()?,
             )?,
         );
     }
 
-    for config in &args.remote_new_rw_file {
-        file_pool.insert(
-            config.ino,
-            new_config_remote_new_verified_file(service.clone(), config.remote_id)?,
+    for remote_fd in &args.remote_new_rw_file {
+        let remote_fd = *remote_fd;
+        root_entries.insert(
+            remote_fd_to_path_buf(remote_fd),
+            new_remote_new_verified_file_entry(service.clone(), remote_fd)?,
         );
     }
 
-    Ok(file_pool)
+    for remote_fd in &args.remote_new_rw_dir {
+        let remote_fd = *remote_fd;
+        root_entries.insert(
+            remote_fd_to_path_buf(remote_fd),
+            new_remote_new_verified_dir_entry(service.clone(), remote_fd)?,
+        );
+    }
+
+    Ok(root_entries)
+}
+
+fn remote_fd_to_path_buf(fd: i32) -> PathBuf {
+    PathBuf::from(fd.to_string())
 }
 
 fn try_main() -> Result<()> {
@@ -227,8 +219,8 @@
         android_logger::Config::default().with_tag("authfs").with_min_level(log_level),
     );
 
-    let file_pool = prepare_file_pool(&args)?;
-    fusefs::loop_forever(file_pool, &args.mount_point, &args.extra_options)?;
+    let root_entries = prepare_root_dir_entries(&args)?;
+    fusefs::loop_forever(root_entries, &args.mount_point, &args.extra_options)?;
     bail!("Unexpected exit after the handler loop")
 }
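
A simplified sketch of the new command-line contract (hypothetical helper names, plain Result<_, String> instead of anyhow): `--remote-ro-file` now takes `REMOTE_FD:CERT_PATH`, and the remote FD itself becomes the entry name under the mount point, which is why remote_fd_to_path_buf just stringifies the FD.

use std::path::PathBuf;

// Parse "REMOTE_FD:CERT_PATH" into its two fields.
fn parse_ro_file_option(option: &str) -> Result<(i32, PathBuf), String> {
    let strs: Vec<&str> = option.split(':').collect();
    if strs.len() != 2 {
        return Err(format!("Invalid option: {}", option));
    }
    let remote_fd = strs[0].parse::<i32>().map_err(|e| e.to_string())?;
    Ok((remote_fd, PathBuf::from(strs[1])))
}

// The remote FD doubles as the file name under the mount point.
fn remote_fd_to_name(fd: i32) -> PathBuf {
    PathBuf::from(fd.to_string())
}

fn main() {
    let (fd, cert) = parse_ro_file_option("3:cert.der").unwrap();
    assert_eq!((fd, cert), (3, PathBuf::from("cert.der")));
    assert_eq!(remote_fd_to_name(fd), PathBuf::from("3"));
    // The old three-field "INODE:FD:CERT" form is rejected.
    assert!(parse_ro_file_option("10:3:cert.der").is_err());
}
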
 
diff --git a/authfs/tests/Android.bp b/authfs/tests/Android.bp
index 88c1ba6..92fa428 100644
--- a/authfs/tests/Android.bp
+++ b/authfs/tests/Android.bp
@@ -14,7 +14,7 @@
         "VirtualizationTestHelper",
     ],
     test_suites: ["general-tests"],
-    target_required: ["open_then_run"],
+    target_required: ["open_then_run_module"],
     data: [
         ":authfs_test_files",
         ":MicrodroidTestApp.signed",
@@ -22,7 +22,16 @@
 }
 
 rust_test {
-    name: "open_then_run",
+    // PushFilePreparer can sometimes push the directory (if named "open_then_run", which contains
+    // the actual executable in a per-architecture sub-directory) instead of the executable. This
+    // makes it harder to use because the host Java test has to detect the executable path
+    // dynamically, e.g. if it's a directory, append the device's architecture to build the actual
+    // executable path. Simply renaming the module (and thus the host directory) forces
+    // PushFilePreparer to always push the executable to the destination, so that the Java test can
+    // easily locate the executable with a constant path.
+    name: "open_then_run_module",
+    stem: "open_then_run",
+
     crate_name: "open_then_run",
     srcs: ["open_then_run.rs"],
     edition: "2018",
diff --git a/authfs/tests/AndroidTest.xml b/authfs/tests/AndroidTest.xml
index 9deab5b..643e2b4 100644
--- a/authfs/tests/AndroidTest.xml
+++ b/authfs/tests/AndroidTest.xml
@@ -23,7 +23,7 @@
 
     <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
         <option name="throw-if-cmd-fail" value="true" />
-        <!-- Prepare test directory. -->
+        <!-- Prepare test directories. -->
         <option name="run-command" value="mkdir -p /data/local/tmp/authfs/mnt" />
         <option name="teardown-command" value="rm -rf /data/local/tmp/authfs" />
     </target_preparer>
@@ -33,7 +33,7 @@
         <option name="abort-on-push-failure" value="true" />
 
         <!-- Test executable -->
-        <option name="push-file" key="open_then_run" value="/data/local/tmp/authfs/open_then_run" />
+        <option name="push-file" key="open_then_run" value="/data/local/tmp/open_then_run" />
 
         <!-- Test data files -->
         <option name="push-file" key="cert.der" value="/data/local/tmp/authfs/cert.der" />
diff --git a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
index 7229dde..8a13ef3 100644
--- a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
+++ b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
@@ -34,8 +34,8 @@
 import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
 import com.android.tradefed.testtype.junit4.AfterClassWithInfo;
 import com.android.tradefed.testtype.junit4.BeforeClassWithInfo;
-import com.android.tradefed.util.AbiUtils;
 import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.CommandStatus;
 
 import org.junit.After;
 import org.junit.AssumptionViolatedException;
@@ -54,6 +54,12 @@
     /** Test directory on Android where data are located */
     private static final String TEST_DIR = "/data/local/tmp/authfs";
 
+    /** Output directory where the test can generate output on Android */
+    private static final String TEST_OUTPUT_DIR = "/data/local/tmp/authfs/output_dir";
+
+    /** Path to open_then_run on Android */
+    private static final String OPEN_THEN_RUN_BIN = "/data/local/tmp/open_then_run";
+
     /** Mount point of authfs on Microdroid during the test */
     private static final String MOUNT_DIR = "/data/local/tmp";
 
@@ -76,7 +82,6 @@
     private static boolean sAssumptionFailed;
 
     private ExecutorService mThreadPool = Executors.newCachedThreadPool();
-    private String mArch;
 
     @BeforeClassWithInfo
     public static void beforeClassWithDevice(TestInformation testInfo)
@@ -138,23 +143,22 @@
     }
 
     @Before
-    public void setUp() {
+    public void setUp() throws Exception {
         assumeFalse(sAssumptionFailed);
-        mArch = AbiUtils.getArchForAbi(getAbi().getName());
+        sAndroid.run("mkdir " + TEST_OUTPUT_DIR);
     }
 
     @After
-    public void tearDown() throws DeviceNotAvailableException {
+    public void tearDown() throws Exception {
         sAndroid.tryRun("killall fd_server");
-        sAndroid.tryRun("rm -f " + TEST_DIR + "/output");
+        sAndroid.run("rm -rf " + TEST_OUTPUT_DIR);
 
         tryRunOnMicrodroid("killall authfs");
         tryRunOnMicrodroid("umount " + MOUNT_DIR);
     }
 
     @Test
-    public void testReadWithFsverityVerification_RemoteFile()
-            throws DeviceNotAvailableException, InterruptedException {
+    public void testReadWithFsverityVerification_RemoteFile() throws Exception {
         // Setup
         runFdServerOnAndroid(
                 "--open-ro 3:input.4m --open-ro 4:input.4m.merkle_dump --open-ro 5:input.4m.fsv_sig"
@@ -162,25 +166,24 @@
                 "--ro-fds 3:4:5 --ro-fds 6");
 
         runAuthFsOnMicrodroid(
-                "--remote-ro-file-unverified 10:6 --remote-ro-file 11:3:cert.der --cid "
+                "--remote-ro-file-unverified 6 --remote-ro-file 3:cert.der --cid "
                         + VMADDR_CID_HOST);
 
         // Action
-        String actualHashUnverified4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/10");
-        String actualHash4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/11");
+        String actualHashUnverified4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/6");
+        String actualHash4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/3");
 
         // Verify
         String expectedHash4m = computeFileHashOnAndroid(TEST_DIR + "/input.4m");
 
-        assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4m, actualHashUnverified4m);
-        assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4m, actualHash4m);
+        assertEquals("Inconsistent hash from /authfs/6: ", expectedHash4m, actualHashUnverified4m);
+        assertEquals("Inconsistent hash from /authfs/3: ", expectedHash4m, actualHash4m);
     }
 
     // Separate the test from the one above simply because exec in shell does not allow opening
     // too many files.
     @Test
-    public void testReadWithFsverityVerification_RemoteSmallerFile()
-            throws DeviceNotAvailableException, InterruptedException {
+    public void testReadWithFsverityVerification_RemoteSmallerFile() throws Exception {
         // Setup
         runFdServerOnAndroid(
                 "--open-ro 3:input.4k --open-ro 4:input.4k.merkle_dump --open-ro"
@@ -188,46 +191,43 @@
                     + " --open-ro 8:input.4k1.fsv_sig",
                 "--ro-fds 3:4:5 --ro-fds 6:7:8");
         runAuthFsOnMicrodroid(
-                "--remote-ro-file 10:3:cert.der --remote-ro-file 11:6:cert.der --cid "
-                        + VMADDR_CID_HOST);
+                "--remote-ro-file 3:cert.der --remote-ro-file 6:cert.der --cid " + VMADDR_CID_HOST);
 
         // Action
-        String actualHash4k = computeFileHashOnMicrodroid(MOUNT_DIR + "/10");
-        String actualHash4k1 = computeFileHashOnMicrodroid(MOUNT_DIR + "/11");
+        String actualHash4k = computeFileHashOnMicrodroid(MOUNT_DIR + "/3");
+        String actualHash4k1 = computeFileHashOnMicrodroid(MOUNT_DIR + "/6");
 
         // Verify
         String expectedHash4k = computeFileHashOnAndroid(TEST_DIR + "/input.4k");
         String expectedHash4k1 = computeFileHashOnAndroid(TEST_DIR + "/input.4k1");
 
-        assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4k, actualHash4k);
-        assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4k1, actualHash4k1);
+        assertEquals("Inconsistent hash from /authfs/3: ", expectedHash4k, actualHash4k);
+        assertEquals("Inconsistent hash from /authfs/6: ", expectedHash4k1, actualHash4k1);
     }
 
     @Test
-    public void testReadWithFsverityVerification_TamperedMerkleTree()
-            throws DeviceNotAvailableException, InterruptedException {
+    public void testReadWithFsverityVerification_TamperedMerkleTree() throws Exception {
         // Setup
         runFdServerOnAndroid(
                 "--open-ro 3:input.4m --open-ro 4:input.4m.merkle_dump.bad "
                         + "--open-ro 5:input.4m.fsv_sig",
                 "--ro-fds 3:4:5");
-        runAuthFsOnMicrodroid("--remote-ro-file 10:3:cert.der --cid " + VMADDR_CID_HOST);
+        runAuthFsOnMicrodroid("--remote-ro-file 3:cert.der --cid " + VMADDR_CID_HOST);
 
         // Verify
-        assertFalse(copyFileOnMicrodroid(MOUNT_DIR + "/10", "/dev/null"));
+        assertFalse(copyFileOnMicrodroid(MOUNT_DIR + "/3", "/dev/null"));
     }
 
     @Test
-    public void testWriteThroughCorrectly()
-            throws DeviceNotAvailableException, InterruptedException {
+    public void testWriteThroughCorrectly() throws Exception {
         // Setup
-        runFdServerOnAndroid("--open-rw 3:output", "--rw-fds 3");
-        runAuthFsOnMicrodroid("--remote-new-rw-file 20:3 --cid " + VMADDR_CID_HOST);
+        runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
 
         // Action
         String srcPath = "/system/bin/linker64";
-        String destPath = MOUNT_DIR + "/20";
-        String backendPath = TEST_DIR + "/output";
+        String destPath = MOUNT_DIR + "/3";
+        String backendPath = TEST_OUTPUT_DIR + "/out.file";
         assertTrue(copyFileOnMicrodroid(srcPath, destPath));
 
         // Verify
@@ -236,15 +236,14 @@
     }
 
     @Test
-    public void testWriteFailedIfDetectsTampering()
-            throws DeviceNotAvailableException, InterruptedException {
+    public void testWriteFailedIfDetectsTampering() throws Exception {
         // Setup
-        runFdServerOnAndroid("--open-rw 3:output", "--rw-fds 3");
-        runAuthFsOnMicrodroid("--remote-new-rw-file 20:3 --cid " + VMADDR_CID_HOST);
+        runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
 
         String srcPath = "/system/bin/linker64";
-        String destPath = MOUNT_DIR + "/20";
-        String backendPath = TEST_DIR + "/output";
+        String destPath = MOUNT_DIR + "/3";
+        String backendPath = TEST_OUTPUT_DIR + "/out.file";
         assertTrue(copyFileOnMicrodroid(srcPath, destPath));
 
         // Action
@@ -255,28 +254,32 @@
         // Writing to a block partially requires a read-back to calculate the new hash. It should
         // fail when the content is inconsistent with the known hash. Use direct I/O to avoid
         // simply writing to the filesystem cache.
-        assertEquals(
-                tryRunOnMicrodroid("dd if=/dev/zero of=" + destPath + " bs=1 count=1024 direct"),
-                null);
+        assertFalse(
+                writeZerosAtFileOffsetOnMicrodroid(
+                        destPath, /* offset */ 0, /* number */ 1024, /* writeThrough */ true));
 
         // A full 4K write does not require to read back, so write can succeed even if the backing
         // block has already been tampered.
-        runOnMicrodroid("dd if=/dev/zero of=" + destPath + " bs=1 count=4096 skip=4096");
+        assertTrue(
+                writeZerosAtFileOffsetOnMicrodroid(
+                        destPath, /* offset */ 4096, /* number */ 4096, /* writeThrough */ false));
 
         // Otherwise, a partial write with correct backing file should still succeed.
-        runOnMicrodroid("dd if=/dev/zero of=" + destPath + " bs=1 count=1024 skip=8192");
+        assertTrue(
+                writeZerosAtFileOffsetOnMicrodroid(
+                        destPath, /* offset */ 8192, /* number */ 1024, /* writeThrough */ false));
     }
 
     @Test
-    public void testFileResize() throws DeviceNotAvailableException, InterruptedException {
+    public void testFileResize() throws Exception {
         // Setup
-        runFdServerOnAndroid("--open-rw 3:output", "--rw-fds 3");
-        runAuthFsOnMicrodroid("--remote-new-rw-file 20:3 --cid " + VMADDR_CID_HOST);
-        String outputPath = MOUNT_DIR + "/20";
-        String backendPath = TEST_DIR + "/output";
+        runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
+        String outputPath = MOUNT_DIR + "/3";
+        String backendPath = TEST_OUTPUT_DIR + "/out.file";
 
         // Action & Verify
-        runOnMicrodroid("yes $'\\x01' | tr -d '\\n' | dd bs=1 count=10000 of=" + outputPath);
+        createFileWithOnesOnMicrodroid(outputPath, 10000);
         assertEquals(getFileSizeInBytesOnMicrodroid(outputPath), 10000);
         expectBackingFileConsistency(
                 outputPath,
@@ -298,6 +301,112 @@
                 "e53130831c13dabff71d5d1797e3aaa467b4b7d32b3b8782c4ff03d76976f2aa");
     }
 
+    @Test
+    public void testOutputDirectory_WriteNewFiles() throws Exception {
+        // Setup
+        String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        sAndroid.run("mkdir " + androidOutputDir);
+        runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        // Action & Verify
+        // Can create a new file to write.
+        String expectedAndroidPath = androidOutputDir + "/file";
+        String authfsPath = authfsOutputDir + "/file";
+        createFileWithOnesOnMicrodroid(authfsPath, 10000);
+        assertEquals(getFileSizeInBytesOnMicrodroid(authfsPath), 10000);
+        expectBackingFileConsistency(
+                authfsPath,
+                expectedAndroidPath,
+                "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+
+        // Regular file operations work, e.g. resize.
+        resizeFileOnMicrodroid(authfsPath, 15000);
+        assertEquals(getFileSizeInBytesOnMicrodroid(authfsPath), 15000);
+        expectBackingFileConsistency(
+                authfsPath,
+                expectedAndroidPath,
+                "567c89f62586e0d33369157afdfe99a2fa36cdffb01e91dcdc0b7355262d610d");
+    }
+
+    @Test
+    public void testOutputDirectory_MkdirAndWriteFile() throws Exception {
+        // Setup
+        String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        sAndroid.run("mkdir " + androidOutputDir);
+        runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        // Action
+        // Can create nested directories and can create a file in one.
+        runOnMicrodroid("mkdir " + authfsOutputDir + "/new_dir");
+        runOnMicrodroid("mkdir -p " + authfsOutputDir + "/we/need/to/go/deeper");
+        createFileWithOnesOnMicrodroid(authfsOutputDir + "/new_dir/file1", 10000);
+        createFileWithOnesOnMicrodroid(authfsOutputDir + "/we/need/file2", 10000);
+
+        // Verify
+        // Directories show up in Android.
+        sAndroid.run("test -d " + androidOutputDir + "/new_dir");
+        sAndroid.run("test -d " + androidOutputDir + "/we/need/to/go/deeper");
+        // Files exist in Android. Hashes on Microdroid and Android are consistent.
+        assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/new_dir/file1"), 10000);
+        expectBackingFileConsistency(
+                authfsOutputDir + "/new_dir/file1",
+                androidOutputDir + "/new_dir/file1",
+                "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+        // Same to file in a nested directory.
+        assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/we/need/file2"), 10000);
+        expectBackingFileConsistency(
+                authfsOutputDir + "/we/need/file2",
+                androidOutputDir + "/we/need/file2",
+                "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+    }
+
+    @Test
+    public void testOutputDirectory_CreateAndTruncateExistingFile() throws Exception {
+        // Setup
+        String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        sAndroid.run("mkdir " + androidOutputDir);
+        runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        // Action & Verify
+        runOnMicrodroid("echo -n foo > " + authfsOutputDir + "/file");
+        assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/file"), 3);
+        // Can override a file and write normally.
+        createFileWithOnesOnMicrodroid(authfsOutputDir + "/file", 10000);
+        assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/file"), 10000);
+        expectBackingFileConsistency(
+                authfsOutputDir + "/file",
+                androidOutputDir + "/file",
+                "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+    }
+
+    @Test
+    public void testOutputDirectory_CannotRecreateDirectoryIfNameExists() throws Exception {
+        // Setup
+        String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+        String authfsOutputDir = MOUNT_DIR + "/3";
+        sAndroid.run("mkdir " + androidOutputDir);
+        runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+        runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+        runOnMicrodroid("touch " + authfsOutputDir + "/some_file");
+        runOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir");
+        runOnMicrodroid("touch " + authfsOutputDir + "/some_dir/file");
+        runOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir/dir");
+
+        // Action & Verify
+        // Cannot create directory if an entry with the same name already exists.
+        assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_file");
+        assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir");
+        assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir/file");
+        assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir/dir");
+    }
+
     private void expectBackingFileConsistency(
             String authFsPath, String backendPath, String expectedHash)
             throws DeviceNotAvailableException {
@@ -347,6 +456,24 @@
         return Long.parseLong(runOnMicrodroid("stat -c '%s' " + path));
     }
 
+    private void createFileWithOnesOnMicrodroid(String filePath, long numberOfOnes) {
+        runOnMicrodroid(
+                "yes $'\\x01' | tr -d '\\n' | dd bs=1 count=" + numberOfOnes + " of=" + filePath);
+    }
+
+    private boolean writeZerosAtFileOffsetOnMicrodroid(
+            String filePath, long offset, long numberOfZeros, boolean writeThrough) {
+        String cmd = "dd if=/dev/zero of=" + filePath + " bs=1 count=" + numberOfZeros;
+        if (offset > 0) {
+            cmd += " skip=" + offset;
+        }
+        if (writeThrough) {
+            cmd += " direct";
+        }
+        CommandResult result = runOnMicrodroidForResult(cmd);
+        return result.getStatus() == CommandStatus.SUCCESS;
+    }
+
     private void runAuthFsOnMicrodroid(String flags) {
         String cmd = AUTHFS_BIN + " " + MOUNT_DIR + " " + flags;
 
@@ -374,18 +501,13 @@
         }
     }
 
-    private String getOpenThenRunPath() {
-        // Construct path to match PushFilePreparer's upload path.
-        return TEST_DIR + "/open_then_run/" + mArch + "/open_then_run";
-    }
-
     private void runFdServerOnAndroid(String helperFlags, String fdServerFlags)
             throws DeviceNotAvailableException {
         String cmd =
                 "cd "
                         + TEST_DIR
                         + " && "
-                        + getOpenThenRunPath()
+                        + OPEN_THEN_RUN_BIN
                         + " "
                         + helperFlags
                         + " -- "
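
The tampering test above depends on a property of chunk-based hashing that is easy to state in code. The sketch below is illustrative only (hypothetical helper; it is not authfs code): a write that does not cover a whole 4 KiB chunk must read the existing chunk back to recompute its hash, so it fails on a tampered chunk, while a full, aligned chunk write needs no read-back and can succeed even when the old backing data was modified.

const CHUNK_SIZE: u64 = 4096;

// Does a write at `offset` of `len` bytes require reading existing chunk data back?
fn needs_read_back(offset: u64, len: u64) -> bool {
    let aligned_start = offset % CHUNK_SIZE == 0;
    let whole_chunks = len % CHUNK_SIZE == 0 && len > 0;
    !(aligned_start && whole_chunks)
}

fn main() {
    // Partial write into a tampered chunk: read-back happens, verification fails.
    assert!(needs_read_back(0, 1024));
    // Full aligned 4 KiB write: no read-back, so it succeeds despite tampering.
    assert!(!needs_read_back(4096, 4096));
    // Partial write into an untampered chunk: read-back succeeds, so the write does too.
    assert!(needs_read_back(8192, 1024));
}
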
diff --git a/authfs/tests/open_then_run.rs b/authfs/tests/open_then_run.rs
index 3e6ae71..fca8953 100644
--- a/authfs/tests/open_then_run.rs
+++ b/authfs/tests/open_then_run.rs
@@ -122,8 +122,13 @@
         // them for the provided program, and are not supposed to do anything else.
         OpenOptions::new()
             .custom_flags(libc::O_PATH | libc::O_DIRECTORY)
+            // The custom flags above are not taken into consideration by the unix implementation
+            // of OpenOptions for flag validation. So even though the man page of open(2) says that
+            // most flags, including the access mode, are ignored, we still need to set a "valid"
+            // mode to make the library happy. The value does not appear to matter elsewhere in the
+            // library.
+            .read(true)
             .open(path)
-            .with_context(|| format!("Open {} directory", path))
+            .with_context(|| format!("Open {} directory as path", path))
     })?;
 
     let cmdline_args: Vec<_> = matches.values_of("args").unwrap().map(|s| s.to_string()).collect();
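
As a stand-alone illustration of the workaround described in the comment above (assuming a Linux target and the libc crate, which this file already uses), this is the shape of the call: the access mode set via .read(true) exists only to satisfy OpenOptions' validation, since O_PATH makes the kernel ignore it.

use std::fs::{File, OpenOptions};
use std::io;
use std::os::unix::fs::OpenOptionsExt;

// Open a directory as an O_PATH fd; it can be passed along but not read through.
fn open_dir_as_path(path: &str) -> io::Result<File> {
    OpenOptions::new()
        .custom_flags(libc::O_PATH | libc::O_DIRECTORY)
        .read(true) // only here to satisfy the access-mode check; O_PATH ignores it
        .open(path)
}

fn main() -> io::Result<()> {
    let dir = open_dir_as_path("/tmp")?;
    println!("opened O_PATH directory fd: {:?}", dir);
    Ok(())
}
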
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index af504a1..508423b 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -85,12 +85,15 @@
             .context("Failed to open config APK idsig file")?;
         let idsig_fd = ParcelFileDescriptor::new(idsig_fd);
 
-        // Console output and the system log output from the VM are redirected to this file.
-        // TODO: Send this to stdout instead? Or specify None?
-        let log_fd = File::create(data_dir.join("vm.log")).context("Failed to create log file")?;
-        let log_fd = ParcelFileDescriptor::new(log_fd);
-
-        let debug_level = if parameters.debug_mode { DebugLevel::FULL } else { DebugLevel::NONE };
+        let (log_fd, debug_level) = if parameters.debug_mode {
+            // Console output and the system log output from the VM are redirected to this file.
+            let log_fd =
+                File::create(data_dir.join("vm.log")).context("Failed to create log file")?;
+            let log_fd = ParcelFileDescriptor::new(log_fd);
+            (Some(log_fd), DebugLevel::FULL)
+        } else {
+            (None, DebugLevel::NONE)
+        };
 
         let config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
             apk: Some(apk_fd),
@@ -102,7 +105,7 @@
         });
 
         let vm = service
-            .createVm(&config, Some(&log_fd), Some(&log_fd))
+            .createVm(&config, log_fd.as_ref(), log_fd.as_ref())
             .context("Failed to create VM")?;
         let vm_state = Arc::new(VmStateMonitor::default());
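
The same conditional-logging pattern, reduced to a sketch with plain std types (File standing in for ParcelFileDescriptor, and a local stand-in enum for the AIDL DebugLevel): the log file is only created in debug mode, and None is passed otherwise so the VM gets no console or log sink.

use std::fs::File;
use std::io;
use std::path::Path;

#[derive(Debug, PartialEq)]
enum DebugLevel {
    None,
    Full,
}

// Only create a log sink when debugging is requested.
fn log_config(debug_mode: bool, data_dir: &Path) -> io::Result<(Option<File>, DebugLevel)> {
    if debug_mode {
        let log_fd = File::create(data_dir.join("vm.log"))?;
        Ok((Some(log_fd), DebugLevel::Full))
    } else {
        Ok((None, DebugLevel::None))
    }
}

fn main() -> io::Result<()> {
    let (log_fd, debug_level) = log_config(false, Path::new("/tmp"))?;
    assert!(log_fd.is_none());
    assert_eq!(debug_level, DebugLevel::None);
    Ok(())
}
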
 
diff --git a/compos/compos_key_cmd/compos_key_cmd.cpp b/compos/compos_key_cmd/compos_key_cmd.cpp
index 2735f2e..3f431da 100644
--- a/compos/compos_key_cmd/compos_key_cmd.cpp
+++ b/compos/compos_key_cmd/compos_key_cmd.cpp
@@ -236,7 +236,7 @@
         appConfig.idsig = std::move(idsigFd);
         appConfig.instanceImage = std::move(instanceFd);
         appConfig.configPath = kConfigFilePath;
-        appConfig.debugLevel = VirtualMachineAppConfig::DebugLevel::NONE;
+        appConfig.debugLevel = VirtualMachineAppConfig::DebugLevel::FULL;
         appConfig.memoryMib = 0; // Use default
 
         LOG(INFO) << "Starting VM";
diff --git a/compos/composd/src/instance_starter.rs b/compos/composd/src/instance_starter.rs
index 3959859..4b3ac1b 100644
--- a/compos/composd/src/instance_starter.rs
+++ b/compos/composd/src/instance_starter.rs
@@ -179,6 +179,7 @@
     ) -> Result<()> {
         let instance_image = fs::OpenOptions::new()
             .create(true)
+            .truncate(true)
             .read(true)
             .write(true)
             .open(&self.instance_image)
diff --git a/demo/java/com/android/microdroid/demo/MainActivity.java b/demo/java/com/android/microdroid/demo/MainActivity.java
index 60e50bb..15d9046 100644
--- a/demo/java/com/android/microdroid/demo/MainActivity.java
+++ b/demo/java/com/android/microdroid/demo/MainActivity.java
@@ -285,7 +285,7 @@
                     mVirtualMachine = vmm.create("demo_vm", config);
                 }
                 mVirtualMachine.run();
-                mVirtualMachine.setCallback(callback);
+                mVirtualMachine.setCallback(Executors.newSingleThreadExecutor(), callback);
                 mStatus.postValue(mVirtualMachine.getStatus());
 
                 InputStream console = mVirtualMachine.getConsoleOutputStream();
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachine.java b/javalib/src/android/system/virtualmachine/VirtualMachine.java
index 63c9288..6556b87 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachine.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachine.java
@@ -19,6 +19,7 @@
 import static android.os.ParcelFileDescriptor.MODE_READ_ONLY;
 import static android.os.ParcelFileDescriptor.MODE_READ_WRITE;
 
+import android.annotation.CallbackExecutor;
 import android.annotation.NonNull;
 import android.annotation.Nullable;
 import android.content.Context;
@@ -42,6 +43,7 @@
 import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.Files;
 import java.util.Optional;
+import java.util.concurrent.Executor;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -110,6 +112,9 @@
     /** The registered callback */
     private @Nullable VirtualMachineCallback mCallback;
 
+    /** The executor on which the callback will be executed */
+    private @NonNull Executor mCallbackExecutor;
+
     private @Nullable ParcelFileDescriptor mConsoleReader;
     private @Nullable ParcelFileDescriptor mConsoleWriter;
 
@@ -263,7 +268,10 @@
      * Registers the callback object to get events from the virtual machine. If a callback was
      * already registered, it is replaced with the new one.
      */
-    public void setCallback(@Nullable VirtualMachineCallback callback) {
+    public void setCallback(
+            @NonNull @CallbackExecutor Executor executor,
+            @Nullable VirtualMachineCallback callback) {
+        mCallbackExecutor = executor;
         mCallback = callback;
     }
 
@@ -328,7 +336,8 @@
                             if (cb == null) {
                                 return;
                             }
-                            cb.onPayloadStarted(VirtualMachine.this, stream);
+                            mCallbackExecutor.execute(
+                                    () -> cb.onPayloadStarted(VirtualMachine.this, stream));
                         }
 
                         @Override
@@ -337,7 +346,7 @@
                             if (cb == null) {
                                 return;
                             }
-                            cb.onPayloadReady(VirtualMachine.this);
+                            mCallbackExecutor.execute(() -> cb.onPayloadReady(VirtualMachine.this));
                         }
 
                         @Override
@@ -346,7 +355,8 @@
                             if (cb == null) {
                                 return;
                             }
-                            cb.onPayloadFinished(VirtualMachine.this, exitCode);
+                            mCallbackExecutor.execute(
+                                    () -> cb.onPayloadFinished(VirtualMachine.this, exitCode));
                         }
 
                         @Override
@@ -355,7 +365,7 @@
                             if (cb == null) {
                                 return;
                             }
-                            cb.onDied(VirtualMachine.this);
+                            mCallbackExecutor.execute(() -> cb.onDied(VirtualMachine.this));
                         }
                     });
             service.asBinder()
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 274b7ed..3eaf124 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -44,7 +44,7 @@
 android_system_image {
     name: "microdroid",
     use_avb: true,
-    avb_private_key: ":avb_testkey_rsa4096",
+    avb_private_key: ":microdroid_sign_key",
     avb_algorithm: "SHA256_RSA4096",
     partition_name: "system",
     deps: [
@@ -193,7 +193,7 @@
             ],
         },
     },
-    avb_private_key: ":avb_testkey_rsa4096",
+    avb_private_key: ":microdroid_sign_key",
     avb_algorithm: "SHA256_RSA4096",
     file_contexts: ":microdroid_vendor_file_contexts.gen",
 }
@@ -248,7 +248,7 @@
     header_version: "4",
     partition_name: "boot",
     use_avb: true,
-    avb_private_key: ":avb_testkey_rsa4096",
+    avb_private_key: ":microdroid_sign_key",
 }
 
 android_filesystem {
@@ -285,7 +285,7 @@
     },
     partition_name: "vendor_boot",
     use_avb: true,
-    avb_private_key: ":avb_testkey_rsa4096",
+    avb_private_key: ":microdroid_sign_key",
 }
 
 android_filesystem {
@@ -335,25 +335,92 @@
     cmd: "cat $(in) > $(out)",
 }
 
-// TODO(b/203031847) sign these bootconfig images using avb
+vbmeta {
+    name: "microdroid_vbmeta_bootconfig",
+    partition_name: "vbmeta",
+    private_key: ":microdroid_sign_key",
+    chained_partitions: [
+        {
+            name: "bootconfig",
+            private_key: ":microdroid_sign_key",
+        },
+    ],
+}
+
+// See external/avb/avbtool.py
+// MAX_VBMETA_SIZE=64KB, MAX_FOOTER_SIZE=4KB
+avb_hash_footer_kb = "68"
+
 prebuilt_etc {
     name: "microdroid_bootconfig_normal",
-    src: "bootconfig.normal",
+    src: ":microdroid_bootconfig_normal_gen",
     filename: "microdroid_bootconfig.normal",
 }
 
 prebuilt_etc {
     name: "microdroid_bootconfig_app_debuggable",
-    src: "bootconfig.app_debuggable",
+    src: ":microdroid_bootconfig_app_debuggable_gen",
     filename: "microdroid_bootconfig.app_debuggable",
 }
 
 prebuilt_etc {
     name: "microdroid_bootconfig_full_debuggable",
-    src: "bootconfig.full_debuggable",
+    src: ":microdroid_bootconfig_full_debuggable_gen",
     filename: "microdroid_bootconfig.full_debuggable",
 }
 
+// TODO(jiyong): make a new module type that does the avb signing
+genrule {
+    name: "microdroid_bootconfig_normal_gen",
+    tools: ["avbtool"],
+    srcs: [
+        "bootconfig.normal",
+        ":microdroid_sign_key",
+    ],
+    out: ["microdroid_bootconfig.normal"],
+    cmd: "cp $(location bootconfig.normal) $(out) && " +
+        "$(location avbtool) add_hash_footer " +
+        "--algorithm SHA256_RSA4096 " +
+        "--partition_name bootconfig " +
+        "--key $(location :microdroid_sign_key) " +
+        "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+        "--image $(out)",
+}
+
+genrule {
+    name: "microdroid_bootconfig_app_debuggable_gen",
+    tools: ["avbtool"],
+    srcs: [
+        "bootconfig.app_debuggable",
+        ":microdroid_sign_key",
+    ],
+    out: ["microdroid_bootconfig.app_debuggable"],
+    cmd: "cp $(location bootconfig.app_debuggable) $(out) && " +
+        "$(location avbtool) add_hash_footer " +
+        "--algorithm SHA256_RSA4096 " +
+        "--partition_name bootconfig " +
+        "--key $(location :microdroid_sign_key) " +
+        "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+        "--image $(out)",
+}
+
+genrule {
+    name: "microdroid_bootconfig_full_debuggable_gen",
+    tools: ["avbtool"],
+    srcs: [
+        "bootconfig.full_debuggable",
+        ":microdroid_sign_key",
+    ],
+    out: ["microdroid_bootconfig.full_debuggable"],
+    cmd: "cp $(location bootconfig.full_debuggable) $(out) && " +
+        "$(location avbtool) add_hash_footer " +
+        "--algorithm SHA256_RSA4096 " +
+        "--partition_name bootconfig " +
+        "--key $(location :microdroid_sign_key) " +
+        "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+        "--image $(out)",
+}
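
The partition_size expression in the three genrules above is the same arithmetic each time; written out as a small, hypothetical helper for clarity: reserve avb_hash_footer_kb (68 KiB) for the vbmeta and footer data, and round the image size up to the next 4 KiB boundary.

const AVB_HASH_FOOTER_KB: u64 = 68; // MAX_VBMETA_SIZE (64 KiB) + MAX_FOOTER_SIZE (4 KiB)
const BLOCK_SIZE: u64 = 4096;

// Mirrors: avb_hash_footer_kb * 1024 + (image_size + 4096 - 1) / 4096 * 4096
fn partition_size(image_size: u64) -> u64 {
    let rounded_image = (image_size + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
    AVB_HASH_FOOTER_KB * 1024 + rounded_image
}

fn main() {
    // A 10,000-byte bootconfig image rounds up to 12 KiB, plus the 68 KiB reserve.
    assert_eq!(partition_size(10_000), (68 + 12) * 1024);
}
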
+
 prebuilt_etc {
     name: "microdroid_fstab",
     src: "fstab.microdroid",
@@ -369,23 +436,18 @@
             // For unknown reason, the signed bootloader doesn't work on x86_64. Until the problem
             // is fixed, let's use the unsigned bootloader for the architecture.
             // TODO(b/185115783): remove this
-            src: ":microdroid_crosvm_bootloader",
+            src: ":microdroid_bootloader_pubkey_replaced",
         },
     },
     filename: "microdroid_bootloader",
 }
 
-// See external/avb/avbtool.py
-// MAX_VBMETA_SIZE=64KB, MAX_FOOTER_SIZE=4KB
-avb_hash_footer_kb = "68"
-
-// TODO(b/193504286) remove this when prebuilt bootloader exposes pubkey as well.
 genrule {
     name: "microdroid_bootloader_gen",
     tools: ["avbtool"],
     srcs: [
-        ":microdroid_crosvm_bootloader",
-        ":avb_testkey_rsa4096",
+        ":microdroid_bootloader_pubkey_replaced",
+        ":microdroid_sign_key",
     ],
     out: ["bootloader-signed"],
     // 1. Copy the input to the output because avbtool modifies --image in
     // place, and
     // 2. Check if the file is big enough. For arm and x86 we have fake
     // bootloader file whose size is 1. It can't pass avbtool.
     // 3. Add the hash footer. The partition size is set to (image size + 68KB)
     // rounded up to 4KB boundary.
-    cmd: "cp $(location :microdroid_crosvm_bootloader) $(out) && " +
+    cmd: "cp $(location :microdroid_bootloader_pubkey_replaced) $(out) && " +
         "if [ $$(stat --format=%s $(out)) -gt 4096 ]; then " +
         "$(location avbtool) add_hash_footer " +
         "--algorithm SHA256_RSA4096 " +
         "--partition_name bootloader " +
-        "--key $(location :avb_testkey_rsa4096) " +
+        "--key $(location :microdroid_sign_key) " +
         "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
         "--image $(out)" +
         "; fi",
 }
 
-prebuilt_etc {
-    name: "microdroid_bootloader.avbpubkey",
-    src: ":microdroid_bootloader_pubkey_gen",
+// Replace avbpubkey of prebuilt bootloader with the avbpubkey of the signing key
+genrule {
+    name: "microdroid_bootloader_pubkey_replaced",
+    tools: ["replace_bytes"],
+    srcs: [
+        ":microdroid_crosvm_bootloader", // input (bootloader)
+        ":microdroid_crosvm_bootloader.avbpubkey", // old bytes (old pubkey)
+        ":microdroid_bootloader_avbpubkey_gen", // new bytes (new pubkey)
+    ],
+    out: ["bootloader-pubkey-replaced"],
+    // 1. Copy the input to the output (replace_bytes modifies the file in place)
+    // 2. Check that the file is big enough. For arm and x86 we have a fake 1-byte
+    // bootloader file, for which replace_bytes would fail (old key not found).
+    // 3. Replace the embedded pubkey with the new one (see the sketch after this rule).
+    cmd: "cp $(location :microdroid_crosvm_bootloader) $(out) && " +
+        "if [ $$(stat --format=%s $(out)) -gt 4096 ]; then " +
+        "$(location replace_bytes) $(out) " +
+        "$(location :microdroid_crosvm_bootloader.avbpubkey) " +
+        "$(location :microdroid_bootloader_avbpubkey_gen)" +
+        "; fi",
 }
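
For context, a rough sketch of the in-place replacement this rule relies on, assuming only what the comments above state (the target is patched in place, the old and new pubkeys are the same size, and the operation fails if the old bytes are not found); the actual replace_bytes tool may differ in detail:

def replace_in_place(target_path: str, old_path: str, new_path: str) -> None:
    # Hypothetical stand-in for the replace_bytes step above.
    with open(old_path, "rb") as f:
        old = f.read()
    with open(new_path, "rb") as f:
        new = f.read()
    if len(old) != len(new):
        # Replacing with a different-sized key would shift the rest of the image.
        raise ValueError("old and new pubkeys must be the same size")
    with open(target_path, "r+b") as f:
        data = f.read()
        offset = data.find(old)
        if offset < 0:
            # Mirrors "replace_bytes fails if key not found"; the size check in
            # the rule skips the fake 1-byte bootloaders for exactly this reason.
            raise ValueError("old pubkey not found in target")
        f.seek(offset)
        f.write(new)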
 
+// The APEX keeps a copy of the avbpubkey embedded in the bootloader so that the embedded pubkey
+// can be replaced when the bootloader is re-signed.
+prebuilt_etc {
+    name: "microdroid_bootloader.avbpubkey",
+    src: ":microdroid_bootloader_avbpubkey_gen",
+}
+
+// Generate the avbpubkey from the signing key
 genrule {
-    name: "microdroid_bootloader_pubkey_gen",
+    name: "microdroid_bootloader_avbpubkey_gen",
     tools: ["avbtool"],
-    srcs: [
-        ":microdroid_crosvm_bootloader",
-        ":avb_testkey_rsa4096",
-    ],
-    out: ["bootloader-pubkey"],
-    cmd: "$(location avbtool) extract_public_key --key $(location :avb_testkey_rsa4096) --output $(out)",
+    srcs: [":microdroid_sign_key"],
+    out: ["bootloader.pubkey"],
+    cmd: "$(location avbtool) extract_public_key " +
+        "--key $(location :microdroid_sign_key) " +
+        "--output $(out)",
 }
 
 prebuilt_etc {
@@ -448,10 +534,18 @@
     cmd: "$(location mkenvimage_host) -s 4096 -o $(out) $(in)",
 }
 
+// Note that the filesystem images could each be signed with a different key, even though we
+// currently use the same key for all of microdroid. However, the key that signs vbmeta must
+// match the pubkey embedded in the bootloader (see the sketch below).
+filegroup {
+    name: "microdroid_sign_key",
+    srcs: [":avb_testkey_rsa4096"],
+}
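
One hedged way to verify that invariant (a hypothetical helper, not part of this change): extract the public key from the signing key with the same avbtool sub-command used by microdroid_bootloader_avbpubkey_gen above and compare it byte-for-byte against the pubkey copy kept in the APEX:

import filecmp
import subprocess
import tempfile

def signing_key_matches_embedded_pubkey(signing_key: str, embedded_pubkey: str) -> bool:
    # Extract the pubkey from the private signing key, then compare it with the
    # copy that was embedded into the bootloader (microdroid_bootloader.avbpubkey).
    with tempfile.NamedTemporaryFile(suffix=".pubkey") as extracted:
        subprocess.run(
            ["avbtool", "extract_public_key",
             "--key", signing_key,
             "--output", extracted.name],
            check=True,
        )
        return filecmp.cmp(extracted.name, embedded_pubkey, shallow=False)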
+
 vbmeta {
     name: "microdroid_vbmeta",
     partition_name: "vbmeta",
-    private_key: ":avb_testkey_rsa4096",
+    private_key: ":microdroid_sign_key",
     partitions: [
         "microdroid_vendor",
         "microdroid_vendor_boot-5.10",
diff --git a/microdroid/bootconfig.x86_64 b/microdroid/bootconfig.x86_64
index 2977ee3..6076889 100644
--- a/microdroid/bootconfig.x86_64
+++ b/microdroid/bootconfig.x86_64
@@ -1 +1 @@
-androidboot.boot_devices = pci0000:00/0000:00:03.0,pci0000:00/0000:00:04.0,pci0000:00/0000:00:05.0
+androidboot.boot_devices = pci0000:00/0000:00:04.0,pci0000:00/0000:00:05.0,pci0000:00/0000:00:06.0
diff --git a/microdroid/init.rc b/microdroid/init.rc
index ad551cc..664402f 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -195,7 +195,7 @@
     seclabel u:r:shell:s0
     setenv HOSTNAME console
 
-service seriallogging /system/bin/logcat -b all -v threadtime -f /dev/hvc1 *:V
+service seriallogging /system/bin/logcat -b all -v threadtime -f /dev/hvc2 *:V
     disabled
     user logd
     group root logd
diff --git a/microdroid/ueventd.rc b/microdroid/ueventd.rc
index 85f2f9d..037b8fc 100644
--- a/microdroid/ueventd.rc
+++ b/microdroid/ueventd.rc
@@ -26,4 +26,4 @@
 /dev/tty0                 0660   root       system
 
 # Virtual console for logcat
-/dev/hvc1                 0660   logd       logd
+/dev/hvc2                 0660   logd       logd
diff --git a/pvmfw/pvmfw.img b/pvmfw/pvmfw.img
index 317821f..510b2c4 100644
--- a/pvmfw/pvmfw.img
+++ b/pvmfw/pvmfw.img
Binary files differ
diff --git a/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java b/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
index b3a76ce..8d9a7e3 100644
--- a/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
+++ b/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
@@ -152,6 +152,12 @@
                 .runTimedCmd(timeout, "adb", "-s", MICRODROID_SERIAL, "shell", join(cmd));
     }
 
+    // Asserts the command will fail on Microdroid.
+    public static void assertFailedOnMicrodroid(String... cmd) {
+        CommandResult result = runOnMicrodroidForResult(cmd);
+        assertThat(result.getStatus(), is(CommandStatus.FAILED));
+    }
+
     private static String join(String... strs) {
         return String.join(" ", Arrays.asList(strs));
     }
diff --git a/tests/testapk/Android.bp b/tests/testapk/Android.bp
index 493fc93..32c47dd 100644
--- a/tests/testapk/Android.bp
+++ b/tests/testapk/Android.bp
@@ -6,7 +6,10 @@
     name: "MicrodroidTestApp",
     test_suites: ["device-tests"],
     srcs: ["src/java/**/*.java"],
-    static_libs: ["androidx.test.runner"],
+    static_libs: [
+        "androidx.test.runner",
+        "androidx.test.ext.junit",
+    ],
     libs: ["android.system.virtualmachine"],
     jni_libs: ["MicrodroidTestNativeLib"],
     platform_apis: true,
diff --git a/tests/testapk/AndroidManifest.xml b/tests/testapk/AndroidManifest.xml
index 21abeb5..bc955d2 100644
--- a/tests/testapk/AndroidManifest.xml
+++ b/tests/testapk/AndroidManifest.xml
@@ -15,8 +15,9 @@
 -->
 <manifest xmlns:android="http://schemas.android.com/apk/res/android"
       package="com.android.microdroid.test">
+    <uses-permission android:name="android.permission.MANAGE_VIRTUAL_MACHINE" />
     <application>
-        <uses-library android:name="android.system.virtualmachine" android:required="true" />
+        <uses-library android:name="android.system.virtualmachine" android:required="false" />
     </application>
     <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
         android:targetPackage="com.android.microdroid.test"
diff --git a/tests/testapk/AndroidTest.xml b/tests/testapk/AndroidTest.xml
index 25b1001..c7097db 100644
--- a/tests/testapk/AndroidTest.xml
+++ b/tests/testapk/AndroidTest.xml
@@ -17,8 +17,15 @@
     <target_preparer class="com.android.tradefed.targetprep.TestAppInstallSetup">
         <option name="test-file-name" value="MicrodroidTestApp.apk" />
     </target_preparer>
+    <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
+      <option
+        name="run-command"
+        value="pm grant com.android.microdroid.test android.permission.MANAGE_VIRTUAL_MACHINE" />
+    </target_preparer>
     <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
         <option name="package" value="com.android.microdroid.test" />
         <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
+        <option name="shell-timeout" value="300000" />
+        <option name="test-timeout" value="300000" />
     </test>
 </configuration>
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index 5e465d5..e0d6cc1 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -16,15 +16,133 @@
 package com.android.microdroid.test;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeNoException;
 
+import android.content.Context;
+import android.os.ParcelFileDescriptor;
+import android.system.virtualmachine.VirtualMachine;
+import android.system.virtualmachine.VirtualMachineCallback;
+import android.system.virtualmachine.VirtualMachineConfig;
+import android.system.virtualmachine.VirtualMachineException;
+import android.system.virtualmachine.VirtualMachineManager;
+
+import androidx.test.core.app.ApplicationProvider;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.JUnit4;
 
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
 @RunWith(JUnit4.class)
 public class MicrodroidTests {
+    @Rule public Timeout globalTimeout = Timeout.seconds(300);
+
+    private static class Inner {
+        public Context mContext;
+        public VirtualMachineManager mVmm;
+        public VirtualMachine mVm;
+    }
+
+    private boolean mPkvmSupported = false;
+    private Inner mInner;
+
+    @Before
+    public void setup() {
+        // When the virt APEX doesn't exist on the device, classes in the
+        // android.system.virtualmachine package can't be loaded. Therefore, before using those
+        // classes, check that a class from the package exists and skip the test if it doesn't.
+        try {
+            Class.forName("android.system.virtualmachine.VirtualMachineManager");
+            mPkvmSupported = true;
+        } catch (ClassNotFoundException e) {
+            assumeNoException(e);
+            return;
+        }
+        mInner = new Inner();
+        mInner.mContext = ApplicationProvider.getApplicationContext();
+        mInner.mVmm = VirtualMachineManager.getInstance(mInner.mContext);
+    }
+
+    @After
+    public void cleanup() throws VirtualMachineException {
+        if (!mPkvmSupported) {
+            return;
+        }
+        if (mInner.mVm == null) {
+            return;
+        }
+        mInner.mVm.stop();
+        mInner.mVm.delete();
+    }
+
+    private abstract static class VmEventListener implements VirtualMachineCallback {
+        private ExecutorService mExecutorService = Executors.newSingleThreadExecutor();
+
+        void runToFinish(VirtualMachine vm) throws VirtualMachineException, InterruptedException {
+            vm.setCallback(mExecutorService, this);
+            vm.run();
+            mExecutorService.awaitTermination(300, TimeUnit.SECONDS);
+        }
+
+        void forceStop(VirtualMachine vm) {
+            try {
+                vm.stop();
+                this.onDied(vm);
+                mExecutorService.shutdown();
+            } catch (VirtualMachineException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        @Override
+        public void onPayloadStarted(VirtualMachine vm, ParcelFileDescriptor stream) {}
+
+        @Override
+        public void onPayloadReady(VirtualMachine vm) {}
+
+        @Override
+        public void onPayloadFinished(VirtualMachine vm, int exitCode) {}
+
+        @Override
+        public void onDied(VirtualMachine vm) {}
+    }
+
     @Test
-    public void testNothing() {
-        assertTrue(true);
+    public void startAndStop() throws VirtualMachineException, InterruptedException {
+        VirtualMachineConfig.Builder builder =
+                new VirtualMachineConfig.Builder(mInner.mContext, "assets/vm_config.json");
+        VirtualMachineConfig config = builder.build();
+
+        mInner.mVm = mInner.mVmm.getOrCreate("test_vm", config);
+        VmEventListener listener =
+                new VmEventListener() {
+                    private boolean mPayloadReadyCalled = false;
+                    private boolean mPayloadStartedCalled = false;
+
+                    @Override
+                    public void onPayloadStarted(VirtualMachine vm, ParcelFileDescriptor stream) {
+                        mPayloadStartedCalled = true;
+                    }
+
+                    @Override
+                    public void onPayloadReady(VirtualMachine vm) {
+                        mPayloadReadyCalled = true;
+                        forceStop(vm);
+                    }
+
+                    @Override
+                    public void onDied(VirtualMachine vm) {
+                        assertTrue(mPayloadReadyCalled);
+                        assertTrue(mPayloadStartedCalled);
+                    }
+                };
+        listener.runToFinish(mInner.mVm);
     }
 }
diff --git a/virtualizationservice/src/crosvm.rs b/virtualizationservice/src/crosvm.rs
index 08be052..bf1ff0c 100644
--- a/virtualizationservice/src/crosvm.rs
+++ b/virtualizationservice/src/crosvm.rs
@@ -251,8 +251,9 @@
 
     // Setup the serial devices.
     // 1. uart device: used as the output device by bootloaders and as early console by linux
-    // 2. virtio-console device: used as the console device
-    // 3. virtio-console device: used as the logcat output
+    // 2. virtio-console device: used as the console device, to which kmsg is redirected
+    // 3. virtio-console device: used as the androidboot.console device (not used currently)
+    // 4. virtio-console device: used as the logcat output
     //
     // When [console|log]_fd is not specified, the devices are attached to sink, which means what's
     // written there is discarded.
@@ -273,8 +274,10 @@
     command.arg(format!("--serial={},hardware=serial", &console_arg));
     // /dev/hvc0
     command.arg(format!("--serial={},hardware=virtio-console,num=1", &console_arg));
-    // /dev/hvc1
-    command.arg(format!("--serial={},hardware=virtio-console,num=2", &log_arg));
+    // /dev/hvc1 (not used currently)
+    command.arg("--serial=type=sink,hardware=virtio-console,num=2");
+    // /dev/hvc2
+    command.arg(format!("--serial={},hardware=virtio-console,num=3", &log_arg));
 
     if let Some(bootloader) = &config.bootloader {
         command.arg("--bios").arg(add_preserved_fd(&mut preserved_fds, bootloader));
diff --git a/virtualizationservice/src/payload.rs b/virtualizationservice/src/payload.rs
index a59afd5..55eb19b 100644
--- a/virtualizationservice/src/payload.rs
+++ b/virtualizationservice/src/payload.rs
@@ -36,7 +36,8 @@
 
 /// The list of APEXes which microdroid requires.
 // TODO(b/192200378) move this to microdroid.json?
-const MICRODROID_REQUIRED_APEXES: [&str; 2] = ["com.android.adbd", "com.android.os.statsd"];
+const MICRODROID_REQUIRED_APEXES: [&str; 1] = ["com.android.os.statsd"];
+const MICRODROID_REQUIRED_APEXES_DEBUG: [&str; 1] = ["com.android.adbd"];
 
 const APEX_INFO_LIST_PATH: &str = "/apex/apex-info-list.xml";
 
@@ -132,7 +133,11 @@
                     let staged_apex_info = pm.getStagedApexInfo(&apex_info.name)?;
                     if let Some(staged_apex_info) = staged_apex_info {
                         apex_info.path = PathBuf::from(staged_apex_info.diskImagePath);
-                        // TODO(b/201788989) copy bootclasspath/systemserverclasspath
+                        apex_info.boot_classpath = staged_apex_info.hasBootClassPathJars;
+                        apex_info.systemserver_classpath =
+                            staged_apex_info.hasSystemServerClassPathJars;
+                        apex_info.dex2oatboot_classpath =
+                            staged_apex_info.hasDex2OatBootClassPathJars;
                     }
                 }
             }
@@ -195,12 +200,13 @@
     config_path: &str,
     vm_payload_config: &VmPayloadConfig,
     temporary_directory: &Path,
+    debug_level: DebugLevel,
 ) -> Result<DiskImage> {
     let pm = PackageManager::new()?;
     let apex_list = pm.get_apex_list(vm_payload_config.prefer_staged)?;
 
     // collect APEX names from config
-    let apexes = collect_apex_names(&apex_list, &vm_payload_config.apexes);
+    let apexes = collect_apex_names(&apex_list, &vm_payload_config.apexes, debug_level);
     info!("Microdroid payload APEXes: {:?}", apexes);
 
     let metadata_file = make_metadata_file(config_path, &apexes, temporary_directory)?;
@@ -253,7 +259,11 @@
 }
 
 // Collect APEX names from config
-fn collect_apex_names(apex_list: &ApexInfoList, apexes: &[ApexConfig]) -> Vec<String> {
+fn collect_apex_names(
+    apex_list: &ApexInfoList,
+    apexes: &[ApexConfig],
+    debug_level: DebugLevel,
+) -> Vec<String> {
     // Process pseudo names like "{BOOTCLASSPATH}".
     // For now we have following pseudo APEX names:
     // - {BOOTCLASSPATH}: represents APEXes contributing "BOOTCLASSPATH" environment variable
@@ -270,6 +280,9 @@
         .collect();
     // Add required APEXes
     apex_names.extend(MICRODROID_REQUIRED_APEXES.iter().map(|name| name.to_string()));
+    if debug_level != DebugLevel::NONE {
+        apex_names.extend(MICRODROID_REQUIRED_APEXES_DEBUG.iter().map(|name| name.to_string()));
+    }
     apex_names.sort();
     apex_names.dedup();
     apex_names
@@ -290,8 +303,17 @@
         &config.configPath,
         vm_payload_config,
         temporary_directory,
+        config.debugLevel,
     )?);
 
+    vm_config.disks[1].partitions.push(Partition {
+        label: "vbmeta".to_owned(),
+        image: Some(open_parcel_file(
+            Path::new("/apex/com.android.virt/etc/fs/microdroid_vbmeta_bootconfig.img"),
+            false,
+        )?),
+        writable: false,
+    });
     let bootconfig_image = "/apex/com.android.virt/etc/microdroid_bootconfig.".to_owned()
         + match config.debugLevel {
             DebugLevel::NONE => "normal",