Merge "Enable selinux on microdroid"
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index f19abd1..00f34b9 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,7 +1,13 @@
 [Builtin Hooks]
+aidl_format = true
+android_test_mapping_format = true
 bpfmt = true
 clang_format = true
+jsonlint = true
+google_java_format = true
+pylint3 = true
 rustfmt = true
+xmllint = true
 
 [Builtin Hooks Options]
 clang_format = --commit ${PREUPLOAD_COMMIT} --style file --extensions c,h,cc,cpp
diff --git a/apex/Android.bp b/apex/Android.bp
index 50c17f6..fa3806f 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -17,17 +17,20 @@
     arch: {
         arm64: {
             binaries: [
+                "authfs", // TODO(victorhsieh): move to microdroid once we can run the test in VM.
                 "crosvm",
             ],
         },
         x86_64: {
             binaries: [
+                "authfs", // TODO(victorhsieh): move to microdroid once we can run the test in VM.
                 "crosvm",
             ],
         },
     },
     binaries: [
         "assemble_cvd",
+        "fd_server",
         "virtmanager",
         "vm",
     ],
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 9f7be93..85f2abb 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -39,18 +39,26 @@
     ],
     bindgen_flags: ["--size_t-is-usize"],
     cflags: ["-D BORINGSSL_NO_CXX"],
+    apex_available: ["com.android.virt"],
 }
 
 rust_binary {
     name: "authfs",
     defaults: ["authfs_defaults"],
+    apex_available: ["com.android.virt"],
 }
 
 rust_test {
     name: "authfs_device_test_src_lib",
     defaults: ["authfs_defaults"],
     test_suites: ["device-tests"],
-    data: [
+    data: [":authfs_test_files"],
+}
+
+filegroup {
+    name: "authfs_test_files",
+    srcs: [
+        "testdata/cert.der",
         "testdata/input.4k",
         "testdata/input.4k.fsv_sig",
         "testdata/input.4k.merkle_dump",
diff --git a/authfs/TEST_MAPPING b/authfs/TEST_MAPPING
index d0c0b09..d2dc1d8 100644
--- a/authfs/TEST_MAPPING
+++ b/authfs/TEST_MAPPING
@@ -2,6 +2,9 @@
   "presubmit": [
     {
       "name": "authfs_device_test_src_lib"
+    },
+    {
+      "name": "AuthFsTestCases"
     }
   ]
 }
diff --git a/authfs/aidl/Android.bp b/authfs/aidl/Android.bp
index 7f3c968..35a3c4a 100644
--- a/authfs/aidl/Android.bp
+++ b/authfs/aidl/Android.bp
@@ -9,6 +9,7 @@
     backend: {
         rust: {
             enabled: true,
+            apex_available: ["com.android.virt"],
         },
     },
 }
diff --git a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
index 628ee3c..189f43a 100644
--- a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
+++ b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
@@ -45,4 +45,10 @@
 
     /** Returns the fs-verity signature of the given file ID. */
     byte[] readFsveritySignature(int id);
+
+    /**
+     * Writes the buffer to the given file ID from the file's offset. Returns the number of bytes
+     * written.
+     */
+    int writeFile(int id, in byte[] buf, long offset);
 }
diff --git a/authfs/fd_server/Android.bp b/authfs/fd_server/Android.bp
index f12f01f..f82b72f 100644
--- a/authfs/fd_server/Android.bp
+++ b/authfs/fd_server/Android.bp
@@ -7,10 +7,13 @@
     srcs: ["src/main.rs"],
     rustlibs: [
         "authfs_aidl_interface-rust",
+        "libandroid_logger",
         "libanyhow",
         "libbinder_rs",
         "libclap",
         "liblibc",
         "liblog_rust",
+        "libnix",
     ],
+    apex_available: ["com.android.virt"],
 }
diff --git a/authfs/fd_server/src/fsverity.rs b/authfs/fd_server/src/fsverity.rs
new file mode 100644
index 0000000..e89bbd0
--- /dev/null
+++ b/authfs/fd_server/src/fsverity.rs
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use nix::ioctl_readwrite;
+use std::io;
+
+// Constants/values from uapi/linux/fsverity.h
+const FS_VERITY_METADATA_TYPE_MERKLE_TREE: u64 = 1;
+const FS_VERITY_METADATA_TYPE_SIGNATURE: u64 = 3;
+const FS_IOCTL_MAGIC: u8 = b'f';
+const FS_IOCTL_READ_VERITY_METADATA: u8 = 135;
+
+#[repr(C)]
+pub struct fsverity_read_metadata_arg {
+    metadata_type: u64,
+    offset: u64,
+    length: u64,
+    buf_ptr: u64,
+    __reserved: u64,
+}
+
+ioctl_readwrite!(
+    read_verity_metadata,
+    FS_IOCTL_MAGIC,
+    FS_IOCTL_READ_VERITY_METADATA,
+    fsverity_read_metadata_arg
+);
+
+fn read_metadata(fd: i32, metadata_type: u64, offset: u64, buf: &mut [u8]) -> io::Result<usize> {
+    let mut arg = fsverity_read_metadata_arg {
+        metadata_type,
+        offset,
+        length: buf.len() as u64,
+        buf_ptr: buf.as_mut_ptr() as u64,
+        __reserved: 0,
+    };
+    Ok(unsafe { read_verity_metadata(fd, &mut arg) }.map_err(|e| {
+        if let nix::Error::Sys(errno) = e {
+            io::Error::from_raw_os_error(errno as i32)
+        } else {
+            // The documentation of nix::sys::ioctl indicates the macro-generated function always
+            // returns a nix::errno::Errno, which can be converted to nix::Error::Sys above. As a
+            // result, this branch is unreachable.
+            unreachable!();
+        }
+    })? as usize)
+}
+
+/// Read the raw Merkle tree from the fd, if it exists. The API semantics are similar to a regular
+/// pread(2), and may not return the full requested buffer.
+pub fn read_merkle_tree(fd: i32, offset: u64, buf: &mut [u8]) -> io::Result<usize> {
+    read_metadata(fd, FS_VERITY_METADATA_TYPE_MERKLE_TREE, offset, buf)
+}
+
+/// Read the fs-verity signature from the fd (if exists). The returned signature should be complete.
+pub fn read_signature(fd: i32, buf: &mut [u8]) -> io::Result<usize> {
+    read_metadata(fd, FS_VERITY_METADATA_TYPE_SIGNATURE, 0 /* offset */, buf)
+}
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index cbd7712..99b6e9e 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -25,6 +25,8 @@
 //! [1] Since the remote binder is not ready, this currently implementation uses local binder
 //!     first.
 
+mod fsverity;
+
 use std::cmp::min;
 use std::collections::BTreeMap;
 use std::convert::TryInto;
@@ -32,17 +34,18 @@
 use std::fs::File;
 use std::io;
 use std::os::unix::fs::FileExt;
-use std::os::unix::io::FromRawFd;
+use std::os::unix::io::{AsRawFd, FromRawFd};
 
 use anyhow::{bail, Context, Result};
-use binder::IBinder; // TODO(178852354): remove once set_requesting_sid is exposed in the API.
+use binder::IBinderInternal; // TODO(178852354): remove once set_requesting_sid is exposed in the API.
 use log::{debug, error};
 
 use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::{
     BnVirtFdService, IVirtFdService, ERROR_IO, ERROR_UNKNOWN_FD, MAX_REQUESTING_DATA,
 };
 use authfs_aidl_interface::binder::{
-    add_service, ExceptionCode, Interface, ProcessState, Result as BinderResult, Status, Strong,
+    add_service, ExceptionCode, Interface, ProcessState, Result as BinderResult, Status,
+    StatusCode, Strong,
 };
 
 const SERVICE_NAME: &str = "authfs_fd_server";
@@ -70,38 +73,41 @@
     }
 }
 
-/// Configuration of a read-only file to serve by this server. The file is supposed to be verifiable
-/// with the associated fs-verity metadata.
-struct ReadonlyFdConfig {
-    /// The file to read from. fs-verity metadata can be retrieved from this file's FD.
-    file: File,
+/// Configuration of a file descriptor to be served/exposed/shared.
+enum FdConfig {
+    /// A read-only file to serve by this server. The file is supposed to be verifiable with the
+    /// associated fs-verity metadata.
+    Readonly {
+        /// The file to read from. fs-verity metadata can be retrieved from this file's FD.
+        file: File,
 
-    /// Alternative Merkle tree stored in another file.
-    alt_merkle_file: Option<File>,
+        /// Alternative Merkle tree stored in another file.
+        alt_merkle_tree: Option<File>,
 
-    /// Alternative signature stored in another file.
-    alt_signature_file: Option<File>,
+        /// Alternative signature stored in another file.
+        alt_signature: Option<File>,
+    },
+
+    /// A readable/writable file to serve by this server. This backing file should just be a
+    /// regular file and does not have any specific property.
+    ReadWrite(File),
 }
 
 struct FdService {
-    /// A pool of read-only files
-    fd_pool: BTreeMap<i32, ReadonlyFdConfig>,
+    /// A pool of opened files, may be readonly or read-writable.
+    fd_pool: BTreeMap<i32, FdConfig>,
 }
 
 impl FdService {
-    pub fn new_binder(fd_pool: BTreeMap<i32, ReadonlyFdConfig>) -> Strong<dyn IVirtFdService> {
+    pub fn new_binder(fd_pool: BTreeMap<i32, FdConfig>) -> Strong<dyn IVirtFdService> {
         let result = BnVirtFdService::new_binder(FdService { fd_pool });
         result.as_binder().set_requesting_sid(false);
         result
     }
 
-    fn get_file_config(&self, id: i32) -> BinderResult<&ReadonlyFdConfig> {
+    fn get_file_config(&self, id: i32) -> BinderResult<&FdConfig> {
         self.fd_pool.get(&id).ok_or_else(|| Status::from(ERROR_UNKNOWN_FD))
     }
-
-    fn get_file(&self, id: i32) -> BinderResult<&File> {
-        Ok(&self.get_file_config(id)?.file)
-    }
 }
 
 impl Interface for FdService {}
@@ -111,38 +117,96 @@
         let size: usize = validate_and_cast_size(size)?;
         let offset: u64 = validate_and_cast_offset(offset)?;
 
-        read_into_buf(self.get_file(id)?, size, offset).map_err(|e| {
-            error!("readFile: read error: {}", e);
-            Status::from(ERROR_IO)
-        })
+        match self.get_file_config(id)? {
+            FdConfig::Readonly { file, .. } | FdConfig::ReadWrite(file) => {
+                read_into_buf(&file, size, offset).map_err(|e| {
+                    error!("readFile: read error: {}", e);
+                    Status::from(ERROR_IO)
+                })
+            }
+        }
     }
 
     fn readFsverityMerkleTree(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
         let size: usize = validate_and_cast_size(size)?;
         let offset: u64 = validate_and_cast_offset(offset)?;
 
-        if let Some(file) = &self.get_file_config(id)?.alt_merkle_file {
-            read_into_buf(&file, size, offset).map_err(|e| {
-                error!("readFsverityMerkleTree: read error: {}", e);
-                Status::from(ERROR_IO)
-            })
-        } else {
-            // TODO(victorhsieh) retrieve from the fd when the new ioctl is ready
-            Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Not implemented yet"))
+        match &self.get_file_config(id)? {
+            FdConfig::Readonly { file, alt_merkle_tree, .. } => {
+                if let Some(tree_file) = &alt_merkle_tree {
+                    read_into_buf(&tree_file, size, offset).map_err(|e| {
+                        error!("readFsverityMerkleTree: read error: {}", e);
+                        Status::from(ERROR_IO)
+                    })
+                } else {
+                    let mut buf = vec![0; size];
+                    let s = fsverity::read_merkle_tree(file.as_raw_fd(), offset, &mut buf)
+                        .map_err(|e| {
+                            error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
+                            Status::from(e.raw_os_error().unwrap_or(ERROR_IO))
+                        })?;
+                    debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
+                    buf.truncate(s);
+                    Ok(buf)
+                }
+            }
+            FdConfig::ReadWrite(_file) => {
+                // For a writable file, Merkle tree is not expected to be served since Auth FS
+                // doesn't trust it anyway. Auth FS may keep the Merkle tree privately for its own
+                // use.
+                Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
+            }
         }
     }
 
     fn readFsveritySignature(&self, id: i32) -> BinderResult<Vec<u8>> {
-        if let Some(file) = &self.get_file_config(id)?.alt_signature_file {
-            // Supposedly big enough buffer size to store signature.
-            let size = MAX_REQUESTING_DATA as usize;
-            read_into_buf(&file, size, 0).map_err(|e| {
-                error!("readFsveritySignature: read error: {}", e);
-                Status::from(ERROR_IO)
-            })
-        } else {
-            // TODO(victorhsieh) retrieve from the fd when the new ioctl is ready
-            Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Not implemented yet"))
+        match &self.get_file_config(id)? {
+            FdConfig::Readonly { file, alt_signature, .. } => {
+                if let Some(sig_file) = &alt_signature {
+                    // Supposedly big enough buffer size to store signature.
+                    let size = MAX_REQUESTING_DATA as usize;
+                    let offset = 0;
+                    read_into_buf(&sig_file, size, offset).map_err(|e| {
+                        error!("readFsveritySignature: read error: {}", e);
+                        Status::from(ERROR_IO)
+                    })
+                } else {
+                    let mut buf = vec![0; MAX_REQUESTING_DATA as usize];
+                    let s = fsverity::read_signature(file.as_raw_fd(), &mut buf).map_err(|e| {
+                        error!("readFsveritySignature: failed to retrieve signature: {}", e);
+                        Status::from(e.raw_os_error().unwrap_or(ERROR_IO))
+                    })?;
+                    debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
+                    buf.truncate(s);
+                    Ok(buf)
+                }
+            }
+            FdConfig::ReadWrite(_file) => {
+                // There is no signature for a writable file.
+                Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
+            }
+        }
+    }
+
+    fn writeFile(&self, id: i32, buf: &[u8], offset: i64) -> BinderResult<i32> {
+        match &self.get_file_config(id)? {
+            FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
+            FdConfig::ReadWrite(file) => {
+                let offset: u64 = offset.try_into().map_err(|_| {
+                    new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, "Invalid offset")
+                })?;
+                // Check buffer size just to make `as i32` safe below.
+                if buf.len() > i32::MAX as usize {
+                    return Err(new_binder_exception(
+                        ExceptionCode::ILLEGAL_ARGUMENT,
+                        "Buffer size is too big",
+                    ));
+                }
+                Ok(file.write_at(buf, offset).map_err(|e| {
+                    error!("writeFile: write error: {}", e);
+                    Status::from(ERROR_IO)
+                })? as i32)
+            }
         }
     }
 }
@@ -169,29 +233,42 @@
     Ok(unsafe { File::from_raw_fd(fd) })
 }
 
-fn parse_arg_ro_fds(arg: &str) -> Result<(i32, ReadonlyFdConfig)> {
+fn parse_arg_ro_fds(arg: &str) -> Result<(i32, FdConfig)> {
     let result: Result<Vec<i32>, _> = arg.split(':').map(|x| x.parse::<i32>()).collect();
     let fds = result?;
     if fds.len() > 3 {
         bail!("Too many options: {}", arg);
     }
-
     Ok((
         fds[0],
-        ReadonlyFdConfig {
+        FdConfig::Readonly {
             file: fd_to_file(fds[0])?,
-            alt_merkle_file: fds.get(1).map(|fd| fd_to_file(*fd)).transpose()?,
-            alt_signature_file: fds.get(2).map(|fd| fd_to_file(*fd)).transpose()?,
+            // Alternative Merkle tree, if provided
+            alt_merkle_tree: fds.get(1).map(|fd| fd_to_file(*fd)).transpose()?,
+            // Alternative signature, if provided
+            alt_signature: fds.get(2).map(|fd| fd_to_file(*fd)).transpose()?,
         },
     ))
 }
 
-fn parse_args() -> Result<BTreeMap<i32, ReadonlyFdConfig>> {
+fn parse_arg_rw_fds(arg: &str) -> Result<(i32, FdConfig)> {
+    let fd = arg.parse::<i32>()?;
+    let file = fd_to_file(fd)?;
+    if file.metadata()?.len() > 0 {
+        bail!("File is expected to be empty");
+    }
+    Ok((fd, FdConfig::ReadWrite(file)))
+}
+
+fn parse_args() -> Result<BTreeMap<i32, FdConfig>> {
     #[rustfmt::skip]
     let matches = clap::App::new("fd_server")
         .arg(clap::Arg::with_name("ro-fds")
              .long("ro-fds")
-             .required(true)
+             .multiple(true)
+             .number_of_values(1))
+        .arg(clap::Arg::with_name("rw-fds")
+             .long("rw-fds")
              .multiple(true)
              .number_of_values(1))
         .get_matches();
@@ -203,10 +280,20 @@
             fd_pool.insert(fd, config);
         }
     }
+    if let Some(args) = matches.values_of("rw-fds") {
+        for arg in args {
+            let (fd, config) = parse_arg_rw_fds(arg)?;
+            fd_pool.insert(fd, config);
+        }
+    }
     Ok(fd_pool)
 }
 
 fn main() -> Result<()> {
+    android_logger::init_once(
+        android_logger::Config::default().with_tag("fd_server").with_min_level(log::Level::Debug),
+    );
+
     let fd_pool = parse_args()?;
 
     ProcessState::start_thread_pool();
diff --git a/authfs/src/common.rs b/authfs/src/common.rs
index 522397f..6556fde 100644
--- a/authfs/src/common.rs
+++ b/authfs/src/common.rs
@@ -20,3 +20,59 @@
 pub fn divide_roundup(dividend: u64, divisor: u64) -> u64 {
     (dividend + divisor - 1) / divisor
 }
+
+/// Given `offset` and `length`, generates (offset, size) tuples that together form the same length,
+/// and aligned to `alignment`.
+pub struct ChunkedSizeIter {
+    remaining: usize,
+    offset: u64,
+    alignment: usize,
+}
+
+impl ChunkedSizeIter {
+    pub fn new(remaining: usize, offset: u64, alignment: usize) -> Self {
+        ChunkedSizeIter { remaining, offset, alignment }
+    }
+}
+
+impl Iterator for ChunkedSizeIter {
+    type Item = (u64, usize);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.remaining == 0 {
+            return None;
+        }
+        let chunk_data_size = std::cmp::min(
+            self.remaining,
+            self.alignment - (self.offset % self.alignment as u64) as usize,
+        );
+        let retval = (self.offset, chunk_data_size);
+        self.offset += chunk_data_size as u64;
+        self.remaining = self.remaining.saturating_sub(chunk_data_size);
+        Some(retval)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn collect_chunk_read_iter(remaining: usize, offset: u64) -> Vec<(u64, usize)> {
+        ChunkedSizeIter::new(remaining, offset, 4096).collect::<Vec<_>>()
+    }
+
+    #[test]
+    fn test_chunk_read_iter() {
+        assert_eq!(collect_chunk_read_iter(4096, 0), [(0, 4096)]);
+        assert_eq!(collect_chunk_read_iter(8192, 0), [(0, 4096), (4096, 4096)]);
+        assert_eq!(collect_chunk_read_iter(8192, 4096), [(4096, 4096), (8192, 4096)]);
+
+        assert_eq!(
+            collect_chunk_read_iter(16384, 1),
+            [(1, 4095), (4096, 4096), (8192, 4096), (12288, 4096), (16384, 1)]
+        );
+
+        assert_eq!(collect_chunk_read_iter(0, 0), []);
+        assert_eq!(collect_chunk_read_iter(0, 100), []);
+    }
+}
diff --git a/authfs/src/file.rs b/authfs/src/file.rs
new file mode 100644
index 0000000..9ff8ea5
--- /dev/null
+++ b/authfs/src/file.rs
@@ -0,0 +1,52 @@
+mod local_file;
+mod remote_file;
+
+pub use local_file::LocalFileReader;
+pub use remote_file::{RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
+
+use std::io;
+
+use crate::common::CHUNK_SIZE;
+
+use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService;
+use authfs_aidl_interface::binder::{get_interface, Strong};
+
+// TODO(victorhsieh): use remote binder.
+pub fn get_local_binder() -> Strong<dyn IVirtFdService::IVirtFdService> {
+    let service_name = "authfs_fd_server";
+    get_interface(&service_name).expect("Cannot reach authfs_fd_server binder service")
+}
+
+pub type ChunkBuffer = [u8; CHUNK_SIZE as usize];
+
+/// A trait for reading data by chunks. Chunks can be read by specifying the chunk index. Only the
+/// last chunk may have incomplete chunk size.
+pub trait ReadByChunk {
+    /// Reads the `chunk_index`-th chunk to a `ChunkBuffer`. Returns the size read, which has to be
+    /// `CHUNK_SIZE` except for the last incomplete chunk. Reading beyond the file size (including
+    /// empty file) should return 0.
+    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize>;
+}
+
+/// A trait to write a buffer to the destination at a given offset. The implementation does not
+/// necessarily own or maintain the destination state.
+///
+/// NB: The trait is required in a member of `fusefs::AuthFs`, which is required to be Sync and
+/// immutable (thus the member).
+pub trait RandomWrite {
+    /// Writes `buf` to the destination at `offset`. Returns the written size, which may not be the
+    /// full buffer.
+    fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize>;
+
+    /// Writes the full `buf` to the destination at `offset`.
+    fn write_all_at(&self, buf: &[u8], offset: u64) -> io::Result<()> {
+        let mut input_offset = 0;
+        let mut output_offset = offset;
+        while input_offset < buf.len() {
+            let size = self.write_at(&buf[input_offset..], output_offset)?;
+            input_offset += size;
+            output_offset += size as u64;
+        }
+        Ok(())
+    }
+}
diff --git a/authfs/src/file/local_file.rs b/authfs/src/file/local_file.rs
new file mode 100644
index 0000000..13c954f
--- /dev/null
+++ b/authfs/src/file/local_file.rs
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::cmp::min;
+use std::fs::File;
+use std::io;
+use std::os::unix::fs::FileExt;
+
+use super::{ChunkBuffer, ReadByChunk};
+use crate::common::CHUNK_SIZE;
+
+/// A read-only file that can be read by chunks.
+pub struct LocalFileReader {
+    file: File,
+    size: u64,
+}
+
+impl LocalFileReader {
+    /// Creates a `LocalFileReader` to read from for the specified `path`.
+    pub fn new(file: File) -> io::Result<LocalFileReader> {
+        let size = file.metadata()?.len();
+        Ok(LocalFileReader { file, size })
+    }
+
+    pub fn len(&self) -> u64 {
+        self.size
+    }
+}
+
+impl ReadByChunk for LocalFileReader {
+    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+        let start = chunk_index * CHUNK_SIZE;
+        if start >= self.size {
+            return Ok(0);
+        }
+        let end = min(self.size, start + CHUNK_SIZE);
+        let read_size = (end - start) as usize;
+        debug_assert!(read_size <= buf.len());
+        self.file.read_exact_at(&mut buf[..read_size], start)?;
+        Ok(read_size)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::env::temp_dir;
+
+    #[test]
+    fn test_read_4k_file() -> io::Result<()> {
+        let file_reader = LocalFileReader::new(File::open("testdata/input.4k")?)?;
+        let mut buf = [0u8; 4096];
+        let size = file_reader.read_chunk(0, &mut buf)?;
+        assert_eq!(size, buf.len());
+        Ok(())
+    }
+
+    #[test]
+    fn test_read_4k1_file() -> io::Result<()> {
+        let file_reader = LocalFileReader::new(File::open("testdata/input.4k1")?)?;
+        let mut buf = [0u8; 4096];
+        let size = file_reader.read_chunk(0, &mut buf)?;
+        assert_eq!(size, buf.len());
+        let size = file_reader.read_chunk(1, &mut buf)?;
+        assert_eq!(size, 1);
+        Ok(())
+    }
+
+    #[test]
+    fn test_read_4m_file() -> io::Result<()> {
+        let file_reader = LocalFileReader::new(File::open("testdata/input.4m")?)?;
+        for index in 0..file_reader.len() / 4096 {
+            let mut buf = [0u8; 4096];
+            let size = file_reader.read_chunk(index, &mut buf)?;
+            assert_eq!(size, buf.len());
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn test_read_beyond_file_size() -> io::Result<()> {
+        let file_reader = LocalFileReader::new(File::open("testdata/input.4k").unwrap()).unwrap();
+        let mut buf = [0u8; 4096];
+        let size = file_reader.read_chunk(1u64, &mut buf)?;
+        assert_eq!(size, 0);
+        Ok(())
+    }
+
+    #[test]
+    fn test_read_empty_file() -> io::Result<()> {
+        let mut temp_file = temp_dir();
+        temp_file.push("authfs_test_empty_file");
+        let file_reader = LocalFileReader::new(File::create(temp_file).unwrap()).unwrap();
+        let mut buf = [0u8; 4096];
+        let size = file_reader.read_chunk(0, &mut buf)?;
+        assert_eq!(size, 0);
+        Ok(())
+    }
+}
diff --git a/authfs/src/file/remote_file.rs b/authfs/src/file/remote_file.rs
new file mode 100644
index 0000000..9d614f5
--- /dev/null
+++ b/authfs/src/file/remote_file.rs
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::cmp::min;
+use std::convert::TryFrom;
+use std::io;
+use std::sync::{Arc, Mutex};
+
+use super::{ChunkBuffer, RandomWrite, ReadByChunk};
+use crate::common::CHUNK_SIZE;
+
+use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService;
+use authfs_aidl_interface::binder::Strong;
+
+type VirtFdService = Strong<dyn IVirtFdService::IVirtFdService>;
+
+fn remote_read_chunk(
+    service: &Arc<Mutex<VirtFdService>>,
+    remote_fd: i32,
+    chunk_index: u64,
+    buf: &mut ChunkBuffer,
+) -> io::Result<usize> {
+    let offset = i64::try_from(chunk_index * CHUNK_SIZE)
+        .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
+
+    let chunk = service
+        .lock()
+        .unwrap()
+        .readFile(remote_fd, offset, buf.len() as i32)
+        .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
+    let size = min(buf.len(), chunk.len());
+    buf[..size].copy_from_slice(&chunk[..size]);
+    Ok(size)
+}
+
+pub struct RemoteFileReader {
+    // This needs to have Sync trait to be used in fuse::worker::start_message_loop.
+    service: Arc<Mutex<VirtFdService>>,
+    file_fd: i32,
+}
+
+impl RemoteFileReader {
+    pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
+        RemoteFileReader { service, file_fd }
+    }
+}
+
+impl ReadByChunk for RemoteFileReader {
+    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+        remote_read_chunk(&self.service, self.file_fd, chunk_index, buf)
+    }
+}
+
+pub struct RemoteMerkleTreeReader {
+    // This needs to be a Sync to be used in fuse::worker::start_message_loop.
+    // TODO(victorhsieh): change to Strong<> once binder supports it.
+    service: Arc<Mutex<VirtFdService>>,
+    file_fd: i32,
+}
+
+impl RemoteMerkleTreeReader {
+    pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
+        RemoteMerkleTreeReader { service, file_fd }
+    }
+}
+
+impl ReadByChunk for RemoteMerkleTreeReader {
+    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+        let offset = i64::try_from(chunk_index * CHUNK_SIZE)
+            .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
+
+        let chunk = self
+            .service
+            .lock()
+            .unwrap()
+            .readFsverityMerkleTree(self.file_fd, offset, buf.len() as i32)
+            .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
+        let size = min(buf.len(), chunk.len());
+        buf[..size].copy_from_slice(&chunk[..size]);
+        Ok(size)
+    }
+}
+
+pub struct RemoteFileEditor {
+    // This needs to have Sync trait to be used in fuse::worker::start_message_loop.
+    service: Arc<Mutex<VirtFdService>>,
+    file_fd: i32,
+}
+
+impl RemoteFileEditor {
+    pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
+        RemoteFileEditor { service, file_fd }
+    }
+}
+
+impl RandomWrite for RemoteFileEditor {
+    fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+        let offset =
+            i64::try_from(offset).map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
+        let size = self
+            .service
+            .lock()
+            .unwrap()
+            .writeFile(self.file_fd, &buf, offset)
+            .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
+        Ok(size as usize) // within range because size is supposed to <= buf.len(), which is a usize
+    }
+}
+
+impl ReadByChunk for RemoteFileEditor {
+    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+        remote_read_chunk(&self.service, self.file_fd, chunk_index, buf)
+    }
+}
diff --git a/authfs/src/fsverity.rs b/authfs/src/fsverity.rs
index 37d96c1..1515574 100644
--- a/authfs/src/fsverity.rs
+++ b/authfs/src/fsverity.rs
@@ -16,8 +16,9 @@
 
 mod builder;
 mod common;
+mod editor;
 mod sys;
 mod verifier;
 
-pub use self::builder::MerkleLeaves;
-pub use self::verifier::FsverityChunkedFileReader;
+pub use editor::VerifiedFileEditor;
+pub use verifier::VerifiedFileReader;
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
index 607d3a7..94b9718 100644
--- a/authfs/src/fsverity/builder.rs
+++ b/authfs/src/fsverity/builder.rs
@@ -47,13 +47,17 @@
         .collect()
 }
 
-#[allow(dead_code)]
 impl MerkleLeaves {
     /// Creates a `MerkleLeaves` instance with empty data.
     pub fn new() -> Self {
         Self { leaves: Vec::new(), file_size: 0 }
     }
 
+    /// Gets size of the file represented by `MerkleLeaves`.
+    pub fn file_size(&self) -> u64 {
+        self.file_size
+    }
+
     /// Updates the hash of the `index`-th leaf, and increase the size to `size_at_least` if the
     /// current size is smaller.
     pub fn update_hash(&mut self, index: usize, hash: &Sha256Hash, size_at_least: u64) {
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
new file mode 100644
index 0000000..81ccd53
--- /dev/null
+++ b/authfs/src/fsverity/editor.rs
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! A module for writing to a file from a trusted world to an untrusted storage.
+//!
+//! Architectural Model:
+//!  * Trusted world: the writer, a signing secret, has some memory, but NO persistent storage.
+//!  * Untrusted world: persistent storage, assuming untrusted.
+//!  * IPC mechanism between trusted and untrusted world
+//!
+//! Use cases:
+//!  * In the trusted world, we want to generate a large file, sign it, and share the signature for
+//!    a third party to verify the file.
+//!  * In the trusted world, we want to read a previously signed file back with signature check
+//!    without having to touch the whole file.
+//!
+//! Requirements:
+//!  * Communication between trusted and untrusted world is not cheap, and files can be large.
+//!  * A file write pattern may not be sequential, and neither may a read.
+//!
+//! Considering the above, a technique similar to fs-verity is used. fs-verity uses an alternative
+//! hash function, a Merkle tree, to calculate the hash of file content. A file update at any
+//! location will propagate the hash update from the leaf to the root node. Unlike fs-verity, which
+//! assumes static files, to support write operation, we need to allow the file (thus tree) to
+//! update.
+//!
+//! For the trusted world to generate a large file with random write and hash it, the writer needs
+//! to hold some private information and update the Merkle tree during a file write (or even when
+//! the Merkle tree needs to be stashed to the untrusted storage).
+//!
+//! A write to a file must update the root hash. In order for the root hash to update, a tree
+//! walk to update from the write location to the root node is necessary. Importantly, in case when
+//! (part of) the Merkle tree needs to be read from the untrusted storage (e.g. not yet verified in
+//! cache), the original path must be verified by the trusted signature before the update to happen.
+//!
+//! Denial-of-service is a known weakness if the untrusted storage decides to simply remove the
+//! file. But there is nothing we can do in this architecture.
+//!
+//! Rollback attack is another possible attack, but can be addressed with a rollback counter when
+//! possible.
+
+use std::io;
+use std::sync::{Arc, RwLock};
+
+use super::builder::MerkleLeaves;
+use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
+use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+use crate::file::{ChunkBuffer, RandomWrite, ReadByChunk};
+
+// Implement the conversion from `CryptoError` to `io::Error` just to avoid manual error type
+// mapping below.
+impl From<CryptoError> for io::Error {
+    fn from(error: CryptoError) -> Self {
+        io::Error::new(io::ErrorKind::Other, error)
+    }
+}
+
+/// VerifiedFileEditor provides an integrity layer to an underlying read-writable file, which may
+/// not be stored in a trusted environment. Only new, empty files are currently supported.
+pub struct VerifiedFileEditor<F: ReadByChunk + RandomWrite> {
+    file: F,
+    merkle_tree: Arc<RwLock<MerkleLeaves>>,
+}
+
+impl<F: ReadByChunk + RandomWrite> VerifiedFileEditor<F> {
+    /// Wraps a supposedly new file for integrity protection.
+    pub fn new(file: F) -> Self {
+        Self { file, merkle_tree: Arc::new(RwLock::new(MerkleLeaves::new())) }
+    }
+
+    /// Calculates the fs-verity digest of the current file.
+    #[allow(dead_code)]
+    pub fn calculate_fsverity_digest(&self) -> io::Result<Sha256Hash> {
+        let merkle_tree = self.merkle_tree.read().unwrap();
+        merkle_tree.calculate_fsverity_digest().map_err(|e| io::Error::new(io::ErrorKind::Other, e))
+    }
+
+    fn new_hash_for_incomplete_write(
+        &self,
+        source: &[u8],
+        offset_from_alignment: usize,
+        output_chunk_index: usize,
+        merkle_tree: &mut MerkleLeaves,
+    ) -> io::Result<Sha256Hash> {
+        // The buffer is initialized to 0 purposely. To calculate the block hash, the data is
+        // 0-padded to the block size. When a chunk read is less than a chunk, the initial value
+        // conveniently serves the padding purpose.
+        let mut orig_data = [0u8; CHUNK_SIZE as usize];
+
+        // If previous data exists, read back and verify against the known hash (since the
+        // storage / remote server is not trusted).
+        if merkle_tree.is_index_valid(output_chunk_index) {
+            self.read_chunk(output_chunk_index as u64, &mut orig_data)?;
+
+            // Verify original content
+            let hash = Sha256Hasher::new()?.update(&orig_data)?.finalize()?;
+            if !merkle_tree.is_consistent(output_chunk_index, &hash) {
+                return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
+            }
+        }
+
+        Ok(Sha256Hasher::new()?
+            .update(&orig_data[..offset_from_alignment])?
+            .update(source)?
+            .update(&orig_data[offset_from_alignment + source.len()..])?
+            .finalize()?)
+    }
+
+    fn new_chunk_hash(
+        &self,
+        source: &[u8],
+        offset_from_alignment: usize,
+        current_size: usize,
+        output_chunk_index: usize,
+        merkle_tree: &mut MerkleLeaves,
+    ) -> io::Result<Sha256Hash> {
+        if current_size as u64 == CHUNK_SIZE {
+            // Case 1: If the chunk is a complete one, just calculate the hash, regardless of
+            // write location.
+            Ok(Sha256Hasher::new()?.update(source)?.finalize()?)
+        } else {
+            // Case 2: For an incomplete write, calculate the hash based on previous data (if
+            // any).
+            self.new_hash_for_incomplete_write(
+                source,
+                offset_from_alignment,
+                output_chunk_index,
+                merkle_tree,
+            )
+        }
+    }
+
+    pub fn size(&self) -> u64 {
+        self.merkle_tree.read().unwrap().file_size()
+    }
+}
+
+impl<F: ReadByChunk + RandomWrite> RandomWrite for VerifiedFileEditor<F> {
+    fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+        // Since we don't need to support 32-bit CPU, make an assert to make conversion between
+        // u64 and usize easy below. Otherwise, we need to check `divide_roundup(offset + buf.len())
+        // <= usize::MAX` or handle `TryInto` errors.
+        debug_assert!(usize::MAX as u64 == u64::MAX, "Only 64-bit arch is supported");
+
+        // The write range may not be well-aligned with the chunk boundary. There are various cases
+        // to deal with:
+        //  1. A write of a full 4K chunk.
+        //  2. A write of an incomplete chunk, possibly beyond the original EOF.
+        //
+        // Note that a write beyond EOF can create a hole. But we don't need to handle it here
+        // because holes are zeros, and leaves in MerkleLeaves are hashes of 4096-zeros by
+        // default.
+
+        // Now iterate on the input data, considering the alignment at the destination.
+        for (output_offset, current_size) in
+            ChunkedSizeIter::new(buf.len(), offset, CHUNK_SIZE as usize)
+        {
+            // Lock the tree for the whole write for now. There may be room to improve to increase
+            // throughput.
+            let mut merkle_tree = self.merkle_tree.write().unwrap();
+
+            let offset_in_buf = (output_offset - offset) as usize;
+            let source = &buf[offset_in_buf as usize..offset_in_buf as usize + current_size];
+            let output_chunk_index = (output_offset / CHUNK_SIZE) as usize;
+            let offset_from_alignment = (output_offset % CHUNK_SIZE) as usize;
+
+            let new_hash = match self.new_chunk_hash(
+                source,
+                offset_from_alignment,
+                current_size,
+                output_chunk_index,
+                &mut merkle_tree,
+            ) {
+                Ok(hash) => hash,
+                Err(e) => {
+                    // Return early when any error happens before the write. Even if the hash is not
+                    // consistent for the current chunk, we can still consider the earlier writes
+                    // successful. Note that nothing persistent has been done in this iteration.
+                    let written = output_offset - offset;
+                    if written > 0 {
+                        return Ok(written as usize);
+                    }
+                    return Err(e);
+                }
+            };
+
+            // A failed, partial write here will make the backing file inconsistent to the (old)
+            // hash. Nothing can be done within this writer, but at least it still maintains the
+            // (original) integrity for the file. To match what write(2) describes for an error
+            // case (though it's about direct I/O), "Partial data may be written ... should be
+            // considered inconsistent", an error below is propagated.
+            self.file.write_all_at(&source, output_offset)?;
+
+            // Update the hash only after the write succeeds. Note that this only attempts to keep
+            // the tree consistent with what has been written, regardless of the actual state beyond the
+            // writer.
+            let size_at_least = offset.saturating_add(buf.len() as u64);
+            merkle_tree.update_hash(output_chunk_index, &new_hash, size_at_least);
+        }
+        Ok(buf.len())
+    }
+}
+
+impl<F: ReadByChunk + RandomWrite> ReadByChunk for VerifiedFileEditor<F> {
+    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+        self.file.read_chunk(chunk_index, buf)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    // Test data below can be generated by:
+    //  $ perl -e 'print "\x{00}" x 6000' > foo
+    //  $ perl -e 'print "\x{01}" x 5000' >> foo
+    //  $ fsverity digest foo
+    use super::*;
+    use anyhow::Result;
+    use std::cell::RefCell;
+    use std::convert::TryInto;
+
+    struct InMemoryEditor {
+        data: RefCell<Vec<u8>>,
+        fail_read: bool,
+    }
+
+    impl InMemoryEditor {
+        pub fn new() -> InMemoryEditor {
+            InMemoryEditor { data: RefCell::new(Vec::new()), fail_read: false }
+        }
+    }
+
+    impl RandomWrite for InMemoryEditor {
+        fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+            let begin: usize =
+                offset.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+            let end = begin + buf.len();
+            if end > self.data.borrow().len() {
+                self.data.borrow_mut().resize(end, 0);
+            }
+            self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(&buf);
+            Ok(buf.len())
+        }
+    }
+
+    impl ReadByChunk for InMemoryEditor {
+        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+            if self.fail_read {
+                return Err(io::Error::new(io::ErrorKind::Other, "test!"));
+            }
+
+            let borrowed = self.data.borrow();
+            let chunk = &borrowed
+                .chunks(CHUNK_SIZE as usize)
+                .nth(chunk_index as usize)
+                .ok_or_else(|| {
+                    io::Error::new(
+                        io::ErrorKind::InvalidInput,
+                        format!("read_chunk out of bound: index {}", chunk_index),
+                    )
+                })?;
+            buf[..chunk.len()].copy_from_slice(&chunk);
+            Ok(chunk.len())
+        }
+    }
+
+    #[test]
+    fn test_writer() -> Result<()> {
+        let writer = InMemoryEditor::new();
+        let buf = [1; 4096];
+        assert_eq!(writer.data.borrow().len(), 0);
+
+        assert_eq!(writer.write_at(&buf, 16384)?, 4096);
+        assert_eq!(writer.data.borrow()[16384..16384 + 4096], buf);
+
+        assert_eq!(writer.write_at(&buf, 2048)?, 4096);
+        assert_eq!(writer.data.borrow()[2048..2048 + 4096], buf);
+
+        assert_eq!(writer.data.borrow().len(), 16384 + 4096);
+        Ok(())
+    }
+
+    #[test]
+    fn test_verified_writer_no_write() -> Result<()> {
+        // Verify fs-verity hash without any write.
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95")
+                .as_slice()
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_verified_writer_from_zero() -> Result<()> {
+        // Verify a write of a full chunk.
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
+                .as_slice()
+        );
+
+        // Verify a write of across multiple chunks.
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 4097], 0)?, 4097);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52")
+                .as_slice()
+        );
+
+        // Verify another write of across multiple chunks.
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 10000], 0)?, 10000);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("7545409b556071554d18973a29b96409588c7cda4edd00d5586b27a11e1a523b")
+                .as_slice()
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_verified_writer_unaligned() -> Result<()> {
+        // Verify small, unaligned write beyond EOF.
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 5], 3)?, 5);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("a23fc5130d3d7b3323fc4b4a5e79d5d3e9ddf3a3f5872639e867713512c6702f")
+                .as_slice()
+        );
+
+        // Verify bigger, unaligned write beyond EOF.
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 6000], 4000)?, 6000);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("d16d4c1c186d757e646f76208b21254f50d7f07ea07b1505ff48b2a6f603f989")
+                .as_slice()
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_verified_writer_with_hole() -> Result<()> {
+        // Verify an aligned write beyond EOF with holes.
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("4df2aefd8c2a9101d1d8770dca3ede418232eabce766bb8e020395eae2e97103")
+                .as_slice()
+        );
+
+        // Verify an unaligned write beyond EOF with holes.
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 5000], 6000)?, 5000);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("47d5da26f6934484e260630a69eb2eebb21b48f69bc8fbf8486d1694b7dba94f")
+                .as_slice()
+        );
+
+        // Just another example with a small write.
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 5], 16381)?, 5);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("8bd118821fb4aff26bb4b51d485cc481a093c68131b7f4f112e9546198449752")
+                .as_slice()
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_verified_writer_various_writes() -> Result<()> {
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
+        assert_eq!(file.write_at(&[1; 2048], 4096 + 2048)?, 2048);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
+                .as_slice()
+        );
+        assert_eq!(file.write_at(&[1; 2048], 2048)?, 2048);
+        assert_eq!(file.write_at(&[1; 2048], 4096)?, 2048);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
+                .as_slice()
+        );
+        assert_eq!(file.write_at(&[0; 2048], 2048)?, 2048);
+        assert_eq!(file.write_at(&[0; 2048], 4096)?, 2048);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
+                .as_slice()
+        );
+        assert_eq!(file.write_at(&[1; 4096], 2048)?, 4096);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
+                .as_slice()
+        );
+        assert_eq!(file.write_at(&[1; 2048], 8192)?, 2048);
+        assert_eq!(file.write_at(&[1; 2048], 8192 + 2048)?, 2048);
+        assert_eq!(
+            file.calculate_fsverity_digest()?,
+            to_u8_vec("23cbac08371e6ee838ebcc7ae6512b939d2226e802337be7b383c3e046047d24")
+                .as_slice()
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_verified_writer_inconsistent_read() -> Result<()> {
+        let file = VerifiedFileEditor::new(InMemoryEditor::new());
+        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);
+
+        // Replace the expected hash of the first/0-th chunk. An incomplete write will fail when it
+        // detects the inconsistent read.
+        {
+            let mut merkle_tree = file.merkle_tree.write().unwrap();
+            let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+            merkle_tree.update_hash(0, &overriding_hash, 8192);
+        }
+        assert!(file.write_at(&[1; 1], 2048).is_err());
+
+        // A write of full chunk can still succeed. Also fixed the inconsistency.
+        assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
+
+        // Replace the expected hash of the second/1-th chunk. A write range from previous chunk can
+        // still succeed, but returns early upon detecting the inconsistent read. A
+        // resumed write will fail since no bytes can be written due to the same inconsistency.
+        {
+            let mut merkle_tree = file.merkle_tree.write().unwrap();
+            let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+            merkle_tree.update_hash(1, &overriding_hash, 8192);
+        }
+        assert_eq!(file.write_at(&[10; 8000], 0)?, 4096);
+        assert!(file.write_at(&[10; 8000 - 4096], 4096).is_err());
+        Ok(())
+    }
+
+    #[test]
+    fn test_verified_writer_failed_read_back() -> Result<()> {
+        let mut writer = InMemoryEditor::new();
+        writer.fail_read = true;
+        let file = VerifiedFileEditor::new(writer);
+        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);
+
+        // When a read back is needed, a read failure will fail to write.
+        assert!(file.write_at(&[1; 1], 2048).is_err());
+        Ok(())
+    }
+
+    fn to_u8_vec(hex_str: &str) -> Vec<u8> {
+        assert!(hex_str.len() % 2 == 0);
+        (0..hex_str.len())
+            .step_by(2)
+            .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap())
+            .collect()
+    }
+}
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index fd108f5..13de42a 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -22,7 +22,7 @@
 use crate::auth::Authenticator;
 use crate::common::{divide_roundup, CHUNK_SIZE};
 use crate::crypto::{CryptoError, Sha256Hasher};
-use crate::reader::ReadOnlyDataByChunk;
+use crate::file::{ChunkBuffer, ReadByChunk};
 
 const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
 
@@ -36,14 +36,14 @@
     Sha256Hasher::new()?.update(&chunk)?.update(&ZEROS[..padding_size])?.finalize()
 }
 
-fn verity_check<T: ReadOnlyDataByChunk>(
+fn verity_check<T: ReadByChunk>(
     chunk: &[u8],
     chunk_index: u64,
     file_size: u64,
     merkle_tree: &T,
 ) -> Result<HashBuffer, FsverityError> {
     // The caller should not be able to produce a chunk at the first place if `file_size` is 0. The
-    // current implementation expects to crash when a `ReadOnlyDataByChunk` implementation reads
+    // current implementation expects to crash when a `ReadByChunk` implementation reads
     // beyond the file size, including empty file.
     assert_ne!(file_size, 0);
 
@@ -68,7 +68,7 @@
 /// offset of the child node's hash. It is up to the iterator user to use the node and hash,
 /// e.g. for the actual verification.
 #[allow(clippy::needless_collect)]
-fn fsverity_walk<T: ReadOnlyDataByChunk>(
+fn fsverity_walk<T: ReadByChunk>(
     chunk_index: u64,
     file_size: u64,
     merkle_tree: &T,
@@ -125,21 +125,21 @@
     Ok(formatted_digest)
 }
 
-pub struct FsverityChunkedFileReader<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> {
+pub struct VerifiedFileReader<F: ReadByChunk, M: ReadByChunk> {
     chunked_file: F,
     file_size: u64,
     merkle_tree: M,
     root_hash: HashBuffer,
 }
 
-impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> FsverityChunkedFileReader<F, M> {
+impl<F: ReadByChunk, M: ReadByChunk> VerifiedFileReader<F, M> {
     pub fn new<A: Authenticator>(
         authenticator: &A,
         chunked_file: F,
         file_size: u64,
         sig: Vec<u8>,
         merkle_tree: M,
-    ) -> Result<FsverityChunkedFileReader<F, M>, FsverityError> {
+    ) -> Result<VerifiedFileReader<F, M>, FsverityError> {
         let mut buf = [0u8; CHUNK_SIZE as usize];
         let size = merkle_tree.read_chunk(0, &mut buf)?;
         if buf.len() != size {
@@ -149,18 +149,15 @@
         let formatted_digest = build_fsverity_formatted_digest(&root_hash, file_size)?;
         let valid = authenticator.verify(&sig, &formatted_digest)?;
         if valid {
-            Ok(FsverityChunkedFileReader { chunked_file, file_size, merkle_tree, root_hash })
+            Ok(VerifiedFileReader { chunked_file, file_size, merkle_tree, root_hash })
         } else {
             Err(FsverityError::BadSignature)
         }
     }
 }
 
-impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> ReadOnlyDataByChunk
-    for FsverityChunkedFileReader<F, M>
-{
-    fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
-        debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
+impl<F: ReadByChunk, M: ReadByChunk> ReadByChunk for VerifiedFileReader<F, M> {
+    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
         let size = self.chunked_file.read_chunk(chunk_index, buf)?;
         let root_hash = verity_check(&buf[..size], chunk_index, self.file_size, &self.merkle_tree)
             .map_err(|_| io::Error::from_raw_os_error(EIO))?;
@@ -176,13 +173,12 @@
 mod tests {
     use super::*;
     use crate::auth::FakeAuthenticator;
-    use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
+    use crate::file::{LocalFileReader, ReadByChunk};
     use anyhow::Result;
-    use std::fs::File;
+    use std::fs::{self, File};
     use std::io::Read;
 
-    type LocalFsverityChunkedFileReader =
-        FsverityChunkedFileReader<ChunkedFileReader, ChunkedFileReader>;
+    type LocalVerifiedFileReader = VerifiedFileReader<LocalFileReader, LocalFileReader>;
 
     fn total_chunk_number(file_size: u64) -> u64 {
         (file_size + 4095) / 4096
@@ -193,21 +189,15 @@
         content_path: &str,
         merkle_tree_path: &str,
         signature_path: &str,
-    ) -> Result<(LocalFsverityChunkedFileReader, u64)> {
-        let file_reader = ChunkedFileReader::new(File::open(content_path)?)?;
+    ) -> Result<(LocalVerifiedFileReader, u64)> {
+        let file_reader = LocalFileReader::new(File::open(content_path)?)?;
         let file_size = file_reader.len();
-        let merkle_tree = ChunkedFileReader::new(File::open(merkle_tree_path)?)?;
+        let merkle_tree = LocalFileReader::new(File::open(merkle_tree_path)?)?;
         let mut sig = Vec::new();
         let _ = File::open(signature_path)?.read_to_end(&mut sig)?;
         let authenticator = FakeAuthenticator::always_succeed();
         Ok((
-            FsverityChunkedFileReader::new(
-                &authenticator,
-                file_reader,
-                file_size,
-                sig,
-                merkle_tree,
-            )?,
+            VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree)?,
             file_size,
         ))
     }
@@ -222,7 +212,7 @@
 
         for i in 0..total_chunk_number(file_size) {
             let mut buf = [0u8; 4096];
-            assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
         }
         Ok(())
     }
@@ -237,7 +227,7 @@
 
         for i in 0..total_chunk_number(file_size) {
             let mut buf = [0u8; 4096];
-            assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
         }
         Ok(())
     }
@@ -252,7 +242,7 @@
 
         for i in 0..total_chunk_number(file_size) {
             let mut buf = [0u8; 4096];
-            assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
         }
         Ok(())
     }
@@ -271,27 +261,21 @@
         let num_hashes = 4096 / 32;
         let last_index = num_hashes;
         for i in 0..last_index {
-            assert!(file_reader.read_chunk(i, &mut buf[..]).is_err());
+            assert!(file_reader.read_chunk(i, &mut buf).is_err());
         }
-        assert!(file_reader.read_chunk(last_index, &mut buf[..]).is_ok());
+        assert!(file_reader.read_chunk(last_index, &mut buf).is_ok());
         Ok(())
     }
 
     #[test]
     fn invalid_signature() -> Result<()> {
         let authenticator = FakeAuthenticator::always_fail();
-        let file_reader = ChunkedFileReader::new(File::open("testdata/input.4m")?)?;
+        let file_reader = LocalFileReader::new(File::open("testdata/input.4m")?)?;
         let file_size = file_reader.len();
-        let merkle_tree = ChunkedFileReader::new(File::open("testdata/input.4m.merkle_dump")?)?;
-        let sig = include_bytes!("../../testdata/input.4m.fsv_sig").to_vec();
-        assert!(FsverityChunkedFileReader::new(
-            &authenticator,
-            file_reader,
-            file_size,
-            sig,
-            merkle_tree
-        )
-        .is_err());
+        let merkle_tree = LocalFileReader::new(File::open("testdata/input.4m.merkle_dump")?)?;
+        let sig = fs::read("testdata/input.4m.fsv_sig")?;
+        assert!(VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree)
+            .is_err());
         Ok(())
     }
 }
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index f5dd6ec..d97291c 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -26,31 +26,45 @@
 use std::path::Path;
 use std::time::Duration;
 
-use fuse::filesystem::{Context, DirEntry, DirectoryIterator, Entry, FileSystem, ZeroCopyWriter};
+use fuse::filesystem::{
+    Context, DirEntry, DirectoryIterator, Entry, FileSystem, FsOptions, ZeroCopyReader,
+    ZeroCopyWriter,
+};
 use fuse::mount::MountOption;
 
-use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::fsverity::FsverityChunkedFileReader;
-use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
-use crate::remote_file::{RemoteChunkedFileReader, RemoteFsverityMerkleTreeReader};
+use crate::common::{divide_roundup, ChunkedSizeIter, CHUNK_SIZE};
+use crate::file::{
+    LocalFileReader, RandomWrite, ReadByChunk, RemoteFileEditor, RemoteFileReader,
+    RemoteMerkleTreeReader,
+};
+use crate::fsverity::{VerifiedFileEditor, VerifiedFileReader};
 
 const DEFAULT_METADATA_TIMEOUT: std::time::Duration = Duration::from_secs(5);
 
 pub type Inode = u64;
 type Handle = u64;
 
-type RemoteFsverityChunkedFileReader =
-    FsverityChunkedFileReader<RemoteChunkedFileReader, RemoteFsverityMerkleTreeReader>;
-
-// A debug only type where everything are stored as local files.
-type FileBackedFsverityChunkedFileReader =
-    FsverityChunkedFileReader<ChunkedFileReader, ChunkedFileReader>;
-
+/// `FileConfig` defines the file type supported by AuthFS.
 pub enum FileConfig {
-    LocalVerifiedFile(FileBackedFsverityChunkedFileReader, u64),
-    LocalUnverifiedFile(ChunkedFileReader, u64),
-    RemoteVerifiedFile(RemoteFsverityChunkedFileReader, u64),
-    RemoteUnverifiedFile(RemoteChunkedFileReader, u64),
+    /// A file type that is verified against fs-verity signature (thus read-only). The file is
+    /// backed by a local file. Debug only.
+    LocalVerifiedReadonlyFile {
+        reader: VerifiedFileReader<LocalFileReader, LocalFileReader>,
+        file_size: u64,
+    },
+    /// A file type that is a read-only passthrough from a local file. Debug only.
+    LocalUnverifiedReadonlyFile { reader: LocalFileReader, file_size: u64 },
+    /// A file type that is verified against fs-verity signature (thus read-only). The file is
+    /// served from a remote server.
+    RemoteVerifiedReadonlyFile {
+        reader: VerifiedFileReader<RemoteFileReader, RemoteMerkleTreeReader>,
+        file_size: u64,
+    },
+    /// A file type that is a read-only passthrough from a file on a remote server.
+    RemoteUnverifiedReadonlyFile { reader: RemoteFileReader, file_size: u64 },
+    /// A file type that is initially empty, and the content is stored on a remote server. File
+    /// integrity is guaranteed with private Merkle tree.
+    RemoteVerifiedNewFile { editor: VerifiedFileEditor<RemoteFileEditor> },
 }
 
 struct AuthFs {
@@ -92,11 +106,20 @@
     }
 }
 
-fn create_stat(ino: libc::ino_t, file_size: u64) -> io::Result<libc::stat64> {
+enum FileMode {
+    ReadOnly,
+    ReadWrite,
+}
+
+fn create_stat(ino: libc::ino_t, file_size: u64, file_mode: FileMode) -> io::Result<libc::stat64> {
     let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
 
     st.st_ino = ino;
-    st.st_mode = libc::S_IFREG | libc::S_IRUSR | libc::S_IRGRP | libc::S_IROTH;
+    st.st_mode = match file_mode {
+        // Until needed, let's just grant the owner access.
+        FileMode::ReadOnly => libc::S_IFREG | libc::S_IRUSR,
+        FileMode::ReadWrite => libc::S_IFREG | libc::S_IRUSR | libc::S_IWUSR,
+    };
     st.st_dev = 0;
     st.st_nlink = 1;
     st.st_uid = 0;
@@ -111,40 +134,11 @@
     Ok(st)
 }
 
-/// An iterator that generates (offset, size) for a chunked read operation, where offset is the
-/// global file offset, and size is the amount of read from the offset.
-struct ChunkReadIter {
-    remaining: usize,
-    offset: u64,
-}
-
-impl ChunkReadIter {
-    pub fn new(remaining: usize, offset: u64) -> Self {
-        ChunkReadIter { remaining, offset }
-    }
-}
-
-impl Iterator for ChunkReadIter {
-    type Item = (u64, usize);
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.remaining == 0 {
-            return None;
-        }
-        let chunk_data_size =
-            std::cmp::min(self.remaining, (CHUNK_SIZE - self.offset % CHUNK_SIZE) as usize);
-        let retval = (self.offset, chunk_data_size);
-        self.offset += chunk_data_size as u64;
-        self.remaining = self.remaining.saturating_sub(chunk_data_size);
-        Some(retval)
-    }
-}
-
 fn offset_to_chunk_index(offset: u64) -> u64 {
     offset / CHUNK_SIZE
 }
 
-fn read_chunks<W: io::Write, T: ReadOnlyDataByChunk>(
+fn read_chunks<W: io::Write, T: ReadByChunk>(
     mut w: W,
     file: &T,
     file_size: u64,
@@ -153,7 +147,7 @@
 ) -> io::Result<usize> {
     let remaining = file_size.saturating_sub(offset);
     let size_to_read = std::cmp::min(size as usize, remaining as usize);
-    let total = ChunkReadIter::new(size_to_read, offset).try_fold(
+    let total = ChunkedSizeIter::new(size_to_read, offset, CHUNK_SIZE as usize).try_fold(
         0,
         |total, (current_offset, planned_data_size)| {
             // TODO(victorhsieh): There might be a non-trivial way to avoid this copy. For example,
@@ -197,6 +191,12 @@
         self.max_write
     }
 
+    fn init(&self, _capable: FsOptions) -> io::Result<FsOptions> {
+        // Enable writeback cache for better performance especially since our bandwidth to the
+        // backend service is limited.
+        Ok(FsOptions::WRITEBACK_CACHE)
+    }
+
     fn lookup(&self, _ctx: Context, _parent: Inode, name: &CStr) -> io::Result<Entry> {
         // Only accept file name that looks like an integrer. Files in the pool are simply exposed
         // by their inode number. Also, there is currently no directory structure.
@@ -206,10 +206,15 @@
         // be static.
         let inode = num.parse::<Inode>().map_err(|_| io::Error::from_raw_os_error(libc::ENOENT))?;
         let st = match self.get_file_config(&inode)? {
-            FileConfig::LocalVerifiedFile(_, file_size)
-            | FileConfig::LocalUnverifiedFile(_, file_size)
-            | FileConfig::RemoteUnverifiedFile(_, file_size)
-            | FileConfig::RemoteVerifiedFile(_, file_size) => create_stat(inode, *file_size)?,
+            FileConfig::LocalVerifiedReadonlyFile { file_size, .. }
+            | FileConfig::LocalUnverifiedReadonlyFile { file_size, .. }
+            | FileConfig::RemoteUnverifiedReadonlyFile { file_size, .. }
+            | FileConfig::RemoteVerifiedReadonlyFile { file_size, .. } => {
+                create_stat(inode, *file_size, FileMode::ReadOnly)?
+            }
+            FileConfig::RemoteVerifiedNewFile { editor } => {
+                create_stat(inode, editor.size(), FileMode::ReadWrite)?
+            }
         };
         Ok(Entry {
             inode,
@@ -228,10 +233,15 @@
     ) -> io::Result<(libc::stat64, Duration)> {
         Ok((
             match self.get_file_config(&inode)? {
-                FileConfig::LocalVerifiedFile(_, file_size)
-                | FileConfig::LocalUnverifiedFile(_, file_size)
-                | FileConfig::RemoteUnverifiedFile(_, file_size)
-                | FileConfig::RemoteVerifiedFile(_, file_size) => create_stat(inode, *file_size)?,
+                FileConfig::LocalVerifiedReadonlyFile { file_size, .. }
+                | FileConfig::LocalUnverifiedReadonlyFile { file_size, .. }
+                | FileConfig::RemoteUnverifiedReadonlyFile { file_size, .. }
+                | FileConfig::RemoteVerifiedReadonlyFile { file_size, .. } => {
+                    create_stat(inode, *file_size, FileMode::ReadOnly)?
+                }
+                FileConfig::RemoteVerifiedNewFile { editor } => {
+                    create_stat(inode, editor.size(), FileMode::ReadWrite)?
+                }
             },
             DEFAULT_METADATA_TIMEOUT,
         ))
@@ -244,21 +254,29 @@
         flags: u32,
     ) -> io::Result<(Option<Self::Handle>, fuse::sys::OpenOptions)> {
         // Since file handle is not really used in later operations (which use Inode directly),
-        // return None as the handle..
+        // return None as the handle.
         match self.get_file_config(&inode)? {
-            FileConfig::LocalVerifiedFile(_, _) | FileConfig::RemoteVerifiedFile(_, _) => {
+            FileConfig::LocalVerifiedReadonlyFile { .. }
+            | FileConfig::RemoteVerifiedReadonlyFile { .. } => {
                 check_access_mode(flags, libc::O_RDONLY)?;
                 // Once verified, and only if verified, the file content can be cached. This is not
-                // really needed for a local file, but is the behavior of RemoteVerifiedFile later.
+                // really needed for a local file, but is the behavior of RemoteVerifiedReadonlyFile
+                // later.
                 Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
             }
-            FileConfig::LocalUnverifiedFile(_, _) | FileConfig::RemoteUnverifiedFile(_, _) => {
+            FileConfig::LocalUnverifiedReadonlyFile { .. }
+            | FileConfig::RemoteUnverifiedReadonlyFile { .. } => {
                 check_access_mode(flags, libc::O_RDONLY)?;
                 // Do not cache the content. This type of file is supposed to be verified using
                 // dm-verity. The filesystem mount over dm-verity already is already cached, so use
                 // direct I/O here to avoid double cache.
                 Ok((None, fuse::sys::OpenOptions::DIRECT_IO))
             }
+            FileConfig::RemoteVerifiedNewFile { .. } => {
+                // No need to check access modes since all the modes are allowed to the
+                // read-writable file.
+                Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
+            }
         }
     }
 
@@ -274,18 +292,45 @@
         _flags: u32,
     ) -> io::Result<usize> {
         match self.get_file_config(&inode)? {
-            FileConfig::LocalVerifiedFile(file, file_size) => {
-                read_chunks(w, file, *file_size, offset, size)
+            FileConfig::LocalVerifiedReadonlyFile { reader, file_size } => {
+                read_chunks(w, reader, *file_size, offset, size)
             }
-            FileConfig::LocalUnverifiedFile(file, file_size) => {
-                read_chunks(w, file, *file_size, offset, size)
+            FileConfig::LocalUnverifiedReadonlyFile { reader, file_size } => {
+                read_chunks(w, reader, *file_size, offset, size)
             }
-            FileConfig::RemoteVerifiedFile(file, file_size) => {
-                read_chunks(w, file, *file_size, offset, size)
+            FileConfig::RemoteVerifiedReadonlyFile { reader, file_size } => {
+                read_chunks(w, reader, *file_size, offset, size)
             }
-            FileConfig::RemoteUnverifiedFile(file, file_size) => {
-                read_chunks(w, file, *file_size, offset, size)
+            FileConfig::RemoteUnverifiedReadonlyFile { reader, file_size } => {
+                read_chunks(w, reader, *file_size, offset, size)
             }
+            FileConfig::RemoteVerifiedNewFile { editor } => {
+                // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
+                // request a read even if the file is open with O_WRONLY.
+                read_chunks(w, editor, editor.size(), offset, size)
+            }
+        }
+    }
+
+    fn write<R: io::Read + ZeroCopyReader>(
+        &self,
+        _ctx: Context,
+        inode: Self::Inode,
+        _handle: Self::Handle,
+        mut r: R,
+        size: u32,
+        offset: u64,
+        _lock_owner: Option<u64>,
+        _delayed_write: bool,
+        _flags: u32,
+    ) -> io::Result<usize> {
+        match self.get_file_config(&inode)? {
+            FileConfig::RemoteVerifiedNewFile { editor } => {
+                let mut buf = vec![0; size as usize];
+                r.read_exact(&mut buf)?;
+                editor.write_at(&buf, offset)
+            }
+            _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
         }
     }
 }
@@ -325,27 +370,3 @@
         AuthFs::new(file_pool, max_write),
     )
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    fn collect_chunk_read_iter(remaining: usize, offset: u64) -> Vec<(u64, usize)> {
-        ChunkReadIter::new(remaining, offset).collect::<Vec<_>>()
-    }
-
-    #[test]
-    fn test_chunk_read_iter() {
-        assert_eq!(collect_chunk_read_iter(4096, 0), [(0, 4096)]);
-        assert_eq!(collect_chunk_read_iter(8192, 0), [(0, 4096), (4096, 4096)]);
-        assert_eq!(collect_chunk_read_iter(8192, 4096), [(4096, 4096), (8192, 4096)]);
-
-        assert_eq!(
-            collect_chunk_read_iter(16384, 1),
-            [(1, 4095), (4096, 4096), (8192, 4096), (12288, 4096), (16384, 1)]
-        );
-
-        assert_eq!(collect_chunk_read_iter(0, 0), []);
-        assert_eq!(collect_chunk_read_iter(0, 100), []);
-    }
-}
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 41b922d..0db73e9 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -38,16 +38,14 @@
 mod auth;
 mod common;
 mod crypto;
+mod file;
 mod fsverity;
 mod fusefs;
-mod reader;
-mod remote_file;
 
 use auth::FakeAuthenticator;
-use fsverity::FsverityChunkedFileReader;
+use file::{LocalFileReader, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
+use fsverity::{VerifiedFileEditor, VerifiedFileReader};
 use fusefs::{FileConfig, Inode};
-use reader::ChunkedFileReader;
-use remote_file::{RemoteChunkedFileReader, RemoteFsverityMerkleTreeReader};
 
 #[derive(StructOpt)]
 struct Args {
@@ -55,31 +53,38 @@
     #[structopt(parse(from_os_str))]
     mount_point: PathBuf,
 
-    /// A verifiable read-only file. Can be multiple.
+    /// A read-only remote file with integrity check. Can be multiple.
     ///
     /// For example, `--remote-verified-file 5:10:1234:/path/to/cert` tells the filesystem to
     /// associate entry 5 with a remote file 10 of size 1234 bytes, and need to be verified against
     /// the /path/to/cert.
-    #[structopt(long, parse(try_from_str = parse_remote_verified_file_option))]
-    remote_verified_file: Vec<RemoteVerifiedFileConfig>,
+    #[structopt(long, parse(try_from_str = parse_remote_ro_file_option))]
+    remote_ro_file: Vec<OptionRemoteRoFile>,
 
-    /// An unverifiable read-only file. Can be multiple.
+    /// A read-only remote file without integrity check. Can be multiple.
     ///
     /// For example, `--remote-unverified-file 5:10:1234` tells the filesystem to associate entry 5
     /// with a remote file 10 of size 1234 bytes.
-    #[structopt(long, parse(try_from_str = parse_remote_unverified_file_option))]
-    remote_unverified_file: Vec<RemoteUnverifiedFileConfig>,
+    #[structopt(long, parse(try_from_str = parse_remote_ro_file_unverified_option))]
+    remote_ro_file_unverified: Vec<OptionRemoteRoFileUnverified>,
 
-    /// Debug only. A readonly file to be protected by fs-verity. Can be multiple.
-    #[structopt(long, parse(try_from_str = parse_local_verified_file_option))]
-    local_verified_file: Vec<LocalVerifiedFileConfig>,
+    /// A new read-writable remote file with integrity check. Can be multiple.
+    ///
+    /// For example, `--remote-new-rw-file 12:34` tells the filesystem to associate entry 12
+    /// with a remote file 34.
+    #[structopt(long, parse(try_from_str = parse_remote_new_rw_file_option))]
+    remote_new_rw_file: Vec<OptionRemoteRwFile>,
 
-    /// Debug only. An unverified read-only file. Can be multiple.
-    #[structopt(long, parse(try_from_str = parse_local_unverified_file_option))]
-    local_unverified_file: Vec<LocalUnverifiedFileConfig>,
+    /// Debug only. A read-only local file with integrity check. Can be multiple.
+    #[structopt(long, parse(try_from_str = parse_local_file_ro_option))]
+    local_ro_file: Vec<OptionLocalFileRo>,
+
+    /// Debug only. A read-only local file without integrity check. Can be multiple.
+    #[structopt(long, parse(try_from_str = parse_local_ro_file_unverified_ro_option))]
+    local_ro_file_unverified: Vec<OptionLocalRoFileUnverified>,
 }
 
-struct RemoteVerifiedFileConfig {
+struct OptionRemoteRoFile {
     ino: Inode,
 
     /// ID to refer to the remote file.
@@ -94,7 +99,7 @@
     _certificate_path: PathBuf,
 }
 
-struct RemoteUnverifiedFileConfig {
+struct OptionRemoteRoFileUnverified {
     ino: Inode,
 
     /// ID to refer to the remote file.
@@ -104,7 +109,14 @@
     file_size: u64,
 }
 
-struct LocalVerifiedFileConfig {
+struct OptionRemoteRwFile {
+    ino: Inode,
+
+    /// ID to refer to the remote file.
+    remote_id: i32,
+}
+
+struct OptionLocalFileRo {
     ino: Inode,
 
     /// Local path of the backing file.
@@ -121,19 +133,19 @@
     _certificate_path: PathBuf,
 }
 
-struct LocalUnverifiedFileConfig {
+struct OptionLocalRoFileUnverified {
     ino: Inode,
 
     /// Local path of the backing file.
     file_path: PathBuf,
 }
 
-fn parse_remote_verified_file_option(option: &str) -> Result<RemoteVerifiedFileConfig> {
+fn parse_remote_ro_file_option(option: &str) -> Result<OptionRemoteRoFile> {
     let strs: Vec<&str> = option.split(':').collect();
     if strs.len() != 4 {
         bail!("Invalid option: {}", option);
     }
-    Ok(RemoteVerifiedFileConfig {
+    Ok(OptionRemoteRoFile {
         ino: strs[0].parse::<Inode>()?,
         remote_id: strs[1].parse::<i32>()?,
         file_size: strs[2].parse::<u64>()?,
@@ -141,24 +153,35 @@
     })
 }
 
-fn parse_remote_unverified_file_option(option: &str) -> Result<RemoteUnverifiedFileConfig> {
+fn parse_remote_ro_file_unverified_option(option: &str) -> Result<OptionRemoteRoFileUnverified> {
     let strs: Vec<&str> = option.split(':').collect();
     if strs.len() != 3 {
         bail!("Invalid option: {}", option);
     }
-    Ok(RemoteUnverifiedFileConfig {
+    Ok(OptionRemoteRoFileUnverified {
         ino: strs[0].parse::<Inode>()?,
         remote_id: strs[1].parse::<i32>()?,
         file_size: strs[2].parse::<u64>()?,
     })
 }
 
-fn parse_local_verified_file_option(option: &str) -> Result<LocalVerifiedFileConfig> {
+fn parse_remote_new_rw_file_option(option: &str) -> Result<OptionRemoteRwFile> {
+    let strs: Vec<&str> = option.split(':').collect();
+    if strs.len() != 2 {
+        bail!("Invalid option: {}", option);
+    }
+    Ok(OptionRemoteRwFile {
+        ino: strs[0].parse::<Inode>().unwrap(),
+        remote_id: strs[1].parse::<i32>().unwrap(),
+    })
+}
+
+fn parse_local_file_ro_option(option: &str) -> Result<OptionLocalFileRo> {
     let strs: Vec<&str> = option.split(':').collect();
     if strs.len() != 5 {
         bail!("Invalid option: {}", option);
     }
-    Ok(LocalVerifiedFileConfig {
+    Ok(OptionLocalFileRo {
         ino: strs[0].parse::<Inode>()?,
         file_path: PathBuf::from(strs[1]),
         merkle_tree_dump_path: PathBuf::from(strs[2]),
@@ -167,92 +190,94 @@
     })
 }
 
-fn parse_local_unverified_file_option(option: &str) -> Result<LocalUnverifiedFileConfig> {
+fn parse_local_ro_file_unverified_ro_option(option: &str) -> Result<OptionLocalRoFileUnverified> {
     let strs: Vec<&str> = option.split(':').collect();
     if strs.len() != 2 {
         bail!("Invalid option: {}", option);
     }
-    Ok(LocalUnverifiedFileConfig {
+    Ok(OptionLocalRoFileUnverified {
         ino: strs[0].parse::<Inode>()?,
         file_path: PathBuf::from(strs[1]),
     })
 }
 
 fn new_config_remote_verified_file(remote_id: i32, file_size: u64) -> Result<FileConfig> {
-    let service = remote_file::server::get_local_service();
+    let service = file::get_local_binder();
     let signature = service.readFsveritySignature(remote_id).context("Failed to read signature")?;
 
     let service = Arc::new(Mutex::new(service));
     let authenticator = FakeAuthenticator::always_succeed();
-    Ok(FileConfig::RemoteVerifiedFile(
-        FsverityChunkedFileReader::new(
+    Ok(FileConfig::RemoteVerifiedReadonlyFile {
+        reader: VerifiedFileReader::new(
             &authenticator,
-            RemoteChunkedFileReader::new(Arc::clone(&service), remote_id),
+            RemoteFileReader::new(Arc::clone(&service), remote_id),
             file_size,
             signature,
-            RemoteFsverityMerkleTreeReader::new(Arc::clone(&service), remote_id),
+            RemoteMerkleTreeReader::new(Arc::clone(&service), remote_id),
         )?,
         file_size,
-    ))
+    })
 }
 
 fn new_config_remote_unverified_file(remote_id: i32, file_size: u64) -> Result<FileConfig> {
-    let file_reader = RemoteChunkedFileReader::new(
-        Arc::new(Mutex::new(remote_file::server::get_local_service())),
-        remote_id,
-    );
-    Ok(FileConfig::RemoteUnverifiedFile(file_reader, file_size))
+    let reader = RemoteFileReader::new(Arc::new(Mutex::new(file::get_local_binder())), remote_id);
+    Ok(FileConfig::RemoteUnverifiedReadonlyFile { reader, file_size })
 }
 
-fn new_config_local_verified_file(
+fn new_config_local_ro_file(
     protected_file: &PathBuf,
     merkle_tree_dump: &PathBuf,
     signature: &PathBuf,
 ) -> Result<FileConfig> {
     let file = File::open(&protected_file)?;
     let file_size = file.metadata()?.len();
-    let file_reader = ChunkedFileReader::new(file)?;
-    let merkle_tree_reader = ChunkedFileReader::new(File::open(merkle_tree_dump)?)?;
+    let file_reader = LocalFileReader::new(file)?;
+    let merkle_tree_reader = LocalFileReader::new(File::open(merkle_tree_dump)?)?;
     let authenticator = FakeAuthenticator::always_succeed();
     let mut sig = Vec::new();
     let _ = File::open(signature)?.read_to_end(&mut sig)?;
-    let file_reader = FsverityChunkedFileReader::new(
-        &authenticator,
-        file_reader,
-        file_size,
-        sig,
-        merkle_tree_reader,
-    )?;
-    Ok(FileConfig::LocalVerifiedFile(file_reader, file_size))
+    let reader =
+        VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree_reader)?;
+    Ok(FileConfig::LocalVerifiedReadonlyFile { reader, file_size })
 }
 
-fn new_config_local_unverified_file(file_path: &PathBuf) -> Result<FileConfig> {
-    let file_reader = ChunkedFileReader::new(File::open(file_path)?)?;
-    let file_size = file_reader.len();
-    Ok(FileConfig::LocalUnverifiedFile(file_reader, file_size))
+fn new_config_local_ro_file_unverified(file_path: &PathBuf) -> Result<FileConfig> {
+    let reader = LocalFileReader::new(File::open(file_path)?)?;
+    let file_size = reader.len();
+    Ok(FileConfig::LocalUnverifiedReadonlyFile { reader, file_size })
+}
+
+fn new_config_remote_new_verified_file(remote_id: i32) -> Result<FileConfig> {
+    let remote_file =
+        RemoteFileEditor::new(Arc::new(Mutex::new(file::get_local_binder())), remote_id);
+    Ok(FileConfig::RemoteVerifiedNewFile { editor: VerifiedFileEditor::new(remote_file) })
 }
 
 fn prepare_file_pool(args: &Args) -> Result<BTreeMap<Inode, FileConfig>> {
     let mut file_pool = BTreeMap::new();
 
-    for config in &args.remote_verified_file {
+    for config in &args.remote_ro_file {
         file_pool.insert(
             config.ino,
             new_config_remote_verified_file(config.remote_id, config.file_size)?,
         );
     }
 
-    for config in &args.remote_unverified_file {
+    for config in &args.remote_ro_file_unverified {
         file_pool.insert(
             config.ino,
             new_config_remote_unverified_file(config.remote_id, config.file_size)?,
         );
     }
 
-    for config in &args.local_verified_file {
+    for config in &args.remote_new_rw_file {
+        file_pool.insert(config.ino, new_config_remote_new_verified_file(config.remote_id)?);
+    }
+
+    for config in &args.local_ro_file {
         file_pool.insert(
             config.ino,
-            new_config_local_verified_file(
+            new_config_local_ro_file(
                 &config.file_path,
                 &config.merkle_tree_dump_path,
                 &config.signature_path,
@@ -260,8 +285,8 @@
         );
     }
 
-    for config in &args.local_unverified_file {
-        file_pool.insert(config.ino, new_config_local_unverified_file(&config.file_path)?);
+    for config in &args.local_ro_file_unverified {
+        file_pool.insert(config.ino, new_config_local_ro_file_unverified(&config.file_path)?);
     }
 
     Ok(file_pool)
diff --git a/authfs/src/reader.rs b/authfs/src/reader.rs
deleted file mode 100644
index 0242afa..0000000
--- a/authfs/src/reader.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! A module for reading data by chunks.
-
-use std::fs::File;
-use std::io::Result;
-use std::os::unix::fs::FileExt;
-
-use crate::common::CHUNK_SIZE;
-
-/// A trait for reading data by chunks. The data is assumed readonly and has fixed length. Chunks
-/// can be read by specifying the chunk index. Only the last chunk may have incomplete chunk size.
-pub trait ReadOnlyDataByChunk {
-    /// Read the `chunk_index`-th chunk to `buf`. Each slice/chunk has size `CHUNK_SIZE` except for
-    /// the last one, which can be an incomplete chunk. `buf` is currently required to be large
-    /// enough to hold a full chunk of data. Reading beyond the file size (including empty file)
-    /// will crash.
-    fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> Result<usize>;
-}
-
-fn chunk_index_to_range(size: u64, chunk_index: u64) -> Result<(u64, u64)> {
-    let start = chunk_index * CHUNK_SIZE;
-    assert!(start < size);
-    let end = std::cmp::min(size, start + CHUNK_SIZE);
-    Ok((start, end))
-}
-
-/// A read-only file that can be read by chunks.
-pub struct ChunkedFileReader {
-    file: File,
-    size: u64,
-}
-
-impl ChunkedFileReader {
-    /// Creates a `ChunkedFileReader` to read from for the specified `path`.
-    pub fn new(file: File) -> Result<ChunkedFileReader> {
-        let size = file.metadata()?.len();
-        Ok(ChunkedFileReader { file, size })
-    }
-
-    pub fn len(&self) -> u64 {
-        self.size
-    }
-}
-
-impl ReadOnlyDataByChunk for ChunkedFileReader {
-    fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> Result<usize> {
-        debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
-        let (start, end) = chunk_index_to_range(self.size, chunk_index)?;
-        let size = (end - start) as usize;
-        self.file.read_at(&mut buf[..size], start)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use std::env::temp_dir;
-
-    #[test]
-    fn test_read_4k_file() -> Result<()> {
-        let file_reader = ChunkedFileReader::new(File::open("testdata/input.4k")?)?;
-        let mut buf = [0u8; 4096];
-        let size = file_reader.read_chunk(0, &mut buf)?;
-        assert_eq!(size, buf.len());
-        Ok(())
-    }
-
-    #[test]
-    fn test_read_4k1_file() -> Result<()> {
-        let file_reader = ChunkedFileReader::new(File::open("testdata/input.4k1")?)?;
-        let mut buf = [0u8; 4096];
-        let size = file_reader.read_chunk(0, &mut buf)?;
-        assert_eq!(size, buf.len());
-        let size = file_reader.read_chunk(1, &mut buf)?;
-        assert_eq!(size, 1);
-        Ok(())
-    }
-
-    #[test]
-    fn test_read_4m_file() -> Result<()> {
-        let file_reader = ChunkedFileReader::new(File::open("testdata/input.4m")?)?;
-        for index in 0..file_reader.len() / 4096 {
-            let mut buf = [0u8; 4096];
-            let size = file_reader.read_chunk(index, &mut buf)?;
-            assert_eq!(size, buf.len());
-        }
-        Ok(())
-    }
-
-    #[test]
-    #[should_panic]
-    fn test_read_beyond_file_size() {
-        let file_reader = ChunkedFileReader::new(File::open("testdata/input.4k").unwrap()).unwrap();
-        let mut buf = [0u8; 4096];
-        let _ = file_reader.read_chunk(1u64, &mut buf); // should panic
-    }
-
-    #[test]
-    #[should_panic]
-    fn test_read_empty_file() {
-        let mut temp_file = temp_dir();
-        temp_file.push("authfs_test_empty_file");
-        let file_reader = ChunkedFileReader::new(File::create(temp_file).unwrap()).unwrap();
-        let mut buf = [0u8; 4096];
-        let _ = file_reader.read_chunk(0, &mut buf); // should panic
-    }
-}
diff --git a/authfs/src/remote_file.rs b/authfs/src/remote_file.rs
deleted file mode 100644
index 01e803c..0000000
--- a/authfs/src/remote_file.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use std::convert::TryFrom;
-use std::io;
-use std::io::Write;
-use std::sync::{Arc, Mutex};
-
-use crate::common::CHUNK_SIZE;
-use crate::reader::ReadOnlyDataByChunk;
-
-use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService;
-use authfs_aidl_interface::binder::Strong;
-
-type VirtFdService = Strong<dyn IVirtFdService::IVirtFdService>;
-
-pub mod server {
-    // TODO(victorhsieh): use remote binder.
-    pub fn get_local_service() -> super::VirtFdService {
-        let service_name = "authfs_fd_server";
-        authfs_aidl_interface::binder::get_interface(&service_name)
-            .expect("Cannot reach authfs_fd_server binder service")
-    }
-}
-
-pub struct RemoteChunkedFileReader {
-    // This needs to have Sync trait to be used in fuse::worker::start_message_loop.
-    service: Arc<Mutex<VirtFdService>>,
-    file_fd: i32,
-}
-
-impl RemoteChunkedFileReader {
-    pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
-        RemoteChunkedFileReader { service, file_fd }
-    }
-}
-
-impl ReadOnlyDataByChunk for RemoteChunkedFileReader {
-    fn read_chunk(&self, chunk_index: u64, mut buf: &mut [u8]) -> io::Result<usize> {
-        let offset = i64::try_from(chunk_index * CHUNK_SIZE)
-            .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
-
-        let service = Arc::clone(&self.service);
-        let chunk = service
-            .lock()
-            .unwrap()
-            .readFile(self.file_fd, offset, buf.len() as i32)
-            .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
-        buf.write(&chunk)
-    }
-}
-
-pub struct RemoteFsverityMerkleTreeReader {
-    // This needs to be a Sync to be used in fuse::worker::start_message_loop.
-    // TODO(victorhsieh): change to Strong<> once binder supports it.
-    service: Arc<Mutex<VirtFdService>>,
-    file_fd: i32,
-}
-
-impl RemoteFsverityMerkleTreeReader {
-    pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
-        RemoteFsverityMerkleTreeReader { service, file_fd }
-    }
-}
-
-impl ReadOnlyDataByChunk for RemoteFsverityMerkleTreeReader {
-    fn read_chunk(&self, chunk_index: u64, mut buf: &mut [u8]) -> io::Result<usize> {
-        let offset = i64::try_from(chunk_index * CHUNK_SIZE)
-            .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
-
-        let service = Arc::clone(&self.service);
-        let chunk = service
-            .lock()
-            .unwrap()
-            .readFsverityMerkleTree(self.file_fd, offset, buf.len() as i32)
-            .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
-        buf.write(&chunk)
-    }
-}
diff --git a/authfs/tests/Android.bp b/authfs/tests/Android.bp
new file mode 100644
index 0000000..bacb890
--- /dev/null
+++ b/authfs/tests/Android.bp
@@ -0,0 +1,15 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+java_test_host {
+    name: "AuthFsHostTest",
+    srcs: ["java/**/*.java"],
+    libs: [
+        "tradefed",
+        "compatibility-tradefed",
+        "compatibility-host-util",
+    ],
+    test_suites: ["general-tests"],
+    data: [":authfs_test_files"],
+}
diff --git a/authfs/tests/AndroidTest.xml b/authfs/tests/AndroidTest.xml
new file mode 100644
index 0000000..485e392
--- /dev/null
+++ b/authfs/tests/AndroidTest.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<configuration description="Config for authfs tests">
+    <!-- Since Android does not support user namespace, we need root to access /dev/fuse and also
+         to set up the mount. -->
+    <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer"/>
+
+    <!-- Basic checks that the device has all the prerequisites. -->
+    <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
+        <option name="throw-if-cmd-fail" value="true" />
+        <!-- Make sure kernel has FUSE enabled. -->
+        <option name="run-command" value="ls /dev/fuse" />
+        <!-- Make sure necessary executables are installed. -->
+        <option name="run-command" value="ls /apex/com.android.virt/bin/fd_server" />
+        <option name="run-command" value="ls /apex/com.android.virt/bin/authfs" />
+        <!-- Prepare test directory. -->
+        <option name="run-command" value="mkdir -p /data/local/tmp/authfs/mnt" />
+        <option name="teardown-command" value="rm -rf /data/local/tmp/authfs" />
+    </target_preparer>
+
+    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+        <option name="cleanup" value="true" />
+        <option name="abort-on-push-failure" value="true" />
+        <option name="push-file" key="cert.der" value="/data/local/tmp/authfs/cert.der" />
+        <option name="push-file" key="input.4m" value="/data/local/tmp/authfs/input.4m" />
+        <option name="push-file" key="input.4k1" value="/data/local/tmp/authfs/input.4k1" />
+        <option name="push-file" key="input.4k" value="/data/local/tmp/authfs/input.4k" />
+        <option name="push-file" key="input.4m.fsv_sig"
+            value="/data/local/tmp/authfs/input.4m.fsv_sig" />
+        <option name="push-file" key="input.4k1.fsv_sig"
+            value="/data/local/tmp/authfs/input.4k1.fsv_sig" />
+        <option name="push-file" key="input.4k.fsv_sig"
+            value="/data/local/tmp/authfs/input.4k.fsv_sig" />
+        <option name="push-file" key="input.4m.merkle_dump"
+            value="/data/local/tmp/authfs/input.4m.merkle_dump" />
+        <option name="push-file" key="input.4m.merkle_dump.bad"
+            value="/data/local/tmp/authfs/input.4m.merkle_dump.bad" />
+        <option name="push-file" key="input.4k1.merkle_dump"
+            value="/data/local/tmp/authfs/input.4k1.merkle_dump" />
+        <option name="push-file" key="input.4k.merkle_dump"
+            value="/data/local/tmp/authfs/input.4k.merkle_dump" />
+    </target_preparer>
+
+    <test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
+        <option name="jar" value="AuthFsHostTest.jar" />
+    </test>
+</configuration>
diff --git a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
new file mode 100644
index 0000000..3837dd3
--- /dev/null
+++ b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.virt.fs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import android.platform.test.annotations.RootPermissionTest;
+
+import com.android.tradefed.device.DeviceNotAvailableException;
+import com.android.tradefed.device.ITestDevice;
+import com.android.tradefed.log.LogUtil.CLog;
+import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
+import com.android.tradefed.testtype.junit4.BaseHostJUnit4Test;
+import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.CommandStatus;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+@RootPermissionTest
+@RunWith(DeviceJUnit4ClassRunner.class)
+public final class AuthFsHostTest extends BaseHostJUnit4Test {
+
+    /** Test directory where data are located */
+    private static final String TEST_DIR = "/data/local/tmp/authfs";
+
+    /** Mount point of authfs during the test */
+    private static final String MOUNT_DIR = "/data/local/tmp/authfs/mnt";
+
+    private static final String FD_SERVER_BIN = "/apex/com.android.virt/bin/fd_server";
+    private static final String AUTHFS_BIN = "/apex/com.android.virt/bin/authfs";
+
+    /** Plenty of time for authfs to get ready */
+    private static final int TIME_BUDGET_AUTHFS_SETUP = 1500;  // ms
+
+    private ITestDevice mDevice;
+    private ExecutorService mThreadPool;
+
+    @Before
+    public void setUp() {
+        mDevice = getDevice();
+        mThreadPool = Executors.newCachedThreadPool();
+    }
+
+    @After
+    public void tearDown() throws DeviceNotAvailableException {
+        mDevice.executeShellV2Command("killall authfs fd_server");
+        mDevice.executeShellV2Command("umount " + MOUNT_DIR);
+        mDevice.executeShellV2Command("rm -f " + TEST_DIR);
+    }
+
+    @Test
+    public void testReadWithFsverityVerification_LocalFile()
+            throws DeviceNotAvailableException, InterruptedException {
+        // Setup
+        runAuthFsInBackground(
+                "--local-ro-file-unverified 3:input.4m"
+                + " --local-ro-file 4:input.4m:input.4m.merkle_dump:input.4m.fsv_sig:cert.der"
+                + " --local-ro-file 5:input.4k1:input.4k1.merkle_dump:input.4k1.fsv_sig:cert.der"
+                + " --local-ro-file 6:input.4k:input.4k.merkle_dump:input.4k.fsv_sig:cert.der"
+        );
+        Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+        // Action
+        String actualHashUnverified4m = computeFileHashInGuest(MOUNT_DIR + "/3");
+        String actualHash4m = computeFileHashInGuest(MOUNT_DIR + "/4");
+        String actualHash4k1 = computeFileHashInGuest(MOUNT_DIR + "/5");
+        String actualHash4k = computeFileHashInGuest(MOUNT_DIR + "/6");
+
+        // Verify
+        String expectedHash4m = computeFileHash(TEST_DIR + "/input.4m");
+        String expectedHash4k1 = computeFileHash(TEST_DIR + "/input.4k1");
+        String expectedHash4k = computeFileHash(TEST_DIR + "/input.4k");
+
+        assertEquals("Inconsistent hash from /authfs/3: ", expectedHash4m, actualHashUnverified4m);
+        assertEquals("Inconsistent hash from /authfs/4: ", expectedHash4m, actualHash4m);
+        assertEquals("Inconsistent hash from /authfs/5: ", expectedHash4k1, actualHash4k1);
+        assertEquals("Inconsistent hash from /authfs/6: ", expectedHash4k, actualHash4k);
+    }
+
+    @Test
+    public void testReadWithFsverityVerification_RemoteFile()
+            throws DeviceNotAvailableException, InterruptedException {
+        // Setup
+        runFdServerInBackground(
+                "3<input.4m 4<input.4m.merkle_dump 5<input.4m.fsv_sig 6<input.4m",
+                "--ro-fds 3:4:5 --ro-fds 6"
+        );
+        runAuthFsInBackground(
+                "--remote-ro-file-unverified 10:6:4194304 --remote-ro-file 11:3:4194304:cert.der"
+        );
+        Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+        // Action
+        String actualHashUnverified4m = computeFileHashInGuest(MOUNT_DIR + "/10");
+        String actualHash4m = computeFileHashInGuest(MOUNT_DIR + "/11");
+
+        // Verify
+        String expectedHash4m = computeFileHash(TEST_DIR + "/input.4m");
+
+        assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4m, actualHashUnverified4m);
+        assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4m, actualHash4m);
+    }
+
+    // This test is kept separate from the one above because exec in the shell does not allow
+    // opening too many files.
+    @Test
+    public void testReadWithFsverityVerification_RemoteSmallerFile()
+            throws DeviceNotAvailableException, InterruptedException {
+        // Setup
+        runFdServerInBackground(
+                "3<input.4k 4<input.4k.merkle_dump 5<input.4k.fsv_sig"
+                + " 6<input.4k1 7<input.4k1.merkle_dump 8<input.4k1.fsv_sig",
+                "--ro-fds 3:4:5 --ro-fds 6:7:8"
+        );
+        runAuthFsInBackground(
+                "--remote-ro-file 10:3:4096:cert.der --remote-ro-file 11:6:4097:cert.der"
+        );
+        Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+        // Action
+        String actualHash4k = computeFileHashInGuest(MOUNT_DIR + "/10");
+        String actualHash4k1 = computeFileHashInGuest(MOUNT_DIR + "/11");
+
+        // Verify
+        String expectedHash4k = computeFileHash(TEST_DIR + "/input.4k");
+        String expectedHash4k1 = computeFileHash(TEST_DIR + "/input.4k1");
+
+        assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4k, actualHash4k);
+        assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4k1, actualHash4k1);
+    }
+
+    @Test
+    public void testReadWithFsverityVerification_TamperedMerkleTree()
+            throws DeviceNotAvailableException, InterruptedException {
+        // Setup
+        runFdServerInBackground(
+                "3<input.4m 4<input.4m.merkle_dump.bad 5<input.4m.fsv_sig",
+                "--ro-fds 3:4:5"
+        );
+        runAuthFsInBackground("--remote-ro-file 10:3:4096:cert.der");
+        Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+        // Verify
+        assertFalse(copyFileInGuest(MOUNT_DIR + "/10", "/dev/null"));
+    }
+
+    @Test
+    public void testWriteThroughCorrectly()
+            throws DeviceNotAvailableException, InterruptedException {
+        // Setup
+        runFdServerInBackground("3<>output", "--rw-fds 3");
+        runAuthFsInBackground("--remote-new-rw-file 20:3");
+        Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+        // Action
+        String srcPath = "/system/bin/linker";
+        String destPath = MOUNT_DIR + "/20";
+        String backendPath = TEST_DIR + "/output";
+        assertTrue(copyFileInGuest(srcPath, destPath));
+
+        // Verify
+        String expectedHash = computeFileHashInGuest(srcPath);
+        String actualHash = computeFileHash(backendPath);
+        assertEquals("Inconsistent file hash on the backend storage", expectedHash, actualHash);
+
+        String actualHashFromAuthFs = computeFileHashInGuest(destPath);
+        assertEquals("Inconsistent file hash when reads from authfs", expectedHash,
+                actualHashFromAuthFs);
+    }
+
+    @Test
+    public void testWriteFailedIfDetectsTampering()
+            throws DeviceNotAvailableException, InterruptedException {
+        // Setup
+        runFdServerInBackground("3<>/output", "--rw-fds 3");
+        runAuthFsInBackground("--remote-new-rw-file 20:3");
+        Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+        String srcPath = "/system/bin/linker";
+        String destPath = MOUNT_DIR + "/20";
+        String backendPath = TEST_DIR + "/output";
+        assertTrue(copyFileInGuest(srcPath, destPath));
+
+        // Action
+        // Tamper with the first two 4K blocks of the backing file.
+        expectRemoteCommandToSucceed("dd if=/dev/zero of=" + backendPath + " bs=1 count=8192");
+
+        // Verify
+        // Write to a block partially requires a read back to calculate the new hash. It should fail
+        // when the content is inconsistent to the known hash. Use direct I/O to avoid simply
+        // writing to the filesystem cache.
+        expectRemoteCommandToFail("dd if=/dev/zero of=" + destPath + " bs=1 count=1024 direct");
+
+        // A full 4K write does not require a read back, so the write can succeed even if the
+        // backing block has already been tampered with.
+        expectRemoteCommandToSucceed(
+                "dd if=/dev/zero of=" + destPath + " bs=1 count=4096 skip=4096");
+
+        // Otherwise, a partial write with a correct backing file should still succeed.
+        expectRemoteCommandToSucceed(
+                "dd if=/dev/zero of=" + destPath + " bs=1 count=1024 skip=8192");
+    }
+
+    // TODO(b/178874539): This does not really run in the guest VM.  Send the shell command to the
+    // guest VM when authfs works across VM boundary.
+    private String computeFileHashInGuest(String path) throws DeviceNotAvailableException {
+        return computeFileHash(path);
+    }
+
+    private boolean copyFileInGuest(String src, String dest) throws DeviceNotAvailableException {
+        // TODO(b/182576497): cp returns an error because close(2) returns ENOSYS in the current
+        // authfs implementation. We should probably fix that, since programs expect close(2) to return 0.
+        String cmd = "cat " + src + " > " + dest;
+        CommandResult result = mDevice.executeShellV2Command(cmd);
+        return result.getStatus() == CommandStatus.SUCCESS;
+    }
+
+    private String computeFileHash(String path) throws DeviceNotAvailableException {
+        String result = expectRemoteCommandToSucceed("sha256sum " + path);
+        String[] tokens = result.split("\\s");
+        if (tokens.length > 0) {
+            return tokens[0];
+        } else {
+            CLog.e("Unrecognized output by sha256sum: " + result);
+            return "";
+        }
+    }
+
+    private void runAuthFsInBackground(String flags) throws DeviceNotAvailableException {
+        String cmd = "cd " + TEST_DIR + " && " + AUTHFS_BIN + " " + MOUNT_DIR + " " + flags;
+
+        mThreadPool.submit(() -> {
+            try {
+                CLog.i("Starting authfs");
+                expectRemoteCommandToSucceed(cmd);
+            } catch (DeviceNotAvailableException e) {
+                CLog.e("Error running authfs", e);
+                throw new RuntimeException(e);
+            }
+        });
+    }
+
+    private void runFdServerInBackground(String execParamsForOpeningFds, String flags)
+            throws DeviceNotAvailableException {
+        String cmd = "cd " + TEST_DIR + " && exec " + execParamsForOpeningFds + " " + FD_SERVER_BIN
+                + " " + flags;
+        mThreadPool.submit(() -> {
+            try {
+                CLog.i("Starting fd_server");
+                expectRemoteCommandToSucceed(cmd);
+            } catch (DeviceNotAvailableException e) {
+                CLog.e("Error running fd_server", e);
+                throw new RuntimeException(e);
+            }
+        });
+    }
+
+    private String expectRemoteCommandToSucceed(String cmd) throws DeviceNotAvailableException {
+        CommandResult result = mDevice.executeShellV2Command(cmd);
+        assertEquals("`" + cmd + "` failed: " + result.getStderr(), CommandStatus.SUCCESS,
+                result.getStatus());
+        CLog.d("Stdout: " + result.getStdout());
+        return result.getStdout().trim();
+    }
+
+    private void expectRemoteCommandToFail(String cmd) throws DeviceNotAvailableException {
+        CommandResult result = mDevice.executeShellV2Command(cmd);
+        assertNotEquals("Unexpected success from `" + cmd + "`: " + result.getStdout(),
+                result.getStatus(), CommandStatus.SUCCESS);
+    }
+}
diff --git a/authfs/tools/device-test.sh b/authfs/tools/device-test.sh
deleted file mode 100755
index 5cf5f10..0000000
--- a/authfs/tools/device-test.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/system/bin/sh
-
-# TODO(victorhsieh): Create a standard Android test for continuous integration.
-#
-# How to run this test:
-#
-# Setup:
-# $ adb push testdata/input.4m* /data/local/tmp
-#
-# Shell 1:
-# $ adb shell 'cd /data/local/tmp && exec 9</system/bin/sh 8<input.4m 7<input.4m.merkle_dump 6<input.4m.fsv_sig 5<input.4m 4<input.4m.merkle_dump.bad 3<input.4m.fsv_sig fd_server --ro-fds 9 --ro-fds 8:7:6 --ro-fds 5:4:3'
-#
-# Shell 2:
-# $ adb push tools/device-test.sh /data/local/tmp/ && adb shell /data/local/tmp/device-test.sh
-
-# Run with -u to enter new namespace.
-if [[ $1 == "-u" ]]; then
-  exec unshare -mUr $0
-fi
-
-cd /data/local/tmp
-
-MOUNTPOINT=/data/local/tmp/authfs
-trap "umount ${MOUNTPOINT}" EXIT;
-mkdir -p ${MOUNTPOINT}
-
-size=$(du -b /system/bin/sh |awk '{print $1}')
-size2=$(du -b input.4m |awk '{print $1}')
-
-echo "Mounting authfs in background ..."
-
-# TODO(170494765): Replace /dev/null (currently not used) with a valid
-# certificate.
-authfs \
-  ${MOUNTPOINT} \
-  --local-verified-file 2:input.4m:input.4m.merkle_dump:input.4m.fsv_sig:/dev/null \
-  --local-verified-file 3:input.4k1:input.4k1.merkle_dump:input.4k1.fsv_sig:/dev/null \
-  --local-verified-file 4:input.4k:input.4k.merkle_dump:input.4k.fsv_sig:/dev/null \
-  --local-unverified-file 5:/system/bin/sh \
-  --remote-unverified-file 6:9:${size} \
-  --remote-verified-file 7:8:${size2}:/dev/null \
-  --remote-verified-file 8:5:${size2}:/dev/null \
-  &
-sleep 0.1
-
-echo "Accessing files in authfs ..."
-md5sum ${MOUNTPOINT}/2 input.4m
-echo
-md5sum ${MOUNTPOINT}/3 input.4k1
-echo
-md5sum ${MOUNTPOINT}/4 input.4k
-echo
-md5sum ${MOUNTPOINT}/5 /system/bin/sh
-md5sum ${MOUNTPOINT}/6
-echo
-md5sum ${MOUNTPOINT}/7 input.4m
-echo
-echo Checking error cases...
-cat /data/local/tmp/authfs/8 2>&1 |grep -q ": I/O error" || echo "Failed to catch the problem"
-echo "Done!"
diff --git a/authfs/tools/test.sh b/authfs/tools/test.sh
deleted file mode 100755
index 9ed3a99..0000000
--- a/authfs/tools/test.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-# Run with -u to enter new namespace.
-if [[ $1 == "-u" ]]; then
-  exec unshare -m -U -r $0
-fi
-
-trap "umount /tmp/mnt" EXIT;
-mkdir -p /tmp/mnt
-
-echo "Mounting authfs in background ..."
-strace -o authfs.strace target/debug/authfs \
-  /tmp/mnt \
-  --local-verified-file 2:testdata/input.4m:testdata/input.4m.merkle_dump:testdata/input.4m.fsv_sig \
-  --local-verified-file 3:testdata/input.4k1:testdata/input.4k1.merkle_dump:testdata/input.4k1.fsv_sig \
-  --local-verified-file 4:testdata/input.4k:testdata/input.4k.merkle_dump:testdata/input.4k.fsv_sig \
-  --local-unverified-file 5:testdata/input.4k \
-  &
-sleep 0.1
-
-echo "Accessing files in authfs ..."
-echo
-md5sum /tmp/mnt/2 testdata/input.4m
-echo
-md5sum /tmp/mnt/3 testdata/input.4k1
-echo
-md5sum /tmp/mnt/4 /tmp/mnt/5 testdata/input.4k
-echo
-dd if=/tmp/mnt/2 bs=1000 skip=100 count=50 status=none |md5sum
-dd if=testdata/input.4m bs=1000 skip=100 count=50 status=none |md5sum
-echo
-tac /tmp/mnt/4 |md5sum
-tac /tmp/mnt/5 |md5sum
-tac testdata/input.4k |md5sum
-echo
-test -f /tmp/mnt/2 || echo 'FAIL: an expected file is missing'
-test -f /tmp/mnt/0 && echo 'FAIL: unexpected file presents'
-test -f /tmp/mnt/1 && echo 'FAIL: unexpected file presents, 1 is root dir'
-test -f /tmp/mnt/100 && echo 'FAIL: unexpected file presents'
-test -f /tmp/mnt/foo && echo 'FAIL: unexpected file presents'
-test -f /tmp/mnt/dir/3 && echo 'FAIL: unexpected file presents'
-echo "Done!"
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 6b9692b..81eb48c 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -37,6 +37,7 @@
     use_avb: true,
     avb_private_key: "microdroid.pem",
     avb_algorithm: "SHA256_RSA4096",
+    partition_name: "system",
     deps: [
         "init_second_stage",
         "microdroid_init_rc",
@@ -56,15 +57,16 @@
         "tombstoned",
         "cgroups.json",
 
-        // These two files are temporary and only for test.
+        // These files are temporary and only for test.
         // TODO(b/178993690): migrate cil files to Soong
-        "microdroid_plat_sepolicy.cil",
         "microdroid_plat_mapping_file",
+        "microdroid_plat_sepolicy_and_mapping.sha256",
     ] + microdroid_shell_and_utilities,
     multilib: {
         common: {
             deps: [
                 "com.android.runtime",
+                "plat_sepolicy.cil",
                 "plat_file_contexts",
                 "plat_hwservice_contexts",
                 "plat_property_contexts",
@@ -94,6 +96,8 @@
         "microdroid_plat_sepolicy_vers.txt",
         "microdroid_vendor_sepolicy.cil",
         "microdroid_plat_pub_versioned.cil",
+        "microdroid_precompiled_sepolicy",
+        "microdroid_precompiled_sepolicy.plat_sepolicy_and_mapping.sha256",
     ],
     avb_private_key: "microdroid.pem",
     avb_algorithm: "SHA256_RSA4096",
@@ -135,15 +139,15 @@
 bootimg {
     name: "microdroid_boot-5.10",
     ramdisk_module: "microdroid_ramdisk-5.10",
-    enabled: false,
+    // We don't have a kernel for arm or x86, but Soong demands one when it builds for an
+    // arm or x86 target. Satisfy that by providing an empty file as the kernel.
+    kernel_prebuilt: "empty_kernel",
     arch: {
         arm64: {
             kernel_prebuilt: ":kernel_prebuilts-5.10-arm64",
-            enabled: true,
         },
         x86_64: {
             kernel_prebuilt: ":kernel_prebuilts-5.10-x86_64",
-            enabled: true,
         },
     },
     // TODO(jiyong): change the name to init, cause it's confusing
@@ -157,6 +161,8 @@
     dtb_prebuilt: "dummy_dtb.img",
     header_version: "4",
     partition_name: "boot",
+    use_avb: true,
+    avb_private_key: "microdroid.pem",
 }
 
 android_filesystem {
@@ -184,15 +190,8 @@
     header_version: "4",
     vendor_boot: true,
     partition_name: "vendor_boot",
-    enabled: false,
-    arch: {
-        arm64: {
-            enabled: true,
-        },
-        x86_64: {
-            enabled: true,
-        },
-    },
+    use_avb: true,
+    avb_private_key: "microdroid.pem",
 }
 
 android_filesystem {
@@ -242,3 +241,83 @@
     out: ["output.img"],
     cmd: "$(location mkenvimage_host) -s 4096 -o $(out) $(in)",
 }
+
+genrule {
+    name: "microdroid_plat_sepolicy_and_mapping.sha256_gen",
+    srcs: [
+        ":plat_sepolicy.cil",
+        ":microdroid_plat_mapping_file",
+    ],
+    out: ["plat_sepolicy_and_mapping.sha256"],
+    cmd: "cat $(in) | sha256sum | cut -d' ' -f1 > $(out)",
+}
+
+// sepolicy sha256 for system
+prebuilt_etc {
+    name: "microdroid_plat_sepolicy_and_mapping.sha256",
+    src: ":microdroid_plat_sepolicy_and_mapping.sha256_gen",
+    filename: "plat_sepolicy_and_mapping.sha256",
+    relative_install_path: "selinux",
+    installable: false,
+}
+
+// sepolicy sha256 for vendor (filename differs)
+prebuilt_etc {
+    name: "microdroid_precompiled_sepolicy.plat_sepolicy_and_mapping.sha256",
+    src: ":microdroid_plat_sepolicy_and_mapping.sha256_gen",
+    filename: "precompiled_sepolicy.plat_sepolicy_and_mapping.sha256",
+    relative_install_path: "selinux",
+    installable: false,
+}
+
+genrule {
+    name: "microdroid_precompiled_sepolicy_gen",
+    tools: ["secilc"],
+    srcs: [
+        ":plat_sepolicy.cil",
+        ":microdroid_plat_mapping_file",
+        ":microdroid_plat_pub_versioned.cil",
+        ":microdroid_vendor_sepolicy.cil",
+    ],
+    out: ["precompiled_sepolicy"],
+    cmd: "$(location secilc) -m -M true -G -c 30 $(in) -o $(out) -f /dev/null",
+}
+
+prebuilt_etc {
+    name: "microdroid_precompiled_sepolicy",
+    src: ":microdroid_precompiled_sepolicy_gen",
+    filename: "precompiled_sepolicy",
+    relative_install_path: "selinux",
+    installable: false,
+}
+
+vbmeta {
+    name: "microdroid_vbmeta",
+    partition_name: "vbmeta",
+    private_key: "microdroid.pem",
+    partitions: [
+        "microdroid_vendor",
+        "microdroid_vendor_boot-5.10",
+    ],
+    chained_partitions: [
+        {
+            name: "vbmeta_system",
+            rollback_index_location: 1,
+            private_key: "microdroid.pem",
+        },
+        {
+            name: "boot",
+            rollback_index_location: 2,
+            private_key: "microdroid.pem",
+        },
+    ],
+}
+
+vbmeta {
+    name: "microdroid_vbmeta_system",
+    partition_name: "vbmeta_system",
+    private_key: "microdroid.pem",
+    partitions: [
+        "microdroid",
+    ],
+}
diff --git a/microdroid/README.md b/microdroid/README.md
index fe0843f..d737e22 100644
--- a/microdroid/README.md
+++ b/microdroid/README.md
@@ -22,6 +22,8 @@
 $ m microdroid_boot-5.10
 $ m microdroid_vendor_boot-5.10
 $ m microdroid_uboot_env
+$ m microdroid_vbmeta
+$ m microdroid_vbmeta_system
 ```
 
 ## Installing
@@ -34,6 +36,8 @@
 $ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_super.img /data/local/tmp/super.img
 $ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_boot-5.10.img /data/local/tmp/boot.img
 $ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_vendor_boot-5.10.img /data/local/tmp/vendor_boot.img
+$ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_vbmeta.img /data/local/tmp/vbmeta.img
+$ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_vbmeta_system.img /data/local/tmp/vbmeta_system.img
 $ adb shell mkdir /data/local/tmp/cuttlefish_runtime.1/
 $ adb push $ANDROID_PRODUCT_OUT/system/etc/uboot_env.img /data/local/tmp/cuttlefish_runtime.1/
 $ adb shell mkdir -p /data/local/tmp/etc/cvd_config
@@ -41,8 +45,6 @@
 $ dd if=/dev/zero of=empty.img bs=4k count=600
 $ mkfs.ext4 -F empty.img
 $ adb push empty.img /data/local/tmp/userdata.img
-$ adb push empty.img /data/local/tmp/vbmeta.img
-$ adb push empty.img /data/local/tmp/vbmeta_system.img
 $ adb push empty.img /data/local/tmp/cache.img
 ```
 
@@ -52,8 +54,8 @@
 future, this shall be done via [`virtmanager`](../virtmanager/).
 
 ```
-$ adb shell 'HOME=/data/local/tmp; PATH=$PATH:/apex/com.android.virt/bin; assemble_cvd < /dev/null'
-$ adb shell 'cd /data/local/tmp; /apex/com.android.virt/bin/crosvm run --cid=5 --disable-sandbox --bios=bootloader --serial=type=stdout --disk=cuttlefish_runtime/composite.img'
+$ adb shell 'HOME=/data/local/tmp; PATH=$PATH:/apex/com.android.virt/bin; assemble_cvd -protected_vm < /dev/null'
+$ adb shell 'cd /data/local/tmp; /apex/com.android.virt/bin/crosvm run --cid=5 --disable-sandbox --bios=bootloader --serial=type=stdout --disk=cuttlefish_runtime/os_composite.img'
 ```
 
 The CID in `--cid` parameter can be anything greater than 2 (`VMADDR_CID_HOST`).
diff --git a/microdroid/empty_kernel b/microdroid/empty_kernel
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/microdroid/empty_kernel
diff --git a/microdroid/init.rc b/microdroid/init.rc
index 2c32b28..e70fb77 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -113,40 +113,12 @@
     seclabel u:r:shell:s0
     setenv HOSTNAME console
 
-service servicemanager /system/bin/servicemanager
-    class core animation
-    user system
-    group system readproc
-    critical
-    # TODO(b/179342589): uncomment after turning off APEX session on microdroid
-    # onrestart restart apexd
-    onrestart class_restart main
-    shutdown critical
-
-service logd /system/bin/logd
-    socket logd stream 0666 logd logd
-    socket logdr seqpacket 0666 logd logd
-    socket logdw dgram+passcred 0222 logd logd
-    file /proc/kmsg r
-    file /dev/kmsg w
-    user logd
-    group logd system package_info readproc
-    capabilities SYSLOG AUDIT_CONTROL
-    priority 10
-
-service logd-reinit /system/bin/logd --reinit
-    oneshot
+# TODO(b/181093750): remove these after adding apex support
+service adbd /system/bin/adbd --root_seclabel=u:r:su:s0
+    class core
+    socket adbd seqpacket 660 system system
     disabled
-    user logd
-    group logd
-
-# Limit SELinux denial generation to 5/second
-service logd-auditctl /system/bin/auditctl -r 5
-    oneshot
-    disabled
-    user logd
-    group logd
-    capabilities AUDIT_CONTROL
+    seclabel u:r:adbd:s0
 
 on fs
     write /dev/event-log-tags "# content owned by logd
@@ -157,26 +129,3 @@
 on property:sys.boot_completed=1
     start logd-auditctl
 
-service adbd /system/bin/adbd --root_seclabel=u:r:su:s0
-    class core
-    socket adbd seqpacket 660 system system
-    disabled
-    seclabel u:r:adbd:s0
-
-#TODO(b/179342589): uncomment after turning off APEX session on microdroid
-#service apexd /system/bin/apexd
-#    interface aidl apexservice
-#    class core
-#    user root
-#    group system
-#    oneshot
-#    disabled # does not start with the core class
-#    reboot_on_failure reboot,apexd-failed
-
-service apexd-bootstrap /system/bin/apexd --bootstrap
-    user root
-    group system
-    oneshot
-    disabled
-    reboot_on_failure reboot,bootloader,bootstrap-apexd-failed
-
diff --git a/tests/hostside/Android.bp b/tests/hostside/Android.bp
new file mode 100644
index 0000000..c030e8d
--- /dev/null
+++ b/tests/hostside/Android.bp
@@ -0,0 +1,29 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+java_test_host {
+    name: "MicrodroidHostTestCases",
+    srcs: ["java/**/*.java"],
+    test_suites: ["device-tests"],
+    libs: ["tradefed"],
+    data: [
+        ":microdroid_super",
+        ":microdroid_boot-5.10",
+        ":microdroid_vendor_boot-5.10",
+        ":microdroid_uboot_env",
+        ":cuttlefish_crosvm_bootloader",
+        ":MicrodroidHostTestCase_EmptyImage",
+        ":microdroid_vbmeta",
+        ":microdroid_vbmeta_system",
+    ],
+}
+
+genrule {
+    name: "MicrodroidHostTestCase_EmptyImage",
+    tools: ["mke2fs"],
+    out: ["empty.img"],
+    cmd: "dd if=/dev/zero of=$(out) bs=4k count=600 &&" +
+        "$(location mke2fs) -t ext4 $(out)",
+    visibility: ["//visibility:private"],
+}
diff --git a/tests/hostside/AndroidTest.xml b/tests/hostside/AndroidTest.xml
new file mode 100644
index 0000000..da24b71
--- /dev/null
+++ b/tests/hostside/AndroidTest.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Tests for microdroid">
+    <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+        <option name="force-root" value="true" />
+    </target_preparer>
+
+    <test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
+        <option name="jar" value="MicrodroidHostTestCases.jar" />
+    </test>
+</configuration>
diff --git a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
new file mode 100644
index 0000000..c5c86a6
--- /dev/null
+++ b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.virt.test;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.assertThat;
+
+import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
+import com.android.tradefed.testtype.junit4.BaseHostJUnit4Test;
+import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.RunUtil;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.io.File;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+@RunWith(DeviceJUnit4ClassRunner.class)
+public class MicrodroidTestCase extends BaseHostJUnit4Test {
+    private static final String TEST_ROOT = "/data/local/tmp/virt/";
+    private static final String VIRT_APEX = "/apex/com.android.virt/";
+    private static final int TEST_VM_CID = 5;
+    private static final int TEST_VM_ADB_PORT = 8000;
+    private static final String MICRODROID_SERIAL = "localhost:" + TEST_VM_ADB_PORT;
+    private static final long MICRODROID_BOOT_TIMEOUT_MILLIS = 15000;
+
+    private void pushFile(String localName, String remoteName) {
+        try {
+            File localFile = getTestInformation().getDependencyFile(localName, false);
+            Path remotePath = Paths.get(TEST_ROOT, remoteName);
+            getDevice().executeShellCommand("mkdir -p " + remotePath.getParent());
+            getDevice().pushFile(localFile, remotePath.toString());
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private String executeCommand(String cmd) {
+        final long defaultCommandTimeoutMillis = 1000; // 1 sec
+        return executeCommand(defaultCommandTimeoutMillis, cmd);
+    }
+
+    private String executeCommand(long timeout, String cmd) {
+        CommandResult result = RunUtil.getDefault().runTimedCmd(timeout, cmd.split(" "));
+        return result.getStdout().trim(); // remove the trailing whitespace including newline
+    }
+
+    @Test
+    public void testMicrodroidBoots() throws Exception {
+        // Prepare input files
+        pushFile("u-boot.bin", "bootloader");
+        pushFile("microdroid_super.img", "super.img");
+        pushFile("microdroid_boot-5.10.img", "boot.img");
+        pushFile("microdroid_vendor_boot-5.10.img", "vendor_boot.img");
+        pushFile("uboot_env.img", "cuttlefish_runtime.1/uboot_env.img");
+        pushFile("empty.img", "userdata.img");
+        pushFile("microdroid_vbmeta.img", "vbmeta.img");
+        pushFile("microdroid_vbmeta_system.img", "vbmeta_system.img");
+        pushFile("empty.img", "cache.img");
+        getDevice().executeShellCommand("mkdir -p " + TEST_ROOT + "etc/cvd_config");
+        getDevice().pushString("{}", TEST_ROOT + "etc/cvd_config/cvd_config_phone.json");
+
+        // Run assemble_cvd to create os_composite.img
+        getDevice().executeShellCommand("HOME=" + TEST_ROOT + "; "
+                + "PATH=$PATH:" + VIRT_APEX + "bin; "
+                + VIRT_APEX + "bin/assemble_cvd -protected_vm < /dev/null");
+
+        // Make sure that os_composite.img is created
+        final String compositeImg = TEST_ROOT + "cuttlefish_runtime/os_composite.img";
+        CommandResult result = getDevice().executeShellV2Command("du -b " + compositeImg);
+        assertThat(result.getExitCode(), is(0));
+        assertThat(result.getStdout(), is(not("")));
+
+        // Start microdroid using crosvm
+        ExecutorService executor = Executors.newFixedThreadPool(1);
+        executor.execute(() -> {
+            try {
+                getDevice().executeShellV2Command("cd " + TEST_ROOT + "; "
+                        + VIRT_APEX + "bin/crosvm run "
+                        + "--cid=" + TEST_VM_CID + " "
+                        + "--disable-sandbox "
+                        + "--bios=bootloader "
+                        + "--serial=type=syslog "
+                        + "--disk=cuttlefish_runtime/os_composite.img");
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        });
+        // ... and wait for microdroid to boot.
+        // TODO(jiyong): don't wait this long; we can wait less by monitoring the log from microdroid.
+        Thread.sleep(MICRODROID_BOOT_TIMEOUT_MILLIS);
+
+        // Connect to microdroid and read a system property from there
+        executeCommand("adb forward tcp:" + TEST_VM_ADB_PORT + " vsock:" + TEST_VM_CID + ":5555");
+        executeCommand("adb connect " + MICRODROID_SERIAL);
+        String prop = executeCommand("adb -s " + MICRODROID_SERIAL + " shell getprop ro.hardware");
+        assertThat(prop, is("microdroid"));
+
+        // Shutdown microdroid
+        executeCommand("adb -s localhost:" + TEST_VM_ADB_PORT + " shell reboot");
+    }
+
+    @Before
+    public void setUp() throws Exception {
+        // delete the test root
+        getDevice().executeShellCommand("rm -rf " + TEST_ROOT);
+
+        // disconnect from microdroid
+        executeCommand("adb disconnect " + MICRODROID_SERIAL);
+    }
+
+    @After
+    public void shutdown() throws Exception {
+        // disconnect from microdroid
+        executeCommand("adb disconnect " + MICRODROID_SERIAL);
+
+        // kill stale crosvm processes
+        getDevice().executeShellV2Command("killall crosvm");
+    }
+}
diff --git a/tests/vsock_test.cc b/tests/vsock_test.cc
index 74e984f..57a03ca 100644
--- a/tests/vsock_test.cc
+++ b/tests/vsock_test.cc
@@ -21,6 +21,7 @@
 #include <linux/vm_sockets.h>
 
 #include <iostream>
+#include <optional>
 
 #include "android-base/file.h"
 #include "android-base/logging.h"
@@ -57,7 +58,7 @@
     ASSERT_EQ(ret, 0) << strerror(errno);
 
     sp<IVirtualMachine> vm;
-    status = mVirtManager->startVm(String16(kVmConfigPath), &vm);
+    status = mVirtManager->startVm(String16(kVmConfigPath), std::nullopt, &vm);
     ASSERT_TRUE(status.isOk()) << "Error starting VM: " << status;
 
     int32_t cid;
diff --git a/virtmanager/Android.bp b/virtmanager/Android.bp
index 5ff5db4..1b2aec1 100644
--- a/virtmanager/Android.bp
+++ b/virtmanager/Android.bp
@@ -9,7 +9,8 @@
     edition: "2018",
     rustlibs: [
         "android.system.virtmanager-rust",
-        "libenv_logger",
+        "libandroid_logger",
+        "libbinder_rs", // TODO(dbrazdil): remove once b/182890877 is fixed
         "liblog_rust",
         "libserde_json",
         "libserde",
diff --git a/virtmanager/aidl/android/system/virtmanager/IVirtManager.aidl b/virtmanager/aidl/android/system/virtmanager/IVirtManager.aidl
index ade8717..ab03c18 100644
--- a/virtmanager/aidl/android/system/virtmanager/IVirtManager.aidl
+++ b/virtmanager/aidl/android/system/virtmanager/IVirtManager.aidl
@@ -16,8 +16,31 @@
 package android.system.virtmanager;
 
 import android.system.virtmanager.IVirtualMachine;
+import android.system.virtmanager.VirtualMachineDebugInfo;
 
 interface IVirtManager {
-        /** Start the VM with the given config file, and return a handle to it. */
-        IVirtualMachine startVm(String configPath);
+    /**
+     * Start the VM with the given config file, and return a handle to it. If `logFd` is provided
+     * then console logs from the VM will be sent to it.
+     */
+    IVirtualMachine startVm(String configPath, in @nullable ParcelFileDescriptor logFd);
+
+    /**
+     * Get a list of all currently running VMs. This method is only intended for debug purposes,
+     * and as such is only permitted from the shell user.
+     */
+    VirtualMachineDebugInfo[] debugListVms();
+
+    /**
+     * Hold a strong reference to a VM in Virt Manager. This method is only intended for debug
+     * purposes, and as such is only permitted from the shell user.
+     */
+    void debugHoldVmRef(IVirtualMachine vm);
+
+    /**
+     * Drop the reference to a VM that is being held by Virt Manager. Returns the reference if the VM was
+     * found and null otherwise. This method is only intended for debug purposes, and as such is
+     * only permitted from the shell user.
+     */
+    @nullable IVirtualMachine debugDropVmRef(int cid);
 }
diff --git a/virtmanager/aidl/android/system/virtmanager/IVirtualMachine.aidl b/virtmanager/aidl/android/system/virtmanager/IVirtualMachine.aidl
index 5f408f8..0358bfd 100644
--- a/virtmanager/aidl/android/system/virtmanager/IVirtualMachine.aidl
+++ b/virtmanager/aidl/android/system/virtmanager/IVirtualMachine.aidl
@@ -16,6 +16,6 @@
 package android.system.virtmanager;
 
 interface IVirtualMachine {
-        /** Get the CID allocated to the VM. */
-        int getCid();
+    /** Get the CID allocated to the VM. */
+    int getCid();
 }
diff --git a/virtmanager/aidl/android/system/virtmanager/VirtualMachineDebugInfo.aidl b/virtmanager/aidl/android/system/virtmanager/VirtualMachineDebugInfo.aidl
new file mode 100644
index 0000000..d877a56
--- /dev/null
+++ b/virtmanager/aidl/android/system/virtmanager/VirtualMachineDebugInfo.aidl
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.virtmanager;
+
+/** Information about a running VM, for debug purposes only. */
+parcelable VirtualMachineDebugInfo {
+    /** The CID assigned to the VM. */
+    int cid;
+
+    /**
+     * The filename of the config file used to start the VM. This may have changed since it was
+     * read, so it shouldn't be trusted; it is only stored for debugging purposes.
+     */
+    String configPath;
+}
diff --git a/virtmanager/src/aidl.rs b/virtmanager/src/aidl.rs
index 8394e36..8c963d2 100644
--- a/virtmanager/src/aidl.rs
+++ b/virtmanager/src/aidl.rs
@@ -14,19 +14,28 @@
 
 //! Implementation of the AIDL interface of the Virt Manager.
 
-use crate::config::load_vm_config;
+use crate::config::VmConfig;
 use crate::crosvm::VmInstance;
 use crate::{Cid, FIRST_GUEST_CID};
+use ::binder::FromIBinder; // TODO(dbrazdil): remove once b/182890877 is fixed
 use android_system_virtmanager::aidl::android::system::virtmanager::IVirtManager::IVirtManager;
 use android_system_virtmanager::aidl::android::system::virtmanager::IVirtualMachine::{
     BnVirtualMachine, IVirtualMachine,
 };
-use android_system_virtmanager::binder::{self, Interface, StatusCode, Strong};
+use android_system_virtmanager::aidl::android::system::virtmanager::VirtualMachineDebugInfo::VirtualMachineDebugInfo;
+use android_system_virtmanager::binder::{
+    self, Interface, ParcelFileDescriptor, StatusCode, Strong, ThreadState,
+};
 use log::error;
-use std::sync::{Arc, Mutex};
+use std::fs::File;
+use std::sync::{Arc, Mutex, Weak};
 
 pub const BINDER_SERVICE_IDENTIFIER: &str = "android.system.virtmanager";
 
+// TODO(qwandor): Use PermissionController once it is available to Rust.
+/// Only processes running with one of these UIDs are allowed to call debug methods.
+const DEBUG_ALLOWED_UIDS: [u32; 2] = [0, 2000];
+
 /// Implementation of `IVirtManager`, the entry point of the AIDL service.
 #[derive(Debug, Default)]
 pub struct VirtManager {
@@ -39,14 +48,75 @@
     /// Create and start a new VM with the given configuration, assigning it the next available CID.
     ///
     /// Returns a binder `IVirtualMachine` object referring to it, as a handle for the client.
-    fn startVm(&self, config_path: &str) -> binder::Result<Strong<dyn IVirtualMachine>> {
+    fn startVm(
+        &self,
+        config_path: &str,
+        log_fd: Option<&ParcelFileDescriptor>,
+    ) -> binder::Result<Strong<dyn IVirtualMachine>> {
         let state = &mut *self.state.lock().unwrap();
         let cid = state.next_cid;
-        let instance = start_vm(config_path, cid)?;
+        let log_fd = log_fd
+            .map(|fd| fd.as_ref().try_clone().map_err(|_| StatusCode::UNKNOWN_ERROR))
+            .transpose()?;
+        let instance = Arc::new(start_vm(config_path, cid, log_fd)?);
         // TODO(qwandor): keep track of which CIDs are currently in use so that we can reuse them.
         state.next_cid = state.next_cid.checked_add(1).ok_or(StatusCode::UNKNOWN_ERROR)?;
-        Ok(VirtualMachine::create(Arc::new(instance)))
+        state.add_vm(Arc::downgrade(&instance));
+        Ok(VirtualMachine::create(instance))
     }
+
+    /// Get a list of all currently running VMs. This method is only intended for debug purposes,
+    /// and as such is only permitted from the shell user.
+    fn debugListVms(&self) -> binder::Result<Vec<VirtualMachineDebugInfo>> {
+        if !debug_access_allowed() {
+            return Err(StatusCode::PERMISSION_DENIED.into());
+        }
+
+        let state = &mut *self.state.lock().unwrap();
+        let vms = state.vms();
+        let cids = vms
+            .into_iter()
+            .map(|vm| VirtualMachineDebugInfo {
+                cid: vm.cid as i32,
+                configPath: vm.config_path.clone(),
+            })
+            .collect();
+        Ok(cids)
+    }
+
+    /// Hold a strong reference to a VM in Virt Manager. This method is only intended for debug
+    /// purposes, and as such is only permitted from the shell user.
+    fn debugHoldVmRef(&self, vmref: &dyn IVirtualMachine) -> binder::Result<()> {
+        if !debug_access_allowed() {
+            return Err(StatusCode::PERMISSION_DENIED.into());
+        }
+
+        // Workaround for b/182890877.
+        let vm: Strong<dyn IVirtualMachine> = FromIBinder::try_from(vmref.as_binder()).unwrap();
+
+        let state = &mut *self.state.lock().unwrap();
+        state.debug_hold_vm(vm);
+        Ok(())
+    }
+
+    /// Drop the reference to a VM that is being held by Virt Manager. Returns the reference if the VM was
+    /// found and None otherwise. This method is only intended for debug purposes, and as such is
+    /// only permitted from the shell user.
+    fn debugDropVmRef(&self, cid: i32) -> binder::Result<Option<Strong<dyn IVirtualMachine>>> {
+        if !debug_access_allowed() {
+            return Err(StatusCode::PERMISSION_DENIED.into());
+        }
+
+        let state = &mut *self.state.lock().unwrap();
+        Ok(state.debug_drop_vm(cid))
+    }
+}
+
+/// Check whether the caller of the current Binder method is allowed to call debug methods.
+fn debug_access_allowed() -> bool {
+    let uid = ThreadState::get_calling_uid();
+    log::trace!("Debug method call from UID {}.", uid);
+    DEBUG_ALLOWED_UIDS.contains(&uid)
 }
 
 /// Implementation of the AIDL `IVirtualMachine` interface. Used as a handle to a VM.
@@ -73,23 +143,62 @@
 /// The mutable state of the Virt Manager. There should only be one instance of this struct.
 #[derive(Debug)]
 struct State {
+    /// The next available unused CID.
     next_cid: Cid,
+
+    /// The VMs which have been started. When VMs are started a weak reference is added to this list
+    /// while a strong reference is returned to the caller over Binder. Once all copies of the
+    /// Binder client are dropped the weak reference here will become invalid, and will be removed
+    /// from the list opportunistically the next time `add_vm` is called.
+    vms: Vec<Weak<VmInstance>>,
+
+    /// Vector of strong VM references held on behalf of users that cannot hold them themselves.
+    /// This is only used for debugging purposes.
+    debug_held_vms: Vec<Strong<dyn IVirtualMachine>>,
+}
+
+impl State {
+    /// Get a list of VMs which are currently running.
+    fn vms(&self) -> Vec<Arc<VmInstance>> {
+        // Attempt to upgrade the weak pointers to strong pointers.
+        self.vms.iter().filter_map(Weak::upgrade).collect()
+    }
+
+    /// Add a new VM to the list.
+    fn add_vm(&mut self, vm: Weak<VmInstance>) {
+        // Garbage collect any entries from the stored list which no longer exist.
+        self.vms.retain(|vm| vm.strong_count() > 0);
+
+        // Actually add the new VM.
+        self.vms.push(vm);
+    }
+
+    /// Store a strong VM reference.
+    fn debug_hold_vm(&mut self, vm: Strong<dyn IVirtualMachine>) {
+        self.debug_held_vms.push(vm);
+    }
+
+    /// Retrieve and remove a strong VM reference.
+    fn debug_drop_vm(&mut self, cid: i32) -> Option<Strong<dyn IVirtualMachine>> {
+        let pos = self.debug_held_vms.iter().position(|vm| vm.getCid() == Ok(cid))?;
+        Some(self.debug_held_vms.swap_remove(pos))
+    }
 }
 
 impl Default for State {
     fn default() -> Self {
-        State { next_cid: FIRST_GUEST_CID }
+        State { next_cid: FIRST_GUEST_CID, vms: vec![], debug_held_vms: vec![] }
     }
 }
 
 /// Start a new VM instance from the given VM config filename. This assumes the VM is not already
 /// running.
-fn start_vm(config_path: &str, cid: Cid) -> binder::Result<VmInstance> {
-    let config = load_vm_config(config_path).map_err(|e| {
+fn start_vm(config_path: &str, cid: Cid, log_fd: Option<File>) -> binder::Result<VmInstance> {
+    let config = VmConfig::load(config_path).map_err(|e| {
         error!("Failed to load VM config {}: {:?}", config_path, e);
         StatusCode::BAD_VALUE
     })?;
-    Ok(VmInstance::start(&config, cid).map_err(|e| {
+    Ok(VmInstance::start(&config, cid, config_path, log_fd).map_err(|e| {
         error!("Failed to start VM {}: {:?}", config_path, e);
         StatusCode::UNKNOWN_ERROR
     })?)
diff --git a/virtmanager/src/config.rs b/virtmanager/src/config.rs
index c0d23f0..d8cb06f 100644
--- a/virtmanager/src/config.rs
+++ b/virtmanager/src/config.rs
@@ -49,6 +49,13 @@
         }
         Ok(())
     }
+
+    /// Load the configuration for a VM from the given JSON file.
+    pub fn load(path: &str) -> Result<VmConfig, Error> {
+        let file = File::open(path).with_context(|| format!("Failed to open {}", path))?;
+        let buffered = BufReader::new(file);
+        Ok(serde_json::from_reader(buffered)?)
+    }
 }
 
 /// A disk image to be made available to the VM.
@@ -59,10 +66,3 @@
     /// Whether this disk should be writable by the VM.
     pub writable: bool,
 }
-
-/// Load the configuration for the VM with the given ID from a JSON file.
-pub fn load_vm_config(path: &str) -> Result<VmConfig, Error> {
-    let file = File::open(path).with_context(|| format!("Failed to open {}", path))?;
-    let buffered = BufReader::new(file);
-    Ok(serde_json::from_reader(buffered)?)
-}
diff --git a/virtmanager/src/crosvm.rs b/virtmanager/src/crosvm.rs
index 057b791..814a1a7 100644
--- a/virtmanager/src/crosvm.rs
+++ b/virtmanager/src/crosvm.rs
@@ -18,6 +18,7 @@
 use crate::Cid;
 use anyhow::Error;
 use log::{debug, error, info};
+use std::fs::File;
 use std::process::{Child, Command};
 
 const CROSVM_PATH: &str = "/apex/com.android.virt/bin/crosvm";
@@ -29,19 +30,27 @@
     child: Child,
     /// The CID assigned to the VM for vsock communication.
     pub cid: Cid,
+    /// The filename of the config file that was used to start the VM. This may have changed since
+    /// it was read, so it shouldn't be trusted; it is only stored for debugging purposes.
+    pub config_path: String,
 }
 
 impl VmInstance {
     /// Create a new `VmInstance` for the given process.
-    fn new(child: Child, cid: Cid) -> VmInstance {
-        VmInstance { child, cid }
+    fn new(child: Child, cid: Cid, config_path: &str) -> VmInstance {
+        VmInstance { child, cid, config_path: config_path.to_owned() }
     }
 
     /// Start an instance of `crosvm` to manage a new VM. The `crosvm` instance will be killed when
     /// the `VmInstance` is dropped.
-    pub fn start(config: &VmConfig, cid: Cid) -> Result<VmInstance, Error> {
-        let child = run_vm(config, cid)?;
-        Ok(VmInstance::new(child, cid))
+    pub fn start(
+        config: &VmConfig,
+        cid: Cid,
+        config_path: &str,
+        log_fd: Option<File>,
+    ) -> Result<VmInstance, Error> {
+        let child = run_vm(config, cid, log_fd)?;
+        Ok(VmInstance::new(child, cid, config_path))
     }
 }
 
@@ -61,14 +70,18 @@
 }
 
 /// Start an instance of `crosvm` to manage a new VM.
-fn run_vm(config: &VmConfig, cid: Cid) -> Result<Child, Error> {
+fn run_vm(config: &VmConfig, cid: Cid, log_fd: Option<File>) -> Result<Child, Error> {
     config.validate()?;
 
     let mut command = Command::new(CROSVM_PATH);
     // TODO(qwandor): Remove --disable-sandbox.
     command.arg("run").arg("--disable-sandbox").arg("--cid").arg(cid.to_string());
-    // TODO(jiyong): Don't redirect console to the host syslog
-    command.arg("--serial=type=syslog");
+    if let Some(log_fd) = log_fd {
+        command.stdout(log_fd);
+    } else {
+        // Ignore console output.
+        command.arg("--serial=type=sink");
+    }
     if let Some(bootloader) = &config.bootloader {
         command.arg("--bios").arg(bootloader);
     }
diff --git a/virtmanager/src/main.rs b/virtmanager/src/main.rs
index 7cca4a9..486efeb 100644
--- a/virtmanager/src/main.rs
+++ b/virtmanager/src/main.rs
@@ -21,17 +21,22 @@
 use crate::aidl::{VirtManager, BINDER_SERVICE_IDENTIFIER};
 use android_system_virtmanager::aidl::android::system::virtmanager::IVirtManager::BnVirtManager;
 use android_system_virtmanager::binder::{add_service, ProcessState};
-use log::info;
+use log::{info, Level};
 
 /// The first CID to assign to a guest VM managed by the Virt Manager. CIDs lower than this are
 /// reserved for the host or other usage.
 const FIRST_GUEST_CID: Cid = 10;
 
+const LOG_TAG: &str = "VirtManager";
+
 /// The unique ID of a VM used (together with a port number) for vsock communication.
 type Cid = u32;
 
 fn main() {
-    env_logger::init();
+    android_logger::init_once(
+        android_logger::Config::default().with_tag(LOG_TAG).with_min_level(Level::Trace),
+    );
+
     let virt_manager = VirtManager::default();
     let virt_manager = BnVirtManager::new_binder(virt_manager);
     add_service(BINDER_SERVICE_IDENTIFIER, virt_manager.as_binder()).unwrap();
diff --git a/vm/Android.bp b/vm/Android.bp
index 8fe7ae9..4bb9727 100644
--- a/vm/Android.bp
+++ b/vm/Android.bp
@@ -1,3 +1,7 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
 rust_binary {
     name: "vm",
     crate_name: "vm",
@@ -6,9 +10,10 @@
     rustlibs: [
         "android.system.virtmanager-rust",
         "libanyhow",
-        "libbinder_rs",
         "libenv_logger",
+        "liblibc",
         "liblog_rust",
+        "libstructopt",
     ],
     apex_available: [
         "com.android.virt",
diff --git a/vm/src/main.rs b/vm/src/main.rs
index 1e642cb..8c2a084 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -17,49 +17,98 @@
 mod sync;
 
 use android_system_virtmanager::aidl::android::system::virtmanager::IVirtManager::IVirtManager;
-use android_system_virtmanager::binder::{get_interface, ProcessState, Strong};
-use anyhow::{bail, Context, Error};
-// TODO: Import these via android_system_virtmanager::binder once https://r.android.com/1619403 is
-// submitted.
-use binder::{DeathRecipient, IBinder};
-use std::env;
-use std::process::exit;
+use android_system_virtmanager::binder::{
+    get_interface, DeathRecipient, IBinder, ParcelFileDescriptor, ProcessState, Strong,
+};
+use anyhow::{Context, Error};
+use std::fs::File;
+use std::io;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::path::PathBuf;
+use structopt::clap::AppSettings;
+use structopt::StructOpt;
 use sync::AtomicFlag;
 
 const VIRT_MANAGER_BINDER_SERVICE_IDENTIFIER: &str = "android.system.virtmanager";
 
+#[derive(StructOpt)]
+#[structopt(no_version, global_settings = &[AppSettings::DisableVersion])]
+enum Opt {
+    /// Run a virtual machine
+    Run {
+        /// Path to VM config JSON
+        #[structopt(parse(from_os_str))]
+        config: PathBuf,
+
+        /// Detach VM from the terminal and run in the background
+        #[structopt(short, long)]
+        daemonize: bool,
+    },
+    /// Stop a virtual machine running in the background
+    Stop {
+        /// CID of the virtual machine
+        cid: u32,
+    },
+    /// List running virtual machines
+    List,
+}
+
 fn main() -> Result<(), Error> {
     env_logger::init();
-
-    let args: Vec<_> = env::args().collect();
-    if args.len() < 2 {
-        eprintln!("Usage:");
-        eprintln!("  {} run <vm_config.json>", args[0]);
-        exit(1);
-    }
+    let opt = Opt::from_args();
 
     // We need to start the thread pool for Binder to work properly, especially link_to_death.
     ProcessState::start_thread_pool();
 
-    match args[1].as_ref() {
-        "run" if args.len() == 3 => command_run(&args[2]),
-        command => bail!("Invalid command '{}' or wrong number of arguments", command),
+    let virt_manager = get_interface(VIRT_MANAGER_BINDER_SERVICE_IDENTIFIER)
+        .context("Failed to find Virt Manager service")?;
+
+    match opt {
+        Opt::Run { config, daemonize } => command_run(virt_manager, &config, daemonize),
+        Opt::Stop { cid } => command_stop(virt_manager, cid),
+        Opt::List => command_list(virt_manager),
     }
 }
 
 /// Run a VM from the given configuration file.
-fn command_run(config_filename: &str) -> Result<(), Error> {
-    let virt_manager: Strong<dyn IVirtManager> =
-        get_interface(VIRT_MANAGER_BINDER_SERVICE_IDENTIFIER)
-            .with_context(|| "Failed to find Virt Manager service")?;
-    let vm = virt_manager.startVm(config_filename).with_context(|| "Failed to start VM")?;
-    let cid = vm.getCid().with_context(|| "Failed to get CID")?;
+fn command_run(
+    virt_manager: Strong<dyn IVirtManager>,
+    config_path: &PathBuf,
+    daemonize: bool,
+) -> Result<(), Error> {
+    let config_filename = config_path.to_str().context("Failed to parse VM config path")?;
+    let stdout_file = ParcelFileDescriptor::new(duplicate_stdout()?);
+    let stdout = if daemonize { None } else { Some(&stdout_file) };
+    let vm = virt_manager.startVm(config_filename, stdout).context("Failed to start VM")?;
+
+    let cid = vm.getCid().context("Failed to get CID")?;
     println!("Started VM from {} with CID {}.", config_filename, cid);
 
-    // Wait until the VM dies. If we just returned immediately then the IVirtualMachine Binder
-    // object would be dropped and the VM would be killed.
-    wait_for_death(&mut vm.as_binder())?;
-    println!("VM died");
+    if daemonize {
+        // Pass the VM reference back to Virt Manager and have it hold it in the background.
+        virt_manager.debugHoldVmRef(&*vm).context("Failed to pass VM to Virt Manager")
+    } else {
+        // Wait until the VM dies. If we just returned immediately then the IVirtualMachine Binder
+        // object would be dropped and the VM would be killed.
+        wait_for_death(&mut vm.as_binder())?;
+        println!("VM died");
+        Ok(())
+    }
+}
+
+/// Retrieve reference to a previously daemonized VM and stop it.
+fn command_stop(virt_manager: Strong<dyn IVirtManager>, cid: u32) -> Result<(), Error> {
+    virt_manager
+        .debugDropVmRef(cid as i32)
+        .context("Failed to get VM from Virt Manager")?
+        .context("CID does not correspond to a running background VM")?;
+    Ok(())
+}
+
+/// List the VMs currently running.
+fn command_list(virt_manager: Strong<dyn IVirtManager>) -> Result<(), Error> {
+    let vms = virt_manager.debugListVms().context("Failed to get list of VMs")?;
+    println!("Running VMs: {:#?}", vms);
     Ok(())
 }
 
@@ -76,3 +125,18 @@
     dead.wait();
     Ok(())
 }
+
+/// Safely duplicate the standard output file descriptor.
+fn duplicate_stdout() -> io::Result<File> {
+    let stdout_fd = io::stdout().as_raw_fd();
+    // Safe because this just duplicates a file descriptor which we know to be valid, and we check
+    // for an error.
+    let dup_fd = unsafe { libc::dup(stdout_fd) };
+    if dup_fd < 0 {
+        Err(io::Error::last_os_error())
+    } else {
+        // Safe because we have just duplicated the file descriptor so we own it, and `from_raw_fd`
+        // takes ownership of it.
+        Ok(unsafe { File::from_raw_fd(dup_fd) })
+    }
+}