Merge "Add assemble_cvd to com.android.virt"
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 3c5849b..f9fdd1f 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -2,18 +2,22 @@
name: "authfs_defaults",
crate_name: "authfs",
srcs: [
- "src/lib.rs",
+ "src/main.rs",
],
edition: "2018",
rustlibs: [
"libanyhow",
"libauthfs_crypto_bindgen",
+ "libcfg_if",
+ "libfuse_rust",
"liblibc",
+ "libstructopt",
"libthiserror",
],
host_supported: true,
shared_libs: ["libcrypto"],
clippy_lints: "android",
+ defaults: ["crosvm_defaults"],
}
// TODO(b/172687320): remove once there is a canonical bindgen.
@@ -30,8 +34,8 @@
host_supported: true,
}
-rust_library {
- name: "libauthfs",
+rust_binary {
+ name: "authfs",
defaults: ["authfs_defaults"],
}
diff --git a/authfs/src/common.rs b/authfs/src/common.rs
new file mode 100644
index 0000000..2220ae7
--- /dev/null
+++ b/authfs/src/common.rs
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+pub const COMMON_PAGE_SIZE: u64 = 4096;
+
+pub fn divide_roundup(dividend: u64, divisor: u64) -> u64 {
+ (dividend + divisor - 1) / divisor
+}
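
A quick worked example of the helpers above (illustrative only, not part of the commit; `chunk_count` is a made-up name): divide_roundup is plain ceiling division, used below by fsverity.rs and by create_stat() in fusefs.rs.

    // Illustrative sketch, not part of the commit.
    fn chunk_count(file_size: u64) -> u64 {
        // A 4097-byte file needs two 4096-byte chunks: (4097 + 4095) / 4096 == 2.
        divide_roundup(file_size, COMMON_PAGE_SIZE)
    }
    // create_stat() in fusefs.rs fills st_blocks the same way, but in 512-byte units:
    // divide_roundup(4097, 512) == 9.
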
diff --git a/authfs/src/fsverity.rs b/authfs/src/fsverity.rs
index c9070ba..52aacf7 100644
--- a/authfs/src/fsverity.rs
+++ b/authfs/src/fsverity.rs
@@ -19,6 +19,7 @@
use thiserror::Error;
use crate::auth::Authenticator;
+use crate::common::divide_roundup;
use crate::crypto::{CryptoError, Sha256Hasher};
use crate::reader::ReadOnlyDataByChunk;
@@ -43,10 +44,6 @@
type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];
-fn divide_roundup(dividend: u64, divisor: u64) -> u64 {
- (dividend + divisor - 1) / divisor
-}
-
fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
let padding_size = pad_to - chunk.len();
Sha256Hasher::new()?.update(&chunk)?.update(&ZEROS[..padding_size])?.finalize()
@@ -168,7 +165,6 @@
}
impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> FsverityChunkedFileReader<F, M> {
- #[allow(dead_code)]
pub fn new<A: Authenticator>(
authenticator: &A,
chunked_file: F,
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
new file mode 100644
index 0000000..484aad4
--- /dev/null
+++ b/authfs/src/fusefs.rs
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::Result;
+use std::collections::BTreeMap;
+use std::convert::TryFrom;
+use std::ffi::CStr;
+use std::fs::OpenOptions;
+use std::io;
+use std::mem::MaybeUninit;
+use std::option::Option;
+use std::os::unix::io::AsRawFd;
+use std::path::Path;
+use std::time::Duration;
+
+use fuse::filesystem::{Context, DirEntry, DirectoryIterator, Entry, FileSystem, ZeroCopyWriter};
+use fuse::mount::MountOption;
+
+use crate::common::{divide_roundup, COMMON_PAGE_SIZE};
+use crate::fsverity::FsverityChunkedFileReader;
+use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
+
+// We're reading the backing file by chunk, so we set the block size to be the same.
+const BLOCK_SIZE: usize = COMMON_PAGE_SIZE as usize;
+
+const DEFAULT_METADATA_TIMEOUT: std::time::Duration = Duration::from_secs(5);
+
+pub type Inode = u64;
+type Handle = u64;
+
+// A debug-only type where everything is stored as local files.
+type FileBackedFsverityChunkedFileReader =
+ FsverityChunkedFileReader<ChunkedFileReader, ChunkedFileReader>;
+
+pub enum FileConfig {
+ LocalVerifiedFile(FileBackedFsverityChunkedFileReader, u64),
+ LocalUnverifiedFile(ChunkedFileReader, u64),
+}
+
+struct AuthFs {
+ /// Store `FileConfig`s using the `Inode` number as the search index.
+ ///
+ /// To further reduce the lookup cost, since `Inode` is an integer, we may consider storing
+ /// the configs in a `Vec` if we can guarantee that the inode numbers are small and
+ /// consecutive.
+ file_pool: BTreeMap<Inode, FileConfig>,
+
+ /// Maximum bytes in the write transaction to the FUSE device. This limits the maximum size
+ /// of a read request (including the FUSE protocol overhead).
+ max_write: u32,
+}
+
+impl AuthFs {
+ pub fn new(file_pool: BTreeMap<Inode, FileConfig>, max_write: u32) -> AuthFs {
+ AuthFs { file_pool, max_write }
+ }
+
+ fn get_file_config(&self, inode: &Inode) -> io::Result<&FileConfig> {
+ self.file_pool.get(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+ }
+}
+
+fn check_access_mode(flags: u32, mode: libc::c_int) -> io::Result<()> {
+ if (flags & libc::O_ACCMODE as u32) == mode as u32 {
+ Ok(())
+ } else {
+ Err(io::Error::from_raw_os_error(libc::EACCES))
+ }
+}
+
+cfg_if::cfg_if! {
+ if #[cfg(all(target_arch = "aarch64", target_pointer_width = "64"))] {
+ fn blk_size() -> libc::c_int { BLOCK_SIZE as libc::c_int }
+ } else {
+ fn blk_size() -> libc::c_long { BLOCK_SIZE as libc::c_long }
+ }
+}
+
+fn create_stat(ino: libc::ino_t, file_size: u64) -> io::Result<libc::stat64> {
+ let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
+
+ st.st_ino = ino;
+ st.st_mode = libc::S_IFREG | libc::S_IRUSR | libc::S_IRGRP | libc::S_IROTH;
+ st.st_dev = 0;
+ st.st_nlink = 1;
+ st.st_uid = 0;
+ st.st_gid = 0;
+ st.st_rdev = 0;
+ st.st_size = libc::off64_t::try_from(file_size)
+ .map_err(|_| io::Error::from_raw_os_error(libc::EFBIG))?;
+ st.st_blksize = blk_size();
+ // Per man stat(2), st_blocks is "Number of 512B blocks allocated".
+ st.st_blocks = libc::c_longlong::try_from(divide_roundup(file_size, 512))
+ .map_err(|_| io::Error::from_raw_os_error(libc::EFBIG))?;
+ Ok(st)
+}
+
+/// An iterator that generates (offset, size) pairs for a chunked read operation, where offset is
+/// the global file offset and size is the number of bytes to read starting at that offset.
+struct ChunkReadIter {
+ remaining: usize,
+ offset: u64,
+}
+
+impl ChunkReadIter {
+ pub fn new(remaining: usize, offset: u64) -> Self {
+ ChunkReadIter { remaining, offset }
+ }
+}
+
+impl Iterator for ChunkReadIter {
+ type Item = (u64, usize);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.remaining == 0 {
+ return None;
+ }
+ let chunk_data_size =
+ std::cmp::min(self.remaining, BLOCK_SIZE - (self.offset % BLOCK_SIZE as u64) as usize);
+ let retval = (self.offset, chunk_data_size);
+ self.offset += chunk_data_size as u64;
+ self.remaining = self.remaining.saturating_sub(chunk_data_size);
+ Some(retval)
+ }
+}
+
+fn offset_to_chunk_index(offset: u64) -> u64 {
+ offset / BLOCK_SIZE as u64
+}
+
+fn read_chunks<W: io::Write, T: ReadOnlyDataByChunk>(
+ mut w: W,
+ file: &T,
+ file_size: u64,
+ offset: u64,
+ size: u32,
+) -> io::Result<usize> {
+ let remaining = file_size.saturating_sub(offset);
+ let size_to_read = std::cmp::min(size as usize, remaining as usize);
+ let total = ChunkReadIter::new(size_to_read, offset).try_fold(
+ 0,
+ |total, (current_offset, planned_data_size)| {
+ // TODO(victorhsieh): There might be a non-trivial way to avoid this copy. For example,
+ // instead of accepting a buffer, the writer could expose the final destination buffer
+ // for the reader to write to. It might not be generally applicable though, e.g. with
+ // virtio transport, the buffer may not be contiguous.
+ let mut buf = [0u8; BLOCK_SIZE];
+ let read_size = file.read_chunk(offset_to_chunk_index(current_offset), &mut buf)?;
+ if read_size < planned_data_size {
+ return Err(io::Error::from_raw_os_error(libc::ENODATA));
+ }
+
+ let begin = (current_offset % BLOCK_SIZE as u64) as usize;
+ let end = begin + planned_data_size;
+ let s = w.write(&buf[begin..end])?;
+ if s != planned_data_size {
+ return Err(io::Error::from_raw_os_error(libc::EIO));
+ }
+ Ok(total + s)
+ },
+ )?;
+
+ Ok(total)
+}
+
+// No need to support enumerating directory entries.
+struct EmptyDirectoryIterator {}
+
+impl DirectoryIterator for EmptyDirectoryIterator {
+ fn next(&mut self) -> Option<DirEntry> {
+ None
+ }
+}
+
+impl FileSystem for AuthFs {
+ type Inode = Inode;
+ type Handle = Handle;
+ type DirIter = EmptyDirectoryIterator;
+
+ fn max_buffer_size(&self) -> u32 {
+ self.max_write
+ }
+
+ fn lookup(&self, _ctx: Context, _parent: Inode, name: &CStr) -> io::Result<Entry> {
+ // Only accept file names that look like an integer. Files in the pool are simply exposed
+ // by their inode number. Also, there is currently no directory structure.
+ let num = name.to_str().map_err(|_| io::Error::from_raw_os_error(libc::EINVAL))?;
+ // Normally, `lookup` is required to increase a reference count for the inode (while
+ // `forget` will decrease it). It is not necessary here since the files are configured to
+ // be static.
+ let inode = num.parse::<Inode>().map_err(|_| io::Error::from_raw_os_error(libc::ENOENT))?;
+ let st = match self.get_file_config(&inode)? {
+ FileConfig::LocalVerifiedFile(_, file_size)
+ | FileConfig::LocalUnverifiedFile(_, file_size) => create_stat(inode, *file_size)?,
+ };
+ Ok(Entry {
+ inode,
+ generation: 0,
+ attr: st,
+ entry_timeout: DEFAULT_METADATA_TIMEOUT,
+ attr_timeout: DEFAULT_METADATA_TIMEOUT,
+ })
+ }
+
+ fn getattr(
+ &self,
+ _ctx: Context,
+ inode: Inode,
+ _handle: Option<Handle>,
+ ) -> io::Result<(libc::stat64, Duration)> {
+ Ok((
+ match self.get_file_config(&inode)? {
+ FileConfig::LocalVerifiedFile(_, file_size)
+ | FileConfig::LocalUnverifiedFile(_, file_size) => create_stat(inode, *file_size)?,
+ },
+ DEFAULT_METADATA_TIMEOUT,
+ ))
+ }
+
+ fn open(
+ &self,
+ _ctx: Context,
+ inode: Self::Inode,
+ flags: u32,
+ ) -> io::Result<(Option<Self::Handle>, fuse::sys::OpenOptions)> {
+ // Since the file handle is not really used in later operations (which use the Inode
+ // directly), return None as the handle.
+ match self.get_file_config(&inode)? {
+ FileConfig::LocalVerifiedFile(_, _) => {
+ check_access_mode(flags, libc::O_RDONLY)?;
+ // Once verified, and only if verified, the file content can be cached. This is not
+ // really needed for a local file, but it matches the later behavior of RemoteVerifiedFile.
+ Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
+ }
+ FileConfig::LocalUnverifiedFile(_, _) => {
+ check_access_mode(flags, libc::O_RDONLY)?;
+ // Do not cache the content. This type of file is supposed to be verified using
+ // dm-verity. The filesystem mounted over dm-verity is already cached, so use
+ // direct I/O here to avoid double caching.
+ Ok((None, fuse::sys::OpenOptions::DIRECT_IO))
+ }
+ }
+ }
+
+ fn read<W: io::Write + ZeroCopyWriter>(
+ &self,
+ _ctx: Context,
+ inode: Inode,
+ _handle: Handle,
+ w: W,
+ size: u32,
+ offset: u64,
+ _lock_owner: Option<u64>,
+ _flags: u32,
+ ) -> io::Result<usize> {
+ match self.get_file_config(&inode)? {
+ FileConfig::LocalVerifiedFile(file, file_size) => {
+ read_chunks(w, file, *file_size, offset, size)
+ }
+ FileConfig::LocalUnverifiedFile(file, file_size) => {
+ read_chunks(w, file, *file_size, offset, size)
+ }
+ }
+ }
+}
+
+/// Mount and start the FUSE instance. This requires CAP_SYS_ADMIN.
+pub fn loop_forever(
+ file_pool: BTreeMap<Inode, FileConfig>,
+ mountpoint: &Path,
+) -> Result<(), fuse::Error> {
+ let max_read: u32 = 65536;
+ let max_write: u32 = 65536;
+ let dev_fuse = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .open("/dev/fuse")
+ .expect("Failed to open /dev/fuse");
+
+ fuse::mount(
+ mountpoint,
+ "authfs",
+ libc::MS_NOSUID | libc::MS_NODEV,
+ &[
+ MountOption::FD(dev_fuse.as_raw_fd()),
+ MountOption::RootMode(libc::S_IFDIR | libc::S_IXUSR | libc::S_IXGRP | libc::S_IXOTH),
+ MountOption::AllowOther,
+ MountOption::UserId(0),
+ MountOption::GroupId(0),
+ MountOption::MaxRead(max_read),
+ ],
+ )
+ .expect("Failed to mount fuse");
+
+ fuse::worker::start_message_loop(
+ dev_fuse,
+ max_write,
+ max_read,
+ AuthFs::new(file_pool, max_write),
+ )
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn collect_chunk_read_iter(remaining: usize, offset: u64) -> Vec<(u64, usize)> {
+ ChunkReadIter::new(remaining, offset).collect::<Vec<_>>()
+ }
+
+ #[test]
+ fn test_chunk_read_iter() {
+ assert_eq!(collect_chunk_read_iter(4096, 0), [(0, 4096)]);
+ assert_eq!(collect_chunk_read_iter(8192, 0), [(0, 4096), (4096, 4096)]);
+ assert_eq!(collect_chunk_read_iter(8192, 4096), [(4096, 4096), (8192, 4096)]);
+
+ assert_eq!(
+ collect_chunk_read_iter(16384, 1),
+ [(1, 4095), (4096, 4096), (8192, 4096), (12288, 4096), (16384, 1)]
+ );
+
+ assert_eq!(collect_chunk_read_iter(0, 0), []);
+ assert_eq!(collect_chunk_read_iter(0, 100), []);
+ }
+}
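
To make the read path above concrete, here is a small walkthrough (illustrative only, not part of the commit) of how ChunkReadIter, offset_to_chunk_index and the slicing in read_chunks cooperate on an unaligned read. It is written as a test that could sit next to test_chunk_read_iter; the name chunked_read_walkthrough is made up.

    #[test]
    fn chunked_read_walkthrough() {
        // Mirrors the (16384, 1) case above: a 16 KiB read starting at offset 1.
        // Expected (chunk_index, begin, end) per iteration; no pair crosses a chunk boundary.
        let mut expected =
            [(0u64, 1usize, 4096usize), (1, 0, 4096), (2, 0, 4096), (3, 0, 4096), (4, 0, 1)].iter();
        for (current_offset, planned) in ChunkReadIter::new(16384, 1) {
            let chunk_index = offset_to_chunk_index(current_offset);
            let begin = (current_offset % BLOCK_SIZE as u64) as usize;
            let end = begin + planned;
            // read_chunks() reads chunk `chunk_index` into a BLOCK_SIZE buffer and copies
            // buf[begin..end] to the FUSE reply.
            assert_eq!(&(chunk_index, begin, end), expected.next().unwrap());
        }
    }
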
diff --git a/authfs/src/lib.rs b/authfs/src/lib.rs
deleted file mode 100644
index 05070d6..0000000
--- a/authfs/src/lib.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! This crate provides a FUSE-based, non-generic filesystem that I/O is authenticated. This
-//! filesystem assumes the storage layer is not trusted, e.g. file is provided by an untrusted VM,
-//! and the content can't be simply trusted. The filesystem can use its public key to verify a
-//! (read-only) file against its associated fs-verity signature by a trusted party. With the Merkle
-//! tree, each read of file block can be verified individually.
-//!
-//! The implementation is not finished.
-
-mod auth;
-mod crypto;
-mod fsverity;
-mod reader;
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
new file mode 100644
index 0000000..f0b5237
--- /dev/null
+++ b/authfs/src/main.rs
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! This crate implements AuthFS, a FUSE-based, non-generic filesystem where file access is
+//! authenticated. The filesystem assumes the underlying storage layer is not trusted, e.g. a file
+//! may be provided by an untrusted host/VM, so its content can't simply be trusted. However, with
+//! a public key from a trusted party, the filesystem can still verify a (read-only) file signed by
+//! that party even if the host/VM providing the blob is malicious. With the Merkle tree, each read
+//! of a file block can be verified individually, and only when needed.
+//!
+//! AuthFS only serves files that are specifically configured. A file configuration may include the
+//! source (e.g. a local file or a remote file server), the verification method (e.g. a certificate
+//! for fs-verity verification, or no verification if the file is expected to be mounted over
+//! dm-verity), and a file ID. Regardless of the actual file name, the names exposed through AuthFS
+//! are currently integers, e.g. /mountpoint/42.
+
+use anyhow::{bail, Result};
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::io::Read;
+use std::path::PathBuf;
+use structopt::StructOpt;
+
+mod auth;
+mod common;
+mod crypto;
+mod fsverity;
+mod fusefs;
+mod reader;
+
+use auth::FakeAuthenticator;
+use fsverity::FsverityChunkedFileReader;
+use fusefs::{FileConfig, Inode};
+use reader::ChunkedFileReader;
+
+#[derive(StructOpt)]
+struct Options {
+ /// Mount point of AuthFS.
+ #[structopt(parse(from_os_str))]
+ mount_point: PathBuf,
+
+ /// Debug only. A read-only file to be protected by fs-verity. Can be repeated.
+ #[structopt(long, parse(try_from_str = parse_local_verified_file_option))]
+ local_verified_file: Vec<LocalVerifiedFileConfig>,
+
+ /// Debug only. An unverified read-only file. Can be repeated.
+ #[structopt(long, parse(try_from_str = parse_local_unverified_file_option))]
+ local_unverified_file: Vec<LocalUnverifiedFileConfig>,
+}
+
+struct LocalVerifiedFileConfig {
+ ino: Inode,
+ file_path: PathBuf,
+ merkle_tree_dump_path: PathBuf,
+ signature_path: PathBuf,
+}
+
+struct LocalUnverifiedFileConfig {
+ ino: Inode,
+ file_path: PathBuf,
+}
+
+fn parse_local_verified_file_option(option: &str) -> Result<LocalVerifiedFileConfig> {
+ let strs: Vec<&str> = option.split(':').collect();
+ if strs.len() != 4 {
+ bail!("Invalid option: {}", option);
+ }
+ Ok(LocalVerifiedFileConfig {
+ ino: strs[0].parse::<Inode>().unwrap(),
+ file_path: PathBuf::from(strs[1]),
+ merkle_tree_dump_path: PathBuf::from(strs[2]),
+ signature_path: PathBuf::from(strs[3]),
+ })
+}
+
+fn parse_local_unverified_file_option(option: &str) -> Result<LocalUnverifiedFileConfig> {
+ let strs: Vec<&str> = option.split(':').collect();
+ if strs.len() != 2 {
+ bail!("Invalid option: {}", option);
+ }
+ Ok(LocalUnverifiedFileConfig {
+ ino: strs[0].parse::<Inode>().unwrap(),
+ file_path: PathBuf::from(strs[1]),
+ })
+}
+
+fn new_config_local_verified_file(
+ protected_file: &PathBuf,
+ merkle_tree_dump: &PathBuf,
+ signature: &PathBuf,
+) -> Result<FileConfig> {
+ let file = File::open(&protected_file)?;
+ let file_size = file.metadata()?.len();
+ let file_reader = ChunkedFileReader::new(file)?;
+ let merkle_tree_reader = ChunkedFileReader::new(File::open(merkle_tree_dump)?)?;
+ let authenticator = FakeAuthenticator::always_succeed();
+ let mut sig = Vec::new();
+ let _ = File::open(signature)?.read_to_end(&mut sig)?;
+ let file_reader = FsverityChunkedFileReader::new(
+ &authenticator,
+ file_reader,
+ file_size,
+ sig,
+ merkle_tree_reader,
+ )?;
+ Ok(FileConfig::LocalVerifiedFile(file_reader, file_size))
+}
+
+fn new_config_local_unverified_file(file_path: &PathBuf) -> Result<FileConfig> {
+ let file = File::open(file_path)?;
+ let file_size = file.metadata()?.len();
+ let file_reader = ChunkedFileReader::new(file)?;
+ Ok(FileConfig::LocalUnverifiedFile(file_reader, file_size))
+}
+
+fn prepare_file_pool(args: &Options) -> Result<BTreeMap<Inode, FileConfig>> {
+ let mut file_pool = BTreeMap::new();
+
+ for config in &args.local_verified_file {
+ file_pool.insert(
+ config.ino,
+ new_config_local_verified_file(
+ &config.file_path,
+ &config.merkle_tree_dump_path,
+ &config.signature_path,
+ )?,
+ );
+ }
+
+ for config in &args.local_unverified_file {
+ file_pool.insert(config.ino, new_config_local_unverified_file(&config.file_path)?);
+ }
+
+ Ok(file_pool)
+}
+
+fn main() -> Result<()> {
+ let args = Options::from_args();
+ let file_pool = prepare_file_pool(&args)?;
+ fusefs::loop_forever(file_pool, &args.mount_point)?;
+ Ok(())
+}
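
For reference (not part of the commit), the colon-separated option format the parsers above expect matches the invocation in tools/test.sh below. A hypothetical test illustrating it; the paths are made up:

    #[cfg(test)]
    mod option_format_example {
        use super::*;

        #[test]
        fn parses_colon_separated_options() {
            let verified = parse_local_verified_file_option(
                "2:testdata/input.4m:testdata/input.4m.merkle_dump:testdata/input.4m.fsv_sig",
            )
            .unwrap();
            assert_eq!(verified.ino, 2);

            let unverified = parse_local_unverified_file_option("5:testdata/input.4k").unwrap();
            assert_eq!(unverified.ino, 5);
            // Once mounted, these files are exposed as /mountpoint/2 and /mountpoint/5.
        }
    }
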
diff --git a/authfs/src/reader.rs b/authfs/src/reader.rs
index 135a793..2d1b617 100644
--- a/authfs/src/reader.rs
+++ b/authfs/src/reader.rs
@@ -19,13 +19,14 @@
use std::fs::File;
use std::io::Result;
use std::os::unix::fs::FileExt;
-use std::path::Path;
+
+use crate::common::COMMON_PAGE_SIZE;
/// A trait for reading data by chunks. The data is assumed readonly and has fixed length. Chunks
/// can be read by specifying the chunk index. Only the last chunk may have incomplete chunk size.
pub trait ReadOnlyDataByChunk {
/// Default chunk size.
- const CHUNK_SIZE: u64 = 4096;
+ const CHUNK_SIZE: u64 = COMMON_PAGE_SIZE;
/// Read the `chunk_index`-th chunk to `buf`. Each slice/chunk has size `CHUNK_SIZE` except for
/// the last one, which can be an incomplete chunk. `buf` is currently required to be large
@@ -49,9 +50,7 @@
impl ChunkedFileReader {
/// Creates a `ChunkedFileReader` to read from for the specified `path`.
- #[allow(dead_code)]
- pub fn new<P: AsRef<Path>>(path: P) -> Result<ChunkedFileReader> {
- let file = File::open(path)?;
+ pub fn new(file: File) -> Result<ChunkedFileReader> {
let size = file.metadata()?.len();
Ok(ChunkedFileReader { file, size })
}
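
For clarity (not part of the commit): ReadOnlyDataByChunk is the abstraction that both ChunkedFileReader and FsverityChunkedFileReader provide to fusefs.rs. Assuming a read method shaped like fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> — an assumption consistent with its call site in read_chunks() — an in-memory implementation could look roughly like this:

    // Sketch only; `InMemoryChunks` and the exact trait method signature are assumptions.
    struct InMemoryChunks {
        data: Vec<u8>,
    }

    impl ReadOnlyDataByChunk for InMemoryChunks {
        fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> std::io::Result<usize> {
            let start = (chunk_index * Self::CHUNK_SIZE) as usize;
            if start >= self.data.len() {
                return Ok(0); // past EOF: nothing to read (a choice made by this sketch)
            }
            let end = std::cmp::min(start + Self::CHUNK_SIZE as usize, self.data.len());
            let len = end - start;
            buf[..len].copy_from_slice(&self.data[start..end]); // buf must hold at least CHUNK_SIZE
            Ok(len)
        }
    }
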
diff --git a/authfs/tools/test.sh b/authfs/tools/test.sh
new file mode 100755
index 0000000..9ed3a99
--- /dev/null
+++ b/authfs/tools/test.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Run with -u to enter a new namespace.
+if [[ $1 == "-u" ]]; then
+ exec unshare -m -U -r $0
+fi
+
+trap "umount /tmp/mnt" EXIT;
+mkdir -p /tmp/mnt
+
+echo "Mounting authfs in background ..."
+strace -o authfs.strace target/debug/authfs \
+ /tmp/mnt \
+ --local-verified-file 2:testdata/input.4m:testdata/input.4m.merkle_dump:testdata/input.4m.fsv_sig \
+ --local-verified-file 3:testdata/input.4k1:testdata/input.4k1.merkle_dump:testdata/input.4k1.fsv_sig \
+ --local-verified-file 4:testdata/input.4k:testdata/input.4k.merkle_dump:testdata/input.4k.fsv_sig \
+ --local-unverified-file 5:testdata/input.4k \
+ &
+sleep 0.1
+
+echo "Accessing files in authfs ..."
+echo
+md5sum /tmp/mnt/2 testdata/input.4m
+echo
+md5sum /tmp/mnt/3 testdata/input.4k1
+echo
+md5sum /tmp/mnt/4 /tmp/mnt/5 testdata/input.4k
+echo
+dd if=/tmp/mnt/2 bs=1000 skip=100 count=50 status=none |md5sum
+dd if=testdata/input.4m bs=1000 skip=100 count=50 status=none |md5sum
+echo
+tac /tmp/mnt/4 |md5sum
+tac /tmp/mnt/5 |md5sum
+tac testdata/input.4k |md5sum
+echo
+test -f /tmp/mnt/2 || echo 'FAIL: an expected file is missing'
+test -f /tmp/mnt/0 && echo 'FAIL: unexpected file is present'
+test -f /tmp/mnt/1 && echo 'FAIL: unexpected file is present, 1 is root dir'
+test -f /tmp/mnt/100 && echo 'FAIL: unexpected file is present'
+test -f /tmp/mnt/foo && echo 'FAIL: unexpected file is present'
+test -f /tmp/mnt/dir/3 && echo 'FAIL: unexpected file is present'
+echo "Done!"