Merge changes I675f622e,Id7bcfd75,I53c349dd
* changes:
MicrodroidTestApp is a self-instrumenting test
Console output and OS logs are separated
Debug mode can be changed in the demo app
diff --git a/apex/Android.bp b/apex/Android.bp
index 17ff7da..20a863f 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -83,3 +83,20 @@
filename: "init.rc",
installable: false,
}
+
+// Virt apex needs a custom signer for its payload
+python_binary_host {
+ name: "sign_virt_apex",
+ srcs: [
+ "sign_virt_apex.py",
+ ],
+ version: {
+ py2: {
+ enabled: false,
+ },
+ py3: {
+ enabled: true,
+ embedded_launcher: true,
+ },
+ },
+}
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
new file mode 100644
index 0000000..6ecd07e
--- /dev/null
+++ b/apex/sign_virt_apex.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""sign_virt_apex is a command line tool for sign the Virt APEX file.
+
+Typical usage: sign_virt_apex [-v] [--avbtool path_to_avbtool] path_to_key payload_contents_dir
+
+sign_virt_apex uses external tools which are assumed to be available via PATH.
+- avbtool (--avbtool can override the tool)
+- lpmake, lpunpack, simg2img, img2simg
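+
+Example (illustrative; the key and payload-directory paths are placeholders, not files
+provided by this change):
+  sign_virt_apex -v --avbtool $(which avbtool) \
+      /path/to/private_key.pem /path/to/virt_apex_payload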
+"""
+import argparse
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+
+
+def ParseArgs(argv):
+ parser = argparse.ArgumentParser(description='Sign the Virt APEX')
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help='verbose execution')
+ parser.add_argument(
+ '--avbtool',
+ default='avbtool',
+ help='Optional flag that specifies the AVB tool to use. Defaults to `avbtool`.')
+ parser.add_argument(
+ 'key',
+ help='path to the private key file.')
+ parser.add_argument(
+ 'input_dir',
+ help='the directory containing the files to be packaged')
+ return parser.parse_args(argv)
+
+
+def RunCommand(args, cmd, env=None, expected_return_values={0}):
+ env = env or {}
+ env.update(os.environ.copy())
+
+ # TODO(b/193504286): we need a way to find other tools (cmd[0]) in various contexts,
+ # e.g. sign_apex.py, sign_target_files_apk.py
+ if cmd[0] == 'avbtool':
+ cmd[0] = args.avbtool
+
+ if args.verbose:
+ print('Running: ' + ' '.join(cmd))
+ p = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, universal_newlines=True)
+ output, _ = p.communicate()
+
+ if args.verbose or p.returncode not in expected_return_values:
+ print(output.rstrip())
+
+ assert p.returncode in expected_return_values, (
+ '%d Failed to execute: ' + ' '.join(cmd)) % p.returncode
+ return (output, p.returncode)
+
+
+def ReadBytesSize(value):
+ return int(value.removesuffix(' bytes'))
+
+
+def AvbInfo(args, image_path, descriptor_name=None):
+ """Parses avbtool --info image output
+
+ Args:
+ args: program arguments.
+ image_path: The path to the image.
+ descriptor_name: Descriptor name of interest.
+
+ Returns:
+ A pair of
+ - a dict that contains VBMeta info. None if there's no VBMeta info.
+ - a dict that contains target descriptor info. None if name is not specified or not found.
+ """
+ if not os.path.exists(image_path):
+ raise ValueError('Failed to find image: {}'.format(image_path))
+
+ output, ret_code = RunCommand(
+ args, ['avbtool', 'info_image', '--image', image_path], expected_return_values={0, 1})
+ if ret_code == 1:
+ return None, None
+
+ info, descriptor = {}, None
+
+ # Read `avbtool info_image` output as "key:value" lines
+ matcher = re.compile(r'^(\s*)([^:]+):\s*(.*)$')
+
+ def IterateLine(output):
+ for line in output.split('\n'):
+ line_info = matcher.match(line)
+ if not line_info:
+ continue
+ yield line_info.group(1), line_info.group(2), line_info.group(3)
+
+ gen = IterateLine(output)
+ # Read VBMeta info
+ for _, key, value in gen:
+ if key == 'Descriptors':
+ break
+ info[key] = value
+
+ if descriptor_name:
+ for indent, key, _ in gen:
+ # Read a target descriptor
+ if key == descriptor_name:
+ cur_indent = indent
+ descriptor = {}
+ for indent, key, value in gen:
+ if indent == cur_indent:
+ break
+ descriptor[key] = value
+ break
+
+ return info, descriptor
+
+
+def AddHashFooter(args, key, image_path):
+ info, descriptor = AvbInfo(args, image_path, 'Hash descriptor')
+ if info:
+ image_size = ReadBytesSize(info['Image size'])
+ algorithm = info['Algorithm']
+ partition_name = descriptor['Partition Name']
+ partition_size = str(image_size)
+
+ cmd = ['avbtool', 'add_hash_footer',
+ '--key', key,
+ '--algorithm', algorithm,
+ '--partition_name', partition_name,
+ '--partition_size', partition_size,
+ '--image', image_path]
+ RunCommand(args, cmd)
+
+
+def AddHashTreeFooter(args, key, image_path):
+ info, descriptor = AvbInfo(args, image_path, 'Hashtree descriptor')
+ if info:
+ image_size = ReadBytesSize(info['Image size'])
+ algorithm = info['Algorithm']
+ partition_name = descriptor['Partition Name']
+ partition_size = str(image_size)
+
+ cmd = ['avbtool', 'add_hashtree_footer',
+ '--key', key,
+ '--algorithm', algorithm,
+ '--partition_name', partition_name,
+ '--partition_size', partition_size,
+ '--do_not_generate_fec',
+ '--image', image_path]
+ RunCommand(args, cmd)
+
+
+def MakeVbmetaImage(args, key, vbmeta_img, images):
+ info, _ = AvbInfo(args, vbmeta_img)
+ if info:
+ algorithm = info['Algorithm']
+ rollback_index = info['Rollback Index']
+ rollback_index_location = info['Rollback Index Location']
+
+ cmd = ['avbtool', 'make_vbmeta_image',
+ '--key', key,
+ '--algorithm', algorithm,
+ '--rollback_index', rollback_index,
+ '--rollback_index_location', rollback_index_location,
+ '--output', vbmeta_img]
+ for img in images:
+ cmd.extend(['--include_descriptors_from_image', img])
+ RunCommand(args, cmd)
+ # libavb expects to be able to read the maximum vbmeta size, so we must provide a partition
+ # which matches this or the read will fail.
+ RunCommand(args, ['truncate', '-s', '65536', vbmeta_img])
+
+
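+# Simple temporary-directory context manager (comparable to tempfile.TemporaryDirectory).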
+class TempDirectory(object):
+
+ def __enter__(self):
+ self.name = tempfile.mkdtemp()
+ return self.name
+
+ def __exit__(self, *unused):
+ shutil.rmtree(self.name)
+
+
+def MakeSuperImage(args, partitions, output):
+ with TempDirectory() as work_dir:
+ cmd = ['lpmake', '--device-size=auto', '--metadata-slots=2', # A/B
+ '--metadata-size=65536', '--sparse', '--output=' + output]
+
+ for part, img in partitions.items():
+ tmp_img = os.path.join(work_dir, part)
+ RunCommand(args, ['img2simg', img, tmp_img])
+
+ image_arg = '--image=%s=%s' % (part, img)
+ partition_arg = '--partition=%s:readonly:%d:default' % (
+ part, os.path.getsize(img))
+ cmd.extend([image_arg, partition_arg])
+
+ RunCommand(args, cmd)
+
+
+def SignVirtApex(args):
+ key = args.key
+ input_dir = args.input_dir
+
+ # target files in the Virt APEX
+ bootloader = os.path.join(input_dir, 'etc', 'microdroid_bootloader')
+ boot_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_boot-5.10.img')
+ vendor_boot_img = os.path.join(
+ input_dir, 'etc', 'fs', 'microdroid_vendor_boot-5.10.img')
+ super_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_super.img')
+ vbmeta_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_vbmeta.img')
+
+ # re-sign bootloader, boot.img, vendor_boot.img
+ AddHashFooter(args, key, bootloader)
+ AddHashFooter(args, key, boot_img)
+ AddHashFooter(args, key, vendor_boot_img)
+
+ # re-sign super.img
+ with TempDirectory() as work_dir:
+ # unpack super.img
+ tmp_super_img = os.path.join(work_dir, 'super.img')
+ RunCommand(args, ['simg2img', super_img, tmp_super_img])
+ RunCommand(args, ['lpunpack', tmp_super_img, work_dir])
+
+ system_a_img = os.path.join(work_dir, 'system_a.img')
+ vendor_a_img = os.path.join(work_dir, 'vendor_a.img')
+ partitions = {"system_a": system_a_img, "vendor_a": vendor_a_img}
+
+ # re-sign partitions in super.img
+ for img in partitions.values():
+ AddHashTreeFooter(args, key, img)
+
+ # re-pack super.img
+ MakeSuperImage(args, partitions, super_img)
+
+ # re-generate vbmeta from re-signed {boot, vendor_boot, system_a, vendor_a}.img
+ # Ideally, re-generating vbmeta would be done outside this TempDirectory block, but doing
+ # it here avoids having to unpack the re-signed super.img again for the system/vendor
+ # images, which are still available in this block.
+ MakeVbmetaImage(args, key, vbmeta_img, [
+ boot_img, vendor_boot_img, system_a_img, vendor_a_img])
+
+
+def main(argv):
+ try:
+ args = ParseArgs(argv)
+ SignVirtApex(args)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/authfs/fd_server/src/aidl.rs b/authfs/fd_server/src/aidl.rs
index ed3a0ea..b235025 100644
--- a/authfs/fd_server/src/aidl.rs
+++ b/authfs/fd_server/src/aidl.rs
@@ -83,8 +83,14 @@
BnVirtFdService::new_binder(FdService { fd_pool }, BinderFeatures::default())
}
- fn get_file_config(&self, id: i32) -> BinderResult<&FdConfig> {
- self.fd_pool.get(&id).ok_or_else(|| Status::from(ERROR_UNKNOWN_FD))
+ /// Handles the requested file `id` with `handler` if it is in the FD pool. This function
+ /// returns whatever the handler returns.
+ fn handle_fd<F, R>(&self, id: i32, handler: F) -> BinderResult<R>
+ where
+ F: FnOnce(&FdConfig) -> BinderResult<R>,
+ {
+ let fd_config = self.fd_pool.get(&id).ok_or_else(|| Status::from(ERROR_UNKNOWN_FD))?;
+ handler(fd_config)
}
}
@@ -95,21 +101,21 @@
let size: usize = validate_and_cast_size(size)?;
let offset: u64 = validate_and_cast_offset(offset)?;
- match self.get_file_config(id)? {
+ self.handle_fd(id, |config| match config {
FdConfig::Readonly { file, .. } | FdConfig::ReadWrite(file) => {
read_into_buf(file, size, offset).map_err(|e| {
error!("readFile: read error: {}", e);
Status::from(ERROR_IO)
})
}
- }
+ })
}
fn readFsverityMerkleTree(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
let size: usize = validate_and_cast_size(size)?;
let offset: u64 = validate_and_cast_offset(offset)?;
- match &self.get_file_config(id)? {
+ self.handle_fd(id, |config| match config {
FdConfig::Readonly { file, alt_merkle_tree, .. } => {
if let Some(tree_file) = &alt_merkle_tree {
read_into_buf(tree_file, size, offset).map_err(|e| {
@@ -134,11 +140,11 @@
// use.
Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
}
- }
+ })
}
fn readFsveritySignature(&self, id: i32) -> BinderResult<Vec<u8>> {
- match &self.get_file_config(id)? {
+ self.handle_fd(id, |config| match config {
FdConfig::Readonly { file, alt_signature, .. } => {
if let Some(sig_file) = &alt_signature {
// Supposedly big enough buffer size to store signature.
@@ -163,11 +169,11 @@
// There is no signature for a writable file.
Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
}
- }
+ })
}
fn writeFile(&self, id: i32, buf: &[u8], offset: i64) -> BinderResult<i32> {
- match &self.get_file_config(id)? {
+ self.handle_fd(id, |config| match config {
FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
FdConfig::ReadWrite(file) => {
let offset: u64 = offset.try_into().map_err(|_| {
@@ -185,11 +191,11 @@
Status::from(ERROR_IO)
})? as i32)
}
- }
+ })
}
fn resize(&self, id: i32, size: i64) -> BinderResult<()> {
- match &self.get_file_config(id)? {
+ self.handle_fd(id, |config| match config {
FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
FdConfig::ReadWrite(file) => {
if size < 0 {
@@ -203,11 +209,11 @@
Status::from(ERROR_IO)
})
}
- }
+ })
}
fn getFileSize(&self, id: i32) -> BinderResult<i64> {
- match &self.get_file_config(id)? {
+ self.handle_fd(id, |config| match config {
FdConfig::Readonly { file, .. } => {
let size = file
.metadata()
@@ -227,7 +233,7 @@
// for a writable file.
Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
}
- }
+ })
}
}
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index d54b5be..d985581 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -77,8 +77,15 @@
AuthFs { file_pool, max_write }
}
- fn get_file_config(&self, inode: &Inode) -> io::Result<&FileConfig> {
- self.file_pool.get(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+ /// Handles the file associated with `inode` if found. This function returns whatever the
+ /// handler returns.
+ fn handle_file<F, R>(&self, inode: &Inode, handler: F) -> io::Result<R>
+ where
+ F: FnOnce(&FileConfig) -> io::Result<R>,
+ {
+ let config =
+ self.file_pool.get(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
+ handler(config)
}
}
@@ -197,15 +204,15 @@
// `forget` will decrease it). It is not necessary here since the files are configured to
// be static.
let inode = num.parse::<Inode>().map_err(|_| io::Error::from_raw_os_error(libc::ENOENT))?;
- let st = match self.get_file_config(&inode)? {
+ let st = self.handle_file(&inode, |config| match config {
FileConfig::UnverifiedReadonly { file_size, .. }
| FileConfig::VerifiedReadonly { file_size, .. } => {
- create_stat(inode, *file_size, FileMode::ReadOnly)?
+ create_stat(inode, *file_size, FileMode::ReadOnly)
}
FileConfig::VerifiedNew { editor } => {
- create_stat(inode, editor.size(), FileMode::ReadWrite)?
+ create_stat(inode, editor.size(), FileMode::ReadWrite)
}
- };
+ })?;
Ok(Entry {
inode,
generation: 0,
@@ -221,18 +228,20 @@
inode: Inode,
_handle: Option<Handle>,
) -> io::Result<(libc::stat64, Duration)> {
- Ok((
- match self.get_file_config(&inode)? {
- FileConfig::UnverifiedReadonly { file_size, .. }
- | FileConfig::VerifiedReadonly { file_size, .. } => {
- create_stat(inode, *file_size, FileMode::ReadOnly)?
- }
- FileConfig::VerifiedNew { editor } => {
- create_stat(inode, editor.size(), FileMode::ReadWrite)?
- }
- },
- DEFAULT_METADATA_TIMEOUT,
- ))
+ self.handle_file(&inode, |config| {
+ Ok((
+ match config {
+ FileConfig::UnverifiedReadonly { file_size, .. }
+ | FileConfig::VerifiedReadonly { file_size, .. } => {
+ create_stat(inode, *file_size, FileMode::ReadOnly)?
+ }
+ FileConfig::VerifiedNew { editor } => {
+ create_stat(inode, editor.size(), FileMode::ReadWrite)?
+ }
+ },
+ DEFAULT_METADATA_TIMEOUT,
+ ))
+ })
}
fn open(
@@ -243,18 +252,20 @@
) -> io::Result<(Option<Self::Handle>, fuse::sys::OpenOptions)> {
// Since file handle is not really used in later operations (which use Inode directly),
// return None as the handle.
- match self.get_file_config(&inode)? {
- FileConfig::VerifiedReadonly { .. } | FileConfig::UnverifiedReadonly { .. } => {
- check_access_mode(flags, libc::O_RDONLY)?;
+ self.handle_file(&inode, |config| {
+ match config {
+ FileConfig::VerifiedReadonly { .. } | FileConfig::UnverifiedReadonly { .. } => {
+ check_access_mode(flags, libc::O_RDONLY)?;
+ }
+ FileConfig::VerifiedNew { .. } => {
+ // No need to check access modes since all the modes are allowed to the
+ // read-writable file.
+ }
}
- FileConfig::VerifiedNew { .. } => {
- // No need to check access modes since all the modes are allowed to the
- // read-writable file.
- }
- }
- // Always cache the file content. There is currently no need to support direct I/O or avoid
- // the cache buffer. Memory mapping is only possible with cache enabled.
- Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
+ // Always cache the file content. There is currently no need to support direct I/O or avoid
+ // the cache buffer. Memory mapping is only possible with cache enabled.
+ Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
+ })
}
fn read<W: io::Write + ZeroCopyWriter>(
@@ -268,19 +279,21 @@
_lock_owner: Option<u64>,
_flags: u32,
) -> io::Result<usize> {
- match self.get_file_config(&inode)? {
- FileConfig::VerifiedReadonly { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
+ self.handle_file(&inode, |config| {
+ match config {
+ FileConfig::VerifiedReadonly { reader, file_size } => {
+ read_chunks(w, reader, *file_size, offset, size)
+ }
+ FileConfig::UnverifiedReadonly { reader, file_size } => {
+ read_chunks(w, reader, *file_size, offset, size)
+ }
+ FileConfig::VerifiedNew { editor } => {
+ // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
+ // request a read even if the file is open with O_WRONLY.
+ read_chunks(w, editor, editor.size(), offset, size)
+ }
}
- FileConfig::UnverifiedReadonly { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
- }
- FileConfig::VerifiedNew { editor } => {
- // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
- // request a read even if the file is open with O_WRONLY.
- read_chunks(w, editor, editor.size(), offset, size)
- }
- }
+ })
}
fn write<R: io::Read + ZeroCopyReader>(
@@ -295,14 +308,14 @@
_delayed_write: bool,
_flags: u32,
) -> io::Result<usize> {
- match self.get_file_config(&inode)? {
+ self.handle_file(&inode, |config| match config {
FileConfig::VerifiedNew { editor } => {
let mut buf = vec![0; size as usize];
r.read_exact(&mut buf)?;
editor.write_at(&buf, offset)
}
_ => Err(io::Error::from_raw_os_error(libc::EBADF)),
- }
+ })
}
fn setattr(
@@ -313,44 +326,52 @@
_handle: Option<Handle>,
valid: SetattrValid,
) -> io::Result<(libc::stat64, Duration)> {
- match self.get_file_config(&inode)? {
- FileConfig::VerifiedNew { editor } => {
- // Initialize the default stat.
- let mut new_attr = create_stat(inode, editor.size(), FileMode::ReadWrite)?;
- // `valid` indicates what fields in `attr` are valid. Update to return correctly.
- if valid.contains(SetattrValid::SIZE) {
- // st_size is i64, but the cast should be safe since kernel should not give a
- // negative size.
- debug_assert!(attr.st_size >= 0);
- new_attr.st_size = attr.st_size;
- editor.resize(attr.st_size as u64)?;
- }
+ self.handle_file(&inode, |config| {
+ match config {
+ FileConfig::VerifiedNew { editor } => {
+ // Initialize the default stat.
+ let mut new_attr = create_stat(inode, editor.size(), FileMode::ReadWrite)?;
+ // `valid` indicates what fields in `attr` are valid. Update to return correctly.
+ if valid.contains(SetattrValid::SIZE) {
+ // st_size is i64, but the cast should be safe since kernel should not give a
+ // negative size.
+ debug_assert!(attr.st_size >= 0);
+ new_attr.st_size = attr.st_size;
+ editor.resize(attr.st_size as u64)?;
+ }
- if valid.contains(SetattrValid::MODE) {
- warn!("Changing st_mode is not currently supported");
- return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ if valid.contains(SetattrValid::MODE) {
+ warn!("Changing st_mode is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::UID) {
+ warn!("Changing st_uid is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::GID) {
+ warn!("Changing st_gid is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::CTIME) {
+ debug!(
+ "Ignoring ctime change as authfs does not maintain timestamp currently"
+ );
+ }
+ if valid.intersects(SetattrValid::ATIME | SetattrValid::ATIME_NOW) {
+ debug!(
+ "Ignoring atime change as authfs does not maintain timestamp currently"
+ );
+ }
+ if valid.intersects(SetattrValid::MTIME | SetattrValid::MTIME_NOW) {
+ debug!(
+ "Ignoring mtime change as authfs does not maintain timestamp currently"
+ );
+ }
+ Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
}
- if valid.contains(SetattrValid::UID) {
- warn!("Changing st_uid is not currently supported");
- return Err(io::Error::from_raw_os_error(libc::ENOSYS));
- }
- if valid.contains(SetattrValid::GID) {
- warn!("Changing st_gid is not currently supported");
- return Err(io::Error::from_raw_os_error(libc::ENOSYS));
- }
- if valid.contains(SetattrValid::CTIME) {
- debug!("Ignoring ctime change as authfs does not maintain timestamp currently");
- }
- if valid.intersects(SetattrValid::ATIME | SetattrValid::ATIME_NOW) {
- debug!("Ignoring atime change as authfs does not maintain timestamp currently");
- }
- if valid.intersects(SetattrValid::MTIME | SetattrValid::MTIME_NOW) {
- debug!("Ignoring mtime change as authfs does not maintain timestamp currently");
- }
- Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
+ _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
}
- _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
- }
+ })
}
fn getxattr(
@@ -360,29 +381,31 @@
name: &CStr,
size: u32,
) -> io::Result<GetxattrReply> {
- match self.get_file_config(&inode)? {
- FileConfig::VerifiedNew { editor } => {
- // FUSE ioctl is limited, thus we can't implement fs-verity ioctls without a kernel
- // change (see b/196635431). Until it's possible, use xattr to expose what we need
- // as an authfs specific API.
- if name != CStr::from_bytes_with_nul(b"authfs.fsverity.digest\0").unwrap() {
- return Err(io::Error::from_raw_os_error(libc::ENODATA));
- }
+ self.handle_file(&inode, |config| {
+ match config {
+ FileConfig::VerifiedNew { editor } => {
+ // FUSE ioctl is limited, thus we can't implement fs-verity ioctls without a kernel
+ // change (see b/196635431). Until it's possible, use xattr to expose what we need
+ // as an authfs specific API.
+ if name != CStr::from_bytes_with_nul(b"authfs.fsverity.digest\0").unwrap() {
+ return Err(io::Error::from_raw_os_error(libc::ENODATA));
+ }
- if size == 0 {
- // Per protocol, when size is 0, return the value size.
- Ok(GetxattrReply::Count(editor.get_fsverity_digest_size() as u32))
- } else {
- let digest = editor.calculate_fsverity_digest()?;
- if digest.len() > size as usize {
- Err(io::Error::from_raw_os_error(libc::ERANGE))
+ if size == 0 {
+ // Per protocol, when size is 0, return the value size.
+ Ok(GetxattrReply::Count(editor.get_fsverity_digest_size() as u32))
} else {
- Ok(GetxattrReply::Value(digest.to_vec()))
+ let digest = editor.calculate_fsverity_digest()?;
+ if digest.len() > size as usize {
+ Err(io::Error::from_raw_os_error(libc::ERANGE))
+ } else {
+ Ok(GetxattrReply::Value(digest.to_vec()))
+ }
}
}
+ _ => Err(io::Error::from_raw_os_error(libc::ENODATA)),
}
- _ => Err(io::Error::from_raw_os_error(libc::ENODATA)),
- }
+ })
}
}
diff --git a/authfs/tests/open_then_run.rs b/authfs/tests/open_then_run.rs
index ba3ed38..a540f9d 100644
--- a/authfs/tests/open_then_run.rs
+++ b/authfs/tests/open_then_run.rs
@@ -118,7 +118,7 @@
})?;
let dir_files = parse_and_create_file_mapping(matches.values_of("open-dir"), |path| {
- Dir::open(path, OFlag::O_DIRECTORY | OFlag::O_RDWR, Mode::S_IRWXU)
+ Dir::open(path, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::S_IRWXU)
.with_context(|| format!("Open {} directory", path))
})?;
diff --git a/compos/common/Android.bp b/compos/common/Android.bp
index d8fec81..5893fd6 100644
--- a/compos/common/Android.bp
+++ b/compos/common/Android.bp
@@ -14,6 +14,7 @@
"libbinder_rpc_unstable_bindgen",
"libbinder_rs",
"liblog_rust",
+ "librustutils",
],
shared_libs: [
"libbinder_rpc_unstable",
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index e685267..af504a1 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -16,6 +16,7 @@
//! Support for starting CompOS in a VM and connecting to the service
+use crate::timeouts::timeouts;
use crate::{COMPOS_APEX_ROOT, COMPOS_DATA_ROOT, COMPOS_VSOCK_PORT};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
IVirtualMachine::IVirtualMachine,
@@ -42,7 +43,6 @@
use std::path::Path;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
-use std::time::Duration;
/// This owns an instance of the CompOS VM.
pub struct VmInstance {
@@ -107,10 +107,12 @@
let vm_state = Arc::new(VmStateMonitor::default());
let vm_state_clone = Arc::clone(&vm_state);
- vm.as_binder().link_to_death(&mut DeathRecipient::new(move || {
+ let mut death_recipient = DeathRecipient::new(move || {
vm_state_clone.set_died();
log::error!("VirtualizationService died");
- }))?;
+ });
+ // Note that dropping death_recipient cancels this, so we can't use a temporary here.
+ vm.as_binder().link_to_death(&mut death_recipient)?;
let vm_state_clone = Arc::clone(&vm_state);
let callback = BnVirtualMachineCallback::new_binder(
@@ -238,14 +240,13 @@
}
fn wait_until_ready(&self) -> Result<i32> {
- // 10s is long enough on real hardware, but it can take 90s when using nested
- // virtualization.
- // TODO(b/200924405): Reduce timeout/detect nested virtualization
let (state, result) = self
.state_ready
- .wait_timeout_while(self.mutex.lock().unwrap(), Duration::from_secs(120), |state| {
- state.cid.is_none() && !state.has_died
- })
+ .wait_timeout_while(
+ self.mutex.lock().unwrap(),
+ timeouts()?.vm_max_time_to_ready,
+ |state| state.cid.is_none() && !state.has_died,
+ )
.unwrap();
if result.timed_out() {
bail!("Timed out waiting for VM")
diff --git a/compos/common/lib.rs b/compos/common/lib.rs
index 0b84a28..4bfa81f 100644
--- a/compos/common/lib.rs
+++ b/compos/common/lib.rs
@@ -17,6 +17,7 @@
//! Common items used by CompOS server and/or clients
pub mod compos_client;
+pub mod timeouts;
/// Special CID indicating "any".
pub const VMADDR_CID_ANY: u32 = -1i32 as u32;
diff --git a/compos/common/timeouts.rs b/compos/common/timeouts.rs
new file mode 100644
index 0000000..42cfe69
--- /dev/null
+++ b/compos/common/timeouts.rs
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Timeouts for common situations, with support for longer timeouts when using nested
+//! virtualization.
+
+use anyhow::Result;
+use rustutils::system_properties;
+use std::time::Duration;
+
+/// Holder for the various timeouts we use.
+#[derive(Debug, Copy, Clone)]
+pub struct Timeouts {
+ /// Total time that odrefresh may take to perform compilation
+ pub odrefresh_max_execution_time: Duration,
+ /// Time allowed for a single compilation step run by odrefresh
+ pub odrefresh_max_child_process_time: Duration,
+ /// Time allowed for the CompOS VM to start up and become ready.
+ pub vm_max_time_to_ready: Duration,
+}
+
+/// Whether the current platform requires extra time for operations inside a VM.
+pub fn need_extra_time() -> Result<bool> {
+ // Nested virtualization is slow. Check if we are running on vsoc as a proxy for this.
+ let value = system_properties::read("ro.build.product")?;
+ Ok(value == "vsoc_x86_64" || value == "vsoc_x86")
+}
+
+/// Return the timeouts that are appropriate on the current platform.
+pub fn timeouts() -> Result<&'static Timeouts> {
+ if need_extra_time()? {
+ Ok(&EXTENDED_TIMEOUTS)
+ } else {
+ Ok(&NORMAL_TIMEOUTS)
+ }
+}
+
+/// The timeouts that we use normally.
+pub const NORMAL_TIMEOUTS: Timeouts = Timeouts {
+ // Note: the source of truth for these odrefresh timeouts is art/odrefresh/odr_config.h.
+ odrefresh_max_execution_time: Duration::from_secs(300),
+ odrefresh_max_child_process_time: Duration::from_secs(90),
+ vm_max_time_to_ready: Duration::from_secs(10),
+};
+
+/// The timeouts that we use when need_extra_time() returns true.
+pub const EXTENDED_TIMEOUTS: Timeouts = Timeouts {
+ odrefresh_max_execution_time: Duration::from_secs(480),
+ odrefresh_max_child_process_time: Duration::from_secs(150),
+ vm_max_time_to_ready: Duration::from_secs(120),
+};
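+
+// Usage note: callers pick the platform-appropriate values via `timeouts()`; for example,
+// compos_client.rs bounds its Condvar wait with `timeouts()?.vm_max_time_to_ready`, and
+// odrefresh.rs passes EXTENDED_TIMEOUTS to odrefresh's --max-*-seconds flags when extra
+// time is needed.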
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index 2a24b7a..ecfea61 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -19,7 +19,7 @@
"libcomposd_native_rust",
"libnum_traits",
"liblog_rust",
- "librustutils",
+ "libshared_child",
],
proc_macros: ["libnum_derive"],
apex_available: [
diff --git a/compos/composd/aidl/android/system/composd/ICompilationTask.aidl b/compos/composd/aidl/android/system/composd/ICompilationTask.aidl
new file mode 100644
index 0000000..ae03fcc
--- /dev/null
+++ b/compos/composd/aidl/android/system/composd/ICompilationTask.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.composd;
+
+/**
+ * Represents a compilation in process.
+ */
+interface ICompilationTask {
+ /**
+ * Attempts to cancel compilation. If successful, compilation will end and no further success
+ * or failure callbacks will be received (although any already in flight may still be delivered).
+ */
+ void cancel();
+}
diff --git a/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
new file mode 100644
index 0000000..a9d41b8
--- /dev/null
+++ b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.composd;
+
+/**
+ * Interface to be implemented by clients of IIsolatedCompilationService to be notified when a
+ * requested compilation task completes.
+ */
+interface ICompilationTaskCallback {
+ /**
+ * Called if a compilation task has ended successfully, generating all the required artifacts.
+ */
+ void onSuccess();
+
+ /**
+ * Called if a compilation task has ended unsuccessfully.
+ */
+ void onFailure();
+}
diff --git a/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl b/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
index 3d0ad31..3d28894 100644
--- a/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
+++ b/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
@@ -15,6 +15,8 @@
*/
package android.system.composd;
+import android.system.composd.ICompilationTask;
+import android.system.composd.ICompilationTaskCallback;
import com.android.compos.CompilationResult;
import com.android.compos.FdAnnotation;
@@ -24,8 +26,11 @@
* This compiles BCP extensions and system server, even if the system artifacts are up to date,
* and writes the results to a test directory to avoid disrupting any real artifacts in
* existence.
+ * Compilation continues in the background, and success/failure is reported via the supplied
+ * callback, unless the returned ICompilationTask is cancelled. The caller should maintain
+ * a reference to the ICompilationTask until compilation completes or is cancelled.
*/
- void runForcedCompileForTest();
+ ICompilationTask startTestCompile(ICompilationTaskCallback callback);
/**
* Run dex2oat in the currently running instance of the CompOS VM. This is a simple proxy
diff --git a/compos/composd/src/compilation_task.rs b/compos/composd/src/compilation_task.rs
new file mode 100644
index 0000000..c4eed52
--- /dev/null
+++ b/compos/composd/src/compilation_task.rs
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::instance_starter::CompOsInstance;
+use crate::odrefresh::{self, Odrefresh};
+use android_system_composd::aidl::android::system::composd::{
+ ICompilationTask::ICompilationTask, ICompilationTaskCallback::ICompilationTaskCallback,
+};
+use android_system_composd::binder::{Interface, Result as BinderResult, Strong};
+use anyhow::Result;
+use log::{error, warn};
+use std::sync::{Arc, Mutex};
+use std::thread;
+
+#[derive(Clone)]
+pub struct CompilationTask {
+ running_task: Arc<Mutex<Option<RunningTask>>>,
+}
+
+impl Interface for CompilationTask {}
+
+impl ICompilationTask for CompilationTask {
+ fn cancel(&self) -> BinderResult<()> {
+ let task = self.take();
+ if let Some(task) = task {
+ if let Err(e) = task.odrefresh.kill() {
+ warn!("Failed to kill running task: {:?}", e)
+ }
+ }
+ Ok(())
+ }
+}
+
+impl CompilationTask {
+ /// Return the current running task, if any, removing it from this CompilationTask.
+ /// Once removed, meaning the task has ended or been canceled, further calls will always return
+ /// None.
+ fn take(&self) -> Option<RunningTask> {
+ self.running_task.lock().unwrap().take()
+ }
+
+ pub fn start_test_compile(
+ comp_os: Arc<CompOsInstance>,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> Result<CompilationTask> {
+ let odrefresh = Odrefresh::spawn_forced_compile("test-artifacts")?;
+ let odrefresh = Arc::new(odrefresh);
+ let task =
+ RunningTask { odrefresh: odrefresh.clone(), comp_os, callback: callback.clone() };
+ let task = CompilationTask { running_task: Arc::new(Mutex::new(Some(task))) };
+
+ task.clone().start_waiting_thread(odrefresh);
+
+ Ok(task)
+ }
+
+ fn start_waiting_thread(self, odrefresh: Arc<Odrefresh>) {
+ thread::spawn(move || {
+ let exit_code = odrefresh.wait_for_exit();
+ let task = self.take();
+ // We don't do the callback if cancel has already happened.
+ if let Some(task) = task {
+ let result = match exit_code {
+ Ok(odrefresh::ExitCode::CompilationSuccess) => task.callback.onSuccess(),
+ Ok(exit_code) => {
+ error!("Unexpected odrefresh result: {:?}", exit_code);
+ task.callback.onFailure()
+ }
+ Err(e) => {
+ error!("Running odrefresh failed: {:?}", e);
+ task.callback.onFailure()
+ }
+ };
+ if let Err(e) = result {
+ warn!("Failed to deliver callback: {:?}", e);
+ }
+ }
+ });
+ }
+}
+
+struct RunningTask {
+ odrefresh: Arc<Odrefresh>,
+ callback: Strong<dyn ICompilationTaskCallback>,
+ #[allow(dead_code)] // Keeps the CompOS VM alive
+ comp_os: Arc<CompOsInstance>,
+}
diff --git a/compos/composd/src/composd_main.rs b/compos/composd/src/composd_main.rs
index 60aeb39..671ed16 100644
--- a/compos/composd/src/composd_main.rs
+++ b/compos/composd/src/composd_main.rs
@@ -18,10 +18,12 @@
//! responsible for managing the lifecycle of the CompOS VM instances, providing key management for
//! them, and orchestrating trusted compilation.
+mod compilation_task;
mod instance_manager;
mod instance_starter;
mod odrefresh;
mod service;
+mod util;
use crate::instance_manager::InstanceManager;
use android_system_composd::binder::{register_lazy_service, ProcessState};
diff --git a/compos/composd/src/instance_starter.rs b/compos/composd/src/instance_starter.rs
index 1a6e592..3959859 100644
--- a/compos/composd/src/instance_starter.rs
+++ b/compos/composd/src/instance_starter.rs
@@ -21,6 +21,7 @@
IVirtualizationService::IVirtualizationService, PartitionType::PartitionType,
};
use anyhow::{bail, Context, Result};
+use binder_common::lazy_service::LazyServiceGuard;
use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
use compos_aidl_interface::binder::{ParcelFileDescriptor, Strong};
use compos_common::compos_client::{VmInstance, VmParameters};
@@ -33,9 +34,11 @@
use std::path::{Path, PathBuf};
pub struct CompOsInstance {
+ service: Strong<dyn ICompOsService>,
#[allow(dead_code)] // Keeps VirtualizationService & the VM alive
vm_instance: VmInstance,
- service: Strong<dyn ICompOsService>,
+ #[allow(dead_code)] // Keeps composd process alive
+ lazy_service_guard: LazyServiceGuard,
}
impl CompOsInstance {
@@ -167,7 +170,7 @@
VmInstance::start(virtualization_service, instance_image, &self.vm_parameters)
.context("Starting VM")?;
let service = vm_instance.get_service().context("Connecting to CompOS")?;
- Ok(CompOsInstance { vm_instance, service })
+ Ok(CompOsInstance { vm_instance, service, lazy_service_guard: Default::default() })
}
fn create_instance_image(
diff --git a/compos/composd/src/odrefresh.rs b/compos/composd/src/odrefresh.rs
index 8c3febf..16dcb0f 100644
--- a/compos/composd/src/odrefresh.rs
+++ b/compos/composd/src/odrefresh.rs
@@ -17,10 +17,11 @@
//! Handle the details of executing odrefresh to generate compiled artifacts.
use anyhow::{bail, Context, Result};
+use compos_common::timeouts::{need_extra_time, EXTENDED_TIMEOUTS};
use compos_common::VMADDR_CID_ANY;
use num_derive::FromPrimitive;
use num_traits::FromPrimitive;
-use rustutils::system_properties;
+use shared_child::SharedChild;
use std::process::Command;
// TODO: What if this changes?
@@ -38,30 +39,44 @@
CleanupFailed = EX_MAX + 4,
}
-fn need_extra_time() -> Result<bool> {
- // Special case to add more time in nested VM
- let value = system_properties::read("ro.build.product")?;
- Ok(value == "vsoc_x86_64" || value == "vsoc_x86")
+pub struct Odrefresh {
+ child: SharedChild,
}
-pub fn run_forced_compile(target_dir: &str) -> Result<ExitCode> {
- // We don`t need to capture stdout/stderr - odrefresh writes to the log
- let mut cmdline = Command::new(ODREFRESH_BIN);
- if need_extra_time()? {
- cmdline.arg("--max-execution-seconds=480").arg("--max-child-process-seconds=150");
+impl Odrefresh {
+ pub fn spawn_forced_compile(target_dir: &str) -> Result<Self> {
+ // We don't need to capture stdout/stderr - odrefresh writes to the log
+ let mut cmdline = Command::new(ODREFRESH_BIN);
+ if need_extra_time()? {
+ cmdline
+ .arg(format!(
+ "--max-execution-seconds={}",
+ EXTENDED_TIMEOUTS.odrefresh_max_execution_time.as_secs()
+ ))
+ .arg(format!(
+ "--max-child-process-seconds={}",
+ EXTENDED_TIMEOUTS.odrefresh_max_child_process_time.as_secs()
+ ));
+ }
+ cmdline
+ .arg(format!("--use-compilation-os={}", VMADDR_CID_ANY as i32))
+ .arg(format!("--dalvik-cache={}", target_dir))
+ .arg("--force-compile");
+ let child = SharedChild::spawn(&mut cmdline).context("Running odrefresh")?;
+ Ok(Odrefresh { child })
}
- cmdline
- .arg(format!("--use-compilation-os={}", VMADDR_CID_ANY as i32))
- .arg(format!("--dalvik-cache={}", target_dir))
- .arg("--force-compile");
- let mut odrefresh = cmdline.spawn().context("Running odrefresh")?;
- // TODO: timeout?
- let status = odrefresh.wait()?;
+ pub fn wait_for_exit(&self) -> Result<ExitCode> {
+ // No timeout here - but clients can kill the process, which will end the wait.
+ let status = self.child.wait()?;
+ if let Some(exit_code) = status.code().and_then(FromPrimitive::from_i32) {
+ Ok(exit_code)
+ } else {
+ bail!("odrefresh exited with {}", status)
+ }
+ }
- if let Some(exit_code) = status.code().and_then(FromPrimitive::from_i32) {
- Ok(exit_code)
- } else {
- bail!("odrefresh exited with {}", status)
+ pub fn kill(&self) -> Result<()> {
+ self.child.kill().context("Killing odrefresh process failed")
}
}
diff --git a/compos/composd/src/service.rs b/compos/composd/src/service.rs
index d3b73a1..351eae9 100644
--- a/compos/composd/src/service.rs
+++ b/compos/composd/src/service.rs
@@ -17,18 +17,20 @@
//! Implementation of IIsolatedCompilationService, called from system server when compilation is
//! desired.
+use crate::compilation_task::CompilationTask;
use crate::instance_manager::InstanceManager;
-use crate::odrefresh;
-use android_system_composd::aidl::android::system::composd::IIsolatedCompilationService::{
- BnIsolatedCompilationService, IIsolatedCompilationService,
+use crate::util::to_binder_result;
+use android_system_composd::aidl::android::system::composd::{
+ ICompilationTask::{BnCompilationTask, ICompilationTask},
+ ICompilationTaskCallback::ICompilationTaskCallback,
+ IIsolatedCompilationService::{BnIsolatedCompilationService, IIsolatedCompilationService},
};
use android_system_composd::binder::{self, BinderFeatures, Interface, Strong};
-use anyhow::{bail, Context, Result};
+use anyhow::{Context, Result};
use binder_common::new_binder_service_specific_error;
use compos_aidl_interface::aidl::com::android::compos::{
CompilationResult::CompilationResult, FdAnnotation::FdAnnotation,
};
-use log::{error, info};
pub struct IsolatedCompilationService {
instance_manager: InstanceManager,
@@ -42,9 +44,12 @@
impl Interface for IsolatedCompilationService {}
impl IIsolatedCompilationService for IsolatedCompilationService {
- fn runForcedCompileForTest(&self) -> binder::Result<()> {
+ fn startTestCompile(
+ &self,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> binder::Result<Strong<dyn ICompilationTask>> {
// TODO - check caller is system or shell/root?
- to_binder_result(self.do_run_forced_compile_for_test())
+ to_binder_result(self.do_start_test_compile(callback))
}
fn compile_cmd(
@@ -53,7 +58,7 @@
fd_annotation: &FdAnnotation,
) -> binder::Result<CompilationResult> {
// TODO - check caller is odrefresh
- to_binder_result(self.do_compile(args, fd_annotation))
+ to_binder_result(self.do_compile_cmd(args, fd_annotation))
}
fn compile(&self, _marshaled: &[u8], _fd_annotation: &FdAnnotation) -> binder::Result<i8> {
@@ -61,33 +66,19 @@
}
}
-fn to_binder_result<T>(result: Result<T>) -> binder::Result<T> {
- result.map_err(|e| {
- let message = format!("{:?}", e);
- error!("Returning binder error: {}", &message);
- new_binder_service_specific_error(-1, message)
- })
-}
-
impl IsolatedCompilationService {
- fn do_run_forced_compile_for_test(&self) -> Result<()> {
- info!("runForcedCompileForTest");
-
+ fn do_start_test_compile(
+ &self,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> Result<Strong<dyn ICompilationTask>> {
let comp_os = self.instance_manager.start_test_instance().context("Starting CompOS")?;
- let exit_code = odrefresh::run_forced_compile("test-artifacts")?;
+ let task = CompilationTask::start_test_compile(comp_os, callback)?;
- if exit_code != odrefresh::ExitCode::CompilationSuccess {
- bail!("Unexpected odrefresh result: {:?}", exit_code);
- }
-
- // The instance is needed until odrefresh is finished
- drop(comp_os);
-
- Ok(())
+ Ok(BnCompilationTask::new_binder(task, BinderFeatures::default()))
}
- fn do_compile(
+ fn do_compile_cmd(
&self,
args: &[String],
fd_annotation: &FdAnnotation,
diff --git a/compos/composd/src/util.rs b/compos/composd/src/util.rs
new file mode 100644
index 0000000..091fb15
--- /dev/null
+++ b/compos/composd/src/util.rs
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use android_system_composd::binder::Result as BinderResult;
+use anyhow::Result;
+use binder_common::new_binder_service_specific_error;
+use log::error;
+
+pub fn to_binder_result<T>(result: Result<T>) -> BinderResult<T> {
+ result.map_err(|e| {
+ let message = format!("{:?}", e);
+ error!("Returning binder error: {}", &message);
+ new_binder_service_specific_error(-1, message)
+ })
+}
diff --git a/compos/composd_cmd/Android.bp b/compos/composd_cmd/Android.bp
index 0081a0d..c230e13 100644
--- a/compos/composd_cmd/Android.bp
+++ b/compos/composd_cmd/Android.bp
@@ -11,6 +11,7 @@
"libanyhow",
"libbinder_rs",
"libclap",
+ "libcompos_common",
],
prefer_rlib: true,
apex_available: [
diff --git a/compos/composd_cmd/composd_cmd.rs b/compos/composd_cmd/composd_cmd.rs
index 04398c0..0422b44 100644
--- a/compos/composd_cmd/composd_cmd.rs
+++ b/compos/composd_cmd/composd_cmd.rs
@@ -17,10 +17,19 @@
//! Simple command-line tool to drive composd for testing and debugging.
use android_system_composd::{
- aidl::android::system::composd::IIsolatedCompilationService::IIsolatedCompilationService,
- binder::{wait_for_interface, ProcessState},
+ aidl::android::system::composd::{
+ ICompilationTaskCallback::{BnCompilationTaskCallback, ICompilationTaskCallback},
+ IIsolatedCompilationService::IIsolatedCompilationService,
+ },
+ binder::{
+ wait_for_interface, BinderFeatures, DeathRecipient, IBinder, Interface, ProcessState,
+ Result as BinderResult,
+ },
};
-use anyhow::{Context, Result};
+use anyhow::{bail, Context, Result};
+use compos_common::timeouts::timeouts;
+use std::sync::{Arc, Condvar, Mutex};
+use std::time::Duration;
fn main() -> Result<()> {
let app = clap::App::new("composd_cmd").arg(
@@ -35,11 +44,8 @@
ProcessState::start_thread_pool();
- let service = wait_for_interface::<dyn IIsolatedCompilationService>("android.system.composd")
- .context("Failed to connect to composd service")?;
-
match command {
- "forced-compile-test" => service.runForcedCompileForTest().context("Compilation failed")?,
+ "forced-compile-test" => run_forced_compile_for_test()?,
_ => panic!("Unexpected command {}", command),
}
@@ -47,3 +53,85 @@
Ok(())
}
+
+struct Callback(Arc<State>);
+
+#[derive(Default)]
+struct State {
+ mutex: Mutex<Option<Outcome>>,
+ completed: Condvar,
+}
+
+#[derive(Copy, Clone)]
+enum Outcome {
+ Succeeded,
+ Failed,
+}
+
+impl Interface for Callback {}
+
+impl ICompilationTaskCallback for Callback {
+ fn onSuccess(&self) -> BinderResult<()> {
+ self.0.set_outcome(Outcome::Succeeded);
+ Ok(())
+ }
+
+ fn onFailure(&self) -> BinderResult<()> {
+ self.0.set_outcome(Outcome::Failed);
+ Ok(())
+ }
+}
+
+impl State {
+ fn set_outcome(&self, outcome: Outcome) {
+ let mut guard = self.mutex.lock().unwrap();
+ *guard = Some(outcome);
+ drop(guard);
+ self.completed.notify_all();
+ }
+
+ fn wait(&self, duration: Duration) -> Result<Outcome> {
+ let (outcome, result) = self
+ .completed
+ .wait_timeout_while(self.mutex.lock().unwrap(), duration, |outcome| outcome.is_none())
+ .unwrap();
+ if result.timed_out() {
+ bail!("Timed out waiting for compilation")
+ }
+ Ok(outcome.unwrap())
+ }
+}
+
+fn run_forced_compile_for_test() -> Result<()> {
+ let service = wait_for_interface::<dyn IIsolatedCompilationService>("android.system.composd")
+ .context("Failed to connect to composd service")?;
+
+ let state = Arc::new(State::default());
+ let callback = Callback(state.clone());
+ let callback = BnCompilationTaskCallback::new_binder(callback, BinderFeatures::default());
+ let task = service.startTestCompile(&callback).context("Compilation failed")?;
+
+ // Make sure composd keeps going even if we don't hold a reference to its service.
+ drop(service);
+
+ let state_clone = state.clone();
+ let mut death_recipient = DeathRecipient::new(move || {
+ eprintln!("CompilationTask died");
+ state_clone.set_outcome(Outcome::Failed);
+ });
+ // Note that dropping death_recipient cancels this, so we can't use a temporary here.
+ task.as_binder().link_to_death(&mut death_recipient)?;
+
+ println!("Waiting");
+
+ match state.wait(timeouts()?.odrefresh_max_execution_time) {
+ Ok(Outcome::Succeeded) => Ok(()),
+ Ok(Outcome::Failed) => bail!("Compilation failed"),
+ Err(e) => {
+ if let Err(e) = task.cancel() {
+ eprintln!("Failed to cancel compilation: {:?}", e);
+ }
+ Err(e)
+ }
+ }
+}