Merge "Revert "Create SDK of CompOS module""
diff --git a/TEST_MAPPING b/TEST_MAPPING
index b805d03..b07dc3b 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -8,6 +8,9 @@
},
{
"name": "VirtualizationTestCases"
+ },
+ {
+ "name": "MicrodroidTestApp"
}
],
"imports": [
diff --git a/apex/Android.bp b/apex/Android.bp
index 19f5428..4775f0c 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -2,6 +2,14 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
+microdroid_filesystem_images = [
+ "microdroid_super",
+ "microdroid_boot-5.10",
+ "microdroid_vendor_boot-5.10",
+ "microdroid_vbmeta",
+ "microdroid_vbmeta_bootconfig",
+]
+
apex {
name: "com.android.virt",
@@ -13,6 +21,7 @@
key: "com.android.virt.key",
certificate: ":com.android.virt.certificate",
+ custom_sign_tool: "sign_virt_apex",
// crosvm and virtualizationservice are only enabled for 64-bit targets on device
arch: {
@@ -21,26 +30,14 @@
"crosvm",
"virtualizationservice",
],
- filesystems: [
- "microdroid_super",
- "microdroid_boot-5.10",
- "microdroid_vendor_boot-5.10",
- "microdroid_vbmeta",
- "microdroid_vbmeta_system",
- ],
+ filesystems: microdroid_filesystem_images,
},
x86_64: {
binaries: [
"crosvm",
"virtualizationservice",
],
- filesystems: [
- "microdroid_super",
- "microdroid_boot-5.10",
- "microdroid_vendor_boot-5.10",
- "microdroid_vbmeta",
- "microdroid_vbmeta_system",
- ],
+ filesystems: microdroid_filesystem_images,
},
},
binaries: [
@@ -61,6 +58,7 @@
"microdroid.json",
"microdroid_uboot_env",
"microdroid_bootloader",
+ "microdroid_bootloader.avbpubkey",
"microdroid_bootconfig_normal",
"microdroid_bootconfig_app_debuggable",
"microdroid_bootconfig_full_debuggable",
@@ -85,3 +83,78 @@
filename: "init.rc",
installable: false,
}
+
+// Virt apex needs a custom signer for its payload
+python_binary_host {
+ name: "sign_virt_apex",
+ srcs: [
+ "sign_virt_apex.py",
+ ],
+ version: {
+ py2: {
+ enabled: false,
+ },
+ py3: {
+ enabled: true,
+ embedded_launcher: true,
+ },
+ },
+ required: [
+ "img2simg",
+ "lpmake",
+ "lpunpack",
+ "simg2img",
+ ],
+}
+
+sh_test_host {
+ name: "sign_virt_apex_test",
+ src: "sign_virt_apex_test.sh",
+ test_config: "sign_virt_apex_test.xml",
+ data_bins: [
+ // deapexer
+ "deapexer",
+ "debugfs_static",
+
+ // sign_virt_apex
+ "avbtool",
+ "img2simg",
+ "lpmake",
+ "lpunpack",
+ "sign_virt_apex",
+ "simg2img",
+ ],
+ data_libs: [
+ "libbase",
+ "libc++",
+ "libcrypto_utils",
+ "libcrypto",
+ "libext4_utils",
+ "liblog",
+ "liblp",
+ "libsparse",
+ "libz",
+ ],
+ data: [
+ ":com.android.virt",
+ "test.com.android.virt.pem",
+ ],
+ test_suites: ["general-tests"],
+}
+
+// custom tool to replace bytes in a file
+python_binary_host {
+ name: "replace_bytes",
+ srcs: [
+ "replace_bytes.py",
+ ],
+ version: {
+ py2: {
+ enabled: false,
+ },
+ py3: {
+ enabled: true,
+ embedded_launcher: true,
+ },
+ },
+}
diff --git a/apex/replace_bytes.py b/apex/replace_bytes.py
new file mode 100644
index 0000000..b22f132
--- /dev/null
+++ b/apex/replace_bytes.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""replace_bytes is a command line tool to replace bytes in a file.
+
+Typical usage: replace_bytes target_file old_file new_file
+
+ replace bytes of old_file with bytes of new_file in target_file. old_file and new_file should be
+ the same size.
+
+"""
+import argparse
+import sys
+
+
+def ParseArgs(argv):
+ parser = argparse.ArgumentParser(description='Replace bytes')
+ parser.add_argument(
+ 'target_file',
+ help='path to the target file.')
+ parser.add_argument(
+ 'old_file',
+ help='path to the file containing old bytes')
+ parser.add_argument(
+ 'new_file',
+ help='path to the file containing new bytes')
+ return parser.parse_args(argv)
+
+
+def ReplaceBytes(target_file, old_file, new_file):
+ # read old bytes
+ with open(old_file, 'rb') as f:
+ old_bytes = f.read()
+
+ # read new bytes
+ with open(new_file, 'rb') as f:
+ new_bytes = f.read()
+
+ assert len(old_bytes) == len(new_bytes), 'Pubkeys should be the same size. (%d != %d)' % (
+ len(old_bytes), len(new_bytes))
+
+ # replace bytes in target_file
+ with open(target_file, 'r+b') as f:
+ pos = f.read().find(old_bytes)
+ assert pos != -1, 'Pubkey not found'
+ f.seek(pos)
+ f.write(new_bytes)
+
+
+def main(argv):
+ try:
+ args = ParseArgs(argv)
+ ReplaceBytes(args.target_file, args.old_file, args.new_file)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
new file mode 100644
index 0000000..153b5dd
--- /dev/null
+++ b/apex/sign_virt_apex.py
@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""sign_virt_apex is a command line tool for sign the Virt APEX file.
+
+Typical usage: sign_virt_apex [-v] [--avbtool path_to_avbtool] path_to_key payload_contents_dir
+
+sign_virt_apex uses external tools which are assumed to be available via PATH.
+- avbtool (--avbtool can override the tool)
+- lpmake, lpunpack, simg2img, img2simg
+"""
+import argparse
+import glob
+import hashlib
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+
+
+def ParseArgs(argv):
+ parser = argparse.ArgumentParser(description='Sign the Virt APEX')
+ parser.add_argument('--verify', action='store_true',
+ help='Verify the Virt APEX')
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help='verbose execution')
+ parser.add_argument(
+ '--avbtool',
+ default='avbtool',
+ help='Optional flag that specifies the AVB tool to use. Defaults to `avbtool`.')
+ parser.add_argument(
+ 'key',
+ help='path to the private key file.')
+ parser.add_argument(
+ 'input_dir',
+ help='the directory having files to be packaged')
+ return parser.parse_args(argv)
+
+
+def RunCommand(args, cmd, env=None, expected_return_values={0}):
+ env = env or {}
+ env.update(os.environ.copy())
+
+ # TODO(b/193504286): we need a way to find other tool (cmd[0]) in various contexts
+ # e.g. sign_apex.py, sign_target_files_apk.py
+ if cmd[0] == 'avbtool':
+ cmd[0] = args.avbtool
+
+ if args.verbose:
+ print('Running: ' + ' '.join(cmd))
+ p = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, universal_newlines=True)
+ output, _ = p.communicate()
+
+ if args.verbose or p.returncode not in expected_return_values:
+ print(output.rstrip())
+
+ assert p.returncode in expected_return_values, (
+ '%d Failed to execute: ' + ' '.join(cmd)) % p.returncode
+ return (output, p.returncode)
+
+
+def ReadBytesSize(value):
+ return int(value.removesuffix(' bytes'))
+
+
+def ExtractAvbPubkey(args, key, output):
+ RunCommand(args, ['avbtool', 'extract_public_key',
+ '--key', key, '--output', output])
+
+
+def AvbInfo(args, image_path):
+ """Parses avbtool --info image output
+
+ Args:
+ args: program arguments.
+ image_path: The path to the image.
+
+ Returns:
+ A pair of
+ - a dict that contains VBMeta info. None if there's no VBMeta info.
+ - a list of descriptors.
+ """
+ if not os.path.exists(image_path):
+ raise ValueError('Failed to find image: {}'.format(image_path))
+
+ output, ret_code = RunCommand(
+ args, ['avbtool', 'info_image', '--image', image_path], expected_return_values={0, 1})
+ if ret_code == 1:
+ return None, None
+
+ info, descriptors = {}, []
+
+ # Read `avbtool info_image` output as "key:value" lines
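+ # For example (illustrative; actual avbtool output may differ slightly), output like
+ #   Algorithm:                SHA256_RSA4096
+ #   Rollback Index:           0
+ #   Descriptors:
+ #       Hash descriptor:
+ #         Partition Name:        boot
+ # parses into info['Algorithm'], info['Rollback Index'], and a ('Hash descriptor',
+ # {'Partition Name': 'boot', ...}) entry in descriptors, grouped by leading indentation.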
+ matcher = re.compile(r'^(\s*)([^:]+):\s*(.*)$')
+
+ def IterateLine(output):
+ for line in output.split('\n'):
+ line_info = matcher.match(line)
+ if not line_info:
+ continue
+ yield line_info.group(1), line_info.group(2), line_info.group(3)
+
+ gen = IterateLine(output)
+
+ def ReadDescriptors(cur_indent, cur_name, cur_value):
+ descriptor = cur_value if cur_name == 'Prop' else {}
+ descriptors.append((cur_name, descriptor))
+ for indent, key, value in gen:
+ if indent <= cur_indent:
+ # read descriptors recursively to pass the read key as descriptor name
+ ReadDescriptors(indent, key, value)
+ break
+ descriptor[key] = value
+
+ # Read VBMeta info
+ for _, key, value in gen:
+ if key == 'Descriptors':
+ ReadDescriptors(*next(gen))
+ break
+ info[key] = value
+
+ return info, descriptors
+
+
+# Look up a list of (key, value) with a key. Returns the value of the first matching pair.
+def LookUp(pairs, key):
+ for k, v in pairs:
+ if key == k:
+ return v
+ return None
+
+
+def AddHashFooter(args, key, image_path):
+ info, descriptors = AvbInfo(args, image_path)
+ if info:
+ descriptor = LookUp(descriptors, 'Hash descriptor')
+ image_size = ReadBytesSize(info['Image size'])
+ algorithm = info['Algorithm']
+ partition_name = descriptor['Partition Name']
+ partition_size = str(image_size)
+
+ cmd = ['avbtool', 'add_hash_footer',
+ '--key', key,
+ '--algorithm', algorithm,
+ '--partition_name', partition_name,
+ '--partition_size', partition_size,
+ '--image', image_path]
+ RunCommand(args, cmd)
+
+
+def AddHashTreeFooter(args, key, image_path):
+ info, descriptors = AvbInfo(args, image_path)
+ if info:
+ descriptor = LookUp(descriptors, 'Hashtree descriptor')
+ image_size = ReadBytesSize(info['Image size'])
+ algorithm = info['Algorithm']
+ partition_name = descriptor['Partition Name']
+ partition_size = str(image_size)
+
+ cmd = ['avbtool', 'add_hashtree_footer',
+ '--key', key,
+ '--algorithm', algorithm,
+ '--partition_name', partition_name,
+ '--partition_size', partition_size,
+ '--do_not_generate_fec',
+ '--image', image_path]
+ RunCommand(args, cmd)
+
+
+def MakeVbmetaImage(args, key, vbmeta_img, images=None, chained_partitions=None):
+ info, descriptors = AvbInfo(args, vbmeta_img)
+ if info is None:
+ return
+
+ with TempDirectory() as work_dir:
+ algorithm = info['Algorithm']
+ rollback_index = info['Rollback Index']
+ rollback_index_location = info['Rollback Index Location']
+
+ cmd = ['avbtool', 'make_vbmeta_image',
+ '--key', key,
+ '--algorithm', algorithm,
+ '--rollback_index', rollback_index,
+ '--rollback_index_location', rollback_index_location,
+ '--output', vbmeta_img]
+ if images:
+ for img in images:
+ cmd.extend(['--include_descriptors_from_image', img])
+
+ # replace pubkeys of chained_partitions as well
+ for name, descriptor in descriptors:
+ if name == 'Chain Partition descriptor':
+ part_name = descriptor['Partition Name']
+ ril = descriptor['Rollback Index Location']
+ part_key = chained_partitions[part_name]
+ avbpubkey = os.path.join(work_dir, part_name + '.avbpubkey')
+ ExtractAvbPubkey(args, part_key, avbpubkey)
+ cmd.extend(['--chain_partition', '%s:%s:%s' %
+ (part_name, ril, avbpubkey)])
+
+ RunCommand(args, cmd)
+ # libavb expects to be able to read the maximum vbmeta size, so we must provide a partition
+ # which matches this or the read will fail.
+ RunCommand(args, ['truncate', '-s', '65536', vbmeta_img])
+
+
+class TempDirectory(object):
+
+ def __enter__(self):
+ self.name = tempfile.mkdtemp()
+ return self.name
+
+ def __exit__(self, *unused):
+ shutil.rmtree(self.name)
+
+
+def MakeSuperImage(args, partitions, output):
+ with TempDirectory() as work_dir:
+ cmd = ['lpmake', '--device-size=auto', '--metadata-slots=2', # A/B
+ '--metadata-size=65536', '--sparse', '--output=' + output]
+
+ for part, img in partitions.items():
+ tmp_img = os.path.join(work_dir, part)
+ RunCommand(args, ['img2simg', img, tmp_img])
+
+ image_arg = '--image=%s=%s' % (part, img)
+ partition_arg = '--partition=%s:readonly:%d:default' % (
+ part, os.path.getsize(img))
+ cmd.extend([image_arg, partition_arg])
+
+ RunCommand(args, cmd)
+
+
+def ReplaceBootloaderPubkey(args, key, bootloader, bootloader_pubkey):
+ # read old pubkey before replacement
+ with open(bootloader_pubkey, 'rb') as f:
+ old_pubkey = f.read()
+
+ # replace bootloader pubkey (overwrite the old one with the new one)
+ ExtractAvbPubkey(args, key, bootloader_pubkey)
+
+ # read new pubkey
+ with open(bootloader_pubkey, 'rb') as f:
+ new_pubkey = f.read()
+
+ assert len(old_pubkey) == len(new_pubkey)
+
+ # replace pubkey embedded in bootloader
+ with open(bootloader, 'r+b') as bl_f:
+ pos = bl_f.read().find(old_pubkey)
+ assert pos != -1
+ bl_f.seek(pos)
+ bl_f.write(new_pubkey)
+
+
+def SignVirtApex(args):
+ key = args.key
+ input_dir = args.input_dir
+
+ # target files in the Virt APEX
+ bootloader_pubkey = os.path.join(
+ input_dir, 'etc', 'microdroid_bootloader.avbpubkey')
+ bootloader = os.path.join(input_dir, 'etc', 'microdroid_bootloader')
+ boot_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_boot-5.10.img')
+ vendor_boot_img = os.path.join(
+ input_dir, 'etc', 'fs', 'microdroid_vendor_boot-5.10.img')
+ super_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_super.img')
+ vbmeta_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_vbmeta.img')
+ vbmeta_bootconfig_img = os.path.join(
+ input_dir, 'etc', 'fs', 'microdroid_vbmeta_bootconfig.img')
+ bootconfig_normal = os.path.join(
+ input_dir, 'etc', 'microdroid_bootconfig.normal')
+ bootconfig_app_debuggable = os.path.join(
+ input_dir, 'etc', 'microdroid_bootconfig.app_debuggable')
+ bootconfig_full_debuggable = os.path.join(
+ input_dir, 'etc', 'microdroid_bootconfig.full_debuggable')
+
+ # The key (pubkey) for the bootloader should match the one used to make the vbmeta image below,
+ # while it's okay to use different keys for other image files.
+ ReplaceBootloaderPubkey(args, key, bootloader, bootloader_pubkey)
+
+ # re-sign bootloader, boot.img, vendor_boot.img
+ AddHashFooter(args, key, bootloader)
+ AddHashFooter(args, key, boot_img)
+ AddHashFooter(args, key, vendor_boot_img)
+
+ # re-sign super.img
+ with TempDirectory() as work_dir:
+ # unpack super.img
+ tmp_super_img = os.path.join(work_dir, 'super.img')
+ RunCommand(args, ['simg2img', super_img, tmp_super_img])
+ RunCommand(args, ['lpunpack', tmp_super_img, work_dir])
+
+ system_a_img = os.path.join(work_dir, 'system_a.img')
+ vendor_a_img = os.path.join(work_dir, 'vendor_a.img')
+ partitions = {"system_a": system_a_img, "vendor_a": vendor_a_img}
+
+ # re-sign partitions in super.img
+ for img in partitions.values():
+ AddHashTreeFooter(args, key, img)
+
+ # re-pack super.img
+ MakeSuperImage(args, partitions, super_img)
+
+ # re-generate vbmeta from re-signed {boot, vendor_boot, system_a, vendor_a}.img
+ # Ideally, making vbmeta would be done outside this TempDirectory block, but doing it here
+ # avoids having to unpack the re-signed super.img again for the system/vendor images, which
+ # are still available in this block.
+ MakeVbmetaImage(args, key, vbmeta_img, images=[
+ boot_img, vendor_boot_img, system_a_img, vendor_a_img])
+
+ # Re-sign bootconfigs with the same key
+ bootconfig_sign_key = key
+ AddHashFooter(args, bootconfig_sign_key, bootconfig_normal)
+ AddHashFooter(args, bootconfig_sign_key, bootconfig_app_debuggable)
+ AddHashFooter(args, bootconfig_sign_key, bootconfig_full_debuggable)
+
+ # Re-sign vbmeta_bootconfig with a chained_partition to "bootconfig"
+ # Note that, for now, `key` and `bootconfig_sign_key` are the same, but technically they
+ # can be different. Vbmeta records the pubkeys that signed the chained partitions.
+ MakeVbmetaImage(args, key, vbmeta_bootconfig_img, chained_partitions={
+ 'bootconfig': bootconfig_sign_key})
+
+
+def VerifyVirtApex(args):
+ # Generator to emit avbtool-signed items along with their pubkey digests.
+ # This supports lpmake-packed images as well.
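+ # For example (illustrative), this may yield entries like
+ #   ('etc/microdroid_bootloader', '<sha1 of pubkey>')
+ # and, for images unpacked from an lpmake-packed image, items in the
+ # '<outer image>!/<inner item>' form.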
+ def Recur(target_dir):
+ for file in glob.glob(os.path.join(target_dir, 'etc', '**', '*'), recursive=True):
+ cur_item = os.path.relpath(file, target_dir)
+
+ if not os.path.isfile(file):
+ continue
+
+ # avbpubkey
+ if cur_item == 'etc/microdroid_bootloader.avbpubkey':
+ with open(file, 'rb') as f:
+ yield (cur_item, hashlib.sha1(f.read()).hexdigest())
+ continue
+
+ # avbtool signed
+ info, _ = AvbInfo(args, file)
+ if info:
+ yield (cur_item, info['Public key (sha1)'])
+ continue
+
+ # logical partition
+ with TempDirectory() as tmp_dir:
+ unsparsed = os.path.join(tmp_dir, os.path.basename(file))
+ _, rc = RunCommand(
+ # exit with 255 if it's not sparse
+ args, ['simg2img', file, unsparsed], expected_return_values={0, 255})
+ if rc == 0:
+ with TempDirectory() as unpack_dir:
+ # exit with 64 if it's not a logical partition.
+ _, rc = RunCommand(
+ args, ['lpunpack', unsparsed, unpack_dir], expected_return_values={0, 64})
+ if rc == 0:
+ nested_items = list(Recur(unpack_dir))
+ if len(nested_items) > 0:
+ for (item, key) in nested_items:
+ yield ('%s!/%s' % (cur_item, item), key)
+ continue
+ # Read pubkey digest
+ with TempDirectory() as tmp_dir:
+ pubkey_file = os.path.join(tmp_dir, 'avbpubkey')
+ ExtractAvbPubkey(args, args.key, pubkey_file)
+ with open(pubkey_file, 'rb') as f:
+ pubkey_digest = hashlib.sha1(f.read()).hexdigest()
+
+ # Check every avbtool-signed item against the input key
+ for (item, pubkey) in Recur(args.input_dir):
+ assert pubkey == pubkey_digest, '%s: key mismatch: %s != %s' % (
+ item, pubkey, pubkey_digest)
+
+
+def main(argv):
+ try:
+ args = ParseArgs(argv)
+ if args.verify:
+ VerifyVirtApex(args)
+ else:
+ SignVirtApex(args)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/apex/sign_virt_apex_test.sh b/apex/sign_virt_apex_test.sh
new file mode 100644
index 0000000..640a3d4
--- /dev/null
+++ b/apex/sign_virt_apex_test.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+shopt -s extglob
+
+TMP_ROOT=$(mktemp -d -t sign_virt_apex-XXXXXXXX)
+TEST_DIR=$(dirname $0)
+
+# To access host tools
+PATH=$TEST_DIR:$PATH
+DEBUGFS=$TEST_DIR/debugfs_static
+
+deapexer --debugfs_path $DEBUGFS extract $TEST_DIR/com.android.virt.apex $TMP_ROOT
+
+if [ "$(ls -A $TMP_ROOT/etc/fs/)" ]; then
+ sign_virt_apex $TEST_DIR/test.com.android.virt.pem $TMP_ROOT
+ sign_virt_apex --verify $TEST_DIR/test.com.android.virt.pem $TMP_ROOT
+else
+ echo "No filesystem images. Skip."
+fi
+
diff --git a/apex/sign_virt_apex_test.xml b/apex/sign_virt_apex_test.xml
new file mode 100644
index 0000000..5ea84a1
--- /dev/null
+++ b/apex/sign_virt_apex_test.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs sign_virt_apex test">
+ <test class="com.android.tradefed.testtype.binary.ExecutableHostTest" >
+ <option name="binary" value="sign_virt_apex_test"/>
+ </test>
+</configuration>
diff --git a/apex/test.com.android.virt.pem b/apex/test.com.android.virt.pem
new file mode 100644
index 0000000..b0cfff4
--- /dev/null
+++ b/apex/test.com.android.virt.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEAw91a1/DFwSu1FbX92SxGshBGPvHW+4JpvVCw10rhx39pPynI
+ePOf94c94f+pZr6QsT94sQ93Ubjhzf29E9wb5QVT98VJycyYviClyFRl8a1KQQQh
+JGR9v4KEceEWYeJe3nbYDzPwvzdJXy0DbLNUWZBXfDEZGyQHnwb124OIkmBFWz+h
+QsRGFMP+9FgATKH2jnsrNEB2yMQqw7+gpBMJ4q2bqOGE48EjERQG7oFQYfzDsyDd
+5xDhvXFVQmIcrwHRc8DSVaXdlZwQQLCKc6wXg1XLY6/7KQr+XCuz0ptCQ0OW3MAB
+OySsxnl82R+zlb9j05uZf0Z7yUW5hyZylZshK8rAVUYbYLFUmsD3X43qx42GzNfj
+FHhZn6k8CnnyGYvjY3/Lp3JY+EEbvzmVAJrDmMmUMIpML06D7Hu509yBOSAoE8qy
+rcccglHs3rHQ93lSslU02JWYcJ193KThQIcmc1OXoT+NPZf4NKemVE2uCX+mPRNR
+M4ACofXbVg/b5NcEelgIzL0UOZDQMj+WdyGpJ3d8YmE+1WWQ8nqbbCy0vQc+8jc0
+ZzZ/RF4WlBOj/or1TkWyGvGVXYhnU8raF1MnDRbOixZpPhSfdC7vtzktcYxHXt5X
+Ect42/ynX4Q5Gz3VMeg3WcIMNMSGFo7B3UEhde5MVxgEf1AQelm8/LaqWncCAwEA
+AQKCAgAKIyrQgmW52clFlmXNF72Q+cao+1tlKRrP8Z01h2qoKLAJ1N/LYtCkvxs0
+10atSq+yfNaCU4qZcDg/sSJYJMxMzjnKWSu4hh5huM7bz4J3P8DYHJ6ag5j+kILK
+YhwGdPD0ErKcFtQfEX16r5m9xopXGGFuzBvAi9zZHkMbWXN4IAN29ZQjIIWADaTk
+gKmDTd61ASr7SVrciUqtVv25gELCuLmVxBZcs2JdP+wb7BV8/NgkLU9O5lDIvVTs
+WqehZzawBwrb4/nUBH/S2VBRLFcLNSWRw0n8ldUUcC6ed+q2EIl+Y3Gs3fkTTLZp
+hnqFBaLlEOig7cT6ZeF0XUkQ9TaCNoEXEistwT6hlWSoAwUPi2q5AeJc9TFCrw8i
+mJWY+9UZH/lOBM8jYoGPW2b7drbNn/8DuPu1N9miP12oaL5KjjGlXvN4RmdkaGjU
+/zUNceQm/Q8hPudCaZLR9trMAOjmHl9GqnGxR35pRWMRJ/N11X1KLVdSlVpUFtHB
+yhvAAhsFLAZxaiQmAANmwz9cnxJTk6+2JTyX6EZOdAFqDibjvTQIqERoSBtKDVTa
+5n02MC3MHSeouhMyQscLvxTa9RtqxQHHnnQBDplkQGErmz5VhD4CYMDKgjhGbH71
+tg0LHujMieldWnpKPZWlztmGaDaPksJAAUKA8RBKrJ2RgXAyAQKCAQEA712eJxuh
+KzoOe0rIHwC4De5vO7ZyleLGOVvaX9jcm3xxQg1feC5r03xcxqkDvuio94Y4s/Sx
+ZubieWY60pPY3d5w160EKRIwAUKtPR2Uy/QLvC3jMnmIy29KP0F6oQXxMurQ16IS
+Aul5aSHIB33UlEd9v9HenTc9hPvYMUALe0HmisXYTRR0p9DMlqt+goaiynD3U2gh
+09x640EtCvDJiM2pAaVw2z9J/eFHypy6AERaGbX3vYjlbch1oqH5+67i0Nl/FZLx
+wL2q5fUsGx8DNQmHu0kjlLLIbGAx/1dtXWOhH0q4SWrGFJXgsYu5f6AzIHz6XKDi
+cITb8P8JUoZgiwKCAQEA0XnXeppR6DASAZSi7e19WWLmUafjur/qUYy+Aolr7Oyc
+H18JU71AOohM8TxgDTGNfvzII6ryxK5j5VpBnL4IX44ymjQ2J7nOtRl7t5Ceh9Cy
+lPFZwxUlV7Mikow8kAVpbY0JonUnRCzcxNT1tO8qlWYEj8L1vZf2d61VIACE/fJU
+ekWQKr/CLlNp/PvjAQaLd6oSh5gf4Ymx+5bFM86tJbR3YAtMWvr8I+nPDT8Q0G2c
+Zt62ZKiE76duma7ndS1Od7ohuLbwW4vV1KUcSzFkfGjP/Cx6D+wQydWAdi7fsQ2u
+xNstQbbP535x5uwVIlZovflq9Sl8AA5bBRnduvSfRQKCAQAiLN6gvMwlDNP2fHXY
+H1UoAAv3nZP8nHUqyVeDacYNmRXelWQ1F4OjnVTttEHppvRA6vP7lYsiowJgzNzH
+Jf7HprO7x2MZrhQWifuMB0YwXHa0dmTC1yFV0lzqbSHiDaQjXe1VbDlgGw+PmBgk
+Ia4RQafNlFxRXAq3ivGSDo/VGFKfK6I3Vx1UvHYJaRDV9/0UJE7bpLl3szoEalDR
+CBHuK1be+k0DsKSSz/BdGEViNmAa3aUydXI0W3OYNcIoUg7mPLdtUB6eIzZcQMX8
+VVAy6VpsvgOLfn8pIg7hYw0lUU0214c6TDldxQxgrQ9eDnReRhnE0d+iqwVwAinF
+k5QDAoIBAHA/Z/Xsp6NRzvRF36C7OAYj9uMeoes6V6dnUZIubUTB7U7qMCdNLBOx
+YfmKrrWjLf00G1LxkbFO+Xy3Bp2lPvtlSTxUagiTim6Ev0S4HBsO/ALP6Zedxyrd
+dNMujm1mWP45K0aAnI/tskdPDnLsDdeMmTkn8WKtAYdTvF+vp5QkvJvglsYxhy4n
+yI2ltBiily2CVveNzteeX18/hWCjiSjBMY6nvzypbV8ZNLgWaT4m3j5JbVc27jU1
+dRCpJqIlqvyBIvzGGroTjnuqFiU8zGnWCE1K0AWkK8Lbw0CREZDgkhwujmu+OF4F
+5acmLpT91JaoBmZk2mt1RdTP7X73AjkCggEBAIwQSTzvVIqJoD4Vc9wqbCGyPr2s
+/g0GkEmcJJpe6E8AxZNzmBKV3bDzvC+thhEVQeCmVDZsOZjO/4TumskHntxMHIpp
+DHRgYiERCM2oIPMEkruqCQ+0BlH/8CtglyrPmsLgSU6L1CBQNMt39KFtcscMMwkk
+Coo/qN0DarQGkrjc+UN4Q0lJDBVB5nQj+1uCVEBnV/IC+08kr9uXIJGAllh3Wfgq
+jOdL2j1knpYD9Wi1TCZwDobCqDWwYMVQZNbGu6de3lWtuBYKCd154QUVm11Kkv3P
+Gz/yGM1v6IttZ0osMujVLADyZMLYKSt8ypRlB3TUD/4P3bUryorV/bu/ew8=
+-----END RSA PRIVATE KEY-----
diff --git a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
index a565a6f..bf4ac61 100644
--- a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
+++ b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
@@ -16,47 +16,67 @@
package com.android.virt.fs;
-/** {@hide} */
+/**
+ * A service that works like a file server, where files and directories are identified by
+ * "remote FDs" that may be pre-exchanged or created on request.
+ *
+ * When a binder error is returned as a service-specific error, the error code is an errno value
+ * (an int).
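+ *
+ * For example (illustrative), calling readFile() with an FD that refers to a directory is
+ * expected to fail with a service-specific error whose code is EISDIR.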
+ *
+ * {@hide}
+ */
interface IVirtFdService {
- /** Error when the requesting FD is unknown. */
- const int ERROR_UNKNOWN_FD = 1;
-
- /**
- * Error when I/O fails. This can happen when actual I/O error happens to the backing file,
- * when the given offset or size are invalid, or any problems that can fail a read/write
- * request.
- */
- const int ERROR_IO = 2;
-
- /** Error when the file is too large to handle correctly. */
- const int ERROR_FILE_TOO_LARGE = 3;
-
/** Maximum content size that the service allows the client to request. */
const int MAX_REQUESTING_DATA = 16384;
/**
- * Returns the content of the given file ID, from the offset, for the amount of requested size
+ * Returns the content of the given remote FD, from the offset, for the amount of requested size
* or until EOF.
*/
- byte[] readFile(int id, long offset, int size);
+ byte[] readFile(int fd, long offset, int size);
/**
- * Returns the content of fs-verity compatible Merkle tree of the given file ID, from the
+ * Returns the content of fs-verity compatible Merkle tree of the given remote FD, from the
* offset, for the amount of requested size or until EOF.
*/
- byte[] readFsverityMerkleTree(int id, long offset, int size);
+ byte[] readFsverityMerkleTree(int fd, long offset, int size);
- /** Returns the fs-verity signature of the given file ID. */
- byte[] readFsveritySignature(int id);
+ /** Returns the fs-verity signature of the given remote FD. */
+ byte[] readFsveritySignature(int fd);
/**
- * Writes the buffer to the given file ID from the file's offset. Returns the number of bytes
+ * Writes the buffer to the given remote FD from the file's offset. Returns the number of bytes
* written.
*/
- int writeFile(int id, in byte[] buf, long offset);
+ int writeFile(int fd, in byte[] buf, long offset);
- /** Resizes the file backed by the given file ID to the new size. */
- void resize(int id, long size);
+ /** Resizes the file backed by the given remote FD to the new size. */
+ void resize(int fd, long size);
- long getFileSize(int id);
+ /** Returns the file size. */
+ long getFileSize(int fd);
+
+ /**
+ * Open a file given the remote directory FD.
+ *
+ * @param pathname The file path to open. Must be a relative path.
+ * @return A remote FD that represents the opened file.
+ */
+ int openFileInDirectory(int dirFd, String pathname);
+
+ /**
+ * Create a file given the remote directory FD.
+ *
+ * @param basename The file name to create. Must not contain a directory separator.
+ * @return A remote FD that represents the newly created file.
+ */
+ int createFileInDirectory(int dirFd, String basename);
+
+ /**
+ * Create a directory inside the given remote directory FD.
+ *
+ * @param basename The directory name to create. Must not contain a directory separator.
+ * @return A remote FD that represents the newly created directory.
+ */
+ int createDirectoryInDirectory(int dirFd, String basename);
}
diff --git a/authfs/fd_server/src/aidl.rs b/authfs/fd_server/src/aidl.rs
new file mode 100644
index 0000000..fa1914a
--- /dev/null
+++ b/authfs/fd_server/src/aidl.rs
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::Result;
+use log::error;
+use nix::{
+ dir::Dir, errno::Errno, fcntl::openat, fcntl::OFlag, sys::stat::mkdirat, sys::stat::Mode,
+};
+use std::cmp::min;
+use std::collections::{btree_map, BTreeMap};
+use std::convert::TryInto;
+use std::fs::File;
+use std::io;
+use std::os::unix::fs::FileExt;
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use std::path::{Component, Path, PathBuf, MAIN_SEPARATOR};
+use std::sync::{Arc, Mutex};
+
+use crate::fsverity;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::{
+ BnVirtFdService, IVirtFdService, MAX_REQUESTING_DATA,
+};
+use authfs_aidl_interface::binder::{
+ BinderFeatures, ExceptionCode, Interface, Result as BinderResult, Status, StatusCode, Strong,
+};
+use binder_common::{new_binder_exception, new_binder_service_specific_error};
+
+fn validate_and_cast_offset(offset: i64) -> Result<u64, Status> {
+ offset.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
+}
+
+fn validate_and_cast_size(size: i32) -> Result<usize, Status> {
+ if size > MAX_REQUESTING_DATA {
+ Err(new_errno_error(Errno::EFBIG))
+ } else {
+ size.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
+ }
+}
+
+/// Configuration of a file descriptor to be served/exposed/shared.
+pub enum FdConfig {
+ /// A read-only file to serve by this server. The file is supposed to be verifiable with the
+ /// associated fs-verity metadata.
+ Readonly {
+ /// The file to read from. fs-verity metadata can be retrieved from this file's FD.
+ file: File,
+
+ /// Alternative Merkle tree stored in another file.
+ /// TODO(205987437): Replace with .fsv_meta file.
+ alt_merkle_tree: Option<File>,
+
+ /// Alternative signature stored in another file.
+ /// TODO(205987437): Replace with .fsv_meta file.
+ alt_signature: Option<File>,
+ },
+
+ /// A readable/writable file to serve by this server. This backing file should just be a
+ /// regular file and does not have any specific property.
+ ReadWrite(File),
+
+ /// A read-only directory to serve by this server.
+ InputDir(Dir),
+
+ /// A writable directory to serve by this server.
+ OutputDir(Dir),
+}
+
+pub struct FdService {
+ /// A pool of opened files and directories, which can be looked up by the FD number.
+ fd_pool: Arc<Mutex<BTreeMap<i32, FdConfig>>>,
+}
+
+impl FdService {
+ pub fn new_binder(fd_pool: BTreeMap<i32, FdConfig>) -> Strong<dyn IVirtFdService> {
+ BnVirtFdService::new_binder(
+ FdService { fd_pool: Arc::new(Mutex::new(fd_pool)) },
+ BinderFeatures::default(),
+ )
+ }
+
+ /// Handles the request for file `id` with `handle_fn` if it is in the FD pool. This function
+ /// returns whatever `handle_fn` returns.
+ fn handle_fd<F, R>(&self, id: i32, handle_fn: F) -> BinderResult<R>
+ where
+ F: FnOnce(&FdConfig) -> BinderResult<R>,
+ {
+ let fd_pool = self.fd_pool.lock().unwrap();
+ let fd_config = fd_pool.get(&id).ok_or_else(|| new_errno_error(Errno::EBADF))?;
+ handle_fn(fd_config)
+ }
+
+ /// Inserts a new FD and the corresponding `FdConfig` created by `create_fn` into the FD pool, then
+ /// returns the new FD number.
+ fn insert_new_fd<F>(&self, fd: i32, create_fn: F) -> BinderResult<i32>
+ where
+ F: FnOnce(&mut FdConfig) -> BinderResult<(i32, FdConfig)>,
+ {
+ let mut fd_pool = self.fd_pool.lock().unwrap();
+ let mut fd_config = fd_pool.get_mut(&fd).ok_or_else(|| new_errno_error(Errno::EBADF))?;
+ let (new_fd, new_fd_config) = create_fn(&mut fd_config)?;
+ if let btree_map::Entry::Vacant(entry) = fd_pool.entry(new_fd) {
+ entry.insert(new_fd_config);
+ Ok(new_fd)
+ } else {
+ Err(new_binder_exception(
+ ExceptionCode::ILLEGAL_STATE,
+ format!("The newly created FD {} is already in the pool unexpectedly", new_fd),
+ ))
+ }
+ }
+}
+
+impl Interface for FdService {}
+
+impl IVirtFdService for FdService {
+ fn readFile(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
+ let size: usize = validate_and_cast_size(size)?;
+ let offset: u64 = validate_and_cast_offset(offset)?;
+
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { file, .. } | FdConfig::ReadWrite(file) => {
+ read_into_buf(file, size, offset).map_err(|e| {
+ error!("readFile: read error: {}", e);
+ new_errno_error(Errno::EIO)
+ })
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn readFsverityMerkleTree(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
+ let size: usize = validate_and_cast_size(size)?;
+ let offset: u64 = validate_and_cast_offset(offset)?;
+
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { file, alt_merkle_tree, .. } => {
+ if let Some(tree_file) = &alt_merkle_tree {
+ read_into_buf(tree_file, size, offset).map_err(|e| {
+ error!("readFsverityMerkleTree: read error: {}", e);
+ new_errno_error(Errno::EIO)
+ })
+ } else {
+ let mut buf = vec![0; size];
+ let s = fsverity::read_merkle_tree(file.as_raw_fd(), offset, &mut buf)
+ .map_err(|e| {
+ error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
+ new_errno_error(Errno::EIO)
+ })?;
+ debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
+ buf.truncate(s);
+ Ok(buf)
+ }
+ }
+ FdConfig::ReadWrite(_file) => {
+ // For a writable file, Merkle tree is not expected to be served since Auth FS
+ // doesn't trust it anyway. Auth FS may keep the Merkle tree privately for its own
+ // use.
+ Err(new_errno_error(Errno::ENOSYS))
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn readFsveritySignature(&self, id: i32) -> BinderResult<Vec<u8>> {
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { file, alt_signature, .. } => {
+ if let Some(sig_file) = &alt_signature {
+ // Supposedly big enough buffer size to store signature.
+ let size = MAX_REQUESTING_DATA as usize;
+ let offset = 0;
+ read_into_buf(sig_file, size, offset).map_err(|e| {
+ error!("readFsveritySignature: read error: {}", e);
+ new_errno_error(Errno::EIO)
+ })
+ } else {
+ let mut buf = vec![0; MAX_REQUESTING_DATA as usize];
+ let s = fsverity::read_signature(file.as_raw_fd(), &mut buf).map_err(|e| {
+ error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
+ new_errno_error(Errno::EIO)
+ })?;
+ debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
+ buf.truncate(s);
+ Ok(buf)
+ }
+ }
+ FdConfig::ReadWrite(_file) => {
+ // There is no signature for a writable file.
+ Err(new_errno_error(Errno::ENOSYS))
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn writeFile(&self, id: i32, buf: &[u8], offset: i64) -> BinderResult<i32> {
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
+ FdConfig::ReadWrite(file) => {
+ let offset: u64 = offset.try_into().map_err(|_| new_errno_error(Errno::EINVAL))?;
+ // Check buffer size just to make `as i32` safe below.
+ if buf.len() > i32::MAX as usize {
+ return Err(new_errno_error(Errno::EOVERFLOW));
+ }
+ Ok(file.write_at(buf, offset).map_err(|e| {
+ error!("writeFile: write error: {}", e);
+ new_errno_error(Errno::EIO)
+ })? as i32)
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn resize(&self, id: i32, size: i64) -> BinderResult<()> {
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
+ FdConfig::ReadWrite(file) => {
+ if size < 0 {
+ return Err(new_errno_error(Errno::EINVAL));
+ }
+ file.set_len(size as u64).map_err(|e| {
+ error!("resize: set_len error: {}", e);
+ new_errno_error(Errno::EIO)
+ })
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn getFileSize(&self, id: i32) -> BinderResult<i64> {
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { file, .. } => {
+ let size = file
+ .metadata()
+ .map_err(|e| {
+ error!("getFileSize error: {}", e);
+ new_errno_error(Errno::EIO)
+ })?
+ .len();
+ Ok(size.try_into().map_err(|e| {
+ error!("getFileSize: File too large: {}", e);
+ new_errno_error(Errno::EFBIG)
+ })?)
+ }
+ FdConfig::ReadWrite(_file) => {
+ // Content and metadata of a writable file need to be tracked by authfs, since
+ // fd_server isn't considered trusted. So there is no point in supporting getFileSize
+ // for a writable file.
+ Err(new_errno_error(Errno::ENOSYS))
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn openFileInDirectory(&self, fd: i32, file_path: &str) -> BinderResult<i32> {
+ let path_buf = PathBuf::from(file_path);
+ // Checks that the path is a simple, relative path.
+ if path_buf.components().any(|c| !matches!(c, Component::Normal(_))) {
+ return Err(new_errno_error(Errno::EINVAL));
+ }
+
+ self.insert_new_fd(fd, |config| match config {
+ FdConfig::InputDir(dir) => {
+ let file = open_readonly_at(dir.as_raw_fd(), &path_buf).map_err(new_errno_error)?;
+
+ // TODO(205987437): Provide the corresponding ".fsv_meta" file when it's created.
+ Ok((
+ file.as_raw_fd(),
+ FdConfig::Readonly { file, alt_merkle_tree: None, alt_signature: None },
+ ))
+ }
+ FdConfig::OutputDir(_) => {
+ Err(new_errno_error(Errno::ENOSYS)) // TODO: Implement when needed
+ }
+ _ => Err(new_errno_error(Errno::ENOTDIR)),
+ })
+ }
+
+ fn createFileInDirectory(&self, fd: i32, basename: &str) -> BinderResult<i32> {
+ if basename.contains(MAIN_SEPARATOR) {
+ return Err(new_errno_error(Errno::EINVAL));
+ }
+ self.insert_new_fd(fd, |config| match config {
+ FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
+ FdConfig::OutputDir(dir) => {
+ let new_fd = openat(
+ dir.as_raw_fd(),
+ basename,
+ // TODO(205172873): handle the case when the file already exists, e.g. truncate
+ // or fail, and possibly allow the client to specify. For now, always truncate.
+ OFlag::O_CREAT | OFlag::O_RDWR | OFlag::O_TRUNC,
+ Mode::S_IRUSR | Mode::S_IWUSR,
+ )
+ .map_err(new_errno_error)?;
+ // SAFETY: new_fd was just created successfully and is not owned elsewhere.
+ let new_file = unsafe { File::from_raw_fd(new_fd) };
+ Ok((new_fd, FdConfig::ReadWrite(new_file)))
+ }
+ _ => Err(new_errno_error(Errno::ENOTDIR)),
+ })
+ }
+
+ fn createDirectoryInDirectory(&self, dir_fd: i32, basename: &str) -> BinderResult<i32> {
+ if basename.contains(MAIN_SEPARATOR) {
+ return Err(new_errno_error(Errno::EINVAL));
+ }
+ self.insert_new_fd(dir_fd, |config| match config {
+ FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
+ FdConfig::OutputDir(_) => {
+ mkdirat(dir_fd, basename, Mode::S_IRWXU).map_err(new_errno_error)?;
+ let new_dir = Dir::openat(
+ dir_fd,
+ basename,
+ OFlag::O_DIRECTORY | OFlag::O_RDONLY,
+ Mode::empty(),
+ )
+ .map_err(new_errno_error)?;
+ Ok((new_dir.as_raw_fd(), FdConfig::OutputDir(new_dir)))
+ }
+ _ => Err(new_errno_error(Errno::ENOTDIR)),
+ })
+ }
+}
+
+fn read_into_buf(file: &File, max_size: usize, offset: u64) -> io::Result<Vec<u8>> {
+ let remaining = file.metadata()?.len().saturating_sub(offset);
+ let buf_size = min(remaining, max_size as u64) as usize;
+ let mut buf = vec![0; buf_size];
+ file.read_exact_at(&mut buf, offset)?;
+ Ok(buf)
+}
+
+fn new_errno_error(errno: Errno) -> Status {
+ new_binder_service_specific_error(errno as i32, errno.desc())
+}
+
+fn open_readonly_at(dir_fd: RawFd, path: &Path) -> nix::Result<File> {
+ let new_fd = openat(dir_fd, path, OFlag::O_RDONLY, Mode::empty())?;
+ // SAFETY: new_fd is just created successfully and not owned.
+ let new_file = unsafe { File::from_raw_fd(new_fd) };
+ Ok(new_file)
+}
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index 395e2e9..f5a3cba 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -22,235 +22,21 @@
//! For example, `exec 9</path/to/file fd_server --ro-fds 9` starts the binder service. A client
//! client can then request the content of file 9 by offset and size.
+mod aidl;
mod fsverity;
use anyhow::{bail, Result};
use binder_common::rpc_server::run_rpc_server;
-use log::{debug, error};
-use std::cmp::min;
+use log::debug;
+use nix::dir::Dir;
use std::collections::BTreeMap;
-use std::convert::TryInto;
use std::fs::File;
-use std::io;
-use std::os::unix::fs::FileExt;
-use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::os::unix::io::FromRawFd;
-use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::{
- BnVirtFdService, IVirtFdService, ERROR_FILE_TOO_LARGE, ERROR_IO, ERROR_UNKNOWN_FD,
- MAX_REQUESTING_DATA,
-};
-use authfs_aidl_interface::binder::{
- BinderFeatures, ExceptionCode, Interface, Result as BinderResult, Status, StatusCode, Strong,
-};
-use binder_common::new_binder_exception;
+use aidl::{FdConfig, FdService};
const RPC_SERVICE_PORT: u32 = 3264; // TODO: support dynamic port for multiple fd_server instances
-fn validate_and_cast_offset(offset: i64) -> Result<u64, Status> {
- offset.try_into().map_err(|_| {
- new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, format!("Invalid offset: {}", offset))
- })
-}
-
-fn validate_and_cast_size(size: i32) -> Result<usize, Status> {
- if size > MAX_REQUESTING_DATA {
- Err(new_binder_exception(
- ExceptionCode::ILLEGAL_ARGUMENT,
- format!("Unexpectedly large size: {}", size),
- ))
- } else {
- size.try_into().map_err(|_| {
- new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, format!("Invalid size: {}", size))
- })
- }
-}
-
-/// Configuration of a file descriptor to be served/exposed/shared.
-enum FdConfig {
- /// A read-only file to serve by this server. The file is supposed to be verifiable with the
- /// associated fs-verity metadata.
- Readonly {
- /// The file to read from. fs-verity metadata can be retrieved from this file's FD.
- file: File,
-
- /// Alternative Merkle tree stored in another file.
- alt_merkle_tree: Option<File>,
-
- /// Alternative signature stored in another file.
- alt_signature: Option<File>,
- },
-
- /// A readable/writable file to serve by this server. This backing file should just be a
- /// regular file and does not have any specific property.
- ReadWrite(File),
-}
-
-struct FdService {
- /// A pool of opened files, may be readonly or read-writable.
- fd_pool: BTreeMap<i32, FdConfig>,
-}
-
-impl FdService {
- pub fn new_binder(fd_pool: BTreeMap<i32, FdConfig>) -> Strong<dyn IVirtFdService> {
- BnVirtFdService::new_binder(FdService { fd_pool }, BinderFeatures::default())
- }
-
- fn get_file_config(&self, id: i32) -> BinderResult<&FdConfig> {
- self.fd_pool.get(&id).ok_or_else(|| Status::from(ERROR_UNKNOWN_FD))
- }
-}
-
-impl Interface for FdService {}
-
-impl IVirtFdService for FdService {
- fn readFile(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
- let size: usize = validate_and_cast_size(size)?;
- let offset: u64 = validate_and_cast_offset(offset)?;
-
- match self.get_file_config(id)? {
- FdConfig::Readonly { file, .. } | FdConfig::ReadWrite(file) => {
- read_into_buf(file, size, offset).map_err(|e| {
- error!("readFile: read error: {}", e);
- Status::from(ERROR_IO)
- })
- }
- }
- }
-
- fn readFsverityMerkleTree(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
- let size: usize = validate_and_cast_size(size)?;
- let offset: u64 = validate_and_cast_offset(offset)?;
-
- match &self.get_file_config(id)? {
- FdConfig::Readonly { file, alt_merkle_tree, .. } => {
- if let Some(tree_file) = &alt_merkle_tree {
- read_into_buf(tree_file, size, offset).map_err(|e| {
- error!("readFsverityMerkleTree: read error: {}", e);
- Status::from(ERROR_IO)
- })
- } else {
- let mut buf = vec![0; size];
- let s = fsverity::read_merkle_tree(file.as_raw_fd(), offset, &mut buf)
- .map_err(|e| {
- error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
- Status::from(e.raw_os_error().unwrap_or(ERROR_IO))
- })?;
- debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
- buf.truncate(s);
- Ok(buf)
- }
- }
- FdConfig::ReadWrite(_file) => {
- // For a writable file, Merkle tree is not expected to be served since Auth FS
- // doesn't trust it anyway. Auth FS may keep the Merkle tree privately for its own
- // use.
- Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
- }
- }
- }
-
- fn readFsveritySignature(&self, id: i32) -> BinderResult<Vec<u8>> {
- match &self.get_file_config(id)? {
- FdConfig::Readonly { file, alt_signature, .. } => {
- if let Some(sig_file) = &alt_signature {
- // Supposedly big enough buffer size to store signature.
- let size = MAX_REQUESTING_DATA as usize;
- let offset = 0;
- read_into_buf(sig_file, size, offset).map_err(|e| {
- error!("readFsveritySignature: read error: {}", e);
- Status::from(ERROR_IO)
- })
- } else {
- let mut buf = vec![0; MAX_REQUESTING_DATA as usize];
- let s = fsverity::read_signature(file.as_raw_fd(), &mut buf).map_err(|e| {
- error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
- Status::from(e.raw_os_error().unwrap_or(ERROR_IO))
- })?;
- debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
- buf.truncate(s);
- Ok(buf)
- }
- }
- FdConfig::ReadWrite(_file) => {
- // There is no signature for a writable file.
- Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
- }
- }
- }
-
- fn writeFile(&self, id: i32, buf: &[u8], offset: i64) -> BinderResult<i32> {
- match &self.get_file_config(id)? {
- FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
- FdConfig::ReadWrite(file) => {
- let offset: u64 = offset.try_into().map_err(|_| {
- new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, "Invalid offset")
- })?;
- // Check buffer size just to make `as i32` safe below.
- if buf.len() > i32::MAX as usize {
- return Err(new_binder_exception(
- ExceptionCode::ILLEGAL_ARGUMENT,
- "Buffer size is too big",
- ));
- }
- Ok(file.write_at(buf, offset).map_err(|e| {
- error!("writeFile: write error: {}", e);
- Status::from(ERROR_IO)
- })? as i32)
- }
- }
- }
-
- fn resize(&self, id: i32, size: i64) -> BinderResult<()> {
- match &self.get_file_config(id)? {
- FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
- FdConfig::ReadWrite(file) => {
- if size < 0 {
- return Err(new_binder_exception(
- ExceptionCode::ILLEGAL_ARGUMENT,
- "Invalid size to resize to",
- ));
- }
- file.set_len(size as u64).map_err(|e| {
- error!("resize: set_len error: {}", e);
- Status::from(ERROR_IO)
- })
- }
- }
- }
-
- fn getFileSize(&self, id: i32) -> BinderResult<i64> {
- match &self.get_file_config(id)? {
- FdConfig::Readonly { file, .. } => {
- let size = file
- .metadata()
- .map_err(|e| {
- error!("getFileSize error: {}", e);
- Status::from(ERROR_IO)
- })?
- .len();
- Ok(size.try_into().map_err(|e| {
- error!("getFileSize: File too large: {}", e);
- Status::from(ERROR_FILE_TOO_LARGE)
- })?)
- }
- FdConfig::ReadWrite(_file) => {
- // Content and metadata of a writable file needs to be tracked by authfs, since
- // fd_server isn't considered trusted. So there is no point to support getFileSize
- // for a writable file.
- Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
- }
- }
- }
-}
-
-fn read_into_buf(file: &File, max_size: usize, offset: u64) -> io::Result<Vec<u8>> {
- let remaining = file.metadata()?.len().saturating_sub(offset);
- let buf_size = min(remaining, max_size as u64) as usize;
- let mut buf = vec![0; buf_size];
- file.read_exact_at(&mut buf, offset)?;
- Ok(buf)
-}
-
fn is_fd_valid(fd: i32) -> bool {
// SAFETY: a query-only syscall
let retval = unsafe { libc::fcntl(fd, libc::F_GETFD) };
@@ -292,6 +78,16 @@
Ok((fd, FdConfig::ReadWrite(file)))
}
+fn parse_arg_ro_dirs(arg: &str) -> Result<(i32, FdConfig)> {
+ let fd = arg.parse::<i32>()?;
+ Ok((fd, FdConfig::InputDir(Dir::from_fd(fd)?)))
+}
+
+fn parse_arg_rw_dirs(arg: &str) -> Result<(i32, FdConfig)> {
+ let fd = arg.parse::<i32>()?;
+ Ok((fd, FdConfig::OutputDir(Dir::from_fd(fd)?)))
+}
+
struct Args {
fd_pool: BTreeMap<i32, FdConfig>,
ready_fd: Option<File>,
@@ -308,6 +104,14 @@
.long("rw-fds")
.multiple(true)
.number_of_values(1))
+ .arg(clap::Arg::with_name("ro-dirs")
+ .long("ro-dirs")
+ .multiple(true)
+ .number_of_values(1))
+ .arg(clap::Arg::with_name("rw-dirs")
+ .long("rw-dirs")
+ .multiple(true)
+ .number_of_values(1))
.arg(clap::Arg::with_name("ready-fd")
.long("ready-fd")
.takes_value(true))
@@ -326,6 +130,18 @@
fd_pool.insert(fd, config);
}
}
+ if let Some(args) = matches.values_of("ro-dirs") {
+ for arg in args {
+ let (fd, config) = parse_arg_ro_dirs(arg)?;
+ fd_pool.insert(fd, config);
+ }
+ }
+ if let Some(args) = matches.values_of("rw-dirs") {
+ for arg in args {
+ let (fd, config) = parse_arg_rw_dirs(arg)?;
+ fd_pool.insert(fd, config);
+ }
+ }
let ready_fd = if let Some(arg) = matches.value_of("ready-fd") {
let fd = arg.parse::<i32>()?;
Some(fd_to_file(fd)?)
diff --git a/authfs/service/src/authfs.rs b/authfs/service/src/authfs.rs
index 6d87243..1b05749 100644
--- a/authfs/service/src/authfs.rs
+++ b/authfs/service/src/authfs.rs
@@ -132,11 +132,11 @@
// TODO(b/185178698): Many input files need to be signed and verified.
// or can we use debug cert for now, which is better than nothing?
args.push(OsString::from("--remote-ro-file-unverified"));
- args.push(OsString::from(format!("{}:{}", conf.fd, conf.fd)));
+ args.push(OsString::from(conf.fd.to_string()));
}
for conf in out_fds {
args.push(OsString::from("--remote-new-rw-file"));
- args.push(OsString::from(format!("{}:{}", conf.fd, conf.fd)));
+ args.push(OsString::from(conf.fd.to_string()));
}
if debuggable {
args.push(OsString::from("--debug"));
diff --git a/authfs/src/file.rs b/authfs/src/file.rs
index 947b59f..6353209 100644
--- a/authfs/src/file.rs
+++ b/authfs/src/file.rs
@@ -1,18 +1,20 @@
-mod local_file;
+mod dir;
mod remote_file;
-pub use local_file::LocalFileReader;
+pub use dir::{InMemoryDir, RemoteDirEditor};
pub use remote_file::{RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
use binder::unstable_api::{new_spibinder, AIBinder};
use binder::FromIBinder;
use std::io;
+use std::path::{Path, MAIN_SEPARATOR};
use crate::common::CHUNK_SIZE;
use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::IVirtFdService;
-use authfs_aidl_interface::binder::Strong;
+use authfs_aidl_interface::binder::{Status, Strong};
pub type VirtFdService = Strong<dyn IVirtFdService>;
+pub type VirtFdServiceStatus = Status;
pub type ChunkBuffer = [u8; CHUNK_SIZE as usize];
@@ -70,3 +72,12 @@
/// Resizes the file to the new size.
fn resize(&self, size: u64) -> io::Result<()>;
}
+
+/// Checks whether the path is a simple file name without any directory separator.
+pub fn validate_basename(path: &Path) -> io::Result<()> {
+ if matches!(path.to_str(), Some(path_str) if !path_str.contains(MAIN_SEPARATOR)) {
+ Ok(())
+ } else {
+ Err(io::Error::from_raw_os_error(libc::EINVAL))
+ }
+}
diff --git a/authfs/src/file/dir.rs b/authfs/src/file/dir.rs
new file mode 100644
index 0000000..2eaaddd
--- /dev/null
+++ b/authfs/src/file/dir.rs
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::collections::{hash_map, HashMap};
+use std::io;
+use std::path::{Path, PathBuf};
+
+use super::remote_file::RemoteFileEditor;
+use super::{validate_basename, VirtFdService, VirtFdServiceStatus};
+use crate::fsverity::VerifiedFileEditor;
+use crate::fusefs::Inode;
+
+const MAX_ENTRIES: u16 = 100; // Arbitrary limit
+
+/// A remote directory backed by a remote directory FD, where the provider/fd_server is not
+/// trusted.
+///
+/// The directory is initially assumed to be empty, since the storage is not trusted. In practice,
+/// when the backing storage is not clean, fd_server can fail to create a file or directory due to
+/// a name collision. From RemoteDirEditor's security perspective, such a creation failure is just
+/// one of the possible errors; what matters is that RemoteDirEditor maintains the integrity itself.
+///
+/// When new files are created through RemoteDirEditor, their integrity is maintained within the
+/// VM. Similarly, the integrity (namely, the list of entries) of the directory, or of new
+/// directories created within it, is also maintained within the VM. A compromised fd_server or a
+/// malicious client can't affect the VM's view of the files and directories inside such a
+/// directory.
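+///
+/// For example (illustrative), creating "foo.txt" through `create_file` asks fd_server for a new
+/// remote FD via createFileInDirectory(), wraps it in a VerifiedFileEditor so its integrity is
+/// tracked inside the VM, and records the (name, inode) pair in `entries`.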
+pub struct RemoteDirEditor {
+ service: VirtFdService,
+ remote_dir_fd: i32,
+
+ /// Mapping of entry names to the corresponding inode number. The actual file/directory is
+ /// stored in the global pool in fusefs.
+ entries: HashMap<PathBuf, Inode>,
+}
+
+impl RemoteDirEditor {
+ pub fn new(service: VirtFdService, remote_dir_fd: i32) -> Self {
+ RemoteDirEditor { service, remote_dir_fd, entries: HashMap::new() }
+ }
+
+ /// Returns the number of entries created.
+ pub fn number_of_entries(&self) -> u16 {
+ self.entries.len() as u16 // limited to MAX_ENTRIES
+ }
+
+ /// Creates a remote file named `basename` with corresponding `inode` at the current directory.
+ pub fn create_file(
+ &mut self,
+ basename: &Path,
+ inode: Inode,
+ ) -> io::Result<VerifiedFileEditor<RemoteFileEditor>> {
+ self.validate_argument(basename)?;
+
+ let basename_str =
+ basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+ let new_fd = self
+ .service
+ .createFileInDirectory(self.remote_dir_fd, basename_str)
+ .map_err(into_io_error)?;
+
+ let new_remote_file =
+ VerifiedFileEditor::new(RemoteFileEditor::new(self.service.clone(), new_fd));
+ self.entries.insert(basename.to_path_buf(), inode);
+ Ok(new_remote_file)
+ }
+
+ /// Creates a remote directory named `basename` with corresponding `inode` at the current
+ /// directory.
+ pub fn mkdir(&mut self, basename: &Path, inode: Inode) -> io::Result<RemoteDirEditor> {
+ self.validate_argument(basename)?;
+
+ let basename_str =
+ basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+ let new_fd = self
+ .service
+ .createDirectoryInDirectory(self.remote_dir_fd, basename_str)
+ .map_err(into_io_error)?;
+
+ let new_remote_dir = RemoteDirEditor::new(self.service.clone(), new_fd);
+ self.entries.insert(basename.to_path_buf(), inode);
+ Ok(new_remote_dir)
+ }
+
+ /// Returns the inode number of a file or directory named `name` previously created through
+ /// `RemoteDirEditor`.
+ pub fn find_inode(&self, name: &Path) -> Option<Inode> {
+ self.entries.get(name).copied()
+ }
+
+ fn validate_argument(&self, basename: &Path) -> io::Result<()> {
+ // Kernel should only give us a basename.
+ debug_assert!(validate_basename(basename).is_ok());
+
+ if self.entries.contains_key(basename) {
+ Err(io::Error::from_raw_os_error(libc::EEXIST))
+ } else if self.entries.len() >= MAX_ENTRIES.into() {
+ Err(io::Error::from_raw_os_error(libc::EMLINK))
+ } else {
+ Ok(())
+ }
+ }
+}
+
+/// An in-memory directory representation of a directory structure.
+pub struct InMemoryDir(HashMap<PathBuf, Inode>);
+
+impl InMemoryDir {
+ /// Creates an empty instance of `InMemoryDir`.
+ pub fn new() -> Self {
+ // Hash map is empty since "." and ".." are excluded in entries.
+ InMemoryDir(HashMap::new())
+ }
+
+ /// Returns the number of entries in the directory (not including "." and "..").
+ pub fn number_of_entries(&self) -> u16 {
+ self.0.len() as u16 // limited to MAX_ENTRIES
+ }
+
+ /// Adds an entry (name and the inode number) to the directory. Fails if it already exists. The
+ /// caller is responsible for ensuring the inode is unique.
+ pub fn add_entry(&mut self, basename: &Path, inode: Inode) -> io::Result<()> {
+ validate_basename(basename)?;
+ if self.0.len() >= MAX_ENTRIES.into() {
+ return Err(io::Error::from_raw_os_error(libc::EMLINK));
+ }
+
+ if let hash_map::Entry::Vacant(entry) = self.0.entry(basename.to_path_buf()) {
+ entry.insert(inode);
+ Ok(())
+ } else {
+ Err(io::Error::from_raw_os_error(libc::EEXIST))
+ }
+ }
+
+ /// Looks up an entry inode by name. `None` if not found.
+ pub fn lookup_inode(&self, basename: &Path) -> Option<Inode> {
+ self.0.get(basename).copied()
+ }
+}
+
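+/// Converts a binder error status from the remote FD service into an `io::Error`, preserving the
+/// service-specific errno when one is reported, or a generic error with the description otherwise.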
+fn into_io_error(e: VirtFdServiceStatus) -> io::Error {
+ let maybe_errno = e.service_specific_error();
+ if maybe_errno > 0 {
+ io::Error::from_raw_os_error(maybe_errno)
+ } else {
+ io::Error::new(io::ErrorKind::Other, e.get_description())
+ }
+}
diff --git a/authfs/src/file/local_file.rs b/authfs/src/file/local_file.rs
deleted file mode 100644
index 13c954f..0000000
--- a/authfs/src/file/local_file.rs
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use std::cmp::min;
-use std::fs::File;
-use std::io;
-use std::os::unix::fs::FileExt;
-
-use super::{ChunkBuffer, ReadByChunk};
-use crate::common::CHUNK_SIZE;
-
-/// A read-only file that can be read by chunks.
-pub struct LocalFileReader {
- file: File,
- size: u64,
-}
-
-impl LocalFileReader {
- /// Creates a `LocalFileReader` to read from for the specified `path`.
- pub fn new(file: File) -> io::Result<LocalFileReader> {
- let size = file.metadata()?.len();
- Ok(LocalFileReader { file, size })
- }
-
- pub fn len(&self) -> u64 {
- self.size
- }
-}
-
-impl ReadByChunk for LocalFileReader {
- fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
- let start = chunk_index * CHUNK_SIZE;
- if start >= self.size {
- return Ok(0);
- }
- let end = min(self.size, start + CHUNK_SIZE);
- let read_size = (end - start) as usize;
- debug_assert!(read_size <= buf.len());
- self.file.read_exact_at(&mut buf[..read_size], start)?;
- Ok(read_size)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use std::env::temp_dir;
-
- #[test]
- fn test_read_4k_file() -> io::Result<()> {
- let file_reader = LocalFileReader::new(File::open("testdata/input.4k")?)?;
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(0, &mut buf)?;
- assert_eq!(size, buf.len());
- Ok(())
- }
-
- #[test]
- fn test_read_4k1_file() -> io::Result<()> {
- let file_reader = LocalFileReader::new(File::open("testdata/input.4k1")?)?;
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(0, &mut buf)?;
- assert_eq!(size, buf.len());
- let size = file_reader.read_chunk(1, &mut buf)?;
- assert_eq!(size, 1);
- Ok(())
- }
-
- #[test]
- fn test_read_4m_file() -> io::Result<()> {
- let file_reader = LocalFileReader::new(File::open("testdata/input.4m")?)?;
- for index in 0..file_reader.len() / 4096 {
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(index, &mut buf)?;
- assert_eq!(size, buf.len());
- }
- Ok(())
- }
-
- #[test]
- fn test_read_beyond_file_size() -> io::Result<()> {
- let file_reader = LocalFileReader::new(File::open("testdata/input.4k").unwrap()).unwrap();
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(1u64, &mut buf)?;
- assert_eq!(size, 0);
- Ok(())
- }
-
- #[test]
- fn test_read_empty_file() -> io::Result<()> {
- let mut temp_file = temp_dir();
- temp_file.push("authfs_test_empty_file");
- let file_reader = LocalFileReader::new(File::create(temp_file).unwrap()).unwrap();
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(0, &mut buf)?;
- assert_eq!(size, 0);
- Ok(())
- }
-}
diff --git a/authfs/src/file/remote_file.rs b/authfs/src/file/remote_file.rs
index 903c143..039285f 100644
--- a/authfs/src/file/remote_file.rs
+++ b/authfs/src/file/remote_file.rs
@@ -17,6 +17,7 @@
use std::cmp::min;
use std::convert::TryFrom;
use std::io;
+use std::path::Path;
use super::{ChunkBuffer, RandomWrite, ReadByChunk, VirtFdService};
use crate::common::CHUNK_SIZE;
@@ -48,6 +49,29 @@
pub fn new(service: VirtFdService, file_fd: i32) -> Self {
RemoteFileReader { service, file_fd }
}
+
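+ /// Opens the file at `related_path` relative to the remote directory FD `dir_fd` and returns a
+ /// reader for it.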
+ pub fn new_by_path(
+ service: VirtFdService,
+ dir_fd: i32,
+ related_path: &Path,
+ ) -> io::Result<Self> {
+ let file_fd =
+ service.openFileInDirectory(dir_fd, related_path.to_str().unwrap()).map_err(|e| {
+ io::Error::new(
+ io::ErrorKind::Other,
+ format!(
+ "Failed to create a remote file reader by path {}: {}",
+ related_path.display(),
+ e.get_description()
+ ),
+ )
+ })?;
+ Ok(RemoteFileReader { service, file_fd })
+ }
+
+ pub fn get_remote_fd(&self) -> i32 {
+ self.file_fd
+ }
}
impl ReadByChunk for RemoteFileReader {
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index 1f21b13..4a18c6a 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -173,10 +173,42 @@
mod tests {
use super::*;
use crate::auth::FakeAuthenticator;
- use crate::file::{LocalFileReader, ReadByChunk};
+ use crate::file::ReadByChunk;
use anyhow::Result;
+ use std::cmp::min;
use std::fs::{self, File};
use std::io::Read;
+ use std::os::unix::fs::FileExt;
+
+ struct LocalFileReader {
+ file: File,
+ size: u64,
+ }
+
+ impl LocalFileReader {
+ fn new(file: File) -> io::Result<LocalFileReader> {
+ let size = file.metadata()?.len();
+ Ok(LocalFileReader { file, size })
+ }
+
+ fn len(&self) -> u64 {
+ self.size
+ }
+ }
+
+ impl ReadByChunk for LocalFileReader {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+ let start = chunk_index * CHUNK_SIZE;
+ if start >= self.size {
+ return Ok(0);
+ }
+ let end = min(self.size, start + CHUNK_SIZE);
+ let read_size = (end - start) as usize;
+ debug_assert!(read_size <= buf.len());
+ self.file.read_exact_at(&mut buf[..read_size], start)?;
+ Ok(read_size)
+ }
+ }
type LocalVerifiedFileReader = VerifiedFileReader<LocalFileReader, LocalFileReader>;
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index 6bdb498..b456f33 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -14,17 +14,19 @@
* limitations under the License.
*/
-use anyhow::Result;
+use anyhow::{anyhow, bail, Result};
use log::{debug, warn};
-use std::collections::BTreeMap;
+use std::collections::{btree_map, BTreeMap};
use std::convert::TryFrom;
-use std::ffi::CStr;
+use std::ffi::{CStr, OsStr};
use std::fs::OpenOptions;
use std::io;
use std::mem::MaybeUninit;
use std::option::Option;
-use std::os::unix::io::AsRawFd;
-use std::path::Path;
+use std::os::unix::{ffi::OsStrExt, io::AsRawFd};
+use std::path::{Component, Path, PathBuf};
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::Mutex;
use std::time::Duration;
use fuse::filesystem::{
@@ -35,59 +37,188 @@
use crate::common::{divide_roundup, ChunkedSizeIter, CHUNK_SIZE};
use crate::file::{
- LocalFileReader, RandomWrite, ReadByChunk, RemoteFileEditor, RemoteFileReader,
- RemoteMerkleTreeReader,
+ validate_basename, InMemoryDir, RandomWrite, ReadByChunk, RemoteDirEditor, RemoteFileEditor,
+ RemoteFileReader, RemoteMerkleTreeReader,
};
use crate::fsverity::{VerifiedFileEditor, VerifiedFileReader};
-const DEFAULT_METADATA_TIMEOUT: std::time::Duration = Duration::from_secs(5);
-
pub type Inode = u64;
type Handle = u64;
-/// `FileConfig` defines the file type supported by AuthFS.
-pub enum FileConfig {
- /// A file type that is verified against fs-verity signature (thus read-only). The file is
- /// backed by a local file. Debug only.
- LocalVerifiedReadonly {
- reader: VerifiedFileReader<LocalFileReader, LocalFileReader>,
- file_size: u64,
- },
- /// A file type that is a read-only passthrough from a local file. Debug only.
- LocalUnverifiedReadonly { reader: LocalFileReader, file_size: u64 },
+const DEFAULT_METADATA_TIMEOUT: Duration = Duration::from_secs(5);
+const ROOT_INODE: Inode = 1;
+
+/// Maximum bytes in the write transaction to the FUSE device. This limits the maximum buffer
+/// size (including FUSE protocol overhead) that the filesystem writes back for a read request.
+const MAX_WRITE_BYTES: u32 = 65536;
+
+/// Maximum bytes in a read operation.
+/// TODO(victorhsieh): This option is deprecated by FUSE. Figure out if we can remove this.
+const MAX_READ_BYTES: u32 = 65536;
+
+/// `AuthFsEntry` defines the filesystem entry type supported by AuthFS.
+pub enum AuthFsEntry {
+ /// A read-only directory (writable during initialization). Root directory is an example.
+ ReadonlyDirectory { dir: InMemoryDir },
/// A file type that is verified against fs-verity signature (thus read-only). The file is
/// served from a remote server.
- RemoteVerifiedReadonly {
+ VerifiedReadonly {
reader: VerifiedFileReader<RemoteFileReader, RemoteMerkleTreeReader>,
file_size: u64,
},
/// A file type that is a read-only passthrough from a file on a remote server.
- RemoteUnverifiedReadonly { reader: RemoteFileReader, file_size: u64 },
+ UnverifiedReadonly { reader: RemoteFileReader, file_size: u64 },
/// A file type that is initially empty, and the content is stored on a remote server. File
/// integrity is guaranteed with private Merkle tree.
- RemoteVerifiedNew { editor: VerifiedFileEditor<RemoteFileEditor> },
+ VerifiedNew { editor: VerifiedFileEditor<RemoteFileEditor> },
+ /// A directory type that is initially empty. One can create a new file (`VerifiedNew`) or a new
+ /// directory (`VerifiedNewDirectory` itself) with integrity guaranteed within the VM.
+ VerifiedNewDirectory { dir: RemoteDirEditor },
}
-struct AuthFs {
- /// Store `FileConfig`s using the `Inode` number as the search index.
- ///
- /// For further optimization to minimize the search cost, since Inode is integer, we may
- /// consider storing them in a Vec if we can guarantee that the numbers are small and
- /// consecutive.
- file_pool: BTreeMap<Inode, FileConfig>,
+// AuthFS needs to be `Sync` to be accepted by fuse::worker::start_message_loop as a `FileSystem`.
+pub struct AuthFs {
+ /// Table for `Inode` to `AuthFsEntry` lookup. This needs to be `Sync` to be used in
+ /// `fuse::worker::start_message_loop`.
+ inode_table: Mutex<BTreeMap<Inode, AuthFsEntry>>,
- /// Maximum bytes in the write transaction to the FUSE device. This limits the maximum size to
- /// a read request (including FUSE protocol overhead).
- max_write: u32,
+ /// The next available inode number.
+ next_inode: AtomicU64,
}
+// Implementation for preparing an `AuthFs` instance, before starting to serve.
+// TODO(victorhsieh): Consider implementing a builder to separate the mutable initialization from
+// the immutable / interior-mutable serving phase.
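+//
+// Typical setup (see main.rs): create an empty `AuthFs` with `new()`, populate the root with
+// `add_entry_at_root_dir` / `add_entry_at_ro_dir_by_path`, then pass it to `loop_forever` to serve.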
impl AuthFs {
- pub fn new(file_pool: BTreeMap<Inode, FileConfig>, max_write: u32) -> AuthFs {
- AuthFs { file_pool, max_write }
+ pub fn new() -> AuthFs {
+ let mut inode_table = BTreeMap::new();
+ inode_table.insert(ROOT_INODE, AuthFsEntry::ReadonlyDirectory { dir: InMemoryDir::new() });
+
+ AuthFs { inode_table: Mutex::new(inode_table), next_inode: AtomicU64::new(ROOT_INODE + 1) }
}
- fn get_file_config(&self, inode: &Inode) -> io::Result<&FileConfig> {
- self.file_pool.get(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+ /// Add an `AuthFsEntry` as `basename` to the filesystem root.
+ pub fn add_entry_at_root_dir(
+ &mut self,
+ basename: PathBuf,
+ entry: AuthFsEntry,
+ ) -> Result<Inode> {
+ validate_basename(&basename)?;
+ self.add_entry_at_ro_dir_by_path(ROOT_INODE, &basename, entry)
+ }
+
+ /// Add an `AuthFsEntry` by path from the `ReadonlyDirectory` represented by `dir_inode`. The
+ /// path must be a relative path. If some ancestor directories do not exist, they will be
+ /// created (also as `ReadonlyDirectory`) automatically.
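+ ///
+ /// For example, adding an entry at `system/framework/framework.jar` under `dir_inode` first
+ /// creates `system` and `system/framework` as `ReadonlyDirectory` entries if they do not exist.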
+ pub fn add_entry_at_ro_dir_by_path(
+ &mut self,
+ dir_inode: Inode,
+ path: &Path,
+ entry: AuthFsEntry,
+ ) -> Result<Inode> {
+ // 1. Make sure the parent directories all exist. Derive the entry's parent inode.
+ let parent_path =
+ path.parent().ok_or_else(|| anyhow!("No parent directory: {:?}", path))?;
+ let parent_inode =
+ parent_path.components().try_fold(dir_inode, |current_dir_inode, path_component| {
+ match path_component {
+ Component::RootDir => bail!("Absolute path is not supported"),
+ Component::Normal(name) => {
+ let inode_table = self.inode_table.get_mut().unwrap();
+ // Locate the internal directory structure.
+ let current_dir_entry =
+ inode_table.get_mut(&current_dir_inode).ok_or_else(|| {
+ anyhow!("Unknown directory inode {}", current_dir_inode)
+ })?;
+ let dir = match current_dir_entry {
+ AuthFsEntry::ReadonlyDirectory { dir } => dir,
+ _ => unreachable!("Not a ReadonlyDirectory"),
+ };
+ // Return directory inode. Create first if not exists.
+ if let Some(existing_inode) = dir.lookup_inode(name.as_ref()) {
+ Ok(existing_inode)
+ } else {
+ let new_inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
+ let new_dir_entry =
+ AuthFsEntry::ReadonlyDirectory { dir: InMemoryDir::new() };
+
+ // Actually update the tables.
+ dir.add_entry(name.as_ref(), new_inode)?;
+ if inode_table.insert(new_inode, new_dir_entry).is_some() {
+ bail!("Unexpected to find a duplicated inode");
+ }
+ Ok(new_inode)
+ }
+ }
+ _ => Err(anyhow!("Path is not canonical: {:?}", path)),
+ }
+ })?;
+
+ // 2. Insert the entry to the parent directory, as well as the inode table.
+ let inode_table = self.inode_table.get_mut().unwrap();
+ match inode_table.get_mut(&parent_inode).expect("previously returned inode") {
+ AuthFsEntry::ReadonlyDirectory { dir } => {
+ let basename =
+ path.file_name().ok_or_else(|| anyhow!("Bad file name: {:?}", path))?;
+ let new_inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
+
+ // Actually update the tables.
+ dir.add_entry(basename.as_ref(), new_inode)?;
+ if inode_table.insert(new_inode, entry).is_some() {
+ bail!("Unexpected to find a duplicated inode");
+ }
+ Ok(new_inode)
+ }
+ _ => unreachable!("Not a ReadonlyDirectory"),
+ }
+ }
+}
+
+// Implementation for serving requests.
+impl AuthFs {
+ /// Handles the entry associated with `inode` if found. This function returns whatever
+ /// `handle_fn` returns.
+ fn handle_inode<F, R>(&self, inode: &Inode, handle_fn: F) -> io::Result<R>
+ where
+ F: FnOnce(&AuthFsEntry) -> io::Result<R>,
+ {
+ let inode_table = self.inode_table.lock().unwrap();
+ let entry =
+ inode_table.get(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
+ handle_fn(entry)
+ }
+
+ /// Adds a new entry `name` created by `create_fn` at `parent_inode`.
+ ///
+ /// The operation involves two updates: adding the name with a newly allocated inode to the
+ /// parent directory, and inserting the new inode and the actual `AuthFsEntry` into the global
+ /// inode table.
+ ///
+ /// `create_fn` receives the parent directory entry, in which it creates the new entry and
+ /// registers the new inode. The entry it returns is then added to the inode table.
+ fn create_new_entry<F>(
+ &self,
+ parent_inode: Inode,
+ name: &CStr,
+ create_fn: F,
+ ) -> io::Result<Inode>
+ where
+ F: FnOnce(&mut AuthFsEntry, &Path, Inode) -> io::Result<AuthFsEntry>,
+ {
+ let mut inode_table = self.inode_table.lock().unwrap();
+ let mut parent_entry = inode_table
+ .get_mut(&parent_inode)
+ .ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
+
+ let new_inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
+ let basename: &Path = cstr_to_path(name);
+ let new_file_entry = create_fn(&mut parent_entry, basename, new_inode)?;
+ if let btree_map::Entry::Vacant(entry) = inode_table.entry(new_inode) {
+ entry.insert(new_file_entry);
+ Ok(new_inode)
+ } else {
+ unreachable!("Unexpected duplication of inode {}", new_inode);
+ }
}
}
@@ -107,25 +238,30 @@
}
}
-enum FileMode {
+#[allow(clippy::enum_variant_names)]
+enum AccessMode {
ReadOnly,
ReadWrite,
}
-fn create_stat(ino: libc::ino_t, file_size: u64, file_mode: FileMode) -> io::Result<libc::stat64> {
+fn create_stat(
+ ino: libc::ino_t,
+ file_size: u64,
+ access_mode: AccessMode,
+) -> io::Result<libc::stat64> {
+ // SAFETY: stat64 is a plain C struct without pointers.
let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
st.st_ino = ino;
- st.st_mode = match file_mode {
+ st.st_mode = match access_mode {
// Until needed, let's just grant the owner access.
- FileMode::ReadOnly => libc::S_IFREG | libc::S_IRUSR,
- FileMode::ReadWrite => libc::S_IFREG | libc::S_IRUSR | libc::S_IWUSR,
+ // TODO(205169366): Implement mode properly.
+ AccessMode::ReadOnly => libc::S_IFREG | libc::S_IRUSR,
+ AccessMode::ReadWrite => libc::S_IFREG | libc::S_IRUSR | libc::S_IWUSR,
};
- st.st_dev = 0;
st.st_nlink = 1;
st.st_uid = 0;
st.st_gid = 0;
- st.st_rdev = 0;
st.st_size = libc::off64_t::try_from(file_size)
.map_err(|_| io::Error::from_raw_os_error(libc::EFBIG))?;
st.st_blksize = blk_size();
@@ -135,6 +271,30 @@
Ok(st)
}
+fn create_dir_stat(ino: libc::ino_t, file_number: u16) -> io::Result<libc::stat64> {
+ // SAFETY: stat64 is a plain C struct without pointers.
+ let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
+
+ st.st_ino = ino;
+ // TODO(205169366): Implement mode properly.
+ st.st_mode = libc::S_IFDIR
+ | libc::S_IXUSR
+ | libc::S_IWUSR
+ | libc::S_IRUSR
+ | libc::S_IXGRP
+ | libc::S_IXOTH;
+
+ // 2 extra for . and ..
+ st.st_nlink = file_number
+ .checked_add(2)
+ .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?
+ .into();
+
+ st.st_uid = 0;
+ st.st_gid = 0;
+ Ok(st)
+}
+
fn offset_to_chunk_index(offset: u64) -> u64 {
offset / CHUNK_SIZE
}
@@ -174,8 +334,8 @@
Ok(total)
}
-// No need to support enumerating directory entries.
-struct EmptyDirectoryIterator {}
+// TODO(205715172): Support enumerating directory entries.
+pub struct EmptyDirectoryIterator {}
impl DirectoryIterator for EmptyDirectoryIterator {
fn next(&mut self) -> Option<DirEntry> {
@@ -189,7 +349,7 @@
type DirIter = EmptyDirectoryIterator;
fn max_buffer_size(&self) -> u32 {
- self.max_write
+ MAX_WRITE_BYTES
}
fn init(&self, _capable: FsOptions) -> io::Result<FsOptions> {
@@ -198,25 +358,40 @@
Ok(FsOptions::WRITEBACK_CACHE)
}
- fn lookup(&self, _ctx: Context, _parent: Inode, name: &CStr) -> io::Result<Entry> {
- // Only accept file name that looks like an integrer. Files in the pool are simply exposed
- // by their inode number. Also, there is currently no directory structure.
- let num = name.to_str().map_err(|_| io::Error::from_raw_os_error(libc::EINVAL))?;
+ fn lookup(&self, _ctx: Context, parent: Inode, name: &CStr) -> io::Result<Entry> {
+ // Look up the entry's inode number in parent directory.
+ let inode = self.handle_inode(&parent, |parent_entry| match parent_entry {
+ AuthFsEntry::ReadonlyDirectory { dir } => {
+ let path = cstr_to_path(name);
+ dir.lookup_inode(path).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+ }
+ AuthFsEntry::VerifiedNewDirectory { dir } => {
+ let path = cstr_to_path(name);
+ dir.find_inode(path).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+ })?;
+
// Normally, `lookup` is required to increase a reference count for the inode (while
- // `forget` will decrease it). It is not necessary here since the files are configured to
- // be static.
- let inode = num.parse::<Inode>().map_err(|_| io::Error::from_raw_os_error(libc::ENOENT))?;
- let st = match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonly { file_size, .. }
- | FileConfig::LocalUnverifiedReadonly { file_size, .. }
- | FileConfig::RemoteUnverifiedReadonly { file_size, .. }
- | FileConfig::RemoteVerifiedReadonly { file_size, .. } => {
- create_stat(inode, *file_size, FileMode::ReadOnly)?
+ // `forget` will decrease it). It is not yet necessary until we start to support
+ // deletion (only for `VerifiedNewDirectory`).
+
+ // Create the entry's stat if found.
+ let st = self.handle_inode(&inode, |entry| match entry {
+ AuthFsEntry::ReadonlyDirectory { dir } => {
+ create_dir_stat(inode, dir.number_of_entries())
}
- FileConfig::RemoteVerifiedNew { editor } => {
- create_stat(inode, editor.size(), FileMode::ReadWrite)?
+ AuthFsEntry::UnverifiedReadonly { file_size, .. }
+ | AuthFsEntry::VerifiedReadonly { file_size, .. } => {
+ create_stat(inode, *file_size, AccessMode::ReadOnly)
}
- };
+ AuthFsEntry::VerifiedNew { editor } => {
+ create_stat(inode, editor.size(), AccessMode::ReadWrite)
+ }
+ AuthFsEntry::VerifiedNewDirectory { dir } => {
+ create_dir_stat(inode, dir.number_of_entries())
+ }
+ })?;
Ok(Entry {
inode,
generation: 0,
@@ -232,20 +407,26 @@
inode: Inode,
_handle: Option<Handle>,
) -> io::Result<(libc::stat64, Duration)> {
- Ok((
- match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonly { file_size, .. }
- | FileConfig::LocalUnverifiedReadonly { file_size, .. }
- | FileConfig::RemoteUnverifiedReadonly { file_size, .. }
- | FileConfig::RemoteVerifiedReadonly { file_size, .. } => {
- create_stat(inode, *file_size, FileMode::ReadOnly)?
- }
- FileConfig::RemoteVerifiedNew { editor } => {
- create_stat(inode, editor.size(), FileMode::ReadWrite)?
- }
- },
- DEFAULT_METADATA_TIMEOUT,
- ))
+ self.handle_inode(&inode, |config| {
+ Ok((
+ match config {
+ AuthFsEntry::ReadonlyDirectory { dir } => {
+ create_dir_stat(inode, dir.number_of_entries())
+ }
+ AuthFsEntry::UnverifiedReadonly { file_size, .. }
+ | AuthFsEntry::VerifiedReadonly { file_size, .. } => {
+ create_stat(inode, *file_size, AccessMode::ReadOnly)
+ }
+ AuthFsEntry::VerifiedNew { editor } => {
+ create_stat(inode, editor.size(), AccessMode::ReadWrite)
+ }
+ AuthFsEntry::VerifiedNewDirectory { dir } => {
+ create_dir_stat(inode, dir.number_of_entries())
+ }
+ }?,
+ DEFAULT_METADATA_TIMEOUT,
+ ))
+ })
}
fn open(
@@ -256,21 +437,64 @@
) -> io::Result<(Option<Self::Handle>, fuse::sys::OpenOptions)> {
// Since file handle is not really used in later operations (which use Inode directly),
// return None as the handle.
- match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonly { .. }
- | FileConfig::LocalUnverifiedReadonly { .. }
- | FileConfig::RemoteVerifiedReadonly { .. }
- | FileConfig::RemoteUnverifiedReadonly { .. } => {
- check_access_mode(flags, libc::O_RDONLY)?;
+ self.handle_inode(&inode, |config| {
+ match config {
+ AuthFsEntry::VerifiedReadonly { .. } | AuthFsEntry::UnverifiedReadonly { .. } => {
+ check_access_mode(flags, libc::O_RDONLY)?;
+ }
+ AuthFsEntry::VerifiedNew { .. } => {
+ // No need to check access modes since all the modes are allowed to the
+ // read-writable file.
+ }
+ AuthFsEntry::ReadonlyDirectory { .. }
+ | AuthFsEntry::VerifiedNewDirectory { .. } => {
+ // TODO(victorhsieh): implement when needed.
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
}
- FileConfig::RemoteVerifiedNew { .. } => {
- // No need to check access modes since all the modes are allowed to the
- // read-writable file.
- }
- }
- // Always cache the file content. There is currently no need to support direct I/O or avoid
- // the cache buffer. Memory mapping is only possible with cache enabled.
- Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
+ // Always cache the file content. There is currently no need to support direct I/O or
+ // avoid the cache buffer. Memory mapping is only possible with cache enabled.
+ Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
+ })
+ }
+
+ fn create(
+ &self,
+ _ctx: Context,
+ parent: Self::Inode,
+ name: &CStr,
+ _mode: u32,
+ _flags: u32,
+ _umask: u32,
+ ) -> io::Result<(Entry, Option<Self::Handle>, fuse::sys::OpenOptions)> {
+ // TODO(205169366): Implement mode properly.
+ // TODO(205172873): handle O_TRUNC and O_EXCL properly.
+ let new_inode =
+ self.create_new_entry(parent, name, |parent_entry, basename, new_inode| {
+ match parent_entry {
+ AuthFsEntry::VerifiedNewDirectory { dir } => {
+ if dir.find_inode(basename).is_some() {
+ return Err(io::Error::from_raw_os_error(libc::EEXIST));
+ }
+ let new_file = dir.create_file(basename, new_inode)?;
+ Ok(AuthFsEntry::VerifiedNew { editor: new_file })
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+ }
+ })?;
+
+ Ok((
+ Entry {
+ inode: new_inode,
+ generation: 0,
+ attr: create_stat(new_inode, /* file_size */ 0, AccessMode::ReadWrite)?,
+ entry_timeout: DEFAULT_METADATA_TIMEOUT,
+ attr_timeout: DEFAULT_METADATA_TIMEOUT,
+ },
+ // See also `open`.
+ /* handle */ None,
+ fuse::sys::OpenOptions::KEEP_CACHE,
+ ))
}
fn read<W: io::Write + ZeroCopyWriter>(
@@ -284,25 +508,22 @@
_lock_owner: Option<u64>,
_flags: u32,
) -> io::Result<usize> {
- match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonly { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
+ self.handle_inode(&inode, |config| {
+ match config {
+ AuthFsEntry::VerifiedReadonly { reader, file_size } => {
+ read_chunks(w, reader, *file_size, offset, size)
+ }
+ AuthFsEntry::UnverifiedReadonly { reader, file_size } => {
+ read_chunks(w, reader, *file_size, offset, size)
+ }
+ AuthFsEntry::VerifiedNew { editor } => {
+ // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
+ // request a read even if the file is open with O_WRONLY.
+ read_chunks(w, editor, editor.size(), offset, size)
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
}
- FileConfig::LocalUnverifiedReadonly { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
- }
- FileConfig::RemoteVerifiedReadonly { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
- }
- FileConfig::RemoteUnverifiedReadonly { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
- }
- FileConfig::RemoteVerifiedNew { editor } => {
- // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
- // request a read even if the file is open with O_WRONLY.
- read_chunks(w, editor, editor.size(), offset, size)
- }
- }
+ })
}
fn write<R: io::Read + ZeroCopyReader>(
@@ -317,14 +538,14 @@
_delayed_write: bool,
_flags: u32,
) -> io::Result<usize> {
- match self.get_file_config(&inode)? {
- FileConfig::RemoteVerifiedNew { editor } => {
+ self.handle_inode(&inode, |config| match config {
+ AuthFsEntry::VerifiedNew { editor } => {
let mut buf = vec![0; size as usize];
r.read_exact(&mut buf)?;
editor.write_at(&buf, offset)
}
_ => Err(io::Error::from_raw_os_error(libc::EBADF)),
- }
+ })
}
fn setattr(
@@ -335,44 +556,52 @@
_handle: Option<Handle>,
valid: SetattrValid,
) -> io::Result<(libc::stat64, Duration)> {
- match self.get_file_config(&inode)? {
- FileConfig::RemoteVerifiedNew { editor } => {
- // Initialize the default stat.
- let mut new_attr = create_stat(inode, editor.size(), FileMode::ReadWrite)?;
- // `valid` indicates what fields in `attr` are valid. Update to return correctly.
- if valid.contains(SetattrValid::SIZE) {
- // st_size is i64, but the cast should be safe since kernel should not give a
- // negative size.
- debug_assert!(attr.st_size >= 0);
- new_attr.st_size = attr.st_size;
- editor.resize(attr.st_size as u64)?;
- }
+ self.handle_inode(&inode, |config| {
+ match config {
+ AuthFsEntry::VerifiedNew { editor } => {
+ // Initialize the default stat.
+ let mut new_attr = create_stat(inode, editor.size(), AccessMode::ReadWrite)?;
+ // `valid` indicates what fields in `attr` are valid. Update to return correctly.
+ if valid.contains(SetattrValid::SIZE) {
+ // st_size is i64, but the cast should be safe since kernel should not give a
+ // negative size.
+ debug_assert!(attr.st_size >= 0);
+ new_attr.st_size = attr.st_size;
+ editor.resize(attr.st_size as u64)?;
+ }
- if valid.contains(SetattrValid::MODE) {
- warn!("Changing st_mode is not currently supported");
- return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ if valid.contains(SetattrValid::MODE) {
+ warn!("Changing st_mode is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::UID) {
+ warn!("Changing st_uid is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::GID) {
+ warn!("Changing st_gid is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::CTIME) {
+ debug!(
+ "Ignoring ctime change as authfs does not maintain timestamp currently"
+ );
+ }
+ if valid.intersects(SetattrValid::ATIME | SetattrValid::ATIME_NOW) {
+ debug!(
+ "Ignoring atime change as authfs does not maintain timestamp currently"
+ );
+ }
+ if valid.intersects(SetattrValid::MTIME | SetattrValid::MTIME_NOW) {
+ debug!(
+ "Ignoring mtime change as authfs does not maintain timestamp currently"
+ );
+ }
+ Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
}
- if valid.contains(SetattrValid::UID) {
- warn!("Changing st_uid is not currently supported");
- return Err(io::Error::from_raw_os_error(libc::ENOSYS));
- }
- if valid.contains(SetattrValid::GID) {
- warn!("Changing st_gid is not currently supported");
- return Err(io::Error::from_raw_os_error(libc::ENOSYS));
- }
- if valid.contains(SetattrValid::CTIME) {
- debug!("Ignoring ctime change as authfs does not maintain timestamp currently");
- }
- if valid.intersects(SetattrValid::ATIME | SetattrValid::ATIME_NOW) {
- debug!("Ignoring atime change as authfs does not maintain timestamp currently");
- }
- if valid.intersects(SetattrValid::MTIME | SetattrValid::MTIME_NOW) {
- debug!("Ignoring mtime change as authfs does not maintain timestamp currently");
- }
- Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
+ _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
}
- _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
- }
+ })
}
fn getxattr(
@@ -382,40 +611,75 @@
name: &CStr,
size: u32,
) -> io::Result<GetxattrReply> {
- match self.get_file_config(&inode)? {
- FileConfig::RemoteVerifiedNew { editor } => {
- // FUSE ioctl is limited, thus we can't implement fs-verity ioctls without a kernel
- // change (see b/196635431). Until it's possible, use xattr to expose what we need
- // as an authfs specific API.
- if name != CStr::from_bytes_with_nul(b"authfs.fsverity.digest\0").unwrap() {
- return Err(io::Error::from_raw_os_error(libc::ENODATA));
- }
+ self.handle_inode(&inode, |config| {
+ match config {
+ AuthFsEntry::VerifiedNew { editor } => {
+ // FUSE ioctl is limited, thus we can't implement fs-verity ioctls without a kernel
+ // change (see b/196635431). Until it's possible, use xattr to expose what we need
+ // as an authfs specific API.
+ if name != CStr::from_bytes_with_nul(b"authfs.fsverity.digest\0").unwrap() {
+ return Err(io::Error::from_raw_os_error(libc::ENODATA));
+ }
- if size == 0 {
- // Per protocol, when size is 0, return the value size.
- Ok(GetxattrReply::Count(editor.get_fsverity_digest_size() as u32))
- } else {
- let digest = editor.calculate_fsverity_digest()?;
- if digest.len() > size as usize {
- Err(io::Error::from_raw_os_error(libc::ERANGE))
+ if size == 0 {
+ // Per protocol, when size is 0, return the value size.
+ Ok(GetxattrReply::Count(editor.get_fsverity_digest_size() as u32))
} else {
- Ok(GetxattrReply::Value(digest.to_vec()))
+ let digest = editor.calculate_fsverity_digest()?;
+ if digest.len() > size as usize {
+ Err(io::Error::from_raw_os_error(libc::ERANGE))
+ } else {
+ Ok(GetxattrReply::Value(digest.to_vec()))
+ }
}
}
+ _ => Err(io::Error::from_raw_os_error(libc::ENODATA)),
}
- _ => Err(io::Error::from_raw_os_error(libc::ENODATA)),
- }
+ })
+ }
+
+ fn mkdir(
+ &self,
+ _ctx: Context,
+ parent: Self::Inode,
+ name: &CStr,
+ _mode: u32,
+ _umask: u32,
+ ) -> io::Result<Entry> {
+ // TODO(205169366): Implement mode properly.
+ let new_inode =
+ self.create_new_entry(parent, name, |parent_entry, basename, new_inode| {
+ match parent_entry {
+ AuthFsEntry::VerifiedNewDirectory { dir } => {
+ if dir.find_inode(basename).is_some() {
+ return Err(io::Error::from_raw_os_error(libc::EEXIST));
+ }
+ let new_dir = dir.mkdir(basename, new_inode)?;
+ Ok(AuthFsEntry::VerifiedNewDirectory { dir: new_dir })
+ }
+ AuthFsEntry::ReadonlyDirectory { .. } => {
+ Err(io::Error::from_raw_os_error(libc::EACCES))
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+ }
+ })?;
+
+ Ok(Entry {
+ inode: new_inode,
+ generation: 0,
+ attr: create_dir_stat(new_inode, /* file_number */ 0)?,
+ entry_timeout: DEFAULT_METADATA_TIMEOUT,
+ attr_timeout: DEFAULT_METADATA_TIMEOUT,
+ })
}
}
/// Mount and start the FUSE instance. This requires CAP_SYS_ADMIN.
pub fn loop_forever(
- file_pool: BTreeMap<Inode, FileConfig>,
+ authfs: AuthFs,
mountpoint: &Path,
extra_options: &Option<String>,
) -> Result<(), fuse::Error> {
- let max_read: u32 = 65536;
- let max_write: u32 = 65536;
let dev_fuse = OpenOptions::new()
.read(true)
.write(true)
@@ -428,7 +692,7 @@
MountOption::AllowOther,
MountOption::UserId(0),
MountOption::GroupId(0),
- MountOption::MaxRead(max_read),
+ MountOption::MaxRead(MAX_READ_BYTES),
];
if let Some(value) = extra_options {
mount_options.push(MountOption::Extra(value));
@@ -437,10 +701,9 @@
fuse::mount(mountpoint, "authfs", libc::MS_NOSUID | libc::MS_NODEV, &mount_options)
.expect("Failed to mount fuse");
- fuse::worker::start_message_loop(
- dev_fuse,
- max_write,
- max_read,
- AuthFs::new(file_pool, max_write),
- )
+ fuse::worker::start_message_loop(dev_fuse, MAX_WRITE_BYTES, MAX_READ_BYTES, authfs)
+}
+
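+/// Converts a null-terminated name from a FUSE request into a `&Path` without copying.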
+fn cstr_to_path(cstr: &CStr) -> &Path {
+ OsStr::from_bytes(cstr.to_bytes()).as_ref()
}
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index ecb0e68..24b041c 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -22,17 +22,14 @@
//! each read of file block can be verified individually only when needed.
//!
//! AuthFS only serves files that are specifically configured. A file configuration may include the
-//! source (e.g. local file or remote file server), verification method (e.g. certificate for
-//! fs-verity verification, or no verification if expected to mount over dm-verity), and file ID.
-//! Regardless of the actual file name, the exposed file names through AuthFS are currently integer,
-//! e.g. /mountpoint/42.
+//! source (e.g. remote file server), verification method (e.g. certificate for fs-verity
+//! verification, or no verification if expected to mount over dm-verity), and file ID. Regardless
+//! of the actual file name, the exposed file names through AuthFS are currently integers, e.g.
+//! /mountpoint/42.
use anyhow::{bail, Context, Result};
use log::error;
-use std::collections::BTreeMap;
use std::convert::TryInto;
-use std::fs::File;
-use std::io::Read;
use std::path::{Path, PathBuf};
use structopt::StructOpt;
@@ -44,9 +41,11 @@
mod fusefs;
use auth::FakeAuthenticator;
-use file::{LocalFileReader, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
+use file::{
+ InMemoryDir, RemoteDirEditor, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader,
+};
use fsverity::{VerifiedFileEditor, VerifiedFileReader};
-use fusefs::{FileConfig, Inode};
+use fusefs::{AuthFs, AuthFsEntry};
#[derive(StructOpt)]
struct Args {
@@ -64,278 +63,240 @@
/// A read-only remote file with integrity check. Can be multiple.
///
- /// For example, `--remote-verified-file 5:10:/path/to/cert` tells the filesystem to associate
- /// entry 5 with a remote file 10, and need to be verified against the /path/to/cert.
+ /// For example, `--remote-ro-file 5:/path/to/cert` tells the filesystem to associate the
+ /// file $MOUNTPOINT/5 with a remote FD 5, and need to be verified against the /path/to/cert.
#[structopt(long, parse(try_from_str = parse_remote_ro_file_option))]
remote_ro_file: Vec<OptionRemoteRoFile>,
/// A read-only remote file without integrity check. Can be multiple.
///
- /// For example, `--remote-unverified-file 5:10` tells the filesystem to associate entry 5
- /// with a remote file 10.
- #[structopt(long, parse(try_from_str = parse_remote_ro_file_unverified_option))]
- remote_ro_file_unverified: Vec<OptionRemoteRoFileUnverified>,
+ /// For example, `--remote-ro-file-unverified 5` tells the filesystem to associate the file
+ /// $MOUNTPOINT/5 with a remote FD 5.
+ #[structopt(long)]
+ remote_ro_file_unverified: Vec<i32>,
/// A new read-writable remote file with integrity check. Can be multiple.
///
- /// For example, `--remote-new-verified-file 12:34` tells the filesystem to associate entry 12
- /// with a remote file 34.
- #[structopt(long, parse(try_from_str = parse_remote_new_rw_file_option))]
- remote_new_rw_file: Vec<OptionRemoteRwFile>,
+ /// For example, `--remote-new-rw-file 5` tells the filesystem to associate the file
+ /// $MOUNTPOINT/5 with a remote FD 5.
+ #[structopt(long)]
+ remote_new_rw_file: Vec<i32>,
- /// Debug only. A read-only local file with integrity check. Can be multiple.
- #[structopt(long, parse(try_from_str = parse_local_file_ro_option))]
- local_ro_file: Vec<OptionLocalFileRo>,
+ /// A read-only directory that represents a remote directory. The directory view is constructed
+ /// and finalized during the filesystem initialization based on the provided mapping file
+ /// (which is a serialized protobuf of android.security.fsverity.FSVerityDigests, which
+ /// essentially provides <file path, fs-verity digest> mappings of exported files). The mapping
+ /// file is supposed to come from a trusted location in order to provide a trusted view as well
+ /// as verified access to the included files with their fs-verity digests. Not all files on the
+ /// remote host may be included in the mapping file, so the directory view may be partial. The
+ /// directory structure won't change throughout the filesystem lifetime.
+ ///
+ /// For example, `--remote-ro-dir 5:/path/to/mapping:/prefix/` tells the filesystem to
+ /// construct a directory structure defined in the mapping file at $MOUNTPOINT/5, which may
+ /// include a file like /5/system/framework/framework.jar. "/prefix/" tells the filesystem to
+ /// strip the path (e.g. "/system/") from the mount point to match the expected location of the
+ /// remote FD (e.g. a directory FD of "/system" in the remote).
+ #[structopt(long, parse(try_from_str = parse_remote_new_ro_dir_option))]
+ remote_ro_dir: Vec<OptionRemoteRoDir>,
- /// Debug only. A read-only local file without integrity check. Can be multiple.
- #[structopt(long, parse(try_from_str = parse_local_ro_file_unverified_ro_option))]
- local_ro_file_unverified: Vec<OptionLocalRoFileUnverified>,
+ /// A new directory that is assumed empty in the backing filesystem. New files created in this
+ /// directory are integrity-protected in the same way as --remote-new-rw-file. Can be
+ /// multiple.
+ ///
+ /// For example, `--remote-new-rw-dir 5` tells the filesystem to associate $MOUNTPOINT/5
+ /// with a remote dir FD 5.
+ #[structopt(long)]
+ remote_new_rw_dir: Vec<i32>,
/// Enable debugging features.
#[structopt(long)]
debug: bool,
}
-impl Args {
- fn has_remote_files(&self) -> bool {
- !self.remote_ro_file.is_empty()
- || !self.remote_ro_file_unverified.is_empty()
- || !self.remote_new_rw_file.is_empty()
- }
-}
-
struct OptionRemoteRoFile {
- ino: Inode,
-
/// ID to refer to the remote file.
- remote_id: i32,
+ remote_fd: i32,
/// Certificate to verify the authenticity of the file's fs-verity signature.
/// TODO(170494765): Implement PKCS#7 signature verification.
_certificate_path: PathBuf,
}
-struct OptionRemoteRoFileUnverified {
- ino: Inode,
+struct OptionRemoteRoDir {
+ /// ID to refer to the remote dir.
+ remote_dir_fd: i32,
- /// ID to refer to the remote file.
- remote_id: i32,
-}
+ /// A mapping file that describes the expecting file/directory structure and integrity metadata
+ /// in the remote directory. The file contains serialized protobuf of
+ /// android.security.fsverity.FSVerityDigests.
+ /// TODO(203251769): Really use the file when it's generated.
+ #[allow(dead_code)]
+ mapping_file_path: PathBuf,
-struct OptionRemoteRwFile {
- ino: Inode,
-
- /// ID to refer to the remote file.
- remote_id: i32,
-}
-
-struct OptionLocalFileRo {
- ino: Inode,
-
- /// Local path of the backing file.
- file_path: PathBuf,
-
- /// Local path of the backing file's fs-verity Merkle tree dump.
- merkle_tree_dump_path: PathBuf,
-
- /// Local path of fs-verity signature for the backing file.
- signature_path: PathBuf,
-
- /// Certificate to verify the authenticity of the file's fs-verity signature.
- /// TODO(170494765): Implement PKCS#7 signature verification.
- _certificate_path: PathBuf,
-}
-
-struct OptionLocalRoFileUnverified {
- ino: Inode,
-
- /// Local path of the backing file.
- file_path: PathBuf,
+ prefix: PathBuf,
}
fn parse_remote_ro_file_option(option: &str) -> Result<OptionRemoteRoFile> {
let strs: Vec<&str> = option.split(':').collect();
- if strs.len() != 3 {
+ if strs.len() != 2 {
bail!("Invalid option: {}", option);
}
Ok(OptionRemoteRoFile {
- ino: strs[0].parse::<Inode>()?,
- remote_id: strs[1].parse::<i32>()?,
- _certificate_path: PathBuf::from(strs[2]),
+ remote_fd: strs[0].parse::<i32>()?,
+ _certificate_path: PathBuf::from(strs[1]),
})
}
-fn parse_remote_ro_file_unverified_option(option: &str) -> Result<OptionRemoteRoFileUnverified> {
+fn parse_remote_new_ro_dir_option(option: &str) -> Result<OptionRemoteRoDir> {
let strs: Vec<&str> = option.split(':').collect();
- if strs.len() != 2 {
+ if strs.len() != 3 {
bail!("Invalid option: {}", option);
}
- Ok(OptionRemoteRoFileUnverified {
- ino: strs[0].parse::<Inode>()?,
- remote_id: strs[1].parse::<i32>()?,
+ Ok(OptionRemoteRoDir {
+ remote_dir_fd: strs[0].parse::<i32>().unwrap(),
+ mapping_file_path: PathBuf::from(strs[1]),
+ prefix: PathBuf::from(strs[2]),
})
}
-fn parse_remote_new_rw_file_option(option: &str) -> Result<OptionRemoteRwFile> {
- let strs: Vec<&str> = option.split(':').collect();
- if strs.len() != 2 {
- bail!("Invalid option: {}", option);
- }
- Ok(OptionRemoteRwFile {
- ino: strs[0].parse::<Inode>().unwrap(),
- remote_id: strs[1].parse::<i32>().unwrap(),
- })
-}
-
-fn parse_local_file_ro_option(option: &str) -> Result<OptionLocalFileRo> {
- let strs: Vec<&str> = option.split(':').collect();
- if strs.len() != 5 {
- bail!("Invalid option: {}", option);
- }
- Ok(OptionLocalFileRo {
- ino: strs[0].parse::<Inode>()?,
- file_path: PathBuf::from(strs[1]),
- merkle_tree_dump_path: PathBuf::from(strs[2]),
- signature_path: PathBuf::from(strs[3]),
- _certificate_path: PathBuf::from(strs[4]),
- })
-}
-
-fn parse_local_ro_file_unverified_ro_option(option: &str) -> Result<OptionLocalRoFileUnverified> {
- let strs: Vec<&str> = option.split(':').collect();
- if strs.len() != 2 {
- bail!("Invalid option: {}", option);
- }
- Ok(OptionLocalRoFileUnverified {
- ino: strs[0].parse::<Inode>()?,
- file_path: PathBuf::from(strs[1]),
- })
-}
-
-fn new_config_remote_verified_file(
+fn new_remote_verified_file_entry(
service: file::VirtFdService,
- remote_id: i32,
+ remote_fd: i32,
file_size: u64,
-) -> Result<FileConfig> {
- let signature = service.readFsveritySignature(remote_id).context("Failed to read signature")?;
+) -> Result<AuthFsEntry> {
+ let signature = service.readFsveritySignature(remote_fd).context("Failed to read signature")?;
let authenticator = FakeAuthenticator::always_succeed();
- Ok(FileConfig::RemoteVerifiedReadonly {
+ Ok(AuthFsEntry::VerifiedReadonly {
reader: VerifiedFileReader::new(
&authenticator,
- RemoteFileReader::new(service.clone(), remote_id),
+ RemoteFileReader::new(service.clone(), remote_fd),
file_size,
signature,
- RemoteMerkleTreeReader::new(service.clone(), remote_id),
+ RemoteMerkleTreeReader::new(service.clone(), remote_fd),
)?,
file_size,
})
}
-fn new_config_remote_unverified_file(
+fn new_remote_unverified_file_entry(
service: file::VirtFdService,
- remote_id: i32,
+ remote_fd: i32,
file_size: u64,
-) -> Result<FileConfig> {
- let reader = RemoteFileReader::new(service, remote_id);
- Ok(FileConfig::RemoteUnverifiedReadonly { reader, file_size })
+) -> Result<AuthFsEntry> {
+ let reader = RemoteFileReader::new(service, remote_fd);
+ Ok(AuthFsEntry::UnverifiedReadonly { reader, file_size })
}
-fn new_config_local_ro_file(
- protected_file: &Path,
- merkle_tree_dump: &Path,
- signature: &Path,
-) -> Result<FileConfig> {
- let file = File::open(&protected_file)?;
- let file_size = file.metadata()?.len();
- let file_reader = LocalFileReader::new(file)?;
- let merkle_tree_reader = LocalFileReader::new(File::open(merkle_tree_dump)?)?;
- let authenticator = FakeAuthenticator::always_succeed();
- let mut sig = Vec::new();
- let _ = File::open(signature)?.read_to_end(&mut sig)?;
- let reader =
- VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree_reader)?;
- Ok(FileConfig::LocalVerifiedReadonly { reader, file_size })
-}
-
-fn new_config_local_ro_file_unverified(file_path: &Path) -> Result<FileConfig> {
- let reader = LocalFileReader::new(File::open(file_path)?)?;
- let file_size = reader.len();
- Ok(FileConfig::LocalUnverifiedReadonly { reader, file_size })
-}
-
-fn new_config_remote_new_verified_file(
+fn new_remote_new_verified_file_entry(
service: file::VirtFdService,
- remote_id: i32,
-) -> Result<FileConfig> {
- let remote_file = RemoteFileEditor::new(service, remote_id);
- Ok(FileConfig::RemoteVerifiedNew { editor: VerifiedFileEditor::new(remote_file) })
+ remote_fd: i32,
+) -> Result<AuthFsEntry> {
+ let remote_file = RemoteFileEditor::new(service, remote_fd);
+ Ok(AuthFsEntry::VerifiedNew { editor: VerifiedFileEditor::new(remote_file) })
}
-fn prepare_file_pool(args: &Args) -> Result<BTreeMap<Inode, FileConfig>> {
- let mut file_pool = BTreeMap::new();
+fn new_remote_new_verified_dir_entry(
+ service: file::VirtFdService,
+ remote_fd: i32,
+) -> Result<AuthFsEntry> {
+ let dir = RemoteDirEditor::new(service, remote_fd);
+ Ok(AuthFsEntry::VerifiedNewDirectory { dir })
+}
- if args.has_remote_files() {
- let service = file::get_rpc_binder_service(args.cid)?;
+fn prepare_root_dir_entries(authfs: &mut AuthFs, args: &Args) -> Result<()> {
+ let service = file::get_rpc_binder_service(args.cid)?;
- for config in &args.remote_ro_file {
- file_pool.insert(
- config.ino,
- new_config_remote_verified_file(
- service.clone(),
- config.remote_id,
- service.getFileSize(config.remote_id)?.try_into()?,
- )?,
- );
- }
-
- for config in &args.remote_ro_file_unverified {
- file_pool.insert(
- config.ino,
- new_config_remote_unverified_file(
- service.clone(),
- config.remote_id,
- service.getFileSize(config.remote_id)?.try_into()?,
- )?,
- );
- }
-
- for config in &args.remote_new_rw_file {
- file_pool.insert(
- config.ino,
- new_config_remote_new_verified_file(service.clone(), config.remote_id)?,
- );
- }
- }
-
- for config in &args.local_ro_file {
- file_pool.insert(
- config.ino,
- new_config_local_ro_file(
- &config.file_path,
- &config.merkle_tree_dump_path,
- &config.signature_path,
+ for config in &args.remote_ro_file {
+ authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(config.remote_fd),
+ new_remote_verified_file_entry(
+ service.clone(),
+ config.remote_fd,
+ service.getFileSize(config.remote_fd)?.try_into()?,
)?,
- );
+ )?;
}
- for config in &args.local_ro_file_unverified {
- file_pool.insert(config.ino, new_config_local_ro_file_unverified(&config.file_path)?);
+ for remote_fd in &args.remote_ro_file_unverified {
+ let remote_fd = *remote_fd;
+ authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(remote_fd),
+ new_remote_unverified_file_entry(
+ service.clone(),
+ remote_fd,
+ service.getFileSize(remote_fd)?.try_into()?,
+ )?,
+ )?;
}
- Ok(file_pool)
+ for remote_fd in &args.remote_new_rw_file {
+ let remote_fd = *remote_fd;
+ authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(remote_fd),
+ new_remote_new_verified_file_entry(service.clone(), remote_fd)?,
+ )?;
+ }
+
+ for remote_fd in &args.remote_new_rw_dir {
+ let remote_fd = *remote_fd;
+ authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(remote_fd),
+ new_remote_new_verified_dir_entry(service.clone(), remote_fd)?,
+ )?;
+ }
+
+ for config in &args.remote_ro_dir {
+ let dir_root_inode = authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(config.remote_dir_fd),
+ AuthFsEntry::ReadonlyDirectory { dir: InMemoryDir::new() },
+ )?;
+
+ // TODO(203251769): Read actual path from config.mapping_file_path when it's generated.
+ let paths = vec![
+ Path::new("/system/framework/framework.jar"),
+ Path::new("/system/framework/services.jar"),
+ ];
+
+ for path in &paths {
+ let file_entry = {
+ // TODO(205883847): Not all files will be used. Open the remote file lazily.
+ let related_path = path.strip_prefix(&config.prefix)?;
+ let remote_file = RemoteFileReader::new_by_path(
+ service.clone(),
+ config.remote_dir_fd,
+ related_path,
+ )?;
+ let file_size = service.getFileSize(remote_file.get_remote_fd())?.try_into()?;
+ // TODO(203251769): Switch to VerifiedReadonly
+ AuthFsEntry::UnverifiedReadonly { reader: remote_file, file_size }
+ };
+ authfs.add_entry_at_ro_dir_by_path(
+ dir_root_inode,
+ path.strip_prefix("/")?,
+ file_entry,
+ )?;
+ }
+ }
+
+ Ok(())
+}
+
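+/// Maps a remote FD number to the file name exposed at the mount point root, e.g. FD 5 becomes
+/// the file name "5" (i.e. $MOUNTPOINT/5).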
+fn remote_fd_to_path_buf(fd: i32) -> PathBuf {
+ PathBuf::from(fd.to_string())
}
fn try_main() -> Result<()> {
- let args = Args::from_args();
+ let args = Args::from_args_safe()?;
let log_level = if args.debug { log::Level::Debug } else { log::Level::Info };
android_logger::init_once(
android_logger::Config::default().with_tag("authfs").with_min_level(log_level),
);
- let file_pool = prepare_file_pool(&args)?;
- fusefs::loop_forever(file_pool, &args.mount_point, &args.extra_options)?;
+ let mut authfs = AuthFs::new();
+ prepare_root_dir_entries(&mut authfs, &args)?;
+ fusefs::loop_forever(authfs, &args.mount_point, &args.extra_options)?;
bail!("Unexpected exit after the handler loop")
}
diff --git a/authfs/tests/Android.bp b/authfs/tests/Android.bp
index 8061c56..92fa428 100644
--- a/authfs/tests/Android.bp
+++ b/authfs/tests/Android.bp
@@ -14,8 +14,35 @@
"VirtualizationTestHelper",
],
test_suites: ["general-tests"],
+ target_required: ["open_then_run_module"],
data: [
":authfs_test_files",
":MicrodroidTestApp.signed",
],
}
+
+rust_test {
+ // PushFilePreparer can sometimes push the directory (if named "open_then_run", which contains
+ // the actual executable in a per-architecture sub-directory) instead of the executable. This
+ // makes it harder to use because the host Java test has to detect the executable path
+ // dynamically, e.g. if it's a directory, append the device's architecture to build the actual
+ // executable path. Simply renaming the module (and thus the host directory) forces
+ // PushFilePreparer to always push the executable to the destination, so that the Java test can
+ // easily locate the executable with a constant path.
+ name: "open_then_run_module",
+ stem: "open_then_run",
+
+ crate_name: "open_then_run",
+ srcs: ["open_then_run.rs"],
+ edition: "2018",
+ rustlibs: [
+ "libandroid_logger",
+ "libanyhow",
+ "liblibc",
+ "libclap",
+ "libcommand_fds",
+ "liblog_rust",
+ ],
+ test_suites: ["general-tests"],
+ test_harness: false,
+}
diff --git a/authfs/tests/AndroidTest.xml b/authfs/tests/AndroidTest.xml
index 6100ab9..643e2b4 100644
--- a/authfs/tests/AndroidTest.xml
+++ b/authfs/tests/AndroidTest.xml
@@ -23,7 +23,7 @@
<target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
<option name="throw-if-cmd-fail" value="true" />
- <!-- Prepare test directory. -->
+ <!-- Prepare test directories. -->
<option name="run-command" value="mkdir -p /data/local/tmp/authfs/mnt" />
<option name="teardown-command" value="rm -rf /data/local/tmp/authfs" />
</target_preparer>
@@ -31,6 +31,11 @@
<target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
<option name="cleanup" value="true" />
<option name="abort-on-push-failure" value="true" />
+
+ <!-- Test executable -->
+ <option name="push-file" key="open_then_run" value="/data/local/tmp/open_then_run" />
+
+ <!-- Test data files -->
<option name="push-file" key="cert.der" value="/data/local/tmp/authfs/cert.der" />
<option name="push-file" key="input.4m" value="/data/local/tmp/authfs/input.4m" />
<option name="push-file" key="input.4k1" value="/data/local/tmp/authfs/input.4k1" />
diff --git a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
index f06c8f5..70d48c2 100644
--- a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
+++ b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
@@ -35,6 +35,7 @@
import com.android.tradefed.testtype.junit4.AfterClassWithInfo;
import com.android.tradefed.testtype.junit4.BeforeClassWithInfo;
import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.CommandStatus;
import org.junit.After;
import org.junit.AssumptionViolatedException;
@@ -53,6 +54,12 @@
/** Test directory on Android where data are located */
private static final String TEST_DIR = "/data/local/tmp/authfs";
+ /** Output directory where the test can generate output on Android */
+ private static final String TEST_OUTPUT_DIR = "/data/local/tmp/authfs/output_dir";
+
+ /** Path to open_then_run on Android */
+ private static final String OPEN_THEN_RUN_BIN = "/data/local/tmp/open_then_run";
+
/** Mount point of authfs on Microdroid during the test */
private static final String MOUNT_DIR = "/data/local/tmp";
@@ -136,91 +143,91 @@
}
@Before
- public void setUp() {
+ public void setUp() throws Exception {
assumeFalse(sAssumptionFailed);
+ sAndroid.run("mkdir " + TEST_OUTPUT_DIR);
}
@After
- public void tearDown() throws DeviceNotAvailableException {
+ public void tearDown() throws Exception {
sAndroid.tryRun("killall fd_server");
- sAndroid.tryRun("rm -f " + TEST_DIR + "/output");
+ sAndroid.run("rm -rf " + TEST_OUTPUT_DIR);
tryRunOnMicrodroid("killall authfs");
tryRunOnMicrodroid("umount " + MOUNT_DIR);
}
@Test
- public void testReadWithFsverityVerification_RemoteFile()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testReadWithFsverityVerification_RemoteFile() throws Exception {
// Setup
runFdServerOnAndroid(
- "3<input.4m 4<input.4m.merkle_dump 5<input.4m.fsv_sig 6<input.4m",
+ "--open-ro 3:input.4m --open-ro 4:input.4m.merkle_dump --open-ro 5:input.4m.fsv_sig"
+ + " --open-ro 6:input.4m",
"--ro-fds 3:4:5 --ro-fds 6");
runAuthFsOnMicrodroid(
- "--remote-ro-file-unverified 10:6 --remote-ro-file 11:3:cert.der --cid "
+ "--remote-ro-file-unverified 6 --remote-ro-file 3:cert.der --cid "
+ VMADDR_CID_HOST);
// Action
- String actualHashUnverified4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/10");
- String actualHash4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/11");
+ String actualHashUnverified4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/6");
+ String actualHash4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/3");
// Verify
String expectedHash4m = computeFileHashOnAndroid(TEST_DIR + "/input.4m");
- assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4m, actualHashUnverified4m);
- assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4m, actualHash4m);
+ assertEquals("Inconsistent hash from /authfs/6: ", expectedHash4m, actualHashUnverified4m);
+ assertEquals("Inconsistent hash from /authfs/3: ", expectedHash4m, actualHash4m);
}
// Separate the test from the above simply because exec in shell does not allow open too many
// files.
@Test
- public void testReadWithFsverityVerification_RemoteSmallerFile()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testReadWithFsverityVerification_RemoteSmallerFile() throws Exception {
// Setup
runFdServerOnAndroid(
- "3<input.4k 4<input.4k.merkle_dump 5<input.4k.fsv_sig"
- + " 6<input.4k1 7<input.4k1.merkle_dump 8<input.4k1.fsv_sig",
+ "--open-ro 3:input.4k --open-ro 4:input.4k.merkle_dump --open-ro"
+ + " 5:input.4k.fsv_sig --open-ro 6:input.4k1 --open-ro 7:input.4k1.merkle_dump"
+ + " --open-ro 8:input.4k1.fsv_sig",
"--ro-fds 3:4:5 --ro-fds 6:7:8");
runAuthFsOnMicrodroid(
- "--remote-ro-file 10:3:cert.der --remote-ro-file 11:6:cert.der --cid "
- + VMADDR_CID_HOST);
+ "--remote-ro-file 3:cert.der --remote-ro-file 6:cert.der --cid " + VMADDR_CID_HOST);
// Action
- String actualHash4k = computeFileHashOnMicrodroid(MOUNT_DIR + "/10");
- String actualHash4k1 = computeFileHashOnMicrodroid(MOUNT_DIR + "/11");
+ String actualHash4k = computeFileHashOnMicrodroid(MOUNT_DIR + "/3");
+ String actualHash4k1 = computeFileHashOnMicrodroid(MOUNT_DIR + "/6");
// Verify
String expectedHash4k = computeFileHashOnAndroid(TEST_DIR + "/input.4k");
String expectedHash4k1 = computeFileHashOnAndroid(TEST_DIR + "/input.4k1");
- assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4k, actualHash4k);
- assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4k1, actualHash4k1);
+ assertEquals("Inconsistent hash from /authfs/3: ", expectedHash4k, actualHash4k);
+ assertEquals("Inconsistent hash from /authfs/6: ", expectedHash4k1, actualHash4k1);
}
@Test
- public void testReadWithFsverityVerification_TamperedMerkleTree()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testReadWithFsverityVerification_TamperedMerkleTree() throws Exception {
// Setup
runFdServerOnAndroid(
- "3<input.4m 4<input.4m.merkle_dump.bad 5<input.4m.fsv_sig", "--ro-fds 3:4:5");
- runAuthFsOnMicrodroid("--remote-ro-file 10:3:cert.der --cid " + VMADDR_CID_HOST);
+ "--open-ro 3:input.4m --open-ro 4:input.4m.merkle_dump.bad "
+ + "--open-ro 5:input.4m.fsv_sig",
+ "--ro-fds 3:4:5");
+ runAuthFsOnMicrodroid("--remote-ro-file 3:cert.der --cid " + VMADDR_CID_HOST);
// Verify
- assertFalse(copyFileOnMicrodroid(MOUNT_DIR + "/10", "/dev/null"));
+ assertFalse(copyFileOnMicrodroid(MOUNT_DIR + "/3", "/dev/null"));
}
@Test
- public void testWriteThroughCorrectly()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testWriteThroughCorrectly() throws Exception {
// Setup
- runFdServerOnAndroid("3<>output", "--rw-fds 3");
- runAuthFsOnMicrodroid("--remote-new-rw-file 20:3 --cid " + VMADDR_CID_HOST);
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
// Action
String srcPath = "/system/bin/linker64";
- String destPath = MOUNT_DIR + "/20";
- String backendPath = TEST_DIR + "/output";
+ String destPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
assertTrue(copyFileOnMicrodroid(srcPath, destPath));
// Verify
@@ -229,15 +236,14 @@
}
@Test
- public void testWriteFailedIfDetectsTampering()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testWriteFailedIfDetectsTampering() throws Exception {
// Setup
- runFdServerOnAndroid("3<>output", "--rw-fds 3");
- runAuthFsOnMicrodroid("--remote-new-rw-file 20:3 --cid " + VMADDR_CID_HOST);
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
String srcPath = "/system/bin/linker64";
- String destPath = MOUNT_DIR + "/20";
- String backendPath = TEST_DIR + "/output";
+ String destPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
assertTrue(copyFileOnMicrodroid(srcPath, destPath));
// Action
@@ -248,28 +254,32 @@
// Writing to a block partially requires a read back to calculate the new hash. It should fail
// when the content is inconsistent with the known hash. Use direct I/O to avoid simply
// writing to the filesystem cache.
- assertEquals(
- tryRunOnMicrodroid("dd if=/dev/zero of=" + destPath + " bs=1 count=1024 direct"),
- null);
+ assertFalse(
+ writeZerosAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 0, /* number */ 1024, /* writeThrough */ true));
// A full 4K write does not require a read back, so the write can succeed even if the backing
// block has already been tampered with.
- runOnMicrodroid("dd if=/dev/zero of=" + destPath + " bs=1 count=4096 skip=4096");
+ assertTrue(
+ writeZerosAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 4096, /* number */ 4096, /* writeThrough */ false));
// Otherwise, a partial write with correct backing file should still succeed.
- runOnMicrodroid("dd if=/dev/zero of=" + destPath + " bs=1 count=1024 skip=8192");
+ assertTrue(
+ writeZerosAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 8192, /* number */ 1024, /* writeThrough */ false));
}
@Test
- public void testFileResize() throws DeviceNotAvailableException, InterruptedException {
+ public void testFileResize() throws Exception {
// Setup
- runFdServerOnAndroid("3<>output", "--rw-fds 3");
- runAuthFsOnMicrodroid("--remote-new-rw-file 20:3 --cid " + VMADDR_CID_HOST);
- String outputPath = MOUNT_DIR + "/20";
- String backendPath = TEST_DIR + "/output";
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
+ String outputPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
// Action & Verify
- runOnMicrodroid("yes $'\\x01' | tr -d '\\n' | dd bs=1 count=10000 of=" + outputPath);
+ createFileWithOnesOnMicrodroid(outputPath, 10000);
assertEquals(getFileSizeInBytesOnMicrodroid(outputPath), 10000);
expectBackingFileConsistency(
outputPath,
@@ -291,6 +301,144 @@
"e53130831c13dabff71d5d1797e3aaa467b4b7d32b3b8782c4ff03d76976f2aa");
}
+ @Test
+ public void testOutputDirectory_WriteNewFiles() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Action & Verify
+ // Can create a new file to write.
+ String expectedAndroidPath = androidOutputDir + "/file";
+ String authfsPath = authfsOutputDir + "/file";
+ createFileWithOnesOnMicrodroid(authfsPath, 10000);
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsPath), 10000);
+ expectBackingFileConsistency(
+ authfsPath,
+ expectedAndroidPath,
+ "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+
+ // Regular file operations work, e.g. resize.
+ resizeFileOnMicrodroid(authfsPath, 15000);
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsPath), 15000);
+ expectBackingFileConsistency(
+ authfsPath,
+ expectedAndroidPath,
+ "567c89f62586e0d33369157afdfe99a2fa36cdffb01e91dcdc0b7355262d610d");
+ }
+
+ @Test
+ public void testOutputDirectory_MkdirAndWriteFile() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Action
+ // Can create nested directories and can create a file in one.
+ runOnMicrodroid("mkdir " + authfsOutputDir + "/new_dir");
+ runOnMicrodroid("mkdir -p " + authfsOutputDir + "/we/need/to/go/deeper");
+ createFileWithOnesOnMicrodroid(authfsOutputDir + "/new_dir/file1", 10000);
+ createFileWithOnesOnMicrodroid(authfsOutputDir + "/we/need/file2", 10000);
+
+ // Verify
+ // Directories show up in Android.
+ sAndroid.run("test -d " + androidOutputDir + "/new_dir");
+ sAndroid.run("test -d " + androidOutputDir + "/we/need/to/go/deeper");
+ // Files exist in Android. Hashes on Microdroid and Android are consistent.
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/new_dir/file1"), 10000);
+ expectBackingFileConsistency(
+ authfsOutputDir + "/new_dir/file1",
+ androidOutputDir + "/new_dir/file1",
+ "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+ // Same for a file in a nested directory.
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/we/need/file2"), 10000);
+ expectBackingFileConsistency(
+ authfsOutputDir + "/we/need/file2",
+ androidOutputDir + "/we/need/file2",
+ "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+ }
+
+ @Test
+ public void testOutputDirectory_CreateAndTruncateExistingFile() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Action & Verify
+ runOnMicrodroid("echo -n foo > " + authfsOutputDir + "/file");
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/file"), 3);
+ // Can overwrite an existing file and write normally.
+ createFileWithOnesOnMicrodroid(authfsOutputDir + "/file", 10000);
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/file"), 10000);
+ expectBackingFileConsistency(
+ authfsOutputDir + "/file",
+ androidOutputDir + "/file",
+ "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+ }
+
+ @Test
+ public void testOutputDirectory_CannotRecreateDirectoryIfNameExists() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ runOnMicrodroid("touch " + authfsOutputDir + "/some_file");
+ runOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir");
+ runOnMicrodroid("touch " + authfsOutputDir + "/some_dir/file");
+ runOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir/dir");
+
+ // Action & Verify
+ // Cannot create directory if an entry with the same name already exists.
+ assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_file");
+ assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir");
+ assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir/file");
+ assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir/dir");
+ }
+
+ @Test
+ public void testInputDirectory_CanReadFile() throws Exception {
+ // Setup
+ String authfsInputDir = MOUNT_DIR + "/3";
+ runFdServerOnAndroid("--open-dir 3:/system", "--ro-dirs 3");
+ // TODO(203251769): Replace /dev/null with a real manifest file once it is generated. We
+ // currently hard-code the files for the test manually and ignore the integrity check.
+ runAuthFsOnMicrodroid("--remote-ro-dir 3:/dev/null:/system --cid " + VMADDR_CID_HOST);
+
+ // Action
+ String actualHash =
+ computeFileHashOnMicrodroid(authfsInputDir + "/system/framework/framework.jar");
+
+ // Verify
+ String expectedHash = computeFileHashOnAndroid("/system/framework/framework.jar");
+ assertEquals("Expect consistent hash through /authfs/3: ", expectedHash, actualHash);
+ }
+
+ @Test
+ public void testInputDirectory_OnlyAllowlistedFilesExist() throws Exception {
+ // Setup
+ String authfsInputDir = MOUNT_DIR + "/3";
+ runFdServerOnAndroid("--open-dir 3:/system", "--ro-dirs 3");
+ // TODO(203251769): Replace /dev/null with a real manifest file once it is generated. We
+ // currently hard-code the files for the test manually and ignore the integrity check.
+ runAuthFsOnMicrodroid("--remote-ro-dir 3:/dev/null:/system --cid " + VMADDR_CID_HOST);
+
+ // Verify
+ runOnMicrodroid("test -f " + authfsInputDir + "/system/framework/services.jar");
+ assertFailedOnMicrodroid("test -f " + authfsInputDir + "/system/bin/sh");
+ }
+
private void expectBackingFileConsistency(
String authFsPath, String backendPath, String expectedHash)
throws DeviceNotAvailableException {
@@ -340,6 +488,24 @@
return Long.parseLong(runOnMicrodroid("stat -c '%s' " + path));
}
+ private void createFileWithOnesOnMicrodroid(String filePath, long numberOfOnes) {
+ runOnMicrodroid(
+ "yes $'\\x01' | tr -d '\\n' | dd bs=1 count=" + numberOfOnes + " of=" + filePath);
+ }
+
+ private boolean writeZerosAtFileOffsetOnMicrodroid(
+ String filePath, long offset, long numberOfZeros, boolean writeThrough) {
+ String cmd = "dd if=/dev/zero of=" + filePath + " bs=1 count=" + numberOfZeros;
+ if (offset > 0) {
+ cmd += " skip=" + offset;
+ }
+ if (writeThrough) {
+ cmd += " direct";
+ }
+ CommandResult result = runOnMicrodroidForResult(cmd);
+ return result.getStatus() == CommandStatus.SUCCESS;
+ }
+
private void runAuthFsOnMicrodroid(String flags) {
String cmd = AUTHFS_BIN + " " + MOUNT_DIR + " " + flags;
@@ -367,10 +533,20 @@
}
}
- private void runFdServerOnAndroid(String execParamsForOpeningFds, String flags)
+ private void runFdServerOnAndroid(String helperFlags, String fdServerFlags)
throws DeviceNotAvailableException {
- String cmd = "cd " + TEST_DIR + " && exec " + execParamsForOpeningFds + " " + FD_SERVER_BIN
- + " " + flags;
+ String cmd =
+ "cd "
+ + TEST_DIR
+ + " && "
+ + OPEN_THEN_RUN_BIN
+ + " "
+ + helperFlags
+ + " -- "
+ + FD_SERVER_BIN
+ + " "
+ + fdServerFlags;
+
mThreadPool.submit(
() -> {
try {
diff --git a/authfs/tests/open_then_run.rs b/authfs/tests/open_then_run.rs
new file mode 100644
index 0000000..fca8953
--- /dev/null
+++ b/authfs/tests/open_then_run.rs
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! This is a test helper program that opens files and/or directories, then passes the file
+//! descriptors to the specified command. When passing the file descriptors, they are mapped to the
+//! specified numbers in the child process.
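+//!
+//! A minimal, illustrative invocation (the paths and FD numbers are hypothetical; the flags are
+//! the ones defined in parse_args() below, and `--` separates them from the command to run):
+//!
+//!     open_then_run --open-ro 3:/path/to/input --open-rw 4:/path/to/output \
+//!         --open-dir 5:/path/to/dir -- some_program --its-own-flags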
+
+use anyhow::{bail, Context, Result};
+use clap::{App, Arg, Values};
+use command_fds::{CommandFdExt, FdMapping};
+use log::{debug, error};
+use std::fs::{File, OpenOptions};
+use std::os::unix::{fs::OpenOptionsExt, io::AsRawFd, io::RawFd};
+use std::process::Command;
+
+// `PseudoRawFd` is just an integer and is not necessarily backed by a real FD. It is used to
+// denote the expected FD number when setting up the FD mapping in the child process. The
+// intention of this alias is to improve readability by distinguishing it from an actual RawFd.
+type PseudoRawFd = RawFd;
+
+struct FileMapping {
+ file: File,
+ target_fd: PseudoRawFd,
+}
+
+impl FileMapping {
+ fn as_fd_mapping(&self) -> FdMapping {
+ FdMapping { parent_fd: self.file.as_raw_fd(), child_fd: self.target_fd }
+ }
+}
+
+struct Args {
+ ro_files: Vec<FileMapping>,
+ rw_files: Vec<FileMapping>,
+ dir_files: Vec<FileMapping>,
+ cmdline_args: Vec<String>,
+}
+
+fn parse_and_create_file_mapping<F>(
+ values: Option<Values<'_>>,
+ opener: F,
+) -> Result<Vec<FileMapping>>
+where
+ F: Fn(&str) -> Result<File>,
+{
+ if let Some(options) = values {
+ options
+ .map(|option| {
+ // Example option: 10:/some/path
+ let strs: Vec<&str> = option.split(':').collect();
+ if strs.len() != 2 {
+ bail!("Invalid option: {}", option);
+ }
+ let fd = strs[0].parse::<PseudoRawFd>().context("Invalid FD format")?;
+ let path = strs[1];
+ Ok(FileMapping { target_fd: fd, file: opener(path)? })
+ })
+ .collect::<Result<_>>()
+ } else {
+ Ok(Vec::new())
+ }
+}
+
+fn parse_args() -> Result<Args> {
+ #[rustfmt::skip]
+ let matches = App::new("open_then_run")
+ .arg(Arg::with_name("open-ro")
+ .long("open-ro")
+ .value_name("FD:PATH")
+ .help("Open <PATH> read-only to pass as fd <FD>")
+ .multiple(true)
+ .number_of_values(1))
+ .arg(Arg::with_name("open-rw")
+ .long("open-rw")
+ .value_name("FD:PATH")
+ .help("Open/create <PATH> read-write to pass as fd <FD>")
+ .multiple(true)
+ .number_of_values(1))
+ .arg(Arg::with_name("open-dir")
+ .long("open-dir")
+ .value_name("FD:DIR")
+ .help("Open <DIR> to pass as fd <FD>")
+ .multiple(true)
+ .number_of_values(1))
+ .arg(Arg::with_name("args")
+ .help("Command line to execute with pre-opened FD inherited")
+ .last(true)
+ .required(true)
+ .multiple(true))
+ .get_matches();
+
+ let ro_files = parse_and_create_file_mapping(matches.values_of("open-ro"), |path| {
+ OpenOptions::new().read(true).open(path).with_context(|| format!("Open {} read-only", path))
+ })?;
+
+ let rw_files = parse_and_create_file_mapping(matches.values_of("open-rw"), |path| {
+ OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .with_context(|| format!("Open {} read-write", path))
+ })?;
+
+ let dir_files = parse_and_create_file_mapping(matches.values_of("open-dir"), |path| {
+ // The returned FD represents a path (that is supposed to be a directory) and is not really
+ // a file. It would be better to use std::os::unix::io::OwnedFd, but that is currently
+ // experimental. Ideally, all FDs opened by this program should be `OwnedFd`, since we only
+ // open them for the provided program and are not supposed to do anything else with them.
+ OpenOptions::new()
+ .custom_flags(libc::O_PATH | libc::O_DIRECTORY)
+ // The custom flags above are not taken into consideration by the unix implementation of
+ // OpenOptions for flag validation. So even though the man page of open(2) says that
+ // most flags, including the access mode, are ignored, we still need to set a "valid" mode
+ // to make the library happy. The value does not appear to matter elsewhere in the library.
+ .read(true)
+ .open(path)
+ .with_context(|| format!("Open {} directory as path", path))
+ })?;
+
+ let cmdline_args: Vec<_> = matches.values_of("args").unwrap().map(|s| s.to_string()).collect();
+
+ Ok(Args { ro_files, rw_files, dir_files, cmdline_args })
+}
+
+fn try_main() -> Result<()> {
+ let args = parse_args()?;
+
+ let mut command = Command::new(&args.cmdline_args[0]);
+ command.args(&args.cmdline_args[1..]);
+
+ // Set up FD mappings in the child process.
+ let mut fd_mappings = Vec::new();
+ fd_mappings.extend(args.ro_files.iter().map(FileMapping::as_fd_mapping));
+ fd_mappings.extend(args.rw_files.iter().map(FileMapping::as_fd_mapping));
+ fd_mappings.extend(args.dir_files.iter().map(FileMapping::as_fd_mapping));
+ command.fd_mappings(fd_mappings)?;
+
+ debug!("Spawning {:?}", command);
+ command.spawn()?;
+ Ok(())
+}
+
+fn main() {
+ android_logger::init_once(
+ android_logger::Config::default()
+ .with_tag("open_then_run")
+ .with_min_level(log::Level::Debug),
+ );
+
+ if let Err(e) = try_main() {
+ error!("Failed with {:?}", e);
+ std::process::exit(1);
+ }
+}
diff --git a/binder_common/lib.rs b/binder_common/lib.rs
index f2391e3..fa91f5a 100644
--- a/binder_common/lib.rs
+++ b/binder_common/lib.rs
@@ -17,6 +17,7 @@
//! Common items useful for binder clients and/or servers.
pub mod lazy_service;
+pub mod rpc_client;
pub mod rpc_server;
use binder::public_api::{ExceptionCode, Status};
diff --git a/binder_common/rpc_client.rs b/binder_common/rpc_client.rs
new file mode 100644
index 0000000..262a689
--- /dev/null
+++ b/binder_common/rpc_client.rs
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Helpers for implementing an RPC Binder client.
+
+use binder::public_api::{StatusCode, Strong};
+use binder::unstable_api::{new_spibinder, AIBinder};
+
+/// Connects to a binder RPC server.
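+///
+/// Illustrative use only (the interface type and port constant here are hypothetical):
+///
+/// ```ignore
+/// let service: Strong<dyn ISomeService> = connect_rpc_binder(cid, SOME_VSOCK_PORT)?;
+/// ```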
+pub fn connect_rpc_binder<T: binder::FromIBinder + ?Sized>(
+ cid: u32,
+ port: u32,
+) -> binder::Result<Strong<T>> {
+ // SAFETY: AIBinder returned by RpcClient has correct reference count, and the ownership can be
+ // safely taken by new_spibinder.
+ let ibinder = unsafe {
+ new_spibinder(binder_rpc_unstable_bindgen::RpcClient(cid, port) as *mut AIBinder)
+ };
+ if let Some(ibinder) = ibinder {
+ <T>::try_from(ibinder)
+ } else {
+ Err(StatusCode::BAD_VALUE)
+ }
+}
diff --git a/compos/common/Android.bp b/compos/common/Android.bp
index d8fec81..5893fd6 100644
--- a/compos/common/Android.bp
+++ b/compos/common/Android.bp
@@ -14,6 +14,7 @@
"libbinder_rpc_unstable_bindgen",
"libbinder_rs",
"liblog_rust",
+ "librustutils",
],
shared_libs: [
"libbinder_rpc_unstable",
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index a69538e..508423b 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -16,12 +16,13 @@
//! Support for starting CompOS in a VM and connecting to the service
+use crate::timeouts::timeouts;
use crate::{COMPOS_APEX_ROOT, COMPOS_DATA_ROOT, COMPOS_VSOCK_PORT};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
IVirtualMachine::IVirtualMachine,
IVirtualMachineCallback::{BnVirtualMachineCallback, IVirtualMachineCallback},
IVirtualizationService::IVirtualizationService,
- VirtualMachineAppConfig::VirtualMachineAppConfig,
+ VirtualMachineAppConfig::{DebugLevel::DebugLevel, VirtualMachineAppConfig},
VirtualMachineConfig::VirtualMachineConfig,
};
use android_system_virtualizationservice::binder::{
@@ -42,7 +43,6 @@
use std::path::Path;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
-use std::time::Duration;
/// This owns an instance of the CompOS VM.
pub struct VmInstance {
@@ -51,6 +51,13 @@
cid: i32,
}
+/// Parameters to be used when creating a virtual machine instance.
+#[derive(Default, Debug, Clone)]
+pub struct VmParameters {
+ /// Whether the VM should be debuggable.
+ pub debug_mode: bool,
+}
+
impl VmInstance {
/// Return a new connection to the Virtualization Service binder interface. This will start the
/// service if necessary.
@@ -59,8 +66,12 @@
.context("Failed to find VirtualizationService")
}
- /// Start a new CompOS VM instance using the specified instance image file.
- pub fn start(instance_image: File) -> Result<VmInstance> {
+ /// Start a new CompOS VM instance using the specified instance image file and parameters.
+ pub fn start(
+ service: &dyn IVirtualizationService,
+ instance_image: File,
+ parameters: &VmParameters,
+ ) -> Result<VmInstance> {
let instance_fd = ParcelFileDescriptor::new(instance_image);
let apex_dir = Path::new(COMPOS_APEX_ROOT);
@@ -74,28 +85,37 @@
.context("Failed to open config APK idsig file")?;
let idsig_fd = ParcelFileDescriptor::new(idsig_fd);
- // TODO: Send this to stdout instead? Or specify None?
- let log_fd = File::create(data_dir.join("vm.log")).context("Failed to create log file")?;
- let log_fd = ParcelFileDescriptor::new(log_fd);
+ let (log_fd, debug_level) = if parameters.debug_mode {
+ // Console output and the system log output from the VM are redirected to this file.
+ let log_fd =
+ File::create(data_dir.join("vm.log")).context("Failed to create log file")?;
+ let log_fd = ParcelFileDescriptor::new(log_fd);
+ (Some(log_fd), DebugLevel::FULL)
+ } else {
+ (None, DebugLevel::NONE)
+ };
let config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
apk: Some(apk_fd),
idsig: Some(idsig_fd),
instanceImage: Some(instance_fd),
configPath: "assets/vm_config.json".to_owned(),
+ debugLevel: debug_level,
..Default::default()
});
- let service = Self::connect_to_virtualization_service()?;
-
- let vm = service.createVm(&config, Some(&log_fd)).context("Failed to create VM")?;
+ let vm = service
+ .createVm(&config, log_fd.as_ref(), log_fd.as_ref())
+ .context("Failed to create VM")?;
let vm_state = Arc::new(VmStateMonitor::default());
let vm_state_clone = Arc::clone(&vm_state);
- vm.as_binder().link_to_death(&mut DeathRecipient::new(move || {
+ let mut death_recipient = DeathRecipient::new(move || {
vm_state_clone.set_died();
log::error!("VirtualizationService died");
- }))?;
+ });
+ // Note that dropping death_recipient cancels this, so we can't use a temporary here.
+ vm.as_binder().link_to_death(&mut death_recipient)?;
let vm_state_clone = Arc::clone(&vm_state);
let callback = BnVirtualMachineCallback::new_binder(
@@ -223,14 +243,13 @@
}
fn wait_until_ready(&self) -> Result<i32> {
- // 10s is long enough on real hardware, but it can take 90s when using nested
- // virtualization.
- // TODO(b/200924405): Reduce timeout/detect nested virtualization
let (state, result) = self
.state_ready
- .wait_timeout_while(self.mutex.lock().unwrap(), Duration::from_secs(120), |state| {
- state.cid.is_none() && !state.has_died
- })
+ .wait_timeout_while(
+ self.mutex.lock().unwrap(),
+ timeouts()?.vm_max_time_to_ready,
+ |state| state.cid.is_none() && !state.has_died,
+ )
.unwrap();
if result.timed_out() {
bail!("Timed out waiting for VM")
diff --git a/compos/common/lib.rs b/compos/common/lib.rs
index 0b84a28..4bfa81f 100644
--- a/compos/common/lib.rs
+++ b/compos/common/lib.rs
@@ -17,6 +17,7 @@
//! Common items used by CompOS server and/or clients
pub mod compos_client;
+pub mod timeouts;
/// Special CID indicating "any".
pub const VMADDR_CID_ANY: u32 = -1i32 as u32;
diff --git a/compos/common/timeouts.rs b/compos/common/timeouts.rs
new file mode 100644
index 0000000..42cfe69
--- /dev/null
+++ b/compos/common/timeouts.rs
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Timeouts for common situations, with support for longer timeouts when using nested
+//! virtualization.
+
+use anyhow::Result;
+use rustutils::system_properties;
+use std::time::Duration;
+
+/// Holder for the various timeouts we use.
+#[derive(Debug, Copy, Clone)]
+pub struct Timeouts {
+ /// Total time that odrefresh may take to perform compilation
+ pub odrefresh_max_execution_time: Duration,
+ /// Time allowed for a single compilation step run by odrefresh
+ pub odrefresh_max_child_process_time: Duration,
+ /// Time allowed for the CompOS VM to start up and become ready.
+ pub vm_max_time_to_ready: Duration,
+}
+
+/// Whether the current platform requires extra time for operations inside a VM.
+pub fn need_extra_time() -> Result<bool> {
+ // Nested virtualization is slow. Check if we are running on vsoc as a proxy for this.
+ let value = system_properties::read("ro.build.product")?;
+ Ok(value == "vsoc_x86_64" || value == "vsoc_x86")
+}
+
+/// Return the timeouts that are appropriate on the current platform.
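+///
+/// Illustrative use (this mirrors how compos_client.rs waits for the VM to become ready):
+///
+/// ```ignore
+/// let max_wait = timeouts()?.vm_max_time_to_ready;
+/// ```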
+pub fn timeouts() -> Result<&'static Timeouts> {
+ if need_extra_time()? {
+ Ok(&EXTENDED_TIMEOUTS)
+ } else {
+ Ok(&NORMAL_TIMEOUTS)
+ }
+}
+
+/// The timeouts that we use normally.
+pub const NORMAL_TIMEOUTS: Timeouts = Timeouts {
+ // Note: the source of truth for these odrefresh timeouts is art/odrefresh/odr_config.h.
+ odrefresh_max_execution_time: Duration::from_secs(300),
+ odrefresh_max_child_process_time: Duration::from_secs(90),
+ vm_max_time_to_ready: Duration::from_secs(10),
+};
+
+/// The timeouts that we use when need_extra_time() returns true.
+pub const EXTENDED_TIMEOUTS: Timeouts = Timeouts {
+ odrefresh_max_execution_time: Duration::from_secs(480),
+ odrefresh_max_child_process_time: Duration::from_secs(150),
+ vm_max_time_to_ready: Duration::from_secs(120),
+};
diff --git a/compos/compos_key_cmd/compos_key_cmd.cpp b/compos/compos_key_cmd/compos_key_cmd.cpp
index 7bf622d..3f431da 100644
--- a/compos/compos_key_cmd/compos_key_cmd.cpp
+++ b/compos/compos_key_cmd/compos_key_cmd.cpp
@@ -197,6 +197,7 @@
return Error() << "Failed to connect to virtualization service.";
}
+ // Console output and the system log output from the VM are redirected to this file.
ScopedFileDescriptor logFd;
if (mLogFile.empty()) {
logFd.set(dup(STDOUT_FILENO));
@@ -235,11 +236,11 @@
appConfig.idsig = std::move(idsigFd);
appConfig.instanceImage = std::move(instanceFd);
appConfig.configPath = kConfigFilePath;
- appConfig.debugLevel = VirtualMachineAppConfig::DebugLevel::NONE;
+ appConfig.debugLevel = VirtualMachineAppConfig::DebugLevel::FULL;
appConfig.memoryMib = 0; // Use default
LOG(INFO) << "Starting VM";
- auto status = service->createVm(config, logFd, &mVm);
+ auto status = service->createVm(config, logFd, logFd, &mVm);
if (!status.isOk()) {
return Error() << status.getDescription();
}
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index 2a24b7a..8391ed6 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -20,6 +20,7 @@
"libnum_traits",
"liblog_rust",
"librustutils",
+ "libshared_child",
],
proc_macros: ["libnum_derive"],
apex_available: [
diff --git a/compos/composd/aidl/android/system/composd/ICompilationTask.aidl b/compos/composd/aidl/android/system/composd/ICompilationTask.aidl
new file mode 100644
index 0000000..ae03fcc
--- /dev/null
+++ b/compos/composd/aidl/android/system/composd/ICompilationTask.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.composd;
+
+/**
+ * Represents a compilation in process.
+ */
+interface ICompilationTask {
+ /**
+ * Attempt to cancel compilation. If successful, compilation will end and no further success
+ * or failure callbacks will be received (although any already in flight may still be delivered).
+ */
+ void cancel();
+}
diff --git a/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
new file mode 100644
index 0000000..a9d41b8
--- /dev/null
+++ b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.composd;
+
+/**
+ * Interface to be implemented by clients of IIsolatedCompilationService to be notified when a
+ * requested compilation task completes.
+ */
+interface ICompilationTaskCallback {
+ /**
+ * Called if a compilation task has ended successfully, generating all the required artifacts.
+ */
+ void onSuccess();
+
+ /**
+ * Called if a compilation task has ended unsuccessfully.
+ */
+ void onFailure();
+}
diff --git a/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl b/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
index 3d0ad31..3d28894 100644
--- a/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
+++ b/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
@@ -15,6 +15,8 @@
*/
package android.system.composd;
+import android.system.composd.ICompilationTask;
+import android.system.composd.ICompilationTaskCallback;
import com.android.compos.CompilationResult;
import com.android.compos.FdAnnotation;
@@ -24,8 +26,11 @@
* This compiles BCP extensions and system server, even if the system artifacts are up to date,
* and writes the results to a test directory to avoid disrupting any real artifacts in
* existence.
+ * Compilation continues in the background, and success/failure is reported via the supplied
+ * callback, unless the returned ICompilationTask is cancelled. The caller should maintain
+ * a reference to the ICompilationTask until compilation completes or is cancelled.
*/
- void runForcedCompileForTest();
+ ICompilationTask startTestCompile(ICompilationTaskCallback callback);
/**
* Run dex2oat in the currently running instance of the CompOS VM. This is a simple proxy
diff --git a/compos/composd/src/compilation_task.rs b/compos/composd/src/compilation_task.rs
new file mode 100644
index 0000000..c4eed52
--- /dev/null
+++ b/compos/composd/src/compilation_task.rs
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::instance_starter::CompOsInstance;
+use crate::odrefresh::{self, Odrefresh};
+use android_system_composd::aidl::android::system::composd::{
+ ICompilationTask::ICompilationTask, ICompilationTaskCallback::ICompilationTaskCallback,
+};
+use android_system_composd::binder::{Interface, Result as BinderResult, Strong};
+use anyhow::Result;
+use log::{error, warn};
+use std::sync::{Arc, Mutex};
+use std::thread;
+
+#[derive(Clone)]
+pub struct CompilationTask {
+ running_task: Arc<Mutex<Option<RunningTask>>>,
+}
+
+impl Interface for CompilationTask {}
+
+impl ICompilationTask for CompilationTask {
+ fn cancel(&self) -> BinderResult<()> {
+ let task = self.take();
+ if let Some(task) = task {
+ if let Err(e) = task.odrefresh.kill() {
+ warn!("Failed to kill running task: {:?}", e)
+ }
+ }
+ Ok(())
+ }
+}
+
+impl CompilationTask {
+ /// Return the currently running task, if any, removing it from this CompilationTask.
+ /// Once removed (meaning the task has ended or been cancelled), further calls will always
+ /// return None.
+ fn take(&self) -> Option<RunningTask> {
+ self.running_task.lock().unwrap().take()
+ }
+
+ pub fn start_test_compile(
+ comp_os: Arc<CompOsInstance>,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> Result<CompilationTask> {
+ let odrefresh = Odrefresh::spawn_forced_compile("test-artifacts")?;
+ let odrefresh = Arc::new(odrefresh);
+ let task =
+ RunningTask { odrefresh: odrefresh.clone(), comp_os, callback: callback.clone() };
+ let task = CompilationTask { running_task: Arc::new(Mutex::new(Some(task))) };
+
+ task.clone().start_waiting_thread(odrefresh);
+
+ Ok(task)
+ }
+
+ fn start_waiting_thread(self, odrefresh: Arc<Odrefresh>) {
+ thread::spawn(move || {
+ let exit_code = odrefresh.wait_for_exit();
+ let task = self.take();
+ // We don't do the callback if cancel has already happened.
+ if let Some(task) = task {
+ let result = match exit_code {
+ Ok(odrefresh::ExitCode::CompilationSuccess) => task.callback.onSuccess(),
+ Ok(exit_code) => {
+ error!("Unexpected odrefresh result: {:?}", exit_code);
+ task.callback.onFailure()
+ }
+ Err(e) => {
+ error!("Running odrefresh failed: {:?}", e);
+ task.callback.onFailure()
+ }
+ };
+ if let Err(e) = result {
+ warn!("Failed to deliver callback: {:?}", e);
+ }
+ }
+ });
+ }
+}
+
+struct RunningTask {
+ odrefresh: Arc<Odrefresh>,
+ callback: Strong<dyn ICompilationTaskCallback>,
+ #[allow(dead_code)] // Keeps the CompOS VM alive
+ comp_os: Arc<CompOsInstance>,
+}
diff --git a/compos/composd/src/composd_main.rs b/compos/composd/src/composd_main.rs
index 60aeb39..671ed16 100644
--- a/compos/composd/src/composd_main.rs
+++ b/compos/composd/src/composd_main.rs
@@ -18,10 +18,12 @@
//! responsible for managing the lifecycle of the CompOS VM instances, providing key management for
//! them, and orchestrating trusted compilation.
+mod compilation_task;
mod instance_manager;
mod instance_starter;
mod odrefresh;
mod service;
+mod util;
use crate::instance_manager::InstanceManager;
use android_system_composd::binder::{register_lazy_service, ProcessState};
diff --git a/compos/composd/src/instance_manager.rs b/compos/composd/src/instance_manager.rs
index e31296d..6291d59 100644
--- a/compos/composd/src/instance_manager.rs
+++ b/compos/composd/src/instance_manager.rs
@@ -22,6 +22,7 @@
use anyhow::{bail, Context, Result};
use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
use compos_aidl_interface::binder::Strong;
+use compos_common::compos_client::VmParameters;
use compos_common::{CURRENT_INSTANCE_DIR, TEST_INSTANCE_DIR};
use std::sync::{Arc, Mutex, Weak};
use virtualizationservice::IVirtualizationService::IVirtualizationService;
@@ -44,20 +45,26 @@
#[allow(dead_code)] // TODO: Make use of this
pub fn start_current_instance(&self) -> Result<Arc<CompOsInstance>> {
- self.start_instance(CURRENT_INSTANCE_DIR)
+ self.start_instance(CURRENT_INSTANCE_DIR, VmParameters::default())
}
pub fn start_test_instance(&self) -> Result<Arc<CompOsInstance>> {
- self.start_instance(TEST_INSTANCE_DIR)
+ let vm_parameters = VmParameters { debug_mode: true };
+ self.start_instance(TEST_INSTANCE_DIR, vm_parameters)
}
- fn start_instance(&self, instance_name: &str) -> Result<Arc<CompOsInstance>> {
+ fn start_instance(
+ &self,
+ instance_name: &str,
+ vm_parameters: VmParameters,
+ ) -> Result<Arc<CompOsInstance>> {
let mut state = self.state.lock().unwrap();
state.mark_starting()?;
// Don't hold the lock while we start the instance to avoid blocking other callers.
drop(state);
- let instance = self.try_start_instance(instance_name);
+ let instance_starter = InstanceStarter::new(instance_name, vm_parameters);
+ let instance = self.try_start_instance(instance_starter);
let mut state = self.state.lock().unwrap();
if let Ok(ref instance) = instance {
@@ -68,10 +75,8 @@
instance
}
- fn try_start_instance(&self, instance_name: &str) -> Result<Arc<CompOsInstance>> {
- let instance_starter = InstanceStarter::new(instance_name);
+ fn try_start_instance(&self, instance_starter: InstanceStarter) -> Result<Arc<CompOsInstance>> {
let compos_instance = instance_starter.create_or_start_instance(&*self.service)?;
-
Ok(Arc::new(compos_instance))
}
}
diff --git a/compos/composd/src/instance_starter.rs b/compos/composd/src/instance_starter.rs
index 63aefb8..4b3ac1b 100644
--- a/compos/composd/src/instance_starter.rs
+++ b/compos/composd/src/instance_starter.rs
@@ -21,9 +21,10 @@
IVirtualizationService::IVirtualizationService, PartitionType::PartitionType,
};
use anyhow::{bail, Context, Result};
+use binder_common::lazy_service::LazyServiceGuard;
use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
use compos_aidl_interface::binder::{ParcelFileDescriptor, Strong};
-use compos_common::compos_client::VmInstance;
+use compos_common::compos_client::{VmInstance, VmParameters};
use compos_common::{
COMPOS_DATA_ROOT, INSTANCE_IMAGE_FILE, PRIVATE_KEY_BLOB_FILE, PUBLIC_KEY_FILE,
};
@@ -33,9 +34,11 @@
use std::path::{Path, PathBuf};
pub struct CompOsInstance {
+ service: Strong<dyn ICompOsService>,
#[allow(dead_code)] // Keeps VirtualizationService & the VM alive
vm_instance: VmInstance,
- service: Strong<dyn ICompOsService>,
+ #[allow(dead_code)] // Keeps composd process alive
+ lazy_service_guard: LazyServiceGuard,
}
impl CompOsInstance {
@@ -50,10 +53,11 @@
instance_image: PathBuf,
key_blob: PathBuf,
public_key: PathBuf,
+ vm_parameters: VmParameters,
}
impl InstanceStarter {
- pub fn new(instance_name: &str) -> Self {
+ pub fn new(instance_name: &str, vm_parameters: VmParameters) -> Self {
let instance_root = Path::new(COMPOS_DATA_ROOT).join(instance_name);
let instant_root_path = instance_root.as_path();
let instance_image = instant_root_path.join(INSTANCE_IMAGE_FILE);
@@ -65,23 +69,27 @@
instance_image,
key_blob,
public_key,
+ vm_parameters,
}
}
pub fn create_or_start_instance(
&self,
- service: &dyn IVirtualizationService,
+ virtualization_service: &dyn IVirtualizationService,
) -> Result<CompOsInstance> {
- let compos_instance = self.start_existing_instance();
+ let compos_instance = self.start_existing_instance(virtualization_service);
match compos_instance {
Ok(_) => return compos_instance,
Err(e) => warn!("Failed to start: {}", e),
}
- self.start_new_instance(service)
+ self.start_new_instance(virtualization_service)
}
- fn start_existing_instance(&self) -> Result<CompOsInstance> {
+ fn start_existing_instance(
+ &self,
+ virtualization_service: &dyn IVirtualizationService,
+ ) -> Result<CompOsInstance> {
// No point even trying if the files we need aren't there.
self.check_files_exist()?;
@@ -90,7 +98,7 @@
let key_blob = fs::read(&self.key_blob).context("Reading private key blob")?;
let public_key = fs::read(&self.public_key).context("Reading public key")?;
- let compos_instance = self.start_vm()?;
+ let compos_instance = self.start_vm(virtualization_service)?;
let service = &compos_instance.service;
if !service.verifySigningKey(&key_blob, &public_key).context("Verifying key pair")? {
@@ -117,7 +125,7 @@
self.create_instance_image(virtualization_service)?;
- let compos_instance = self.start_vm()?;
+ let compos_instance = self.start_vm(virtualization_service)?;
let service = &compos_instance.service;
let key_data = service.generateSigningKey().context("Generating signing key")?;
@@ -149,15 +157,20 @@
Ok(())
}
- fn start_vm(&self) -> Result<CompOsInstance> {
+ fn start_vm(
+ &self,
+ virtualization_service: &dyn IVirtualizationService,
+ ) -> Result<CompOsInstance> {
let instance_image = fs::OpenOptions::new()
.read(true)
.write(true)
.open(&self.instance_image)
.context("Failed to open instance image")?;
- let vm_instance = VmInstance::start(instance_image).context("Starting VM")?;
+ let vm_instance =
+ VmInstance::start(virtualization_service, instance_image, &self.vm_parameters)
+ .context("Starting VM")?;
let service = vm_instance.get_service().context("Connecting to CompOS")?;
- Ok(CompOsInstance { vm_instance, service })
+ Ok(CompOsInstance { vm_instance, service, lazy_service_guard: Default::default() })
}
fn create_instance_image(
@@ -166,6 +179,7 @@
) -> Result<()> {
let instance_image = fs::OpenOptions::new()
.create(true)
+ .truncate(true)
.read(true)
.write(true)
.open(&self.instance_image)
diff --git a/compos/composd/src/odrefresh.rs b/compos/composd/src/odrefresh.rs
index 8c3febf..16dcb0f 100644
--- a/compos/composd/src/odrefresh.rs
+++ b/compos/composd/src/odrefresh.rs
@@ -17,10 +17,11 @@
//! Handle the details of executing odrefresh to generate compiled artifacts.
use anyhow::{bail, Context, Result};
+use compos_common::timeouts::{need_extra_time, EXTENDED_TIMEOUTS};
use compos_common::VMADDR_CID_ANY;
use num_derive::FromPrimitive;
use num_traits::FromPrimitive;
-use rustutils::system_properties;
+use shared_child::SharedChild;
use std::process::Command;
// TODO: What if this changes?
@@ -38,30 +39,44 @@
CleanupFailed = EX_MAX + 4,
}
-fn need_extra_time() -> Result<bool> {
- // Special case to add more time in nested VM
- let value = system_properties::read("ro.build.product")?;
- Ok(value == "vsoc_x86_64" || value == "vsoc_x86")
+pub struct Odrefresh {
+ child: SharedChild,
}
-pub fn run_forced_compile(target_dir: &str) -> Result<ExitCode> {
- // We don`t need to capture stdout/stderr - odrefresh writes to the log
- let mut cmdline = Command::new(ODREFRESH_BIN);
- if need_extra_time()? {
- cmdline.arg("--max-execution-seconds=480").arg("--max-child-process-seconds=150");
+impl Odrefresh {
+ pub fn spawn_forced_compile(target_dir: &str) -> Result<Self> {
+ // We don't need to capture stdout/stderr - odrefresh writes to the log
+ let mut cmdline = Command::new(ODREFRESH_BIN);
+ if need_extra_time()? {
+ cmdline
+ .arg(format!(
+ "--max-execution-seconds={}",
+ EXTENDED_TIMEOUTS.odrefresh_max_execution_time.as_secs()
+ ))
+ .arg(format!(
+ "--max-child-process-seconds={}",
+ EXTENDED_TIMEOUTS.odrefresh_max_child_process_time.as_secs()
+ ));
+ }
+ cmdline
+ .arg(format!("--use-compilation-os={}", VMADDR_CID_ANY as i32))
+ .arg(format!("--dalvik-cache={}", target_dir))
+ .arg("--force-compile");
+ let child = SharedChild::spawn(&mut cmdline).context("Running odrefresh")?;
+ Ok(Odrefresh { child })
}
- cmdline
- .arg(format!("--use-compilation-os={}", VMADDR_CID_ANY as i32))
- .arg(format!("--dalvik-cache={}", target_dir))
- .arg("--force-compile");
- let mut odrefresh = cmdline.spawn().context("Running odrefresh")?;
- // TODO: timeout?
- let status = odrefresh.wait()?;
+ pub fn wait_for_exit(&self) -> Result<ExitCode> {
+ // No timeout here - but clients can kill the process, which will end the wait.
+ let status = self.child.wait()?;
+ if let Some(exit_code) = status.code().and_then(FromPrimitive::from_i32) {
+ Ok(exit_code)
+ } else {
+ bail!("odrefresh exited with {}", status)
+ }
+ }
- if let Some(exit_code) = status.code().and_then(FromPrimitive::from_i32) {
- Ok(exit_code)
- } else {
- bail!("odrefresh exited with {}", status)
+ pub fn kill(&self) -> Result<()> {
+ self.child.kill().context("Killing odrefresh process failed")
}
}
diff --git a/compos/composd/src/service.rs b/compos/composd/src/service.rs
index d3b73a1..4d9dc58 100644
--- a/compos/composd/src/service.rs
+++ b/compos/composd/src/service.rs
@@ -17,18 +17,23 @@
//! Implementation of IIsolatedCompilationService, called from system server when compilation is
//! desired.
+use crate::compilation_task::CompilationTask;
use crate::instance_manager::InstanceManager;
-use crate::odrefresh;
-use android_system_composd::aidl::android::system::composd::IIsolatedCompilationService::{
- BnIsolatedCompilationService, IIsolatedCompilationService,
+use crate::util::to_binder_result;
+use android_system_composd::aidl::android::system::composd::{
+ ICompilationTask::{BnCompilationTask, ICompilationTask},
+ ICompilationTaskCallback::ICompilationTaskCallback,
+ IIsolatedCompilationService::{BnIsolatedCompilationService, IIsolatedCompilationService},
};
-use android_system_composd::binder::{self, BinderFeatures, Interface, Strong};
-use anyhow::{bail, Context, Result};
+use android_system_composd::binder::{
+ self, BinderFeatures, ExceptionCode, Interface, Status, Strong, ThreadState,
+};
+use anyhow::{Context, Result};
use binder_common::new_binder_service_specific_error;
use compos_aidl_interface::aidl::com::android::compos::{
CompilationResult::CompilationResult, FdAnnotation::FdAnnotation,
};
-use log::{error, info};
+use rustutils::users::{AID_ROOT, AID_SYSTEM};
pub struct IsolatedCompilationService {
instance_manager: InstanceManager,
@@ -42,9 +47,16 @@
impl Interface for IsolatedCompilationService {}
impl IIsolatedCompilationService for IsolatedCompilationService {
- fn runForcedCompileForTest(&self) -> binder::Result<()> {
- // TODO - check caller is system or shell/root?
- to_binder_result(self.do_run_forced_compile_for_test())
+ fn startTestCompile(
+ &self,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> binder::Result<Strong<dyn ICompilationTask>> {
+ let calling_uid = ThreadState::get_calling_uid();
+ // This should only be called by the system server, or by root while testing
+ if calling_uid != AID_SYSTEM && calling_uid != AID_ROOT {
+ return Err(Status::new_exception(ExceptionCode::SECURITY, None));
+ }
+ to_binder_result(self.do_start_test_compile(callback))
}
fn compile_cmd(
@@ -52,8 +64,12 @@
args: &[String],
fd_annotation: &FdAnnotation,
) -> binder::Result<CompilationResult> {
- // TODO - check caller is odrefresh
- to_binder_result(self.do_compile(args, fd_annotation))
+ let calling_uid = ThreadState::get_calling_uid();
+ // This should only be called by odrefresh, which runs as root
+ if calling_uid != AID_ROOT {
+ return Err(Status::new_exception(ExceptionCode::SECURITY, None));
+ }
+ to_binder_result(self.do_compile_cmd(args, fd_annotation))
}
fn compile(&self, _marshaled: &[u8], _fd_annotation: &FdAnnotation) -> binder::Result<i8> {
@@ -61,33 +77,19 @@
}
}
-fn to_binder_result<T>(result: Result<T>) -> binder::Result<T> {
- result.map_err(|e| {
- let message = format!("{:?}", e);
- error!("Returning binder error: {}", &message);
- new_binder_service_specific_error(-1, message)
- })
-}
-
impl IsolatedCompilationService {
- fn do_run_forced_compile_for_test(&self) -> Result<()> {
- info!("runForcedCompileForTest");
-
+ fn do_start_test_compile(
+ &self,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> Result<Strong<dyn ICompilationTask>> {
let comp_os = self.instance_manager.start_test_instance().context("Starting CompOS")?;
- let exit_code = odrefresh::run_forced_compile("test-artifacts")?;
+ let task = CompilationTask::start_test_compile(comp_os, callback)?;
- if exit_code != odrefresh::ExitCode::CompilationSuccess {
- bail!("Unexpected odrefresh result: {:?}", exit_code);
- }
-
- // The instance is needed until odrefresh is finished
- drop(comp_os);
-
- Ok(())
+ Ok(BnCompilationTask::new_binder(task, BinderFeatures::default()))
}
- fn do_compile(
+ fn do_compile_cmd(
&self,
args: &[String],
fd_annotation: &FdAnnotation,
diff --git a/compos/composd/src/util.rs b/compos/composd/src/util.rs
new file mode 100644
index 0000000..091fb15
--- /dev/null
+++ b/compos/composd/src/util.rs
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use android_system_composd::binder::Result as BinderResult;
+use anyhow::Result;
+use binder_common::new_binder_service_specific_error;
+use log::error;
+
+pub fn to_binder_result<T>(result: Result<T>) -> BinderResult<T> {
+ result.map_err(|e| {
+ let message = format!("{:?}", e);
+ error!("Returning binder error: {}", &message);
+ new_binder_service_specific_error(-1, message)
+ })
+}
diff --git a/compos/composd_cmd/Android.bp b/compos/composd_cmd/Android.bp
index 0081a0d..c230e13 100644
--- a/compos/composd_cmd/Android.bp
+++ b/compos/composd_cmd/Android.bp
@@ -11,6 +11,7 @@
"libanyhow",
"libbinder_rs",
"libclap",
+ "libcompos_common",
],
prefer_rlib: true,
apex_available: [
diff --git a/compos/composd_cmd/composd_cmd.rs b/compos/composd_cmd/composd_cmd.rs
index 04398c0..0422b44 100644
--- a/compos/composd_cmd/composd_cmd.rs
+++ b/compos/composd_cmd/composd_cmd.rs
@@ -17,10 +17,19 @@
//! Simple command-line tool to drive composd for testing and debugging.
use android_system_composd::{
- aidl::android::system::composd::IIsolatedCompilationService::IIsolatedCompilationService,
- binder::{wait_for_interface, ProcessState},
+ aidl::android::system::composd::{
+ ICompilationTaskCallback::{BnCompilationTaskCallback, ICompilationTaskCallback},
+ IIsolatedCompilationService::IIsolatedCompilationService,
+ },
+ binder::{
+ wait_for_interface, BinderFeatures, DeathRecipient, IBinder, Interface, ProcessState,
+ Result as BinderResult,
+ },
};
-use anyhow::{Context, Result};
+use anyhow::{bail, Context, Result};
+use compos_common::timeouts::timeouts;
+use std::sync::{Arc, Condvar, Mutex};
+use std::time::Duration;
fn main() -> Result<()> {
let app = clap::App::new("composd_cmd").arg(
@@ -35,11 +44,8 @@
ProcessState::start_thread_pool();
- let service = wait_for_interface::<dyn IIsolatedCompilationService>("android.system.composd")
- .context("Failed to connect to composd service")?;
-
match command {
- "forced-compile-test" => service.runForcedCompileForTest().context("Compilation failed")?,
+ "forced-compile-test" => run_forced_compile_for_test()?,
_ => panic!("Unexpected command {}", command),
}
@@ -47,3 +53,85 @@
Ok(())
}
+
+struct Callback(Arc<State>);
+
+#[derive(Default)]
+struct State {
+ mutex: Mutex<Option<Outcome>>,
+ completed: Condvar,
+}
+
+#[derive(Copy, Clone)]
+enum Outcome {
+ Succeeded,
+ Failed,
+}
+
+impl Interface for Callback {}
+
+impl ICompilationTaskCallback for Callback {
+ fn onSuccess(&self) -> BinderResult<()> {
+ self.0.set_outcome(Outcome::Succeeded);
+ Ok(())
+ }
+
+ fn onFailure(&self) -> BinderResult<()> {
+ self.0.set_outcome(Outcome::Failed);
+ Ok(())
+ }
+}
+
+impl State {
+ fn set_outcome(&self, outcome: Outcome) {
+ let mut guard = self.mutex.lock().unwrap();
+ *guard = Some(outcome);
+ drop(guard);
+ self.completed.notify_all();
+ }
+
+ fn wait(&self, duration: Duration) -> Result<Outcome> {
+ let (outcome, result) = self
+ .completed
+ .wait_timeout_while(self.mutex.lock().unwrap(), duration, |outcome| outcome.is_none())
+ .unwrap();
+ if result.timed_out() {
+ bail!("Timed out waiting for compilation")
+ }
+ Ok(outcome.unwrap())
+ }
+}
+
+fn run_forced_compile_for_test() -> Result<()> {
+ let service = wait_for_interface::<dyn IIsolatedCompilationService>("android.system.composd")
+ .context("Failed to connect to composd service")?;
+
+ let state = Arc::new(State::default());
+ let callback = Callback(state.clone());
+ let callback = BnCompilationTaskCallback::new_binder(callback, BinderFeatures::default());
+ let task = service.startTestCompile(&callback).context("Compilation failed")?;
+
+ // Make sure composd keeps going even if we don't hold a reference to its service.
+ drop(service);
+
+ let state_clone = state.clone();
+ let mut death_recipient = DeathRecipient::new(move || {
+ eprintln!("CompilationTask died");
+ state_clone.set_outcome(Outcome::Failed);
+ });
+ // Note that dropping death_recipient cancels this, so we can't use a temporary here.
+ task.as_binder().link_to_death(&mut death_recipient)?;
+
+ println!("Waiting");
+
+ match state.wait(timeouts()?.odrefresh_max_execution_time) {
+ Ok(Outcome::Succeeded) => Ok(()),
+ Ok(Outcome::Failed) => bail!("Compilation failed"),
+ Err(e) => {
+ if let Err(e) = task.cancel() {
+ eprintln!("Failed to cancel compilation: {:?}", e);
+ }
+ Err(e)
+ }
+ }
+}
diff --git a/compos/libcompos_client/Android.bp b/compos/libcompos_client/Android.bp
index b6a4ef6..5528ea1 100644
--- a/compos/libcompos_client/Android.bp
+++ b/compos/libcompos_client/Android.bp
@@ -4,14 +4,12 @@
cc_library {
name: "libcompos_client",
- srcs: ["libcompos_client.cc"],
+ whole_static_libs: ["libcompos_client_ffi"],
min_sdk_version: "apex_inherit",
shared_libs: [
- "android.system.composd-ndk",
- "compos_aidl_interface-ndk",
- "libbase",
"libbinder_ndk",
"libbinder_rpc_unstable",
+ "libminijail",
],
export_include_dirs: ["include"],
stubs: {
@@ -25,3 +23,36 @@
"//art/odrefresh:__subpackages__",
],
}
+
+// TODO(203478530): Once rust_ffi supports stubs/symbol file, remove the wrapping cc_library above.
+rust_ffi {
+ name: "libcompos_client_ffi",
+ crate_name: "compos_client_ffi",
+ srcs: ["libcompos_client.rs"],
+ include_dirs: ["include"],
+ rustlibs: [
+ "android.system.composd-rust",
+ "compos_aidl_interface-rust",
+ "libandroid_logger",
+ "libanyhow",
+ "libbinder_common",
+ "libbinder_rs",
+ "libcompos_common",
+ "liblibc",
+ "liblog_rust",
+ "libminijail_rust",
+ "libnix",
+ "libscopeguard",
+ ],
+ prefer_rlib: true,
+ shared_libs: [
+ "libbinder_ndk",
+ ],
+ apex_available: [
+ "com.android.compos",
+ ],
+ visibility: [
+ "//packages/modules/Virtualization/compos:__subpackages__",
+ "//art/odrefresh:__subpackages__",
+ ],
+}
diff --git a/compos/libcompos_client/libcompos_client.cc b/compos/libcompos_client/libcompos_client.cc
deleted file mode 100644
index 147fcd0..0000000
--- a/compos/libcompos_client/libcompos_client.cc
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "libcompos_client.h"
-
-#include <android-base/logging.h>
-#include <android-base/strings.h>
-#include <android-base/unique_fd.h>
-#include <android/binder_auto_utils.h>
-#include <android/binder_manager.h>
-#include <binder/IInterface.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <binder_rpc_unstable.hpp>
-#include <memory>
-
-#include "aidl/android/system/composd/IIsolatedCompilationService.h"
-#include "aidl/com/android/compos/FdAnnotation.h"
-#include "aidl/com/android/compos/ICompOsService.h"
-
-using aidl::android::system::composd::IIsolatedCompilationService;
-using aidl::com::android::compos::FdAnnotation;
-using aidl::com::android::compos::ICompOsService;
-using android::base::Join;
-using android::base::Pipe;
-using android::base::unique_fd;
-
-namespace {
-
-constexpr unsigned int kCompsvcRpcPort = 6432;
-constexpr const char* kComposdServiceName = "android.system.composd";
-
-void ExecFdServer(const int* ro_fds, size_t ro_fds_num, const int* rw_fds, size_t rw_fds_num,
- unique_fd ready_fd) {
- // Holder of C Strings, with enough memory reserved to avoid reallocation. Otherwise,
- // `holder.rbegin()->c_str()` may become invalid.
- std::vector<std::string> holder;
- holder.reserve(ro_fds_num + rw_fds_num + 1 /* for --ready-fd */);
-
- std::vector<char const*> args = {"/apex/com.android.virt/bin/fd_server"};
- for (int i = 0; i < ro_fds_num; ++i) {
- args.emplace_back("--ro-fds");
- holder.emplace_back(std::to_string(*(ro_fds + i)));
- args.emplace_back(holder.rbegin()->c_str());
- }
- for (int i = 0; i < rw_fds_num; ++i) {
- args.emplace_back("--rw-fds");
- holder.emplace_back(std::to_string(*(rw_fds + i)));
- args.emplace_back(holder.rbegin()->c_str());
- }
- args.emplace_back("--ready-fd");
- holder.emplace_back(std::to_string(ready_fd.get()));
- args.emplace_back(holder.rbegin()->c_str());
-
- LOG(DEBUG) << "Starting fd_server, args: " << Join(args, ' ');
- args.emplace_back(nullptr);
- if (execv(args[0], const_cast<char* const*>(args.data())) < 0) {
- PLOG(ERROR) << "execv failed";
- }
-}
-
-class FileSharingSession final {
-public:
- static std::unique_ptr<FileSharingSession> Create(const int* ro_fds, size_t ro_fds_num,
- const int* rw_fds, size_t rw_fds_num) {
- // Create pipe for receiving a ready ping from fd_server.
- unique_fd pipe_read, pipe_write;
- if (!Pipe(&pipe_read, &pipe_write, /* flags= */ 0)) {
- PLOG(ERROR) << "Cannot create pipe";
- return nullptr;
- }
-
- pid_t pid = fork();
- if (pid < 0) {
- PLOG(ERROR) << "fork error";
- return nullptr;
- } else if (pid > 0) {
- pipe_write.reset();
-
- // When fd_server is ready it closes its end of the pipe. And if it exits, the pipe is
- // also closed. Either way this read will return 0 bytes at that point, and there's no
- // point waiting any longer.
- char c;
- read(pipe_read.get(), &c, sizeof(c));
-
- std::unique_ptr<FileSharingSession> session(new FileSharingSession(pid));
- return session;
- } else if (pid == 0) {
- pipe_read.reset();
- ExecFdServer(ro_fds, ro_fds_num, rw_fds, rw_fds_num, std::move(pipe_write));
- exit(EXIT_FAILURE);
- }
- return nullptr;
- }
-
- ~FileSharingSession() {
- if (kill(fd_server_pid_, SIGTERM) < 0) {
- PLOG(ERROR) << "Cannot kill fd_server (pid " << std::to_string(fd_server_pid_)
- << ") with SIGTERM. Retry with SIGKILL.";
- if (kill(fd_server_pid_, SIGKILL) < 0) {
- PLOG(ERROR) << "Still cannot terminate with SIGKILL. Give up.";
- // TODO: it may be the safest if we turn fd_server into a library to run in a
- // thread.
- }
- }
- }
-
-private:
- explicit FileSharingSession(pid_t pid) : fd_server_pid_(pid) {}
-
- pid_t fd_server_pid_;
-};
-
-int MakeRequestToVM(int cid, const uint8_t* marshaled, size_t size, const int* ro_fds,
- size_t ro_fds_num, const int* rw_fds, size_t rw_fds_num) {
- ndk::SpAIBinder binder(RpcClient(cid, kCompsvcRpcPort));
- std::shared_ptr<ICompOsService> service = ICompOsService::fromBinder(binder);
- if (!service) {
- LOG(ERROR) << "Cannot connect to the service";
- return -1;
- }
-
- std::unique_ptr<FileSharingSession> session_raii =
- FileSharingSession::Create(ro_fds, ro_fds_num, rw_fds, rw_fds_num);
- if (!session_raii) {
- LOG(ERROR) << "Cannot start to share FDs";
- return -1;
- }
-
- // Since the input from the C API are raw pointers, we need to duplicate them into vectors in
- // order to pass to the binder API.
- std::vector<uint8_t> duplicated_buffer(marshaled, marshaled + size);
- FdAnnotation fd_annotation = {
- .input_fds = std::vector<int>(ro_fds, ro_fds + ro_fds_num),
- .output_fds = std::vector<int>(rw_fds, rw_fds + rw_fds_num),
- };
- int8_t exit_code;
- ndk::ScopedAStatus status = service->compile(duplicated_buffer, fd_annotation, &exit_code);
- if (!status.isOk()) {
- LOG(ERROR) << "Compilation failed (exit " << std::to_string(exit_code)
- << "): " << status.getDescription();
- return -1;
- }
- return 0;
-}
-
-int MakeRequestToComposd(const uint8_t* marshaled, size_t size, const int* ro_fds,
- size_t ro_fds_num, const int* rw_fds, size_t rw_fds_num) {
- ndk::SpAIBinder binder(AServiceManager_getService(kComposdServiceName));
- std::shared_ptr<IIsolatedCompilationService> service =
- IIsolatedCompilationService::fromBinder(binder);
- if (!service) {
- LOG(ERROR) << "Cannot connect to the service";
- return -1;
- }
-
- auto session_raii = std::unique_ptr<FileSharingSession>(
- FileSharingSession::Create(ro_fds, ro_fds_num, rw_fds, rw_fds_num));
- if (!session_raii) {
- LOG(ERROR) << "Cannot start to share FDs";
- return -1;
- }
-
- // Since the input from the C API are raw pointers, we need to duplicate them into vectors in
- // order to pass to the binder API.
- std::vector<uint8_t> duplicated_buffer(marshaled, marshaled + size);
- FdAnnotation fd_annotation = {
- .input_fds = std::vector<int>(ro_fds, ro_fds + ro_fds_num),
- .output_fds = std::vector<int>(rw_fds, rw_fds + rw_fds_num),
- };
- int8_t exit_code;
- ndk::ScopedAStatus status = service->compile(duplicated_buffer, fd_annotation, &exit_code);
- if (!status.isOk()) {
- LOG(ERROR) << "Compilation failed (exit " << std::to_string(exit_code)
- << "): " << status.getDescription();
- return -1;
- }
- return 0;
-}
-
-} // namespace
-
-__BEGIN_DECLS
-
-int AComposClient_Request(int cid, const uint8_t* marshaled, size_t size, const int* ro_fds,
- size_t ro_fds_num, const int* rw_fds, size_t rw_fds_num) {
- if (cid == -1 /* VMADDR_CID_ANY */) {
- return MakeRequestToComposd(marshaled, size, ro_fds, ro_fds_num, rw_fds, rw_fds_num);
- } else {
- return MakeRequestToVM(cid, marshaled, size, ro_fds, ro_fds_num, rw_fds, rw_fds_num);
- }
-}
-
-__END_DECLS
diff --git a/compos/libcompos_client/libcompos_client.rs b/compos/libcompos_client/libcompos_client.rs
new file mode 100644
index 0000000..55d70a4
--- /dev/null
+++ b/compos/libcompos_client/libcompos_client.rs
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! A library for a client to send requests to the CompOS service in the VM.
+
+use anyhow::{Context, Result};
+use binder_common::rpc_client::connect_rpc_binder;
+use libc::c_int;
+use log::{debug, error, warn};
+use minijail::Minijail;
+use nix::fcntl::OFlag;
+use nix::unistd::pipe2;
+use std::fs::File;
+use std::io::Read;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::path::Path;
+use std::slice::from_raw_parts;
+
+use android_system_composd::{
+ aidl::android::system::composd::IIsolatedCompilationService::IIsolatedCompilationService,
+ binder::wait_for_interface,
+};
+use compos_aidl_interface::aidl::com::android::compos::{
+ FdAnnotation::FdAnnotation, ICompOsService::ICompOsService,
+};
+use compos_aidl_interface::binder::Strong;
+use compos_common::{COMPOS_VSOCK_PORT, VMADDR_CID_ANY};
+
+const FD_SERVER_BIN: &str = "/apex/com.android.virt/bin/fd_server";
+
+fn get_composd() -> Result<Strong<dyn IIsolatedCompilationService>> {
+ wait_for_interface::<dyn IIsolatedCompilationService>("android.system.composd")
+ .context("Failed to find IIsolatedCompilationService")
+}
+
+fn spawn_fd_server(fd_annotation: &FdAnnotation, ready_file: File) -> Result<Minijail> {
+ let mut inheritable_fds = Vec::new();
+ let mut args = vec![FD_SERVER_BIN.to_string()];
+ for fd in &fd_annotation.input_fds {
+ args.push("--ro-fds".to_string());
+ args.push(fd.to_string());
+ inheritable_fds.push(*fd);
+ }
+ for fd in &fd_annotation.output_fds {
+ args.push("--rw-fds".to_string());
+ args.push(fd.to_string());
+ inheritable_fds.push(*fd);
+ }
+ let ready_fd = ready_file.as_raw_fd();
+ args.push("--ready-fd".to_string());
+ args.push(ready_fd.to_string());
+ inheritable_fds.push(ready_fd);
+
+ let jail = Minijail::new()?;
+ let _pid = jail.run(Path::new(FD_SERVER_BIN), &inheritable_fds, &args)?;
+ Ok(jail)
+}
+
+fn create_pipe() -> Result<(File, File)> {
+ let (raw_read, raw_write) = pipe2(OFlag::O_CLOEXEC)?;
+ // SAFETY: We are the sole owners of these fds as they were just created.
+ let read_fd = unsafe { File::from_raw_fd(raw_read) };
+ let write_fd = unsafe { File::from_raw_fd(raw_write) };
+ Ok((read_fd, write_fd))
+}
+
+fn wait_for_fd_server_ready(mut ready_fd: File) -> Result<()> {
+ let mut buffer = [0];
+ // When fd_server is ready it closes its end of the pipe. And if it exits, the pipe is also
+ // closed. Either way this read will return 0 bytes at that point, and there's no point waiting
+ // any longer.
+ let _ = ready_fd.read(&mut buffer).context("Waiting for fd_server to be ready")?;
+ debug!("fd_server is ready");
+ Ok(())
+}
+
+fn try_request(cid: c_int, marshaled: &[u8], fd_annotation: FdAnnotation) -> Result<c_int> {
+ // 1. Spawn a fd_server to serve remote read/write requests.
+ let (ready_read_fd, ready_write_fd) = create_pipe()?;
+ let fd_server_jail = spawn_fd_server(&fd_annotation, ready_write_fd)?;
+ let fd_server_lifetime = scopeguard::guard(fd_server_jail, |fd_server_jail| {
+ if let Err(e) = fd_server_jail.kill() {
+ if !matches!(e, minijail::Error::Killed(_)) {
+ warn!("Failed to kill fd_server: {}", e);
+ }
+ }
+ });
+
+ // 2. Send the marshaled request to the remote.
+ let cid = cid as u32;
+ let result = if cid == VMADDR_CID_ANY {
+ // Sentinel value that indicates we should use composd
+ let composd = get_composd()?;
+ wait_for_fd_server_ready(ready_read_fd)?;
+ composd.compile(marshaled, &fd_annotation)
+ } else {
+ // Call directly into the VM
+ let compos_vm = connect_rpc_binder::<dyn ICompOsService>(cid, COMPOS_VSOCK_PORT)
+ .context("Cannot connect to RPC binder")?;
+ wait_for_fd_server_ready(ready_read_fd)?;
+ compos_vm.compile(marshaled, &fd_annotation)
+ };
+ let result = result.context("Binder call failed")?;
+
+ // Be explicit about the lifetime, which should last at least until the task is finished.
+ drop(fd_server_lifetime);
+
+ Ok(c_int::from(result))
+}
+
+/// A public C API. See libcompos_client.h for the canonical doc.
+///
+/// # Safety
+///
+/// The client must provide legitimate pointers with correct sizes to the backing arrays.
+#[no_mangle]
+pub unsafe extern "C" fn AComposClient_Request(
+ cid: c_int,
+ marshaled: *const u8,
+ size: usize,
+ ro_fds: *const c_int,
+ ro_fds_num: usize,
+ rw_fds: *const c_int,
+ rw_fds_num: usize,
+) -> c_int {
+ if marshaled.is_null() || ro_fds.is_null() || rw_fds.is_null() {
+ error!("Argument pointers should not be null");
+ return -1;
+ }
+
+ // The unsafe parts.
+ let ro_fd_slice = from_raw_parts(ro_fds, ro_fds_num);
+ let rw_fd_slice = from_raw_parts(rw_fds, rw_fds_num);
+ let marshaled_slice = from_raw_parts(marshaled, size);
+
+ let fd_annotation =
+ FdAnnotation { input_fds: ro_fd_slice.to_vec(), output_fds: rw_fd_slice.to_vec() };
+
+ match try_request(cid, marshaled_slice, fd_annotation) {
+ Ok(exit_code) => exit_code,
+ Err(e) => {
+ error!("AComposClient_Request failed: {:?}", e);
+ -1
+ }
+ }
+}
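Since the Rust implementation keeps exporting the same `AComposClient_Request` C symbol, existing native callers (odrefresh, via libcompos_client.h) are unaffected. Purely as an illustrative sketch, a Rust caller of that C entry point could look like this (the request bytes and fd lists are hypothetical placeholders, not taken from this change):

// Illustrative sketch only: calling the exported C entry point from Rust. The real
// consumer is odrefresh (see the visibility list above).
use std::os::raw::c_int;

extern "C" {
    fn AComposClient_Request(
        cid: c_int,
        marshaled: *const u8,
        size: usize,
        ro_fds: *const c_int,
        ro_fds_num: usize,
        rw_fds: *const c_int,
        rw_fds_num: usize,
    ) -> c_int;
}

fn request_compilation(cid: c_int, request: &[u8], ro_fds: &[c_int], rw_fds: &[c_int]) -> c_int {
    // SAFETY: the slices outlive the call and their lengths are passed alongside the
    // pointers, matching the contract documented in libcompos_client.h.
    unsafe {
        AComposClient_Request(
            cid,
            request.as_ptr(),
            request.len(),
            ro_fds.as_ptr(),
            ro_fds.len(),
            rw_fds.as_ptr(),
            rw_fds.len(),
        )
    }
}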
diff --git a/compos/src/compsvc_main.rs b/compos/src/compsvc_main.rs
index 6887947..fc00039 100644
--- a/compos/src/compsvc_main.rs
+++ b/compos/src/compsvc_main.rs
@@ -36,9 +36,6 @@
use binder_common::rpc_server::run_rpc_server;
use compos_common::COMPOS_VSOCK_PORT;
use log::{debug, error};
-use nix::ioctl_read_bad;
-use std::fs::OpenOptions;
-use std::os::unix::io::AsRawFd;
/// The CID representing the host VM
const VMADDR_CID_HOST: u32 = 2;
@@ -64,12 +61,11 @@
let service = compsvc::new_binder()?.as_binder();
let vm_service = get_vm_service()?;
- let local_cid = get_local_cid()?;
debug!("compsvc is starting as a rpc service.");
let retval = run_rpc_server(service, COMPOS_VSOCK_PORT, || {
- if let Err(e) = vm_service.notifyPayloadReady(local_cid as i32) {
+ if let Err(e) = vm_service.notifyPayloadReady() {
error!("Unable to notify ready: {}", e);
}
});
@@ -94,26 +90,3 @@
FromIBinder::try_from(ibinder).context("Connecting to IVirtualMachineService")
}
-
-// TODO(b/199259751): remove this after VS can check the peer addresses of binder clients
-fn get_local_cid() -> Result<u32> {
- let f = OpenOptions::new()
- .read(true)
- .write(false)
- .open("/dev/vsock")
- .context("Failed to open /dev/vsock")?;
- let mut cid = 0;
- // SAFETY: the kernel only modifies the given u32 integer.
- unsafe { vm_sockets_get_local_cid(f.as_raw_fd(), &mut cid) }
- .context("Failed to get local CID")?;
- Ok(cid)
-}
-
-// TODO(b/199259751): remove this after VS can check the peer addresses of binder clients
-const IOCTL_VM_SOCKETS_GET_LOCAL_CID: usize = 0x7b9;
-ioctl_read_bad!(
- /// Gets local cid from /dev/vsock
- vm_sockets_get_local_cid,
- IOCTL_VM_SOCKETS_GET_LOCAL_CID,
- u32
-);
diff --git a/compos/verify_key/verify_key.rs b/compos/verify_key/verify_key.rs
index a5b0b6b..0a9d36b 100644
--- a/compos/verify_key/verify_key.rs
+++ b/compos/verify_key/verify_key.rs
@@ -19,7 +19,7 @@
use anyhow::{bail, Context, Result};
use compos_aidl_interface::binder::ProcessState;
-use compos_common::compos_client::VmInstance;
+use compos_common::compos_client::{VmInstance, VmParameters};
use compos_common::{
COMPOS_DATA_ROOT, CURRENT_INSTANCE_DIR, INSTANCE_IMAGE_FILE, PENDING_INSTANCE_DIR,
PRIVATE_KEY_BLOB_FILE, PUBLIC_KEY_FILE,
@@ -91,7 +91,9 @@
let public_key = read_small_file(public_key).context("Failed to read public key")?;
let instance_image = File::open(instance_image).context("Failed to open instance image")?;
- let vm_instance = VmInstance::start(instance_image)?;
+ let virtualization_service = VmInstance::connect_to_virtualization_service()?;
+ let vm_instance =
+ VmInstance::start(&*virtualization_service, instance_image, &VmParameters::default())?;
let service = vm_instance.get_service()?;
let result = service.verifySigningKey(&blob, &public_key).context("Verifying signing key")?;
diff --git a/demo/Android.bp b/demo/Android.bp
index 749ca90..1342a26 100644
--- a/demo/Android.bp
+++ b/demo/Android.bp
@@ -19,5 +19,4 @@
platform_apis: true,
use_embedded_native_libs: true,
v4_signature: true,
- certificate: "platform",
}
diff --git a/demo/AndroidManifest.xml b/demo/AndroidManifest.xml
index 7e1a58d..74ec210 100644
--- a/demo/AndroidManifest.xml
+++ b/demo/AndroidManifest.xml
@@ -6,7 +6,8 @@
<application
android:label="MicrodroidDemo"
- android:theme="@style/Theme.MicrodroidDemo">
+ android:theme="@style/Theme.MicrodroidDemo"
+ android:testOnly="true">
<uses-library android:name="android.system.virtualmachine" android:required="true" />
<activity android:name=".MainActivity" android:exported="true">
<intent-filter>
diff --git a/demo/java/com/android/microdroid/demo/MainActivity.java b/demo/java/com/android/microdroid/demo/MainActivity.java
index bc87c3c..15d9046 100644
--- a/demo/java/com/android/microdroid/demo/MainActivity.java
+++ b/demo/java/com/android/microdroid/demo/MainActivity.java
@@ -64,47 +64,14 @@
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
- TextView consoleView = (TextView) findViewById(R.id.consoleOutput);
- TextView payloadView = (TextView) findViewById(R.id.payloadOutput);
Button runStopButton = (Button) findViewById(R.id.runStopButton);
- ScrollView scrollView = (ScrollView) findViewById(R.id.scrollConsoleOutput);
+ TextView consoleView = (TextView) findViewById(R.id.consoleOutput);
+ TextView logView = (TextView) findViewById(R.id.logOutput);
+ TextView payloadView = (TextView) findViewById(R.id.payloadOutput);
+ ScrollView scrollConsoleView = (ScrollView) findViewById(R.id.scrollConsoleOutput);
+ ScrollView scrollLogView = (ScrollView) findViewById(R.id.scrollLogOutput);
- // When the console output or payload output is updated, append the new line to the
- // corresponding text view.
VirtualMachineModel model = new ViewModelProvider(this).get(VirtualMachineModel.class);
- model.getConsoleOutput()
- .observeForever(
- new Observer<String>() {
- @Override
- public void onChanged(String line) {
- consoleView.append(line + "\n");
- scrollView.fullScroll(View.FOCUS_DOWN);
- }
- });
- model.getPayloadOutput()
- .observeForever(
- new Observer<String>() {
- @Override
- public void onChanged(String line) {
- payloadView.append(line + "\n");
- }
- });
-
- // When the VM status is updated, change the label of the button
- model.getStatus()
- .observeForever(
- new Observer<VirtualMachine.Status>() {
- @Override
- public void onChanged(VirtualMachine.Status status) {
- if (status == VirtualMachine.Status.RUNNING) {
- runStopButton.setText("Stop");
- consoleView.setText("");
- payloadView.setText("");
- } else {
- runStopButton.setText("Run");
- }
- }
- });
// When the button is clicked, run or stop the VM
runStopButton.setOnClickListener(
@@ -119,12 +86,86 @@
}
}
});
+
+ // When the VM status is updated, change the label of the button
+ model.getStatus()
+ .observeForever(
+ new Observer<VirtualMachine.Status>() {
+ @Override
+ public void onChanged(VirtualMachine.Status status) {
+ if (status == VirtualMachine.Status.RUNNING) {
+ runStopButton.setText("Stop");
+ // Clear the outputs from the previous run
+ consoleView.setText("");
+ logView.setText("");
+ payloadView.setText("");
+ } else {
+ runStopButton.setText("Run");
+ }
+ }
+ });
+
+ // When the console, log, or payload output is updated, append the new line to the
+ // corresponding text view.
+ model.getConsoleOutput()
+ .observeForever(
+ new Observer<String>() {
+ @Override
+ public void onChanged(String line) {
+ consoleView.append(line + "\n");
+ scrollConsoleView.fullScroll(View.FOCUS_DOWN);
+ }
+ });
+ model.getLogOutput()
+ .observeForever(
+ new Observer<String>() {
+ @Override
+ public void onChanged(String line) {
+ logView.append(line + "\n");
+ scrollLogView.fullScroll(View.FOCUS_DOWN);
+ }
+ });
+ model.getPayloadOutput()
+ .observeForever(
+ new Observer<String>() {
+ @Override
+ public void onChanged(String line) {
+ payloadView.append(line + "\n");
+ }
+ });
}
- /** Models a virtual machine and console output from it. */
+ /** Reads data from an input stream and posts each line to the given live data */
+ static class Reader implements Runnable {
+ private final String mName;
+ private final MutableLiveData<String> mOutput;
+ private final InputStream mStream;
+
+ Reader(String name, MutableLiveData<String> output, InputStream stream) {
+ mName = name;
+ mOutput = output;
+ mStream = stream;
+ }
+
+ @Override
+ public void run() {
+ try {
+ BufferedReader reader = new BufferedReader(new InputStreamReader(mStream));
+ String line;
+ while ((line = reader.readLine()) != null && !Thread.interrupted()) {
+ mOutput.postValue(line);
+ }
+ } catch (IOException e) {
+ Log.e(TAG, "Exception while posting " + mName + " output: " + e.getMessage());
+ }
+ }
+ }
+
+ /** Models a virtual machine and outputs from it. */
public static class VirtualMachineModel extends AndroidViewModel {
private VirtualMachine mVirtualMachine;
private final MutableLiveData<String> mConsoleOutput = new MutableLiveData<>();
+ private final MutableLiveData<String> mLogOutput = new MutableLiveData<>();
private final MutableLiveData<String> mPayloadOutput = new MutableLiveData<>();
private final MutableLiveData<VirtualMachine.Status> mStatus = new MutableLiveData<>();
private ExecutorService mExecutorService;
@@ -134,20 +175,11 @@
mStatus.setValue(VirtualMachine.Status.DELETED);
}
- private static void postOutput(MutableLiveData<String> output, InputStream stream)
- throws IOException {
- BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
- String line;
- while ((line = reader.readLine()) != null && !Thread.interrupted()) {
- output.postValue(line);
- }
- }
-
/** Runs a VM */
public void run(boolean debug) {
// Create a VM and run it.
// TODO(jiyong): remove the call to idsigPath
- mExecutorService = Executors.newFixedThreadPool(3);
+ mExecutorService = Executors.newFixedThreadPool(4);
VirtualMachineCallback callback =
new VirtualMachineCallback() {
@@ -162,23 +194,8 @@
return;
}
- mService.execute(
- new Runnable() {
- @Override
- public void run() {
- try {
- postOutput(
- mPayloadOutput,
- new FileInputStream(
- stream.getFileDescriptor()));
- } catch (IOException e) {
- Log.e(
- TAG,
- "IOException while reading payload: "
- + e.getMessage());
- }
- }
- });
+ InputStream input = new FileInputStream(stream.getFileDescriptor());
+ mService.execute(new Reader("payload", mPayloadOutput, input));
}
@Override
@@ -261,29 +278,23 @@
VirtualMachineConfig config = builder.build();
VirtualMachineManager vmm = VirtualMachineManager.getInstance(getApplication());
mVirtualMachine = vmm.getOrCreate("demo_vm", config);
+ try {
+ mVirtualMachine.setConfig(config);
+ } catch (VirtualMachineException e) {
+ mVirtualMachine.delete();
+ mVirtualMachine = vmm.create("demo_vm", config);
+ }
mVirtualMachine.run();
- mVirtualMachine.setCallback(callback);
+ mVirtualMachine.setCallback(Executors.newSingleThreadExecutor(), callback);
mStatus.postValue(mVirtualMachine.getStatus());
+
+ InputStream console = mVirtualMachine.getConsoleOutputStream();
+ InputStream log = mVirtualMachine.getLogOutputStream();
+ mExecutorService.execute(new Reader("console", mConsoleOutput, console));
+ mExecutorService.execute(new Reader("log", mLogOutput, log));
} catch (VirtualMachineException e) {
throw new RuntimeException(e);
}
-
- // Read console output from the VM in the background
- mExecutorService.execute(
- new Runnable() {
- @Override
- public void run() {
- try {
- postOutput(
- mConsoleOutput, mVirtualMachine.getConsoleOutputStream());
- } catch (IOException | VirtualMachineException e) {
- Log.e(
- TAG,
- "Exception while posting console output: "
- + e.getMessage());
- }
- }
- });
}
/** Stops the running VM */
@@ -303,6 +314,11 @@
return mConsoleOutput;
}
+ /** Returns the log output from the VM */
+ public LiveData<String> getLogOutput() {
+ return mLogOutput;
+ }
+
/** Returns the payload output from the VM */
public LiveData<String> getPayloadOutput() {
return mPayloadOutput;
diff --git a/demo/res/layout/activity_main.xml b/demo/res/layout/activity_main.xml
index e100027..f0e35d6 100644
--- a/demo/res/layout/activity_main.xml
+++ b/demo/res/layout/activity_main.xml
@@ -62,17 +62,50 @@
<ScrollView
android:id="@+id/scrollConsoleOutput"
- android:layout_width="match_parent"
+ android:layout_width="wrap_content"
android:layout_height="0dp"
android:layout_weight="2">
- <TextView
- android:id="@+id/consoleOutput"
+ <HorizontalScrollView
android:layout_width="match_parent"
- android:layout_height="wrap_content"
- android:background="#FFEB3B"
- android:fontFamily="monospace"
- android:textColor="#000000" />
+ android:layout_height="match_parent">
+
+ <TextView
+ android:id="@+id/consoleOutput"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:background="#FFEB3B"
+ android:fontFamily="monospace"
+ android:textSize="10sp"
+ android:textColor="#000000" />
+ </HorizontalScrollView>
+ </ScrollView>
+
+ <TextView
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_marginTop="10dp"
+ android:text="Log output:" />
+
+ <ScrollView
+ android:id="@+id/scrollLogOutput"
+ android:layout_width="wrap_content"
+ android:layout_height="0dp"
+ android:layout_weight="2">
+
+ <HorizontalScrollView
+ android:layout_width="match_parent"
+ android:layout_height="match_parent">
+
+ <TextView
+ android:id="@+id/logOutput"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:background="#FFEB3B"
+ android:fontFamily="monospace"
+ android:textSize="10sp"
+ android:textColor="#000000" />
+ </HorizontalScrollView>
</ScrollView>
</LinearLayout>
diff --git a/javalib/AndroidManifest.xml b/javalib/AndroidManifest.xml
index f96b39f..2a0b903 100644
--- a/javalib/AndroidManifest.xml
+++ b/javalib/AndroidManifest.xml
@@ -18,7 +18,7 @@
package="com.android.virtualmachine.res">
<permission android:name="android.permission.MANAGE_VIRTUAL_MACHINE"
- android:protectionLevel="signature" />
+ android:protectionLevel="signature|development" />
<permission android:name="android.permission.DEBUG_VIRTUAL_MACHINE"
android:protectionLevel="signature" />
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachine.java b/javalib/src/android/system/virtualmachine/VirtualMachine.java
index 2da7ecb..6556b87 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachine.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachine.java
@@ -19,6 +19,7 @@
import static android.os.ParcelFileDescriptor.MODE_READ_ONLY;
import static android.os.ParcelFileDescriptor.MODE_READ_WRITE;
+import android.annotation.CallbackExecutor;
import android.annotation.NonNull;
import android.annotation.Nullable;
import android.content.Context;
@@ -42,6 +43,7 @@
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.util.Optional;
+import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@@ -110,9 +112,15 @@
/** The registered callback */
private @Nullable VirtualMachineCallback mCallback;
+ /** The executor on which the callback will be executed */
+ private @NonNull Executor mCallbackExecutor;
+
private @Nullable ParcelFileDescriptor mConsoleReader;
private @Nullable ParcelFileDescriptor mConsoleWriter;
+ private @Nullable ParcelFileDescriptor mLogReader;
+ private @Nullable ParcelFileDescriptor mLogWriter;
+
private final ExecutorService mExecutorService = Executors.newCachedThreadPool();
static {
@@ -260,7 +268,10 @@
* Registers the callback object to get events from the virtual machine. If a callback was
* already registered, it is replaced with the new one.
*/
- public void setCallback(@Nullable VirtualMachineCallback callback) {
+ public void setCallback(
+ @NonNull @CallbackExecutor Executor executor,
+ @Nullable VirtualMachineCallback callback) {
+ mCallbackExecutor = executor;
mCallback = callback;
}
@@ -297,6 +308,12 @@
mConsoleWriter = pipe[1];
}
+ if (mLogReader == null && mLogWriter == null) {
+ ParcelFileDescriptor[] pipe = ParcelFileDescriptor.createPipe();
+ mLogReader = pipe[0];
+ mLogWriter = pipe[1];
+ }
+
VirtualMachineAppConfig appConfig = getConfig().toParcel();
// Fill the idsig file by hashing the apk
@@ -310,7 +327,7 @@
android.system.virtualizationservice.VirtualMachineConfig vmConfigParcel =
android.system.virtualizationservice.VirtualMachineConfig.appConfig(appConfig);
- mVirtualMachine = service.createVm(vmConfigParcel, mConsoleWriter);
+ mVirtualMachine = service.createVm(vmConfigParcel, mConsoleWriter, mLogWriter);
mVirtualMachine.registerCallback(
new IVirtualMachineCallback.Stub() {
@Override
@@ -319,7 +336,8 @@
if (cb == null) {
return;
}
- cb.onPayloadStarted(VirtualMachine.this, stream);
+ mCallbackExecutor.execute(
+ () -> cb.onPayloadStarted(VirtualMachine.this, stream));
}
@Override
@@ -328,7 +346,7 @@
if (cb == null) {
return;
}
- cb.onPayloadReady(VirtualMachine.this);
+ mCallbackExecutor.execute(() -> cb.onPayloadReady(VirtualMachine.this));
}
@Override
@@ -337,7 +355,8 @@
if (cb == null) {
return;
}
- cb.onPayloadFinished(VirtualMachine.this, exitCode);
+ mCallbackExecutor.execute(
+ () -> cb.onPayloadFinished(VirtualMachine.this, exitCode));
}
@Override
@@ -346,7 +365,7 @@
if (cb == null) {
return;
}
- cb.onDied(VirtualMachine.this);
+ mCallbackExecutor.execute(() -> cb.onDied(VirtualMachine.this));
}
});
service.asBinder()
@@ -377,6 +396,14 @@
return new FileInputStream(mConsoleReader.getFileDescriptor());
}
+ /** Returns the stream object representing the log output from the virtual machine. */
+ public @NonNull InputStream getLogOutputStream() throws VirtualMachineException {
+ if (mLogReader == null) {
+ throw new VirtualMachineException("Log output not available");
+ }
+ return new FileInputStream(mLogReader.getFileDescriptor());
+ }
+
/**
* Stops this virtual machine. Stopping a virtual machine is like pulling the plug on a real
* computer; the machine halts immediately. Software running on the virtual machine is not
@@ -401,6 +428,7 @@
final File vmRootDir = mConfigFilePath.getParentFile();
mConfigFilePath.delete();
mInstanceFilePath.delete();
+ mIdsigFilePath.delete();
vmRootDir.delete();
}
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 389ebb0..3eaf124 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -44,16 +44,16 @@
android_system_image {
name: "microdroid",
use_avb: true,
- avb_private_key: ":avb_testkey_rsa4096",
+ avb_private_key: ":microdroid_sign_key",
avb_algorithm: "SHA256_RSA4096",
partition_name: "system",
deps: [
"init_second_stage",
"microdroid_build_prop",
"microdroid_init_rc",
+ "microdroid_ueventd_rc",
"microdroid_launcher",
- "ueventd.rc",
"libbinder",
"libbinder_ndk",
"libstdc++",
@@ -136,6 +136,13 @@
installable: false, // avoid collision with system partition's init.rc
}
+prebuilt_etc {
+ name: "microdroid_ueventd_rc",
+ filename: "ueventd.rc",
+ src: "ueventd.rc",
+ installable: false, // avoid collision with system partition's ueventd.rc
+}
+
prebuilt_root {
name: "microdroid_build_prop",
filename: "build.prop",
@@ -186,7 +193,7 @@
],
},
},
- avb_private_key: ":avb_testkey_rsa4096",
+ avb_private_key: ":microdroid_sign_key",
avb_algorithm: "SHA256_RSA4096",
file_contexts: ":microdroid_vendor_file_contexts.gen",
}
@@ -225,7 +232,15 @@
},
x86_64: {
kernel_prebuilt: ":kernel_prebuilts-5.10-x86_64",
- cmdline: microdroid_boot_cmdline + ["acpi=noirq"],
+ cmdline: microdroid_boot_cmdline + [
+ // console=none works around an x86-specific u-boot behavior: when no console=
+ // option is found in the kernel command line, u-boot automatically adds
+ // console=ttyS0. Adding console=none prevents u-boot from doing that. Note that
+ // the console is set to hvc0 by bootconfig if the VM is configured as
+ // debuggable.
+ "console=none",
+ "acpi=noirq",
+ ],
},
},
@@ -233,7 +248,7 @@
header_version: "4",
partition_name: "boot",
use_avb: true,
- avb_private_key: ":avb_testkey_rsa4096",
+ avb_private_key: ":microdroid_sign_key",
}
android_filesystem {
@@ -270,7 +285,7 @@
},
partition_name: "vendor_boot",
use_avb: true,
- avb_private_key: ":avb_testkey_rsa4096",
+ avb_private_key: ":microdroid_sign_key",
}
android_filesystem {
@@ -320,25 +335,92 @@
cmd: "cat $(in) > $(out)",
}
-// TODO(b/203031847) sign these bootconfig images using avb
+vbmeta {
+ name: "microdroid_vbmeta_bootconfig",
+ partition_name: "vbmeta",
+ private_key: ":microdroid_sign_key",
+ chained_partitions: [
+ {
+ name: "bootconfig",
+ private_key: ":microdroid_sign_key",
+ },
+ ],
+}
+
+// See external/avb/avbtool.py
+// MAX_VBMETA_SIZE=64KB, MAX_FOOTER_SIZE=4KB
+avb_hash_footer_kb = "68"
+
prebuilt_etc {
name: "microdroid_bootconfig_normal",
- src: "bootconfig.normal",
+ src: ":microdroid_bootconfig_normal_gen",
filename: "microdroid_bootconfig.normal",
}
prebuilt_etc {
name: "microdroid_bootconfig_app_debuggable",
- src: "bootconfig.app_debuggable",
+ src: ":microdroid_bootconfig_app_debuggable_gen",
filename: "microdroid_bootconfig.app_debuggable",
}
prebuilt_etc {
name: "microdroid_bootconfig_full_debuggable",
- src: "bootconfig.full_debuggable",
+ src: ":microdroid_bootconfig_full_debuggable_gen",
filename: "microdroid_bootconfig.full_debuggable",
}
+// TODO(jiyong): make a new module type that does the avb signing
+genrule {
+ name: "microdroid_bootconfig_normal_gen",
+ tools: ["avbtool"],
+ srcs: [
+ "bootconfig.normal",
+ ":microdroid_sign_key",
+ ],
+ out: ["microdroid_bootconfig.normal"],
+ cmd: "cp $(location bootconfig.normal) $(out) && " +
+ "$(location avbtool) add_hash_footer " +
+ "--algorithm SHA256_RSA4096 " +
+ "--partition_name bootconfig " +
+ "--key $(location :microdroid_sign_key) " +
+ "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+ "--image $(out)",
+}
+
+genrule {
+ name: "microdroid_bootconfig_app_debuggable_gen",
+ tools: ["avbtool"],
+ srcs: [
+ "bootconfig.app_debuggable",
+ ":microdroid_sign_key",
+ ],
+ out: ["microdroid_bootconfig.app_debuggable"],
+ cmd: "cp $(location bootconfig.app_debuggable) $(out) && " +
+ "$(location avbtool) add_hash_footer " +
+ "--algorithm SHA256_RSA4096 " +
+ "--partition_name bootconfig " +
+ "--key $(location :microdroid_sign_key) " +
+ "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+ "--image $(out)",
+}
+
+genrule {
+ name: "microdroid_bootconfig_full_debuggable_gen",
+ tools: ["avbtool"],
+ srcs: [
+ "bootconfig.full_debuggable",
+ ":microdroid_sign_key",
+ ],
+ out: ["microdroid_bootconfig.full_debuggable"],
+ cmd: "cp $(location bootconfig.full_debuggable) $(out) && " +
+ "$(location avbtool) add_hash_footer " +
+ "--algorithm SHA256_RSA4096 " +
+ "--partition_name bootconfig " +
+ "--key $(location :microdroid_sign_key) " +
+ "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+ "--image $(out)",
+}
+
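The --partition_size expression in these genrules adds avb_hash_footer_kb (64 KB of vbmeta plus 4 KB of footer) on top of the image size rounded up to the next 4 KB boundary. As a rough sketch of that arithmetic (illustration only, not part of the build):

// Illustrative sketch only: the partition size computed by the add_hash_footer genrules above.
const AVB_HASH_FOOTER_KB: u64 = 68; // MAX_VBMETA_SIZE (64 KB) + MAX_FOOTER_SIZE (4 KB)

fn partition_size(image_size: u64) -> u64 {
    let rounded_image = (image_size + 4096 - 1) / 4096 * 4096; // round up to a 4 KB boundary
    AVB_HASH_FOOTER_KB * 1024 + rounded_image
}

// e.g. a 5000-byte bootconfig image gets an 8192-byte data area plus 69632 bytes of
// footer headroom, i.e. partition_size(5000) == 77824.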
prebuilt_etc {
name: "microdroid_fstab",
src: "fstab.microdroid",
@@ -354,22 +436,18 @@
// For unknown reason, the signed bootloader doesn't work on x86_64. Until the problem
// is fixed, let's use the unsigned bootloader for the architecture.
// TODO(b/185115783): remove this
- src: ":cuttlefish_crosvm_bootloader",
+ src: ":microdroid_bootloader_pubkey_replaced",
},
},
filename: "microdroid_bootloader",
}
-// See external/avb/avbtool.py
-// MAX_VBMETA_SIZE=64KB, MAX_FOOTER_SIZE=4KB
-avb_hash_footer_kb = "68"
-
genrule {
name: "microdroid_bootloader_gen",
tools: ["avbtool"],
srcs: [
- ":cuttlefish_crosvm_bootloader",
- ":avb_testkey_rsa4096",
+ ":microdroid_bootloader_pubkey_replaced",
+ ":microdroid_sign_key",
],
out: ["bootloader-signed"],
// 1. Copy the input to the output because avbtool modifies --image in
@@ -378,17 +456,57 @@
// bootloader file whose size is 1. It can't pass avbtool.
// 3. Add the hash footer. The partition size is set to (image size + 68KB)
// rounded up to 4KB boundary.
- cmd: "cp $(location :cuttlefish_crosvm_bootloader) $(out) && " +
+ cmd: "cp $(location :microdroid_bootloader_pubkey_replaced) $(out) && " +
"if [ $$(stat --format=%s $(out)) -gt 4096 ]; then " +
"$(location avbtool) add_hash_footer " +
"--algorithm SHA256_RSA4096 " +
"--partition_name bootloader " +
- "--key $(location :avb_testkey_rsa4096) " +
+ "--key $(location :microdroid_sign_key) " +
"--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
"--image $(out)" +
"; fi",
}
+// Replace avbpubkey of prebuilt bootloader with the avbpubkey of the signing key
+genrule {
+ name: "microdroid_bootloader_pubkey_replaced",
+ tools: ["replace_bytes"],
+ srcs: [
+ ":microdroid_crosvm_bootloader", // input (bootloader)
+ ":microdroid_crosvm_bootloader.avbpubkey", // old bytes (old pubkey)
+ ":microdroid_bootloader_avbpubkey_gen", // new bytes (new pubkey)
+ ],
+ out: ["bootloader-pubkey-replaced"],
+ // 1. Copy the input to the output (replace_bytes modifies the file in-place)
+ // 2. Check if the file is big enough. For arm and x86 we have a fake
+ // bootloader file whose size is 1 byte. (replace_bytes fails if the key is not found.)
+ // 3. Replace embedded pubkey with new one.
+ cmd: "cp $(location :microdroid_crosvm_bootloader) $(out) && " +
+ "if [ $$(stat --format=%s $(out)) -gt 4096 ]; then " +
+ "$(location replace_bytes) $(out) " +
+ "$(location :microdroid_crosvm_bootloader.avbpubkey) " +
+ "$(location :microdroid_bootloader_avbpubkey_gen)" +
+ "; fi",
+}
+
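Conceptually, the pubkey replacement step overwrites every occurrence of the old embedded avbpubkey with the new one without changing the file size. The actual tool is the replace_bytes Python script added under apex/; the following Rust sketch only illustrates the idea, under the assumption that the old and new keys are the same length:

// Illustrative sketch only: what the pubkey replacement step does conceptually.
// The real tool is apex/replace_bytes.py.
use std::fs;

fn replace_bytes(path: &str, old: &[u8], new: &[u8]) -> std::io::Result<()> {
    assert_eq!(old.len(), new.len(), "replacement must not change the file size");
    let mut data = fs::read(path)?;
    let mut found = false;
    let mut i = 0;
    while i + old.len() <= data.len() {
        if &data[i..i + old.len()] == old {
            data[i..i + old.len()].copy_from_slice(new);
            found = true;
            i += old.len().max(1);
        } else {
            i += 1;
        }
    }
    if !found {
        // Mirrors the "fails if key not found" behavior noted in the genrule comment above.
        return Err(std::io::Error::new(std::io::ErrorKind::NotFound, "old bytes not found"));
    }
    fs::write(path, data)
}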
+// The apex keeps a copy of the avbpubkey embedded in the bootloader so that the embedded
+// avbpubkey can be replaced when re-signing the bootloader.
+prebuilt_etc {
+ name: "microdroid_bootloader.avbpubkey",
+ src: ":microdroid_bootloader_avbpubkey_gen",
+}
+
+// Generate the avbpubkey from the signing key
+genrule {
+ name: "microdroid_bootloader_avbpubkey_gen",
+ tools: ["avbtool"],
+ srcs: [":microdroid_sign_key"],
+ out: ["bootloader.pubkey"],
+ cmd: "$(location avbtool) extract_public_key " +
+ "--key $(location :microdroid_sign_key) " +
+ "--output $(out)",
+}
+
prebuilt_etc {
name: "microdroid_uboot_env",
src: ":microdroid_uboot_env_gen",
@@ -416,28 +534,21 @@
cmd: "$(location mkenvimage_host) -s 4096 -o $(out) $(in)",
}
-vbmeta {
- name: "microdroid_vbmeta",
- partition_name: "vbmeta",
- private_key: ":avb_testkey_rsa4096",
- partitions: [
- "microdroid_vendor",
- "microdroid_vendor_boot-5.10",
- ],
- chained_partitions: [
- {
- name: "vbmeta_system",
- rollback_index_location: 1,
- private_key: ":avb_testkey_rsa4096",
- },
- ],
+// Note that the filesystem images could each be signed with a different key, even though
+// microdroid uses the same key for all of them. However, the key that signs vbmeta must match
+// the pubkey embedded in the bootloader.
+filegroup {
+ name: "microdroid_sign_key",
+ srcs: [":avb_testkey_rsa4096"],
}
vbmeta {
- name: "microdroid_vbmeta_system",
- partition_name: "vbmeta_system",
- private_key: ":avb_testkey_rsa4096",
+ name: "microdroid_vbmeta",
+ partition_name: "vbmeta",
+ private_key: ":microdroid_sign_key",
partitions: [
+ "microdroid_vendor",
+ "microdroid_vendor_boot-5.10",
"microdroid",
"microdroid_boot-5.10",
],
diff --git a/microdroid/bootconfig.app_debuggable b/microdroid/bootconfig.app_debuggable
index 79e2b08..98d326a 100644
--- a/microdroid/bootconfig.app_debuggable
+++ b/microdroid/bootconfig.app_debuggable
@@ -1 +1,14 @@
+# TODO(b/203369076) This should be 0 to disable adb rooting. For now, we can't do that because
+# if this is set to 0, adbd enforces the host authentication but we don't put the adb
+# public key (which represents the owner) in the VM yet.
+androidboot.microdroid.debuggable=1
+
+# Console output is not redirected to the host side.
kernel.console = null
+
+# ADB is supported but rooting is prohibited.
+androidboot.adb.enabled=1
+
+# logd is enabled
+# TODO(b/200914564) Filter only the log from the app
+androidboot.logd.enabled=1
diff --git a/microdroid/bootconfig.full_debuggable b/microdroid/bootconfig.full_debuggable
index d67cd76..fd8a83e 100644
--- a/microdroid/bootconfig.full_debuggable
+++ b/microdroid/bootconfig.full_debuggable
@@ -1 +1,14 @@
+# ro.debuggable is set.
+androidboot.microdroid.debuggable=1
+
+# Kernel message is exported.
kernel.printk.devkmsg=on
+kernel.console=hvc0
+
+# ADB is supported and rooting is possible. Note that
+# ro.adb.secure is still 0 (see build.prop) which means that adbd is started
+# unrooted by default. To root, the developer should explicitly execute `adb root`.
+androidboot.adb.enabled=1
+
+# logd is enabled
+androidboot.logd.enabled=1
diff --git a/microdroid/bootconfig.normal b/microdroid/bootconfig.normal
index 79e2b08..9cfb55a 100644
--- a/microdroid/bootconfig.normal
+++ b/microdroid/bootconfig.normal
@@ -1 +1,11 @@
+# ro.debuggable is off
+androidboot.microdroid.debuggable=0
+
+# Console output is not redirected to the host side.
kernel.console = null
+
+# ADB is not enabled.
+androidboot.adb.enabled=0
+
+# logd is not enabled
+androidboot.logd.enabled=0
diff --git a/microdroid/bootconfig.x86_64 b/microdroid/bootconfig.x86_64
index 75e4a80..6076889 100644
--- a/microdroid/bootconfig.x86_64
+++ b/microdroid/bootconfig.x86_64
@@ -1 +1 @@
-androidboot.boot_devices = pci0000:00/0000:00:01.0,pci0000:00/0000:00:02.0,pci0000:00/0000:00:03.0
+androidboot.boot_devices = pci0000:00/0000:00:04.0,pci0000:00/0000:00:05.0,pci0000:00/0000:00:06.0
diff --git a/microdroid/build.prop b/microdroid/build.prop
index eaca63d..ada945d 100644
--- a/microdroid/build.prop
+++ b/microdroid/build.prop
@@ -1,6 +1,5 @@
# build.prop for microdroid
ro.apex.updatable=true
-ro.debuggable=1
ro.adb.secure=0
service.adb.listen_addrs=vsock:5555
diff --git a/microdroid/fstab.microdroid b/microdroid/fstab.microdroid
index f0e70b6..25d82cc 100644
--- a/microdroid/fstab.microdroid
+++ b/microdroid/fstab.microdroid
@@ -1,2 +1,2 @@
-system /system ext4 noatime,ro,errors=panic wait,slotselect,avb=vbmeta_system,first_stage_mount,logical
+system /system ext4 noatime,ro,errors=panic wait,slotselect,avb=vbmeta,first_stage_mount,logical
vendor /vendor ext4 noatime,ro,errors=panic wait,slotselect,avb=vbmeta,first_stage_mount,logical
diff --git a/microdroid/init.rc b/microdroid/init.rc
index 23434bb..664402f 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -30,6 +30,8 @@
# payloads are not designed to run with bootstrap bionic
setprop apex_config.done true
+ setprop ro.debuggable ${ro.boot.microdroid.debuggable:-0}
+
on init
# Mount binderfs
mkdir /dev/binderfs
@@ -72,17 +74,20 @@
chmod 0664 /dev/cpuset/background/tasks
chmod 0664 /dev/cpuset/system-background/tasks
+on init && property:ro.boot.logd.enabled=1
# Start logd before any other services run to ensure we capture all of their logs.
start logd
+on init
start servicemanager
# TODO(b/185767624): remove hidl after full keymint support
start hwservicemanager
+on init && property:ro.boot.adb.enabled=1
start adbd
-on load_persist_props_action
+on load_persist_props_action && property:ro.boot.logd.enabled=1
start logd
start logd-reinit
@@ -190,6 +195,11 @@
seclabel u:r:shell:s0
setenv HOSTNAME console
+service seriallogging /system/bin/logcat -b all -v threadtime -f /dev/hvc2 *:V
+ disabled
+ user logd
+ group root logd
+
on fs
write /dev/event-log-tags "# content owned by logd
"
diff --git a/microdroid/microdroid.json b/microdroid/microdroid.json
index 1337edf..211bae4 100644
--- a/microdroid/microdroid.json
+++ b/microdroid/microdroid.json
@@ -16,10 +16,6 @@
"path": "/apex/com.android.virt/etc/fs/microdroid_vbmeta.img"
},
{
- "label": "vbmeta_system_a",
- "path": "/apex/com.android.virt/etc/fs/microdroid_vbmeta_system.img"
- },
- {
"label": "super",
"path": "/apex/com.android.virt/etc/fs/microdroid_super.img"
}
diff --git a/microdroid/ueventd.rc b/microdroid/ueventd.rc
new file mode 100644
index 0000000..037b8fc
--- /dev/null
+++ b/microdroid/ueventd.rc
@@ -0,0 +1,29 @@
+uevent_socket_rcvbuf_size 16M
+
+subsystem dma_heap
+ devname uevent_devpath
+ dirname /dev/dma_heap
+
+/dev/null 0666 root root
+/dev/zero 0666 root root
+/dev/full 0666 root root
+/dev/ptmx 0666 root root
+/dev/tty 0666 root root
+/dev/random 0666 root root
+/dev/urandom 0666 root root
+/dev/ashmem* 0666 root root
+/dev/binder 0666 root root
+/dev/hwbinder 0666 root root
+/dev/vndbinder 0666 root root
+
+/dev/pmsg0 0222 root log
+/dev/dma_heap/system 0444 system system
+/dev/dma_heap/system-uncached 0444 system system
+/dev/dma_heap/system-secure 0444 system system
+
+# these should not be world writable
+/dev/rtc0 0640 system system
+/dev/tty0 0660 root system
+
+# Virtual console for logcat
+/dev/hvc2 0660 logd logd
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index dc72c95..f666294 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -27,12 +27,11 @@
use log::{error, info, warn};
use microdroid_metadata::{write_metadata, Metadata};
use microdroid_payload_config::{Task, TaskType, VmPayloadConfig};
-use nix::ioctl_read_bad;
use payload::{get_apex_data_from_payload, load_metadata, to_metadata};
use rustutils::system_properties;
use rustutils::system_properties::PropertyWatcher;
use std::fs::{self, File, OpenOptions};
-use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
+use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::path::Path;
use std::process::{Command, Stdio};
use std::str;
@@ -50,6 +49,7 @@
const VMADDR_CID_HOST: u32 = 2;
const APEX_CONFIG_DONE_PROP: &str = "apex_config.done";
+const LOGD_ENABLED_PROP: &str = "ro.boot.logd.enabled";
fn get_vms_rpc_binder() -> Result<Strong<dyn IVirtualMachineService>> {
// SAFETY: AIBinder returned by RpcClient has correct reference count, and the ownership can be
@@ -67,30 +67,12 @@
}
}
-const IOCTL_VM_SOCKETS_GET_LOCAL_CID: usize = 0x7b9;
-ioctl_read_bad!(
- /// Gets local cid from /dev/vsock
- vm_sockets_get_local_cid,
- IOCTL_VM_SOCKETS_GET_LOCAL_CID,
- u32
-);
-
-// TODO: remove this after VS can check the peer addresses of binder clients
-fn get_local_cid() -> Result<u32> {
- let f = OpenOptions::new()
- .read(true)
- .write(false)
- .open("/dev/vsock")
- .context("failed to open /dev/vsock")?;
- let mut ret = 0;
- // SAFETY: the kernel only modifies the given u32 integer.
- unsafe { vm_sockets_get_local_cid(f.as_raw_fd(), &mut ret) }?;
- Ok(ret)
-}
-
fn main() {
if let Err(e) = try_main() {
- error!("failed with {:?}", e);
+ error!("Failed with {:?}. Shutting down...", e);
+ if let Err(e) = system_properties::write("sys.powerctl", "shutdown") {
+ error!("failed to shutdown {:?}", e);
+ }
std::process::exit(1);
}
}
@@ -240,16 +222,21 @@
/// virtualizationservice in the host side.
fn exec_task(task: &Task, service: &Strong<dyn IVirtualMachineService>) -> Result<()> {
info!("executing main task {:?}...", task);
- let mut child = build_command(task)?.spawn()?;
+ let mut command = build_command(task)?;
- let local_cid = get_local_cid()?;
info!("notifying payload started");
- service.notifyPayloadStarted(local_cid as i32)?;
+ service.notifyPayloadStarted()?;
- let exit_status = child.wait()?;
+ // Start logging if enabled
+ // TODO(b/200914564) set filterspec if debug_level is app_only
+ if system_properties::read(LOGD_ENABLED_PROP)? == "1" {
+ system_properties::write("ctl.start", "seriallogging")?;
+ }
+
+ let exit_status = command.spawn()?.wait()?;
if let Some(code) = exit_status.code() {
info!("notifying payload finished");
- service.notifyPayloadFinished(local_cid as i32, code)?;
+ service.notifyPayloadFinished(code)?;
if code == 0 {
info!("task successfully finished");
diff --git a/pvmfw/pvmfw.img b/pvmfw/pvmfw.img
index 317821f..510b2c4 100644
--- a/pvmfw/pvmfw.img
+++ b/pvmfw/pvmfw.img
Binary files differ
diff --git a/tests/benchmark/Android.bp b/tests/benchmark/Android.bp
new file mode 100644
index 0000000..cf9d16e
--- /dev/null
+++ b/tests/benchmark/Android.bp
@@ -0,0 +1,23 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+cc_binary {
+ name: "fs_benchmark",
+ static_executable: true,
+ static_libs: ["libbase"],
+ srcs: ["fs_benchmark.cpp"],
+}
+
+cc_library_shared {
+ name: "empty_payload",
+ srcs: ["empty_payload.cpp"],
+}
+
+android_app {
+ name: "MicrodroidFilesystemBenchmarkApp",
+ srcs: [],
+ jni_libs: ["empty_payload"],
+ platform_apis: true,
+ use_embedded_native_libs: true,
+}
diff --git a/tests/benchmark/AndroidManifest.xml b/tests/benchmark/AndroidManifest.xml
new file mode 100644
index 0000000..9fd7347
--- /dev/null
+++ b/tests/benchmark/AndroidManifest.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.microdroid.benchmark">
+ <application android:label="Microdroid Filesystem Benchmark" />
+</manifest>
diff --git a/tests/benchmark/assets/benchmark.pem b/tests/benchmark/assets/benchmark.pem
new file mode 100644
index 0000000..2716fde
--- /dev/null
+++ b/tests/benchmark/assets/benchmark.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCsXMc0YBu8ch0E
+ACd9xchbrAHulBYggzsQ9sEj6paC211QfRoYt2kCukAvj4uqutIBACLO44FPUfjf
+Pzh6woSXU8NdLiMR23Quyg8JChChdTvvl83aTCnKdUAO8YnmjWxdebuIeisv0QGQ
+VbZWhZP+VTXWBgbBQ426Klkx/GZXBGT5TpKBf4/Y9x3iOaYKSK0F5uSbl5BSD9rG
+iizWQAgBe2TfyzYQFOdjjiu6/sJ92d/Y5bVWo4efhWg8ZPyhi6oLyfnm/hbn5t3H
+IIYptmdoqFy2xgZf++EWoPwfvaG3YB6mmRwt/bvuGyab43duzSCjR6Sj93p7Y3Zb
+l4vdIG9TAgMBAAECggEADO/dx9Ga41cRVHaKgEczFaZgcr7Mtl4DNe+/aWm3KFU8
+uMjbB5XknN7L6IX2rrbdAlQ3SJ4M255EdsUxAQ3Ok+kmlbzbxwBYxRZHcJ8/xk6d
+VAtp2QO0c0y/pR9+AT8GLjHv4HuC+FDQtDuqtu3JwOI2az0Cjsj6P0nkbfsK12cO
+eKQnCH8dYSvmdPWF2GimBgJVhsfoeY9gQ44aR9sMSSwWMO7M58CkRuur9QvKYB/C
+fLjcA7dqodmLNMbiLAe/RWfg+WfdR9DUgbK3zB2h+2+qFyFCfMFt03I+DkVzg/ej
+ICNmgv4D9eRJaKdIXaCKA8FqHqQN+/a6cmDUi8qQ6QKBgQDbnrkxJAoLJ6gPBFOu
+Eml/XVczll8F4EEyQz0uPBgADkE5CV6Zh4gaBMj3b6iuUq7sQRldeDb3C/g5zcUZ
+U940gmzlJv4FPf0itJ46JNkIrCSuf0/NhDb2qIvrb/j+VTzd350YgMIG34B9tLxu
+W+eHuDTDSMsS0YZHAVZzGmhFRQKBgQDI6gisOKgpp4KZnvqxJCJ/54PMO6Kks7Oa
+4ZVyc8iTn1B6k+njOy98xzk29pI3+o1v822WImwGs0njAUcAPm7jPEer657rva8C
+ZVmSzme/YHfxhOI7QGzGyXdmh+3Da4ywAUwgfPY7b+lv+I0J9pcijpIh1ayApKy2
+I32TIjZvtwKBgQDGzLrenLzqtA8Q6N3GqKwOurOA4xFJBGJ/2RW8kHE5O64Wr0CO
+wXyV8NbqBI0wn2/wNE19qqA2qQMdcAKGlsCBz747ADzZCe/mRpEkGM7NZuYdfukC
+JDiMtq1RhZ5iu03Jme1ejM8V4aMyJzSawV6oIDrCu1X3xupBxBg5QSI58QKBgQCx
+/Ts/r1WyyTZW99NpWPTDUQuew/obZSOpA03NPiukNBAs95rNdqJkLW5PdfMlam8g
+jYw45DfFW9IKLBiFa8n6v21TLgL1H27KdZT8DKU2krTPnwR4r2NuXA7OI3+Mj1vs
+lMmnQm01TLiGPLBd8joEID/vf4c51Ck5lolp7nZBUwKBgQCmS5R2fsH0XYCMvfWR
+hHUre/zxNMj6+FrxiglecdJLPAAIEUmP2No/KRnLezi36TdL9aXGLBhTt9KQeQnv
+eoKIUFkYr6kTP/mXG9LM7yqE+ic37M4MZ2qL7DE8MSASy/aBKruueyLEUSWvjGxd
+aBW8JQ/zbcsdZKwV1as6St5kyQ==
+-----END PRIVATE KEY-----
diff --git a/tests/benchmark/assets/benchmark.pk8 b/tests/benchmark/assets/benchmark.pk8
new file mode 100644
index 0000000..a78fa9b
--- /dev/null
+++ b/tests/benchmark/assets/benchmark.pk8
Binary files differ
diff --git a/tests/benchmark/assets/benchmark.x509.der b/tests/benchmark/assets/benchmark.x509.der
new file mode 100644
index 0000000..d137bc0
--- /dev/null
+++ b/tests/benchmark/assets/benchmark.x509.der
Binary files differ
diff --git a/tests/benchmark/assets/benchmark.x509.pem b/tests/benchmark/assets/benchmark.x509.pem
new file mode 100644
index 0000000..7bc794b
--- /dev/null
+++ b/tests/benchmark/assets/benchmark.x509.pem
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIIECzCCAvOgAwIBAgIUXL3rcLOhlqZ9IDu04sF+FGo3OtIwDQYJKoZIhvcNAQEL
+BQAwgZQxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQH
+DA1Nb3VudGFpbiBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRy
+b2lkMRAwDgYDVQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFu
+ZHJvaWQuY29tMB4XDTIxMTAyNzEzNDE1NloXDTQ5MDMxNDEzNDE1NlowgZQxCzAJ
+BgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFp
+biBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRAwDgYD
+VQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFuZHJvaWQuY29t
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArFzHNGAbvHIdBAAnfcXI
+W6wB7pQWIIM7EPbBI+qWgttdUH0aGLdpArpAL4+LqrrSAQAizuOBT1H43z84esKE
+l1PDXS4jEdt0LsoPCQoQoXU775fN2kwpynVADvGJ5o1sXXm7iHorL9EBkFW2VoWT
+/lU11gYGwUONuipZMfxmVwRk+U6SgX+P2Pcd4jmmCkitBebkm5eQUg/axoos1kAI
+AXtk38s2EBTnY44ruv7Cfdnf2OW1VqOHn4VoPGT8oYuqC8n55v4W5+bdxyCGKbZn
+aKhctsYGX/vhFqD8H72ht2AeppkcLf277hsmm+N3bs0go0eko/d6e2N2W5eL3SBv
+UwIDAQABo1MwUTAdBgNVHQ4EFgQU8eS6/fiyZMqPN1erLU8POJHci4swHwYDVR0j
+BBgwFoAU8eS6/fiyZMqPN1erLU8POJHci4swDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAQEAagHQT+tZ5RE2V9U/3gXdqyQzpIjIAWBdA1HJ0obi+mqr
+n5BtftKHK2li/f6jp64oCxwBOtZZEWj8J4m53EWunG9oktjfiCq1wKASdfrhSN6J
+hz+YSBURsrrDOVzVCcKgzwlEgYYMsAt+NnGGp9UlSaJMpQSghrDNkKmDLB1zfkN1
+sRG71UbqqxSun/3k0HcwWIRy6WTDXoPeyYWuCaksdzqPHMvn0bbgf1Jw6jI5UNXG
+3ZSteqhLseS6jhlYOmfLaINHpBfdZXdzqsEjlg6Qt2pCNaRfVp2+fIfNjsWhrfOJ
+8uoz3I/u5Nd3S2ET/jYqpqsB3g9ngjbilclKYjL1bg==
+-----END CERTIFICATE-----
diff --git a/tests/benchmark/assets/vm_config.json b/tests/benchmark/assets/vm_config.json
new file mode 100644
index 0000000..d431877
--- /dev/null
+++ b/tests/benchmark/assets/vm_config.json
@@ -0,0 +1,11 @@
+{
+ "os": {
+ "name": "microdroid"
+ },
+ "task": {
+ "type": "microdroid_launcher",
+ "command": "empty_payload.so",
+ "args": []
+ }
+}
+
diff --git a/tests/benchmark/benchmark_example.sh b/tests/benchmark/benchmark_example.sh
new file mode 100755
index 0000000..49cf258
--- /dev/null
+++ b/tests/benchmark/benchmark_example.sh
@@ -0,0 +1,61 @@
+# This script runs a 256 MB file benchmark, both on the host and on authfs.
+# Usage: after connecting the device with adb, run:
+# $ packages/modules/Virtualization/tests/benchmark/benchmark_example.sh <target> (e.g. aosp_oriole_pkvm-userdebug)
+
+set -e
+
+# Prerequisite: we need root to flush disk cache.
+adb root
+
+# 1. Build the needed artifacts and install them on the device
+source build/make/rbesetup.sh
+lunch $1
+m fs_benchmark MicrodroidFilesystemBenchmarkApp fsverity
+adb push $OUT/system/bin/fs_benchmark /data/local/tmp
+adb install $OUT/system/app/MicrodroidFilesystemBenchmarkApp/MicrodroidFilesystemBenchmarkApp.apk
+
+# 2. Generate testcases
+# /data/local/tmp/testcase: 256 MB, signed by fsverity.
+# /data/local/tmp/testcase2: empty file, used for authfs write test.
+adb shell 'rm -rf /data/local/tmp/virt /data/local/tmp/testcase*'
+adb shell 'mkdir -p /data/local/tmp/virt'
+dd if=/dev/zero of=/tmp/testcase bs=1048576 count=256
+fsverity sign /tmp/testcase /tmp/testcase.fsv_sig --key=packages/modules/Virtualization/tests/benchmark/assets/benchmark.pem \
+ --out-merkle-tree=/tmp/testcase.merkle_dump --cert=packages/modules/Virtualization/tests/benchmark/assets/benchmark.x509.pem
+adb shell 'dd if=/dev/zero of=/data/local/tmp/testcase bs=1048576 count=256'
+adb push /tmp/testcase.fsv_sig /tmp/testcase.merkle_dump /data/local/tmp
+
+# 3. Run fd_server from host
+adb shell 'exec 3</data/local/tmp/testcase 4</data/local/tmp/testcase.merkle_dump 5</data/local/tmp/testcase.fsv_sig 6</data/local/tmp/testcase 7<>/data/local/tmp/testcase2 /apex/com.android.virt/bin/fd_server --ro-fds 3:4:5 --ro-fds 6 --rw-fds 7' &
+
+# 4. Run VM and get the CID
+result=$(adb shell "/apex/com.android.virt/bin/vm run-app --debug full --daemonize --log /data/local/tmp/virt/log.txt $(adb shell pm path com.android.microdroid.benchmark | cut -d':' -f2) /data/local/tmp/virt/MicrodroidFilesystemBenchmarkApp.apk.idsig /data/local/tmp/virt/instance.img assets/vm_config.json")
+cid=$(echo $result | grep -P "with CID \d+" --only-matching --color=none | cut -d' ' -f3)
+echo "CID IS $cid"
+
+# 5. Run host tests
+echo "Running host read/write test..."
+adb shell 'dd if=/dev/zero of=/data/local/tmp/testcase_host bs=1048576 count=256'
+adb shell '/data/local/tmp/fs_benchmark /data/local/tmp/testcase_host 268435456 both 5'
+
+# 6. Connect to the VM
+# We are cheating here: the VM is expected to finish booting while the host tests are running.
+adb forward tcp:8000 vsock:$cid:5555
+adb connect localhost:8000
+adb -s localhost:8000 root
+sleep 10
+
+# 7. Install artifacts and run authfs
+adb -s localhost:8000 push $OUT/system/bin/fs_benchmark /data/local/tmp
+adb -s localhost:8000 shell "mkdir -p /data/local/tmp/authfs"
+adb -s localhost:8000 shell "/system/bin/authfs /data/local/tmp/authfs --cid 2 --remote-ro-file 3:/mnt/apk/assets/benchmark.x509.der --remote-ro-file-unverified 6 --remote-new-rw-file 7" &
+
+# 8. Run guest tests
+echo "Running guest block device read test..."
+adb -s localhost:8000 shell "/data/local/tmp/fs_benchmark /dev/block/vda $(adb -s localhost:8000 shell blockdev --getsize64 /dev/block/vda) read 5"
+echo "Running guest authfs read test..."
+adb -s localhost:8000 shell "/data/local/tmp/fs_benchmark /data/local/tmp/authfs/3 268435456 read 5"
+echo "Running guest authfs unverified read test..."
+adb -s localhost:8000 shell "/data/local/tmp/fs_benchmark /data/local/tmp/authfs/6 268435456 read 5"
+echo "Running guest authfs write test..."
+adb -s localhost:8000 shell "/data/local/tmp/fs_benchmark /data/local/tmp/authfs/7 268435456 write 5"
diff --git a/tests/benchmark/empty_payload.cpp b/tests/benchmark/empty_payload.cpp
new file mode 100644
index 0000000..afcd653
--- /dev/null
+++ b/tests/benchmark/empty_payload.cpp
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+extern "C" int android_native_main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) {
+ // do nothing
+ return 0;
+}
diff --git a/tests/benchmark/fs_benchmark.cpp b/tests/benchmark/fs_benchmark.cpp
new file mode 100644
index 0000000..220e004
--- /dev/null
+++ b/tests/benchmark/fs_benchmark.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/result.h>
+#include <android-base/unique_fd.h>
+#include <fcntl.h>
+#include <linux/vm_sockets.h>
+#include <strings.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <cerrno>
+#include <cinttypes>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <random>
+#include <string>
+#include <vector>
+
+using android::base::ErrnoError;
+using android::base::Error;
+using android::base::Result;
+using android::base::unique_fd;
+
+namespace {
+
+constexpr int kBlockSize = 4096;
+
+[[noreturn]] void PrintUsage(const char* exe_name) {
+ std::printf("Usage: %s path size (read|write|both) [rounds]\n", exe_name);
+ std::exit(EXIT_FAILURE);
+}
+
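+// Drop the kernel page cache so every round measures actual storage I/O rather than cached data.
+// This is why the benchmark script runs `adb root` first.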
+void DropCache() {
+ system("echo 1 > /proc/sys/vm/drop_caches");
+}
+
+struct BenchmarkResult {
+ struct timespec elapsed;
+ std::uint64_t size;
+};
+
+enum class BenchmarkOption {
+ READ = 0,
+ WRITE = 1,
+ RANDREAD = 2,
+ RANDWRITE = 3,
+};
+
+Result<BenchmarkResult> runTest(const char* path, BenchmarkOption option, std::uint64_t size) {
+ bool is_read = (option == BenchmarkOption::READ || option == BenchmarkOption::RANDREAD);
+ bool is_rand = (option == BenchmarkOption::RANDREAD || option == BenchmarkOption::RANDWRITE);
+
+ unique_fd fd(open(path, is_read ? O_RDONLY : O_WRONLY | O_CREAT, 0644));
+ if (fd.get() == -1) {
+ return ErrnoError() << "opening " << path << " failed";
+ }
+
+ uint64_t block_count = (size + kBlockSize - 1) / kBlockSize;
+ std::vector<uint64_t> offsets;
+ if (is_rand) {
+ std::mt19937 rd{std::random_device{}()};
+ offsets.reserve(block_count);
+ for (uint64_t i = 0; i < block_count; i++) offsets.push_back(i * kBlockSize);
+ std::shuffle(offsets.begin(), offsets.end(), rd);
+ }
+
+ uint64_t total_processed = 0;
+ char buf[kBlockSize] = {};
+
+ struct timespec start;
+ if (clock_gettime(CLOCK_REALTIME, &start) < 0) {
+ return ErrnoError() << "failed to get start time";
+ }
+
+ for (uint64_t i = 0; i < block_count; i++) {
+ if (!offsets.empty()) {
+ if (lseek(fd.get(), offsets[i], SEEK_SET) == -1) {
+ return ErrnoError() << "failed to lseek";
+ }
+ }
+
+ auto ret = is_read ? read(fd.get(), buf, kBlockSize) : write(fd.get(), buf, kBlockSize);
+ if (ret == 0) {
+ return Error() << "unexpected end of file";
+ } else if (ret == -1) {
+ return ErrnoError() << "file io failed";
+ }
+ total_processed += ret;
+ }
+
+ struct timespec stop;
+ if (clock_gettime(CLOCK_REALTIME, &stop) < 0) {
+ return ErrnoError() << "failed to get finish time";
+ }
+
+ struct timespec elapsed;
+ if ((stop.tv_nsec - start.tv_nsec) < 0) {
+ elapsed.tv_sec = stop.tv_sec - start.tv_sec - 1;
+ elapsed.tv_nsec = stop.tv_nsec - start.tv_nsec + 1000000000;
+ } else {
+ elapsed.tv_sec = stop.tv_sec - start.tv_sec;
+ elapsed.tv_nsec = stop.tv_nsec - start.tv_nsec;
+ }
+
+ return BenchmarkResult{elapsed, total_processed};
+}
+
+} // namespace
+
+int main(int argc, char* argv[]) {
+ // without this, stdout isn't immediately flushed when running via "adb shell"
+ std::setvbuf(stdout, nullptr, _IONBF, 0);
+ std::setvbuf(stderr, nullptr, _IONBF, 0);
+
+ if (argc < 4 || argc > 5) {
+ PrintUsage(argv[0]);
+ }
+
+ const char* path = argv[1];
+
+ std::uint64_t size = std::strtoull(argv[2], nullptr, 0);
+ if (size == 0 || size == UINT64_MAX) {
+ std::fprintf(stderr, "invalid size %s\n", argv[2]);
+ PrintUsage(argv[0]);
+ }
+
+ std::vector<std::pair<BenchmarkOption, std::string>> benchmarkList;
+ if (strcmp(argv[3], "read") != 0) {
+ benchmarkList.emplace_back(BenchmarkOption::WRITE, "write");
+ benchmarkList.emplace_back(BenchmarkOption::RANDWRITE, "randwrite");
+ }
+ if (strcmp(argv[3], "write") != 0) {
+ benchmarkList.emplace_back(BenchmarkOption::READ, "read");
+ benchmarkList.emplace_back(BenchmarkOption::RANDREAD, "randread");
+ }
+
+ std::shuffle(benchmarkList.begin(), benchmarkList.end(), std::mt19937{std::random_device{}()});
+
+ int rounds = 1;
+ if (argc == 5) {
+ rounds = std::atoi(argv[4]);
+ if (rounds <= 0) {
+ std::fprintf(stderr, "invalid round %s\n", argv[4]);
+ PrintUsage(argv[0]);
+ }
+ }
+
+ for (auto [option, name] : benchmarkList) {
+ std::printf("%s test:\n", name.c_str());
+
+ for (int i = 0; i < rounds; i++) {
+ DropCache();
+ auto res = runTest(path, option, size);
+ if (!res.ok()) {
+ std::fprintf(stderr, "Error while benchmarking: %s\n",
+ res.error().message().c_str());
+ return EXIT_FAILURE;
+ }
+
+ double elapsed_time = res->elapsed.tv_sec + res->elapsed.tv_nsec / 1e9;
+ std::printf("total %" PRIu64 " bytes, took %.3g seconds ", res->size, elapsed_time);
+
+ double speed = res->size / elapsed_time;
+ const char* unit = "bytes";
+ if (speed >= 1000) {
+ speed /= 1024;
+ unit = "KB";
+ }
+ if (speed >= 1000) {
+ speed /= 1024;
+ unit = "MB";
+ }
+ if (speed >= 1000) {
+ speed /= 1024;
+ unit = "GB";
+ }
+ std::printf("(%.3g %s/s)\n", speed, unit);
+ }
+ std::printf("\n");
+ }
+}
diff --git a/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java b/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
index b3a76ce..8d9a7e3 100644
--- a/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
+++ b/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
@@ -152,6 +152,12 @@
.runTimedCmd(timeout, "adb", "-s", MICRODROID_SERIAL, "shell", join(cmd));
}
+ // Asserts that the given command fails when run on Microdroid.
+ public static void assertFailedOnMicrodroid(String... cmd) {
+ CommandResult result = runOnMicrodroidForResult(cmd);
+ assertThat(result.getStatus(), is(CommandStatus.FAILED));
+ }
+
private static String join(String... strs) {
return String.join(" ", Arrays.asList(strs));
}
diff --git a/tests/testapk/Android.bp b/tests/testapk/Android.bp
index 0b0810f..32c47dd 100644
--- a/tests/testapk/Android.bp
+++ b/tests/testapk/Android.bp
@@ -2,12 +2,15 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-android_test_helper_app {
+android_test {
name: "MicrodroidTestApp",
+ test_suites: ["device-tests"],
srcs: ["src/java/**/*.java"],
- libs: [
- "android.system.virtualmachine",
+ static_libs: [
+ "androidx.test.runner",
+ "androidx.test.ext.junit",
],
+ libs: ["android.system.virtualmachine"],
jni_libs: ["MicrodroidTestNativeLib"],
platform_apis: true,
use_embedded_native_libs: true,
diff --git a/tests/testapk/AndroidManifest.xml b/tests/testapk/AndroidManifest.xml
index 94f49dd..bc955d2 100644
--- a/tests/testapk/AndroidManifest.xml
+++ b/tests/testapk/AndroidManifest.xml
@@ -1,5 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2021 The Android Open Source Project
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -14,13 +15,11 @@
-->
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.android.microdroid.test">
- <application android:label="Microdroid Test">
- <uses-library android:name="android.system.virtualmachine" android:required="true" />
- <activity android:name="TestActivity" android:exported="true">
- <intent-filter>
- <action android:name="android.intent.action.MAIN" />
- <category android:name="android.intent.category.LAUNCHER" />
- </intent-filter>
- </activity>
+ <uses-permission android:name="android.permission.MANAGE_VIRTUAL_MACHINE" />
+ <application>
+ <uses-library android:name="android.system.virtualmachine" android:required="false" />
</application>
+ <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+ android:targetPackage="com.android.microdroid.test"
+ android:label="Microdroid Test" />
</manifest>
diff --git a/tests/testapk/AndroidTest.xml b/tests/testapk/AndroidTest.xml
new file mode 100644
index 0000000..c7097db
--- /dev/null
+++ b/tests/testapk/AndroidTest.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs sample instrumentation test.">
+ <target_preparer class="com.android.tradefed.targetprep.TestAppInstallSetup">
+ <option name="test-file-name" value="MicrodroidTestApp.apk" />
+ </target_preparer>
+ <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
+ <option
+ name="run-command"
+ value="pm grant com.android.microdroid.test android.permission.MANAGE_VIRTUAL_MACHINE" />
+ </target_preparer>
+ <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
+ <option name="package" value="com.android.microdroid.test" />
+ <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
+ <option name="shell-timeout" value="300000" />
+ <option name="test-timeout" value="300000" />
+ </test>
+</configuration>
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
new file mode 100644
index 0000000..e0d6cc1
--- /dev/null
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.microdroid.test;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeNoException;
+
+import android.content.Context;
+import android.os.ParcelFileDescriptor;
+import android.system.virtualmachine.VirtualMachine;
+import android.system.virtualmachine.VirtualMachineCallback;
+import android.system.virtualmachine.VirtualMachineConfig;
+import android.system.virtualmachine.VirtualMachineException;
+import android.system.virtualmachine.VirtualMachineManager;
+
+import androidx.test.core.app.ApplicationProvider;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+@RunWith(JUnit4.class)
+public class MicrodroidTests {
+ @Rule public Timeout globalTimeout = Timeout.seconds(300);
+
+ private static class Inner {
+ public Context mContext;
+ public VirtualMachineManager mVmm;
+ public VirtualMachine mVm;
+ }
+
+ private boolean mPkvmSupported = false;
+ private Inner mInner;
+
+ @Before
+ public void setup() {
+ // If the virt APEX doesn't exist on the device, classes in the
+ // android.system.virtualmachine package can't be loaded. Therefore, before using those
+ // classes, check for the existence of a class in the package and skip this test if it isn't found.
+ try {
+ Class.forName("android.system.virtualmachine.VirtualMachineManager");
+ mPkvmSupported = true;
+ } catch (ClassNotFoundException e) {
+ assumeNoException(e);
+ return;
+ }
+ mInner = new Inner();
+ mInner.mContext = ApplicationProvider.getApplicationContext();
+ mInner.mVmm = VirtualMachineManager.getInstance(mInner.mContext);
+ }
+
+ @After
+ public void cleanup() throws VirtualMachineException {
+ if (!mPkvmSupported) {
+ return;
+ }
+ if (mInner.mVm == null) {
+ return;
+ }
+ mInner.mVm.stop();
+ mInner.mVm.delete();
+ }
+
+ private abstract static class VmEventListener implements VirtualMachineCallback {
+ private ExecutorService mExecutorService = Executors.newSingleThreadExecutor();
+
+ void runToFinish(VirtualMachine vm) throws VirtualMachineException, InterruptedException {
+ vm.setCallback(mExecutorService, this);
+ vm.run();
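+ // The wait below returns once forceStop() shuts down the executor (e.g. from onPayloadReady).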
+ mExecutorService.awaitTermination(300, TimeUnit.SECONDS);
+ }
+
+ void forceStop(VirtualMachine vm) {
+ try {
+ vm.stop();
+ this.onDied(vm);
+ mExecutorService.shutdown();
+ } catch (VirtualMachineException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void onPayloadStarted(VirtualMachine vm, ParcelFileDescriptor stream) {}
+
+ @Override
+ public void onPayloadReady(VirtualMachine vm) {}
+
+ @Override
+ public void onPayloadFinished(VirtualMachine vm, int exitCode) {}
+
+ @Override
+ public void onDied(VirtualMachine vm) {}
+ }
+
+ @Test
+ public void startAndStop() throws VirtualMachineException, InterruptedException {
+ VirtualMachineConfig.Builder builder =
+ new VirtualMachineConfig.Builder(mInner.mContext, "assets/vm_config.json");
+ VirtualMachineConfig config = builder.build();
+
+ mInner.mVm = mInner.mVmm.getOrCreate("test_vm", config);
+ VmEventListener listener =
+ new VmEventListener() {
+ private boolean mPayloadReadyCalled = false;
+ private boolean mPayloadStartedCalled = false;
+
+ @Override
+ public void onPayloadStarted(VirtualMachine vm, ParcelFileDescriptor stream) {
+ mPayloadStartedCalled = true;
+ }
+
+ @Override
+ public void onPayloadReady(VirtualMachine vm) {
+ mPayloadReadyCalled = true;
+ forceStop(vm);
+ }
+
+ @Override
+ public void onDied(VirtualMachine vm) {
+ assertTrue(mPayloadReadyCalled);
+ assertTrue(mPayloadStartedCalled);
+ }
+ };
+ listener.runToFinish(mInner.mVm);
+ }
+}
diff --git a/tests/testapk/src/java/com/android/microdroid/test/TestActivity.java b/tests/testapk/src/java/com/android/microdroid/test/TestActivity.java
deleted file mode 100644
index ad34ca4..0000000
--- a/tests/testapk/src/java/com/android/microdroid/test/TestActivity.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.android.microdroid.test;
-
-import android.app.Activity;
-import android.os.Bundle;
-import android.system.virtualmachine.VirtualMachine;
-import android.system.virtualmachine.VirtualMachineConfig;
-import android.system.virtualmachine.VirtualMachineException;
-import android.system.virtualmachine.VirtualMachineManager;
-
-public class TestActivity extends Activity {
-
- @Override
- public void onCreate(Bundle savedInstanceState) {
- super.onCreate(savedInstanceState);
-
- VirtualMachine vm1 = createAndRunVirtualMachine("vm1");
- VirtualMachine vm2 = createAndRunVirtualMachine("vm2");
- }
-
- private VirtualMachine createAndRunVirtualMachine(String name) {
- VirtualMachine vm;
- try {
- VirtualMachineConfig config =
- new VirtualMachineConfig.Builder(this, "assets/vm_config.json")
- .build();
-
- VirtualMachineManager vmm = VirtualMachineManager.getInstance(this);
- vm = vmm.create(name, config);
- vm.run();
- } catch (VirtualMachineException e) {
- throw new RuntimeException(e);
- }
- return vm;
- }
-}
diff --git a/tests/testapk/src/native/testbinary.cpp b/tests/testapk/src/native/testbinary.cpp
index 2903a08..f56b261 100644
--- a/tests/testapk/src/native/testbinary.cpp
+++ b/tests/testapk/src/native/testbinary.cpp
@@ -198,21 +198,6 @@
return result;
}
-Result<unsigned> get_local_cid() {
- // TODO: remove this after VS can check the peer addresses of binder clients
- unique_fd fd(open("/dev/vsock", O_RDONLY));
- if (fd.get() == -1) {
- return ErrnoError() << "failed to open /dev/vsock";
- }
-
- unsigned cid;
- if (ioctl(fd.get(), IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) == -1) {
- return ErrnoError() << "failed to IOCTL_VM_SOCKETS_GET_LOCAL_CID";
- }
-
- return cid;
-}
-
Result<void> start_test_service() {
class TestService : public aidl::com::android::microdroid::testservice::BnTestService {
ndk::ScopedAStatus addInteger(int32_t a, int32_t b, int32_t* out) override {
@@ -232,9 +217,7 @@
std::cerr << "failed to connect VirtualMachineService";
return;
}
- if (auto res = get_local_cid(); !res.ok()) {
- std::cerr << "failed to get local cid: " << res.error();
- } else if (!virtualMachineService->notifyPayloadReady(res.value()).isOk()) {
+ if (!virtualMachineService->notifyPayloadReady().isOk()) {
std::cerr << "failed to notify payload ready to virtualizationservice";
}
};
diff --git a/tests/vsock_test.cc b/tests/vsock_test.cc
index 480d05a..0b863a9 100644
--- a/tests/vsock_test.cc
+++ b/tests/vsock_test.cc
@@ -85,7 +85,7 @@
VirtualMachineConfig config(std::move(raw_config));
sp<IVirtualMachine> vm;
- status = virtualization_service->createVm(config, std::nullopt, &vm);
+ status = virtualization_service->createVm(config, std::nullopt, std::nullopt, &vm);
ASSERT_TRUE(status.isOk()) << "Error creating VM: " << status;
int32_t cid;
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
index 8be7331..e417ec4 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
@@ -23,10 +23,13 @@
interface IVirtualizationService {
/**
* Create the VM with the given config file, and return a handle to it ready to start it. If
- * `logFd` is provided then console logs from the VM will be sent to it.
+ * `consoleFd` is provided then console output from the VM will be sent to it. If `osLogFd` is
+ * provided then the OS-level logs will be sent to it. `osLogFd` is supported only when the OS
+ * running in the VM has a logging system. In the case of Microdroid, the logging system is logd.
*/
- IVirtualMachine createVm(
- in VirtualMachineConfig config, in @nullable ParcelFileDescriptor logFd);
+ IVirtualMachine createVm(in VirtualMachineConfig config,
+ in @nullable ParcelFileDescriptor consoleFd,
+ in @nullable ParcelFileDescriptor osLogFd);
/**
* Initialise an empty partition image of the given size to be used as a writable partition.
diff --git a/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl b/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
index fba83c8..8611898 100644
--- a/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
+++ b/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
@@ -31,19 +31,16 @@
/**
* Notifies that the payload has started.
- * TODO(b/191845268): remove cid parameter
*/
- void notifyPayloadStarted(int cid);
+ void notifyPayloadStarted();
/**
* Notifies that the payload is ready to serve.
- * TODO(b/191845268): remove cid parameter
*/
- void notifyPayloadReady(int cid);
+ void notifyPayloadReady();
/**
* Notifies that the payload has finished.
- * TODO(b/191845268): remove cid parameter
*/
- void notifyPayloadFinished(int cid, int exitCode);
+ void notifyPayloadFinished(int exitCode);
}
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index fa6ee40..5d64684 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -55,8 +55,10 @@
use std::fs::{create_dir, File, OpenOptions};
use std::io::{Error, ErrorKind, Write};
use std::num::NonZeroU32;
+use std::os::raw;
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::path::{Path, PathBuf};
+use std::ptr::null_mut;
use std::sync::{Arc, Mutex, Weak};
use vmconfig::VmConfig;
use vsock::{SockAddr, VsockListener, VsockStream};
@@ -120,10 +122,12 @@
fn createVm(
&self,
config: &VirtualMachineConfig,
+ console_fd: Option<&ParcelFileDescriptor>,
log_fd: Option<&ParcelFileDescriptor>,
) -> binder::Result<Strong<dyn IVirtualMachine>> {
check_manage_access()?;
let state = &mut *self.state.lock().unwrap();
+ let mut console_fd = console_fd.map(clone_file).transpose()?;
let mut log_fd = log_fd.map(clone_file).transpose()?;
let requester_uid = ThreadState::get_calling_uid();
let requester_sid = get_calling_sid()?;
@@ -158,6 +162,9 @@
// doesn't understand the bootconfig parameters.
if let VirtualMachineConfig::AppConfig(config) = config {
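+ // The console is only exposed for fully debuggable VMs, while the OS log is dropped only when
+ // debugging is disabled entirely.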
if config.debugLevel != DebugLevel::FULL {
+ console_fd = None;
+ }
+ if config.debugLevel == DebugLevel::NONE {
log_fd = None;
}
}
@@ -210,6 +217,7 @@
params: config.params.to_owned(),
protected: config.protectedVm,
memory_mib: config.memoryMib.try_into().ok().and_then(NonZeroU32::new),
+ console_fd,
log_fd,
indirect_files,
};
@@ -248,7 +256,13 @@
)
})?;
let image = clone_file(image_fd)?;
-
+ // Initialize the file. Any existing data in the file will be erased.
+ image.set_len(0).map_err(|e| {
+ new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("Failed to reset a file: {}", e),
+ )
+ })?;
let mut part = QcowFile::new(image, size).map_err(|e| {
new_binder_exception(
ExceptionCode::SERVICE_SPECIFIC,
@@ -345,15 +359,18 @@
});
// binder server for vm
- let state = service.state.clone(); // reference to state (not the state itself) is copied
+ let mut state = service.state.clone(); // reference to state (not the state itself) is copied
std::thread::spawn(move || {
- let mut service = VirtualMachineService::new_binder(state).as_binder();
+ let state_ptr = &mut state as *mut _ as *mut raw::c_void;
+
debug!("virtual machine service is starting as an RPC service.");
- // SAFETY: Service ownership is transferring to the server and won't be valid afterward.
- // Plus the binder objects are threadsafe.
+ // SAFETY: factory function is only ever called by RunRpcServerWithFactory, within the
+ // lifetime of the state, with context taking the pointer value above (so a properly
+ // aligned non-null pointer to an initialized instance).
let retval = unsafe {
- binder_rpc_unstable_bindgen::RunRpcServer(
- service.as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder,
+ binder_rpc_unstable_bindgen::RunRpcServerWithFactory(
+ Some(VirtualMachineService::factory),
+ state_ptr,
VM_BINDER_SERVICE_PORT as u32,
)
};
@@ -846,13 +863,14 @@
#[derive(Debug, Default)]
struct VirtualMachineService {
state: Arc<Mutex<State>>,
+ cid: Cid,
}
impl Interface for VirtualMachineService {}
impl IVirtualMachineService for VirtualMachineService {
- fn notifyPayloadStarted(&self, cid: i32) -> binder::Result<()> {
- let cid = cid as Cid;
+ fn notifyPayloadStarted(&self) -> binder::Result<()> {
+ let cid = self.cid;
if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
info!("VM having CID {} started payload", cid);
vm.update_payload_state(PayloadState::Started)
@@ -869,8 +887,8 @@
}
}
- fn notifyPayloadReady(&self, cid: i32) -> binder::Result<()> {
- let cid = cid as Cid;
+ fn notifyPayloadReady(&self) -> binder::Result<()> {
+ let cid = self.cid;
if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
info!("VM having CID {} payload is ready", cid);
vm.update_payload_state(PayloadState::Ready)
@@ -886,8 +904,8 @@
}
}
- fn notifyPayloadFinished(&self, cid: i32, exit_code: i32) -> binder::Result<()> {
- let cid = cid as Cid;
+ fn notifyPayloadFinished(&self, exit_code: i32) -> binder::Result<()> {
+ let cid = self.cid;
if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
info!("VM having CID {} finished payload", cid);
vm.update_payload_state(PayloadState::Finished)
@@ -905,9 +923,26 @@
}
impl VirtualMachineService {
- fn new_binder(state: Arc<Mutex<State>>) -> Strong<dyn IVirtualMachineService> {
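+ // Factory called for each guest connection: it looks up the VM for the connecting CID and
+ // returns (creating it if necessary) that VM's binder object, which is why the notify*
+ // callbacks no longer take an explicit cid argument.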
+ // SAFETY: Service ownership is held by state, and the binder objects are threadsafe.
+ pub unsafe extern "C" fn factory(
+ cid: Cid,
+ context: *mut raw::c_void,
+ ) -> *mut binder_rpc_unstable_bindgen::AIBinder {
+ let state_ptr = context as *mut Arc<Mutex<State>>;
+ let state = state_ptr.as_ref().unwrap();
+ if let Some(vm) = state.lock().unwrap().get_vm(cid) {
+ let mut vm_service = vm.vm_service.lock().unwrap();
+ let service = vm_service.get_or_insert_with(|| Self::new_binder(state.clone(), cid));
+ service.as_binder().as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder
+ } else {
+ error!("connection from cid={} is not from a guest VM", cid);
+ null_mut()
+ }
+ }
+
+ fn new_binder(state: Arc<Mutex<State>>, cid: Cid) -> Strong<dyn IVirtualMachineService> {
BnVirtualMachineService::new_binder(
- VirtualMachineService { state },
+ VirtualMachineService { state, cid },
BinderFeatures::default(),
)
}
diff --git a/virtualizationservice/src/composite.rs b/virtualizationservice/src/composite.rs
index 40c7e5e..cb814f3 100644
--- a/virtualizationservice/src/composite.rs
+++ b/virtualizationservice/src/composite.rs
@@ -16,7 +16,9 @@
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::Partition::Partition;
use anyhow::{anyhow, Context, Error};
-use disk::{create_composite_disk, create_disk_file, ImagePartitionType, PartitionInfo};
+use disk::{
+ create_composite_disk, create_disk_file, ImagePartitionType, PartitionInfo, MAX_NESTING_DEPTH,
+};
use std::fs::{File, OpenOptions};
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
@@ -119,7 +121,7 @@
/// This will work for raw, QCOW2, composite and Android sparse images.
fn get_partition_size(partition: &File) -> Result<u64, Error> {
// TODO: Use `context` once disk::Error implements std::error::Error.
- Ok(create_disk_file(partition.try_clone()?)
+ Ok(create_disk_file(partition.try_clone()?, MAX_NESTING_DEPTH)
.map_err(|e| anyhow!("Failed to open partition image: {}", e))?
.get_len()?)
}
diff --git a/virtualizationservice/src/crosvm.rs b/virtualizationservice/src/crosvm.rs
index 38e5bf3..bf1ff0c 100644
--- a/virtualizationservice/src/crosvm.rs
+++ b/virtualizationservice/src/crosvm.rs
@@ -29,6 +29,8 @@
use std::sync::{Arc, Mutex};
use std::thread;
use vsock::VsockStream;
+use android_system_virtualmachineservice::binder::Strong;
+use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::IVirtualMachineService::IVirtualMachineService;
const CROSVM_PATH: &str = "/apex/com.android.virt/bin/crosvm";
@@ -43,6 +45,7 @@
pub params: Option<String>,
pub protected: bool,
pub memory_mib: Option<NonZeroU32>,
+ pub console_fd: Option<File>,
pub log_fd: Option<File>,
pub indirect_files: Vec<File>,
}
@@ -132,6 +135,8 @@
pub callbacks: VirtualMachineCallbacks,
/// Input/output stream of the payload run in the VM.
pub stream: Mutex<Option<VsockStream>>,
+ /// VirtualMachineService binder object for the VM.
+ pub vm_service: Mutex<Option<Strong<dyn IVirtualMachineService>>>,
/// The latest lifecycle state which the payload reported itself to be in.
payload_state: Mutex<PayloadState>,
}
@@ -158,6 +163,7 @@
requester_debug_pid,
callbacks: Default::default(),
stream: Mutex::new(None),
+ vm_service: Mutex::new(None),
payload_state: Mutex::new(PayloadState::Starting),
})
}
@@ -175,8 +181,8 @@
/// `self.vm_state` to avoid holding the lock on `vm_state` while it is running.
fn monitor(&self, child: Arc<SharedChild>) {
match child.wait() {
- Err(e) => error!("Error waiting for crosvm instance to die: {}", e),
- Ok(status) => info!("crosvm exited with status {}", status),
+ Err(e) => error!("Error waiting for crosvm({}) instance to die: {}", child.id(), e),
+ Ok(status) => info!("crosvm({}) exited with status {}", child.id(), status),
}
let mut vm_state = self.vm_state.lock().unwrap();
@@ -214,9 +220,11 @@
pub fn kill(&self) {
let vm_state = &*self.vm_state.lock().unwrap();
if let VmState::Running { child } = vm_state {
+ let id = child.id();
+ debug!("Killing crosvm({})", id);
// TODO: Talk to crosvm to shutdown cleanly.
if let Err(e) = child.kill() {
- error!("Error killing crosvm instance: {}", e);
+ error!("Error killing crosvm({}) instance: {}", id, e);
}
}
}
@@ -238,16 +246,39 @@
command.arg("--mem").arg(memory_mib.to_string());
}
- if let Some(log_fd) = config.log_fd {
- command.stdout(log_fd);
- } else {
- // Ignore console output.
- command.arg("--serial=type=sink");
- }
-
// Keep track of what file descriptors should be mapped to the crosvm process.
let mut preserved_fds = config.indirect_files.iter().map(|file| file.as_raw_fd()).collect();
+ // Set up the serial devices.
+ // 1. uart device: used as the output device by bootloaders and as early console by linux
+ // 2. virtio-console device: used as the console device where kmsg is redirected to
+ // 3. virtio-console device: used as the androidboot.console device (not used currently)
+ // 4. virtio-console device: used as the logcat output
+ //
+ // When [console|log]_fd is not specified, the devices are attached to a sink, which means that
+ // whatever is written there is discarded.
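+ // (For example, a provided fd is rendered as something like "type=file,path=<path of the
+ // preserved fd>", while an absent fd is rendered as "type=sink".)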
+ let mut format_serial_arg = |fd: &Option<File>| {
+ let path = fd.as_ref().map(|fd| add_preserved_fd(&mut preserved_fds, fd));
+ let type_arg = path.as_ref().map_or("type=sink", |_| "type=file");
+ let path_arg = path.as_ref().map_or(String::new(), |path| format!(",path={}", path));
+ format!("{}{}", type_arg, path_arg)
+ };
+ let console_arg = format_serial_arg(&config.console_fd);
+ let log_arg = format_serial_arg(&config.log_fd);
+
+ // Warning: Adding more serial devices requires you to shift the PCI device ID of the boot
+ // disks in bootconfig.x86_64. This is because x86 crosvm puts serial devices and the block
+ // devices on the same PCI bus, and serial devices come before the block devices. Arm crosvm
+ // doesn't have this issue.
+ // /dev/ttyS0
+ command.arg(format!("--serial={},hardware=serial", &console_arg));
+ // /dev/hvc0
+ command.arg(format!("--serial={},hardware=virtio-console,num=1", &console_arg));
+ // /dev/hvc1 (not used currently)
+ command.arg("--serial=type=sink,hardware=virtio-console,num=2");
+ // /dev/hvc2
+ command.arg(format!("--serial={},hardware=virtio-console,num=3", &log_arg));
+
if let Some(bootloader) = &config.bootloader {
command.arg("--bios").arg(add_preserved_fd(&mut preserved_fds, bootloader));
}
@@ -275,6 +306,7 @@
info!("Running {:?}", command);
let result = SharedChild::spawn(&mut command)?;
+ debug!("Spawned crosvm({}).", result.id());
Ok(result)
}
diff --git a/virtualizationservice/src/payload.rs b/virtualizationservice/src/payload.rs
index 4c71c37..55eb19b 100644
--- a/virtualizationservice/src/payload.rs
+++ b/virtualizationservice/src/payload.rs
@@ -21,7 +21,7 @@
};
use android_system_virtualizationservice::binder::ParcelFileDescriptor;
use anyhow::{anyhow, Context, Result};
-use binder::{wait_for_interface, Strong};
+use binder::wait_for_interface;
use log::{error, info};
use microdroid_metadata::{ApexPayload, ApkPayload, Metadata};
use microdroid_payload_config::{ApexConfig, VmPayloadConfig};
@@ -36,7 +36,8 @@
/// The list of APEXes which microdroid requires.
// TODO(b/192200378) move this to microdroid.json?
-const MICRODROID_REQUIRED_APEXES: [&str; 2] = ["com.android.adbd", "com.android.os.statsd"];
+const MICRODROID_REQUIRED_APEXES: [&str; 1] = ["com.android.os.statsd"];
+const MICRODROID_REQUIRED_APEXES_DEBUG: [&str; 1] = ["com.android.adbd"];
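+// The debug-only APEXes above (currently just adbd) are added in collect_apex_names() only when
+// the VM's debug level is not NONE.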
const APEX_INFO_LIST_PATH: &str = "/apex/apex-info-list.xml";
@@ -108,30 +109,35 @@
}
struct PackageManager {
- service: Strong<dyn IPackageManagerNative>,
// TODO(b/199146189) use IPackageManagerNative
apex_info_list: &'static ApexInfoList,
}
impl PackageManager {
fn new() -> Result<Self> {
- let service = wait_for_interface(PACKAGE_MANAGER_NATIVE_SERVICE)
- .context("Failed to find PackageManager")?;
let apex_info_list = ApexInfoList::load()?;
- Ok(Self { service, apex_info_list })
+ Ok(Self { apex_info_list })
}
fn get_apex_list(&self, prefer_staged: bool) -> Result<ApexInfoList> {
+ // get the list of active apexes
let mut list = self.apex_info_list.clone();
+ // When prefer_staged is set, we override ApexInfo by consulting "package_native"
if prefer_staged {
- // When prefer_staged, we override ApexInfo by consulting "package_native"
- let staged = self.service.getStagedApexModuleNames()?;
+ let pm =
+ wait_for_interface::<dyn IPackageManagerNative>(PACKAGE_MANAGER_NATIVE_SERVICE)
+ .context("Failed to get service when prefer_staged is set.")?;
+ let staged = pm.getStagedApexModuleNames()?;
for apex_info in list.list.iter_mut() {
if staged.contains(&apex_info.name) {
- let staged_apex_info = self.service.getStagedApexInfo(&apex_info.name)?;
+ let staged_apex_info = pm.getStagedApexInfo(&apex_info.name)?;
if let Some(staged_apex_info) = staged_apex_info {
apex_info.path = PathBuf::from(staged_apex_info.diskImagePath);
- // TODO(b/201788989) copy bootclasspath/systemserverclasspath
+ apex_info.boot_classpath = staged_apex_info.hasBootClassPathJars;
+ apex_info.systemserver_classpath =
+ staged_apex_info.hasSystemServerClassPathJars;
+ apex_info.dex2oatboot_classpath =
+ staged_apex_info.hasDex2OatBootClassPathJars;
}
}
}
@@ -194,12 +200,13 @@
config_path: &str,
vm_payload_config: &VmPayloadConfig,
temporary_directory: &Path,
+ debug_level: DebugLevel,
) -> Result<DiskImage> {
let pm = PackageManager::new()?;
let apex_list = pm.get_apex_list(vm_payload_config.prefer_staged)?;
// collect APEX names from config
- let apexes = collect_apex_names(&apex_list, &vm_payload_config.apexes);
+ let apexes = collect_apex_names(&apex_list, &vm_payload_config.apexes, debug_level);
info!("Microdroid payload APEXes: {:?}", apexes);
let metadata_file = make_metadata_file(config_path, &apexes, temporary_directory)?;
@@ -252,7 +259,11 @@
}
// Collect APEX names from config
-fn collect_apex_names(apex_list: &ApexInfoList, apexes: &[ApexConfig]) -> Vec<String> {
+fn collect_apex_names(
+ apex_list: &ApexInfoList,
+ apexes: &[ApexConfig],
+ debug_level: DebugLevel,
+) -> Vec<String> {
// Process pseudo names like "{BOOTCLASSPATH}".
// For now we have following pseudo APEX names:
// - {BOOTCLASSPATH}: represents APEXes contributing "BOOTCLASSPATH" environment variable
@@ -269,6 +280,9 @@
.collect();
// Add required APEXes
apex_names.extend(MICRODROID_REQUIRED_APEXES.iter().map(|name| name.to_string()));
+ if debug_level != DebugLevel::NONE {
+ apex_names.extend(MICRODROID_REQUIRED_APEXES_DEBUG.iter().map(|name| name.to_string()));
+ }
apex_names.sort();
apex_names.dedup();
apex_names
@@ -289,8 +303,17 @@
&config.configPath,
vm_payload_config,
temporary_directory,
+ config.debugLevel,
)?);
+ vm_config.disks[1].partitions.push(Partition {
+ label: "vbmeta".to_owned(),
+ image: Some(open_parcel_file(
+ Path::new("/apex/com.android.virt/etc/fs/microdroid_vbmeta_bootconfig.img"),
+ false,
+ )?),
+ writable: false,
+ });
let bootconfig_image = "/apex/com.android.virt/etc/microdroid_bootconfig.".to_owned()
+ match config.debugLevel {
DebugLevel::NONE => "normal",
diff --git a/vm/src/main.rs b/vm/src/main.rs
index 7e2a925..87bcda7 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -57,12 +57,16 @@
#[structopt(short, long)]
daemonize: bool,
+ /// Path to file for VM console output.
+ #[structopt(long)]
+ console: Option<PathBuf>,
+
/// Path to file for VM log output.
- #[structopt(short, long)]
+ #[structopt(long)]
log: Option<PathBuf>,
/// Debug level of the VM. Supported values: "none" (default), "app_only", and "full".
- #[structopt(short, long, default_value = "none", parse(try_from_str=parse_debug_level))]
+ #[structopt(long, default_value = "none", parse(try_from_str=parse_debug_level))]
debug: DebugLevel,
/// Memory size (in MiB) of the VM. If unspecified, defaults to the value of `memory_mib`
@@ -80,9 +84,9 @@
#[structopt(short, long)]
daemonize: bool,
- /// Path to file for VM log output.
- #[structopt(short, long)]
- log: Option<PathBuf>,
+ /// Path to file for VM console output.
+ #[structopt(long)]
+ console: Option<PathBuf>,
},
/// Stop a virtual machine running in the background
Stop {
@@ -134,7 +138,7 @@
.context("Failed to find VirtualizationService")?;
match opt {
- Opt::RunApp { apk, idsig, instance, config_path, daemonize, log, debug, mem } => {
+ Opt::RunApp { apk, idsig, instance, config_path, daemonize, console, log, debug, mem } => {
command_run_app(
service,
&apk,
@@ -142,13 +146,14 @@
&instance,
&config_path,
daemonize,
+ console.as_deref(),
log.as_deref(),
debug,
mem,
)
}
- Opt::Run { config, daemonize, log } => {
- command_run(service, &config, daemonize, log.as_deref(), /* mem */ None)
+ Opt::Run { config, daemonize, console } => {
+ command_run(service, &config, daemonize, console.as_deref(), /* mem */ None)
}
Opt::Stop { cid } => command_stop(service, cid),
Opt::List => command_list(service),
diff --git a/vm/src/run.rs b/vm/src/run.rs
index 2d771fc..15775cb 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -44,6 +44,7 @@
instance: &Path,
config_path: &str,
daemonize: bool,
+ console_path: Option<&Path>,
log_path: Option<&Path>,
debug_level: DebugLevel,
mem: Option<u32>,
@@ -76,7 +77,14 @@
debugLevel: debug_level,
memoryMib: mem.unwrap_or(0) as i32, // 0 means use the VM default
});
- run(service, &config, &format!("{:?}!{:?}", apk, config_path), daemonize, log_path)
+ run(
+ service,
+ &config,
+ &format!("{:?}!{:?}", apk, config_path),
+ daemonize,
+ console_path,
+ log_path,
+ )
}
/// Run a VM from the given configuration file.
@@ -84,7 +92,7 @@
service: Strong<dyn IVirtualizationService>,
config_path: &Path,
daemonize: bool,
- log_path: Option<&Path>,
+ console_path: Option<&Path>,
mem: Option<u32>,
) -> Result<(), Error> {
let config_file = File::open(config_path).context("Failed to open config file")?;
@@ -98,7 +106,8 @@
&VirtualMachineConfig::RawConfig(config),
&format!("{:?}", config_path),
daemonize,
- log_path,
+ console_path,
+ None,
)
}
@@ -119,9 +128,20 @@
config: &VirtualMachineConfig,
config_path: &str,
daemonize: bool,
+ console_path: Option<&Path>,
log_path: Option<&Path>,
) -> Result<(), Error> {
- let stdout = if let Some(log_path) = log_path {
+ let console = if let Some(console_path) = console_path {
+ Some(ParcelFileDescriptor::new(
+ File::create(console_path)
+ .with_context(|| format!("Failed to open console file {:?}", console_path))?,
+ ))
+ } else if daemonize {
+ None
+ } else {
+ Some(ParcelFileDescriptor::new(duplicate_stdout()?))
+ };
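+ // That is: console output goes to the given file, is discarded (sink) when daemonized, and
+ // otherwise is attached to this process's stdout.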
+ let log = if let Some(log_path) = log_path {
Some(ParcelFileDescriptor::new(
File::create(log_path)
.with_context(|| format!("Failed to open log file {:?}", log_path))?,
@@ -131,7 +151,9 @@
} else {
Some(ParcelFileDescriptor::new(duplicate_stdout()?))
};
- let vm = service.createVm(config, stdout.as_ref()).context("Failed to create VM")?;
+
+ let vm =
+ service.createVm(config, console.as_ref(), log.as_ref()).context("Failed to create VM")?;
let cid = vm.getCid().context("Failed to get CID")?;
println!(