Merge "Remove hwbinder from microdroid"
diff --git a/virtualizationservice/.gitignore b/.gitignore
similarity index 100%
rename from virtualizationservice/.gitignore
rename to .gitignore
diff --git a/.prebuilt_info/prebuilt_info_pvmfw_pvmfw_img.asciipb b/.prebuilt_info/prebuilt_info_pvmfw_pvmfw_img.asciipb
new file mode 100644
index 0000000..7973ed6
--- /dev/null
+++ b/.prebuilt_info/prebuilt_info_pvmfw_pvmfw_img.asciipb
@@ -0,0 +1,12 @@
+drops {
+ android_build_drop {
+ build_id: "8231605"
+ target: "u-boot_pvmfw"
+ source_file: "pvmfw.img"
+ }
+ dest_file: "pvmfw/pvmfw.img"
+ version: ""
+ version_group: ""
+ git_project: "platform/packages/modules/Virtualization"
+ git_branch: "master"
+}
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 00f34b9..a6b1f95 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -4,7 +4,6 @@
bpfmt = true
clang_format = true
jsonlint = true
-google_java_format = true
pylint3 = true
rustfmt = true
xmllint = true
diff --git a/TEST_MAPPING b/TEST_MAPPING
index d8b294b..80d0807 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -5,15 +5,23 @@
},
{
"name": "ComposHostTestCases"
+ },
+ {
+ "name": "VirtualizationTestCases.64"
+ },
+ {
+ "name": "MicrodroidTestApp"
+ },
+ {
+ "name": "art_standalone_dexpreopt_tests"
+ },
+ {
+ "name": "compos_key_tests"
}
-
],
"postsubmit": [
- // TODO(jiyong): promote this to presubmit. That currently doesn't work because
- // this test is skipped for cf_x86_64_phone (not aosp_cf_x86_64_phone), but tradefed
- // somehow thinks that the test wasn't executed at all and reports it as a failure.
{
- "name": "VirtualizationTestCases"
+ "name": "odsign_e2e_tests"
}
],
"imports": [
@@ -21,6 +29,12 @@
"path": "packages/modules/Virtualization/apkdmverity"
},
{
+ "path": "packages/modules/Virtualization/virtualizationservice"
+ },
+ {
+ "path": "packages/modules/Virtualization/libs/apkverify"
+ },
+ {
"path": "packages/modules/Virtualization/authfs"
},
{
diff --git a/apex/Android.bp b/apex/Android.bp
index ccf34fd..0f30c67 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -2,47 +2,46 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
+microdroid_filesystem_images = [
+ "microdroid_super",
+ "microdroid_boot-5.10",
+ "microdroid_init_boot",
+ "microdroid_vendor_boot-5.10",
+ "microdroid_vbmeta",
+ "microdroid_vbmeta_bootconfig",
+]
+
apex {
name: "com.android.virt",
// TODO(jiyong): make it updatable
updatable: false,
+ future_updatable: true,
platform_apis: true,
+ system_ext_specific: true,
+
manifest: "manifest.json",
key: "com.android.virt.key",
certificate: ":com.android.virt.certificate",
+ custom_sign_tool: "sign_virt_apex",
// crosvm and virtualizationservice are only enabled for 64-bit targets on device
arch: {
arm64: {
binaries: [
- "authfs", // TODO(victorhsieh): move to microdroid once we can run the test in VM.
"crosvm",
"virtualizationservice",
],
- filesystems: [
- "microdroid_super",
- "microdroid_boot-5.10",
- "microdroid_vendor_boot-5.10",
- "microdroid_vbmeta",
- "microdroid_vbmeta_system",
- ],
+ filesystems: microdroid_filesystem_images,
},
x86_64: {
binaries: [
- "authfs", // TODO(victorhsieh): move to microdroid once we can run the test in VM.
"crosvm",
"virtualizationservice",
],
- filesystems: [
- "microdroid_super",
- "microdroid_boot-5.10",
- "microdroid_vendor_boot-5.10",
- "microdroid_vbmeta",
- "microdroid_vbmeta_system",
- ],
+ filesystems: microdroid_filesystem_images,
},
},
binaries: [
@@ -52,6 +51,9 @@
java_libs: [
"android.system.virtualmachine",
],
+ jni_libs: [
+ "libvirtualmachine_jni",
+ ],
apps: [
"android.system.virtualmachine.res",
],
@@ -60,9 +62,13 @@
"microdroid.json",
"microdroid_uboot_env",
"microdroid_bootloader",
- "microdroid_bootconfig_debug",
+ "microdroid_bootloader.avbpubkey",
+ "microdroid_bootconfig_normal",
+ "microdroid_bootconfig_app_debuggable",
+ "microdroid_bootconfig_full_debuggable",
],
file_contexts: ":com.android.virt-file_contexts",
+ canned_fs_config: "canned_fs_config",
}
apex_key {
@@ -80,4 +86,90 @@
name: "com.android.virt.init.rc",
src: "virtualizationservice.rc",
filename: "init.rc",
+ installable: false,
+}
+
+// Virt apex needs a custom signer for its payload
+python_binary_host {
+ name: "sign_virt_apex",
+ srcs: [
+ "sign_virt_apex.py",
+ ],
+ version: {
+ py2: {
+ enabled: false,
+ },
+ py3: {
+ enabled: true,
+ embedded_launcher: true,
+ },
+ },
+ required: [
+ "img2simg",
+ "lpmake",
+ "lpunpack",
+ "simg2img",
+ ],
+}
+
+sh_test_host {
+ name: "sign_virt_apex_test",
+ src: "sign_virt_apex_test.sh",
+ test_config: "sign_virt_apex_test.xml",
+ data_bins: [
+ // deapexer
+ "deapexer",
+ "debugfs_static",
+
+ // sign_virt_apex
+ "avbtool",
+ "img2simg",
+ "lpmake",
+ "lpunpack",
+ "sign_virt_apex",
+ "simg2img",
+ ],
+ data_libs: [
+ "libbase",
+ "libc++",
+ "libcrypto_utils",
+ "libcrypto",
+ "libext4_utils",
+ "liblog",
+ "liblp",
+ "libsparse",
+ "libz",
+ ],
+ data: [
+ ":com.android.virt",
+ ":test.com.android.virt.pem",
+ ],
+ test_suites: ["general-tests"],
+}
+
+filegroup {
+ name: "test.com.android.virt.pem",
+ srcs: ["test.com.android.virt.pem"],
+}
+
+filegroup {
+ name: "test2.com.android.virt.pem",
+ srcs: ["test2.com.android.virt.pem"],
+}
+
+// custom tool to replace bytes in a file
+python_binary_host {
+ name: "replace_bytes",
+ srcs: [
+ "replace_bytes.py",
+ ],
+ version: {
+ py2: {
+ enabled: false,
+ },
+ py3: {
+ enabled: true,
+ embedded_launcher: true,
+ },
+ },
}
diff --git a/apex/canned_fs_config b/apex/canned_fs_config
new file mode 100644
index 0000000..1cf63b6
--- /dev/null
+++ b/apex/canned_fs_config
@@ -0,0 +1 @@
+/bin/crosvm 0 2000 0755 capabilities=0x4000
diff --git a/apex/product_packages.mk b/apex/product_packages.mk
index fef6316..ec295f5 100644
--- a/apex/product_packages.mk
+++ b/apex/product_packages.mk
@@ -21,17 +21,15 @@
PRODUCT_PACKAGES += \
com.android.compos \
- com.android.virt
+ com.android.virt \
-PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST += \
- system/apex/com.android.compos.apex \
- system/apex/com.android.virt.apex \
- system/bin/crosvm \
- system/lib64/%.dylib.so \
- system/lib64/libfdt.so \
- system/lib64/libgfxstream_backend.so \
- system/lib64/libcuttlefish_allocd_utils.so \
- system/lib64/libcuttlefish_fs.so \
- system/lib64/libcuttlefish_utils.so
+# TODO(b/207336449): Figure out how to get these off /system
+PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST := \
+ system/framework/oat/%@service-compos.jar@classes.odex \
+ system/framework/oat/%@service-compos.jar@classes.vdex \
-$(call inherit-product, external/crosvm/seccomp/crosvm_seccomp_policy_product_packages.mk)
+PRODUCT_APEX_SYSTEM_SERVER_JARS := com.android.compos:service-compos
+
+PRODUCT_SYSTEM_EXT_PROPERTIES := ro.config.isolated_compilation_enabled=true
+
+PRODUCT_SYSTEM_FSVERITY_GENERATE_METADATA := true
diff --git a/apex/replace_bytes.py b/apex/replace_bytes.py
new file mode 100644
index 0000000..b22f132
--- /dev/null
+++ b/apex/replace_bytes.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""replace_bytes is a command line tool to replace bytes in a file.
+
+Typical usage: replace_bytes target_file old_file new_file
+
+ replace bytes of old_file with bytes of new_file in target_file. old_file and new_file should be
+ the same size.
+
+"""
+import argparse
+import sys
+
+
+def ParseArgs(argv):
+ parser = argparse.ArgumentParser(description='Replace bytes')
+ parser.add_argument(
+ 'target_file',
+ help='path to the target file.')
+ parser.add_argument(
+ 'old_file',
+ help='path to the file containing old bytes')
+ parser.add_argument(
+ 'new_file',
+ help='path to the file containing new bytes')
+ return parser.parse_args(argv)
+
+
+def ReplaceBytes(target_file, old_file, new_file):
+ # read old bytes
+ with open(old_file, 'rb') as f:
+ old_bytes = f.read()
+
+ # read new bytes
+ with open(new_file, 'rb') as f:
+ new_bytes = f.read()
+
+ assert len(old_bytes) == len(new_bytes), 'Pubkeys should be the same size. (%d != %d)' % (
+ len(old_bytes), len(new_bytes))
+
+ # replace bytes in target_file
+ with open(target_file, 'r+b') as f:
+ pos = f.read().find(old_bytes)
+ assert pos != -1, 'Pubkey not found'
+ f.seek(pos)
+ f.write(new_bytes)
+
+
+def main(argv):
+ try:
+ args = ParseArgs(argv)
+ ReplaceBytes(args.target_file, args.old_file, args.new_file)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
new file mode 100644
index 0000000..e782bd2
--- /dev/null
+++ b/apex/sign_virt_apex.py
@@ -0,0 +1,451 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""sign_virt_apex is a command line tool for sign the Virt APEX file.
+
+Typical usage:
+ sign_virt_apex [-v] [--avbtool path_to_avbtool] [--signing_args args] payload_key payload_dir
+
+sign_virt_apex uses external tools which are assumed to be available via PATH.
+- avbtool (--avbtool can override the tool)
+- lpmake, lpunpack, simg2img, img2simg
+"""
+import argparse
+import glob
+import hashlib
+import os
+import re
+import shlex
+import shutil
+import subprocess
+import sys
+import tempfile
+
+
+def ParseArgs(argv):
+ parser = argparse.ArgumentParser(description='Sign the Virt APEX')
+ parser.add_argument('--verify', action='store_true',
+ help='Verify the Virt APEX')
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help='verbose execution')
+ parser.add_argument(
+ '--avbtool',
+ default='avbtool',
+ help='Optional flag that specifies the AVB tool to use. Defaults to `avbtool`.')
+ parser.add_argument(
+ '--signing_args',
+ help='the extra signing arguments passed to avbtool.'
+ )
+ parser.add_argument(
+ '--key_override',
+ metavar="filename=key",
+ action='append',
+ help='Overrides a signing key for a file e.g. microdroid_bootloader=mykey (for testing)')
+ parser.add_argument(
+ 'key',
+ help='path to the private key file.')
+ parser.add_argument(
+ 'input_dir',
+ help='the directory having files to be packaged')
+ args = parser.parse_args(argv)
+ # preprocess --key_override into a map
+ args.key_overrides = dict()
+ if args.key_override:
+ for pair in args.key_override:
+ name, key = pair.split('=')
+ args.key_overrides[name] = key
+ return args
+
+
+def RunCommand(args, cmd, env=None, expected_return_values={0}):
+ env = env or {}
+ env.update(os.environ.copy())
+
+ # TODO(b/193504286): we need a way to find other tool (cmd[0]) in various contexts
+ # e.g. sign_apex.py, sign_target_files_apk.py
+ if cmd[0] == 'avbtool':
+ cmd[0] = args.avbtool
+
+ if args.verbose:
+ print('Running: ' + ' '.join(cmd))
+ p = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, universal_newlines=True)
+ output, _ = p.communicate()
+
+ if args.verbose or p.returncode not in expected_return_values:
+ print(output.rstrip())
+
+ assert p.returncode in expected_return_values, (
+ '%d Failed to execute: ' + ' '.join(cmd)) % p.returncode
+ return (output, p.returncode)
+
+
+def ReadBytesSize(value):
+ return int(value.removesuffix(' bytes'))
+
+
+def ExtractAvbPubkey(args, key, output):
+ RunCommand(args, ['avbtool', 'extract_public_key',
+ '--key', key, '--output', output])
+
+
+def AvbInfo(args, image_path):
+ """Parses avbtool --info image output
+
+ Args:
+ args: program arguments.
+ image_path: The path to the image.
+ descriptor_name: Descriptor name of interest.
+
+ Returns:
+ A pair of
+ - a dict that contains VBMeta info. None if there's no VBMeta info.
+ - a list of descriptors.
+ """
+ if not os.path.exists(image_path):
+ raise ValueError('Failed to find image: {}'.format(image_path))
+
+ output, ret_code = RunCommand(
+ args, ['avbtool', 'info_image', '--image', image_path], expected_return_values={0, 1})
+ if ret_code == 1:
+ return None, None
+
+ info, descriptors = {}, []
+
+ # Read `avbtool info_image` output as "key:value" lines
+ matcher = re.compile(r'^(\s*)([^:]+):\s*(.*)$')
+
+ def IterateLine(output):
+ for line in output.split('\n'):
+ line_info = matcher.match(line)
+ if not line_info:
+ continue
+ yield line_info.group(1), line_info.group(2), line_info.group(3)
+
+ gen = IterateLine(output)
+
+ def ReadDescriptors(cur_indent, cur_name, cur_value):
+ descriptor = cur_value if cur_name == 'Prop' else {}
+ descriptors.append((cur_name, descriptor))
+ for indent, key, value in gen:
+ if indent <= cur_indent:
+ # read descriptors recursively to pass the read key as descriptor name
+ ReadDescriptors(indent, key, value)
+ break
+ descriptor[key] = value
+
+ # Read VBMeta info
+ for _, key, value in gen:
+ if key == 'Descriptors':
+ ReadDescriptors(*next(gen))
+ break
+ info[key] = value
+
+ return info, descriptors
+
+
+# Look up a list of (key, value) with a key. Returns the value of the first matching pair.
+def LookUp(pairs, key):
+ for k, v in pairs:
+ if key == k:
+ return v
+ return None
+
+
+def AddHashFooter(args, key, image_path):
+ if os.path.basename(image_path) in args.key_overrides:
+ key = args.key_overrides[os.path.basename(image_path)]
+ info, descriptors = AvbInfo(args, image_path)
+ if info:
+ descriptor = LookUp(descriptors, 'Hash descriptor')
+ image_size = ReadBytesSize(info['Image size'])
+ algorithm = info['Algorithm']
+ partition_name = descriptor['Partition Name']
+ partition_size = str(image_size)
+
+ cmd = ['avbtool', 'add_hash_footer',
+ '--key', key,
+ '--algorithm', algorithm,
+ '--partition_name', partition_name,
+ '--partition_size', partition_size,
+ '--image', image_path]
+ if args.signing_args:
+ cmd.extend(shlex.split(args.signing_args))
+ RunCommand(args, cmd)
+
+
+def AddHashTreeFooter(args, key, image_path):
+ if os.path.basename(image_path) in args.key_overrides:
+ key = args.key_overrides[os.path.basename(image_path)]
+ info, descriptors = AvbInfo(args, image_path)
+ if info:
+ descriptor = LookUp(descriptors, 'Hashtree descriptor')
+ image_size = ReadBytesSize(info['Image size'])
+ algorithm = info['Algorithm']
+ partition_name = descriptor['Partition Name']
+ partition_size = str(image_size)
+
+ cmd = ['avbtool', 'add_hashtree_footer',
+ '--key', key,
+ '--algorithm', algorithm,
+ '--partition_name', partition_name,
+ '--partition_size', partition_size,
+ '--do_not_generate_fec',
+ '--image', image_path]
+ if args.signing_args:
+ cmd.extend(shlex.split(args.signing_args))
+ RunCommand(args, cmd)
+
+
+def MakeVbmetaImage(args, key, vbmeta_img, images=None, chained_partitions=None):
+ if os.path.basename(vbmeta_img) in args.key_overrides:
+ key = args.key_overrides[os.path.basename(vbmeta_img)]
+ info, descriptors = AvbInfo(args, vbmeta_img)
+ if info is None:
+ return
+
+ with TempDirectory() as work_dir:
+ algorithm = info['Algorithm']
+ rollback_index = info['Rollback Index']
+ rollback_index_location = info['Rollback Index Location']
+
+ cmd = ['avbtool', 'make_vbmeta_image',
+ '--key', key,
+ '--algorithm', algorithm,
+ '--rollback_index', rollback_index,
+ '--rollback_index_location', rollback_index_location,
+ '--output', vbmeta_img]
+ if images:
+ for img in images:
+ cmd.extend(['--include_descriptors_from_image', img])
+
+ # replace pubkeys of chained_partitions as well
+ for name, descriptor in descriptors:
+ if name == 'Chain Partition descriptor':
+ part_name = descriptor['Partition Name']
+ ril = descriptor['Rollback Index Location']
+ part_key = chained_partitions[part_name]
+ avbpubkey = os.path.join(work_dir, part_name + '.avbpubkey')
+ ExtractAvbPubkey(args, part_key, avbpubkey)
+ cmd.extend(['--chain_partition', '%s:%s:%s' %
+ (part_name, ril, avbpubkey)])
+
+ if args.signing_args:
+ cmd.extend(shlex.split(args.signing_args))
+
+ RunCommand(args, cmd)
+ # libavb expects to be able to read the maximum vbmeta size, so we must provide a partition
+ # which matches this or the read will fail.
+ with open(vbmeta_img, 'a') as f:
+ f.truncate(65536)
+
+
+class TempDirectory(object):
+
+ def __enter__(self):
+ self.name = tempfile.mkdtemp()
+ return self.name
+
+ def __exit__(self, *unused):
+ shutil.rmtree(self.name)
+
+
+def MakeSuperImage(args, partitions, output):
+ with TempDirectory() as work_dir:
+ cmd = ['lpmake', '--device-size=auto', '--metadata-slots=2', # A/B
+ '--metadata-size=65536', '--sparse', '--output=' + output]
+
+ for part, img in partitions.items():
+ tmp_img = os.path.join(work_dir, part)
+ RunCommand(args, ['img2simg', img, tmp_img])
+
+ image_arg = '--image=%s=%s' % (part, img)
+ partition_arg = '--partition=%s:readonly:%d:default' % (
+ part, os.path.getsize(img))
+ cmd.extend([image_arg, partition_arg])
+
+ RunCommand(args, cmd)
+
+
+def ReplaceBootloaderPubkey(args, key, bootloader, bootloader_pubkey):
+ if os.path.basename(bootloader) in args.key_overrides:
+ key = args.key_overrides[os.path.basename(bootloader)]
+ # read old pubkey before replacement
+ with open(bootloader_pubkey, 'rb') as f:
+ old_pubkey = f.read()
+
+ # replace bootloader pubkey (overwrite the old one with the new one)
+ ExtractAvbPubkey(args, key, bootloader_pubkey)
+
+ # read new pubkey
+ with open(bootloader_pubkey, 'rb') as f:
+ new_pubkey = f.read()
+
+ assert len(old_pubkey) == len(new_pubkey)
+
+ # replace pubkey embedded in bootloader
+ with open(bootloader, 'r+b') as bl_f:
+ pos = bl_f.read().find(old_pubkey)
+ assert pos != -1
+ bl_f.seek(pos)
+ bl_f.write(new_pubkey)
+
+
+def SignVirtApex(args):
+ key = args.key
+ input_dir = args.input_dir
+
+ # target files in the Virt APEX
+ bootloader_pubkey = os.path.join(
+ input_dir, 'etc', 'microdroid_bootloader.avbpubkey')
+ bootloader = os.path.join(input_dir, 'etc', 'microdroid_bootloader')
+ boot_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_boot-5.10.img')
+ vendor_boot_img = os.path.join(
+ input_dir, 'etc', 'fs', 'microdroid_vendor_boot-5.10.img')
+ init_boot_img = os.path.join(
+ input_dir, 'etc', 'fs', 'microdroid_init_boot.img')
+ super_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_super.img')
+ vbmeta_img = os.path.join(input_dir, 'etc', 'fs', 'microdroid_vbmeta.img')
+ vbmeta_bootconfig_img = os.path.join(
+ input_dir, 'etc', 'fs', 'microdroid_vbmeta_bootconfig.img')
+ bootconfig_normal = os.path.join(
+ input_dir, 'etc', 'microdroid_bootconfig.normal')
+ bootconfig_app_debuggable = os.path.join(
+ input_dir, 'etc', 'microdroid_bootconfig.app_debuggable')
+ bootconfig_full_debuggable = os.path.join(
+ input_dir, 'etc', 'microdroid_bootconfig.full_debuggable')
+ uboot_env_img = os.path.join(
+ input_dir, 'etc', 'uboot_env.img')
+
+ # Key(pubkey) for bootloader should match with the one used to make VBmeta below
+ # while it's okay to use different keys for other image files.
+ ReplaceBootloaderPubkey(args, key, bootloader, bootloader_pubkey)
+
+ # re-sign bootloader, boot.img, vendor_boot.img, and init_boot.img
+ AddHashFooter(args, key, bootloader)
+ AddHashFooter(args, key, boot_img)
+ AddHashFooter(args, key, vendor_boot_img)
+ AddHashFooter(args, key, init_boot_img)
+
+ # re-sign super.img
+ with TempDirectory() as work_dir:
+ # unpack super.img
+ tmp_super_img = os.path.join(work_dir, 'super.img')
+ RunCommand(args, ['simg2img', super_img, tmp_super_img])
+ RunCommand(args, ['lpunpack', tmp_super_img, work_dir])
+
+ system_a_img = os.path.join(work_dir, 'system_a.img')
+ vendor_a_img = os.path.join(work_dir, 'vendor_a.img')
+ partitions = {"system_a": system_a_img, "vendor_a": vendor_a_img}
+
+ # re-sign partitions in super.img
+ for img in partitions.values():
+ AddHashTreeFooter(args, key, img)
+
+ # re-pack super.img
+ MakeSuperImage(args, partitions, super_img)
+
+ # re-generate vbmeta from re-signed {boot, vendor_boot, init_boot, system_a, vendor_a}.img
+ # Ideally, making VBmeta should be done out of TempDirectory block. But doing it here
+ # to avoid unpacking re-signed super.img for system/vendor images which are available
+ # in this block.
+ MakeVbmetaImage(args, key, vbmeta_img, images=[
+ boot_img, vendor_boot_img, init_boot_img, system_a_img, vendor_a_img])
+
+ # Re-sign bootconfigs and the uboot_env with the same key
+ bootconfig_sign_key = key
+ AddHashFooter(args, bootconfig_sign_key, bootconfig_normal)
+ AddHashFooter(args, bootconfig_sign_key, bootconfig_app_debuggable)
+ AddHashFooter(args, bootconfig_sign_key, bootconfig_full_debuggable)
+ AddHashFooter(args, bootconfig_sign_key, uboot_env_img)
+
+ # Re-sign vbmeta_bootconfig with chained_partitions to "bootconfig" and
+ # "uboot_env". Note that, for now, `key` and `bootconfig_sign_key` are the
+ # same, but technically they can be different. Vbmeta records pubkeys which
+ # signed chained partitions.
+ MakeVbmetaImage(args, key, vbmeta_bootconfig_img, chained_partitions={
+ 'bootconfig': bootconfig_sign_key,
+ 'uboot_env': bootconfig_sign_key,
+ })
+
+
+def VerifyVirtApex(args):
+ # Generator to emit avbtool-signed items along with its pubkey digest.
+ # This supports lpmake-packed images as well.
+ def Recur(target_dir):
+ for file in glob.glob(os.path.join(target_dir, 'etc', '**', '*'), recursive=True):
+ cur_item = os.path.relpath(file, target_dir)
+
+ if not os.path.isfile(file):
+ continue
+
+ # avbpubkey
+ if cur_item == 'etc/microdroid_bootloader.avbpubkey':
+ with open(file, 'rb') as f:
+ yield (cur_item, hashlib.sha1(f.read()).hexdigest())
+ continue
+
+ # avbtool signed
+ info, _ = AvbInfo(args, file)
+ if info:
+ yield (cur_item, info['Public key (sha1)'])
+ continue
+
+ # logical partition
+ with TempDirectory() as tmp_dir:
+ unsparsed = os.path.join(tmp_dir, os.path.basename(file))
+ _, rc = RunCommand(
+ # exit with 255 if it's not sparsed
+ args, ['simg2img', file, unsparsed], expected_return_values={0, 255})
+ if rc == 0:
+ with TempDirectory() as unpack_dir:
+ # exit with 64 if it's not a logical partition.
+ _, rc = RunCommand(
+ args, ['lpunpack', unsparsed, unpack_dir], expected_return_values={0, 64})
+ if rc == 0:
+ nested_items = list(Recur(unpack_dir))
+ if len(nested_items) > 0:
+ for (item, key) in nested_items:
+ yield ('%s!/%s' % (cur_item, item), key)
+ continue
+ # Read pubkey digest
+ with TempDirectory() as tmp_dir:
+ pubkey_file = os.path.join(tmp_dir, 'avbpubkey')
+ ExtractAvbPubkey(args, args.key, pubkey_file)
+ with open(pubkey_file, 'rb') as f:
+ pubkey_digest = hashlib.sha1(f.read()).hexdigest()
+
+ # Check every avbtool-signed item against the input key
+ for (item, pubkey) in Recur(args.input_dir):
+ assert pubkey == pubkey_digest, '%s: key mismatch: %s != %s' % (
+ item, pubkey, pubkey_digest)
+
+
+def main(argv):
+ try:
+ args = ParseArgs(argv)
+ if args.verify:
+ VerifyVirtApex(args)
+ else:
+ SignVirtApex(args)
+ except Exception as e:
+ print(e)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/apex/sign_virt_apex_test.sh b/apex/sign_virt_apex_test.sh
new file mode 100644
index 0000000..640a3d4
--- /dev/null
+++ b/apex/sign_virt_apex_test.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+shopt -s extglob
+
+TMP_ROOT=$(mktemp -d -t sign_virt_apex-XXXXXXXX)
+TEST_DIR=$(dirname $0)
+
+# To access host tools
+PATH=$TEST_DIR:$PATH
+DEBUGFS=$TEST_DIR/debugfs_static
+
+deapexer --debugfs_path $DEBUGFS extract $TEST_DIR/com.android.virt.apex $TMP_ROOT
+
+if [ "$(ls -A $TMP_ROOT/etc/fs/)" ]; then
+ sign_virt_apex $TEST_DIR/test.com.android.virt.pem $TMP_ROOT
+ sign_virt_apex --verify $TEST_DIR/test.com.android.virt.pem $TMP_ROOT
+else
+ echo "No filesystem images. Skip."
+fi
+
diff --git a/apex/sign_virt_apex_test.xml b/apex/sign_virt_apex_test.xml
new file mode 100644
index 0000000..5ea84a1
--- /dev/null
+++ b/apex/sign_virt_apex_test.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs sign_virt_apex test">
+ <test class="com.android.tradefed.testtype.binary.ExecutableHostTest" >
+ <option name="binary" value="sign_virt_apex_test"/>
+ </test>
+</configuration>
diff --git a/apex/test.com.android.virt.pem b/apex/test.com.android.virt.pem
new file mode 100644
index 0000000..b0cfff4
--- /dev/null
+++ b/apex/test.com.android.virt.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEAw91a1/DFwSu1FbX92SxGshBGPvHW+4JpvVCw10rhx39pPynI
+ePOf94c94f+pZr6QsT94sQ93Ubjhzf29E9wb5QVT98VJycyYviClyFRl8a1KQQQh
+JGR9v4KEceEWYeJe3nbYDzPwvzdJXy0DbLNUWZBXfDEZGyQHnwb124OIkmBFWz+h
+QsRGFMP+9FgATKH2jnsrNEB2yMQqw7+gpBMJ4q2bqOGE48EjERQG7oFQYfzDsyDd
+5xDhvXFVQmIcrwHRc8DSVaXdlZwQQLCKc6wXg1XLY6/7KQr+XCuz0ptCQ0OW3MAB
+OySsxnl82R+zlb9j05uZf0Z7yUW5hyZylZshK8rAVUYbYLFUmsD3X43qx42GzNfj
+FHhZn6k8CnnyGYvjY3/Lp3JY+EEbvzmVAJrDmMmUMIpML06D7Hu509yBOSAoE8qy
+rcccglHs3rHQ93lSslU02JWYcJ193KThQIcmc1OXoT+NPZf4NKemVE2uCX+mPRNR
+M4ACofXbVg/b5NcEelgIzL0UOZDQMj+WdyGpJ3d8YmE+1WWQ8nqbbCy0vQc+8jc0
+ZzZ/RF4WlBOj/or1TkWyGvGVXYhnU8raF1MnDRbOixZpPhSfdC7vtzktcYxHXt5X
+Ect42/ynX4Q5Gz3VMeg3WcIMNMSGFo7B3UEhde5MVxgEf1AQelm8/LaqWncCAwEA
+AQKCAgAKIyrQgmW52clFlmXNF72Q+cao+1tlKRrP8Z01h2qoKLAJ1N/LYtCkvxs0
+10atSq+yfNaCU4qZcDg/sSJYJMxMzjnKWSu4hh5huM7bz4J3P8DYHJ6ag5j+kILK
+YhwGdPD0ErKcFtQfEX16r5m9xopXGGFuzBvAi9zZHkMbWXN4IAN29ZQjIIWADaTk
+gKmDTd61ASr7SVrciUqtVv25gELCuLmVxBZcs2JdP+wb7BV8/NgkLU9O5lDIvVTs
+WqehZzawBwrb4/nUBH/S2VBRLFcLNSWRw0n8ldUUcC6ed+q2EIl+Y3Gs3fkTTLZp
+hnqFBaLlEOig7cT6ZeF0XUkQ9TaCNoEXEistwT6hlWSoAwUPi2q5AeJc9TFCrw8i
+mJWY+9UZH/lOBM8jYoGPW2b7drbNn/8DuPu1N9miP12oaL5KjjGlXvN4RmdkaGjU
+/zUNceQm/Q8hPudCaZLR9trMAOjmHl9GqnGxR35pRWMRJ/N11X1KLVdSlVpUFtHB
+yhvAAhsFLAZxaiQmAANmwz9cnxJTk6+2JTyX6EZOdAFqDibjvTQIqERoSBtKDVTa
+5n02MC3MHSeouhMyQscLvxTa9RtqxQHHnnQBDplkQGErmz5VhD4CYMDKgjhGbH71
+tg0LHujMieldWnpKPZWlztmGaDaPksJAAUKA8RBKrJ2RgXAyAQKCAQEA712eJxuh
+KzoOe0rIHwC4De5vO7ZyleLGOVvaX9jcm3xxQg1feC5r03xcxqkDvuio94Y4s/Sx
+ZubieWY60pPY3d5w160EKRIwAUKtPR2Uy/QLvC3jMnmIy29KP0F6oQXxMurQ16IS
+Aul5aSHIB33UlEd9v9HenTc9hPvYMUALe0HmisXYTRR0p9DMlqt+goaiynD3U2gh
+09x640EtCvDJiM2pAaVw2z9J/eFHypy6AERaGbX3vYjlbch1oqH5+67i0Nl/FZLx
+wL2q5fUsGx8DNQmHu0kjlLLIbGAx/1dtXWOhH0q4SWrGFJXgsYu5f6AzIHz6XKDi
+cITb8P8JUoZgiwKCAQEA0XnXeppR6DASAZSi7e19WWLmUafjur/qUYy+Aolr7Oyc
+H18JU71AOohM8TxgDTGNfvzII6ryxK5j5VpBnL4IX44ymjQ2J7nOtRl7t5Ceh9Cy
+lPFZwxUlV7Mikow8kAVpbY0JonUnRCzcxNT1tO8qlWYEj8L1vZf2d61VIACE/fJU
+ekWQKr/CLlNp/PvjAQaLd6oSh5gf4Ymx+5bFM86tJbR3YAtMWvr8I+nPDT8Q0G2c
+Zt62ZKiE76duma7ndS1Od7ohuLbwW4vV1KUcSzFkfGjP/Cx6D+wQydWAdi7fsQ2u
+xNstQbbP535x5uwVIlZovflq9Sl8AA5bBRnduvSfRQKCAQAiLN6gvMwlDNP2fHXY
+H1UoAAv3nZP8nHUqyVeDacYNmRXelWQ1F4OjnVTttEHppvRA6vP7lYsiowJgzNzH
+Jf7HprO7x2MZrhQWifuMB0YwXHa0dmTC1yFV0lzqbSHiDaQjXe1VbDlgGw+PmBgk
+Ia4RQafNlFxRXAq3ivGSDo/VGFKfK6I3Vx1UvHYJaRDV9/0UJE7bpLl3szoEalDR
+CBHuK1be+k0DsKSSz/BdGEViNmAa3aUydXI0W3OYNcIoUg7mPLdtUB6eIzZcQMX8
+VVAy6VpsvgOLfn8pIg7hYw0lUU0214c6TDldxQxgrQ9eDnReRhnE0d+iqwVwAinF
+k5QDAoIBAHA/Z/Xsp6NRzvRF36C7OAYj9uMeoes6V6dnUZIubUTB7U7qMCdNLBOx
+YfmKrrWjLf00G1LxkbFO+Xy3Bp2lPvtlSTxUagiTim6Ev0S4HBsO/ALP6Zedxyrd
+dNMujm1mWP45K0aAnI/tskdPDnLsDdeMmTkn8WKtAYdTvF+vp5QkvJvglsYxhy4n
+yI2ltBiily2CVveNzteeX18/hWCjiSjBMY6nvzypbV8ZNLgWaT4m3j5JbVc27jU1
+dRCpJqIlqvyBIvzGGroTjnuqFiU8zGnWCE1K0AWkK8Lbw0CREZDgkhwujmu+OF4F
+5acmLpT91JaoBmZk2mt1RdTP7X73AjkCggEBAIwQSTzvVIqJoD4Vc9wqbCGyPr2s
+/g0GkEmcJJpe6E8AxZNzmBKV3bDzvC+thhEVQeCmVDZsOZjO/4TumskHntxMHIpp
+DHRgYiERCM2oIPMEkruqCQ+0BlH/8CtglyrPmsLgSU6L1CBQNMt39KFtcscMMwkk
+Coo/qN0DarQGkrjc+UN4Q0lJDBVB5nQj+1uCVEBnV/IC+08kr9uXIJGAllh3Wfgq
+jOdL2j1knpYD9Wi1TCZwDobCqDWwYMVQZNbGu6de3lWtuBYKCd154QUVm11Kkv3P
+Gz/yGM1v6IttZ0osMujVLADyZMLYKSt8ypRlB3TUD/4P3bUryorV/bu/ew8=
+-----END RSA PRIVATE KEY-----
diff --git a/apex/test2.com.android.virt.pem b/apex/test2.com.android.virt.pem
new file mode 100644
index 0000000..7e4614a
--- /dev/null
+++ b/apex/test2.com.android.virt.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEAxNrDPSV0GxORgFRVX7mwCVQSc1jxZ0ETudLpICfQzK0MTggm
+tNse5O9K9w7ZdxLHt3llm6wokz4yL/WDVirSMY5W1w4TMuTZJ3iGnBmEpjPXFxhW
+yPCBqfs3ZOQ0ndz6pimI8ZTaKF8cM6Iz/ZkNGUcBjgiUKDJQgL+zwJADK63zrGgJ
+WJdXZfPRHDPLX11kwOAemggobVgFAfnGiJc37n1561Ozq/joYD1OZTIdS8+fKkey
+p5cH9uSZgU4Fpf5xU5EDDEnZvqSYAghZY8rKlfxHuM3GdwLOhrCoUEnSYcycnDFs
+t6mYEvyv13pQRR/Co6udEW7Xha3PV5io79066XBRK0RICY5vb+92TJ7dhp3Wz+4j
+3LMwB+G02GlBWIZ6S/uRLbPpdCUj6waBb3im2+1wg5306peHoU2Pj3CJXtz96pYA
+6zgdM0d78H2KfSmKku+k6tjDKlo6584Xa3p5P6yxLnCSqwIBK6saK0M7j3gPU1P6
+j2G0fVXa20OT0qIWAcm24bBQjCF1kbQzPQnn3nKpTPVJNozH72dXYxgBQd80Sb6s
+yhnxXtiEJp+gd/SET/STOZ+qYvPmEF4TkUbtB1MLdpey3+NkRbGRCfeLeKoXJ6zW
+rS85Wo5U8v5d0nRhRCO8bRIKu3BgbBWdouHmmmUshUn3s6PoatUuDPcaX4cCAwEA
+AQKCAgBc/fbC+OFrNQhD9hLKgJ5fGb8JjFelbkGSQ8bq8MQbHBg2+HTIdMaYYU4p
+fXNPY6jCzG8qZd5ZCEWPEEy1tM7MqC/vsW9yWFcY5T+5l/hoxw5xk4bTr4GhOdJG
+L+OHO2+QdQiDDs0ryxo4bgRfZSCh80ARx42tm28aEvUoHx/QT4FPzWm01vFrcQ00
+ZGdLKoRA9N6f5wCp/q2G7GZT+hNq7w1cFJNIxvGHHQ7ekRjzyiWnRG1p69fQUtcN
+FT1n19XTIyqscGqTO4+vTiBkGtUumxmKfHKnn8TOLb+lBeqDVrQYuORhspTrS0EJ
+6nbm4IUC6jvtk03ukVfkSnJrtTdxYMR74TSgLWvsNHUtZcCDDQ+1W5g4LrMsTsfU
+bUXAwdVcRTy8rlT2EOTWVorMZHOCl9HojorRlK1kjQHStdUqrewTEF8bk1BeBI5M
+ddP2acdjfCbn1MBp4S7CqAXSuEi5TgIuoQ9ZZ8XU0n1/PuLNk9dYEdrP32VXfsM7
+AUznCq/kItrPDCzz+sNzeXXP77HRlBD7snJdtY06932ysYw85AQLEX3F3arqcyJj
+3c0/ddbGR9O4re6S0x2XhOplbaePya22Y+/I9ryWQZZO1OVOZNtTj83toIw/E5LN
+HHBAeMjHSVVyzbIEF0XqCmg59l62CRMFxcNtS67l2pcSIWSCkQKCAQEA7FWr4rUY
+gXRVg5473tM/4VT3yIGwKt83s78b0Z30VjfaZp88wx0gJaNR5HTMsEsBjof4khqu
+AKlTHb4riXBPWNz0Sf1s2PzqlKj7Ke4eowEUOAqrvkAZhZBwIvsiwFPH/mzNTdBD
+PmMDsMFerEOhVAZpDUlTUPiWHCa9ani25TYAKMA6QGqpouAMGw2xN/GtdUD4Dy5b
+0K3B2cg1lhXjWqf8LKftFGh0vsZTR/heAPx8NdrY1EOMid1blIa2KefxhunqudGr
+Owx5wMKpJA5x3sid4qds0+q8X6TWYCQORQJNiw0ggHnnjM8O9cszRIlTbMRJ3j1p
+L4TQR+1JKVu1CQKCAQEA1TwZEkUkx/vGrY358/655Nymqa0oMz/g005DTUHRILRf
+hHLH+bTyx9YSquqvE5uGIbavrJtmRwxFWikPfuBdNf87oxfT2mBESrK73162rNt2
+okgky2AoNeVRu2V93wGCTwESoLzgtruUo2EP5W7Af7KFd5Ri5zSOn0hB9nHwNHQf
+RnWxB35bBpjpAkbATr4xAeJ7t99VN0T/sgUkTVRCAy4aEyyAgz3yXGH5Nn0d76zf
+CzNAUzWuK+nwtWQetkcRcAj4ZncyqzDy7XPaoHeur8X45Bh3e27nNzbblrKm0DFT
+eRD4raxubyYFcge8I64xvt73zHcQOTKuC8kuTfmkDwKCAQBrgmmH3yP/t+Ey16ea
+rPTRV6rEbqKqThLz1Msd50IAerYCmwu0Iqq+FHare6qlw+k4YohkRnjDWkOyMxFx
+G0MtRI5onj2G1D8OU3S2VVlgg5wkBk6sZFJ33QX2E9JyNWq0ReB7NnNwjPBf1wdv
+S/C23ZeqcKHTItJ+iez+410oFhGqeA/Hv/3dVxiKsgbdUTa8MUrm9QrVekXGAXrH
+BLwBQIvJ8LY742zAYE4AXm68+h6zDRQ4M2ZaTPVdMo7pr1bDLeQWldfUK8+zLZpu
+CZgpZY/VTJ6IJK9+vui6oYxQPkTyLY2MhGgeOQ8wJzjyQ5pMz1pfHAaelEd/gOUY
+SFypAoIBAE8uUeEG6/GW/N/VqMuB+2WQyhKXyiW9wq60kSlPF2kdkZqNRNTk7IJo
+a+Yr33dYeSZrwDBIRGJ9nAMu3CIxDmvOq0aUwoaE2NckJ796XDs0A4mfYIpk2omo
+7gC4X1VAKjNMIq6tdIRmg3tnv49i4PiKQiV1ZISWb5+WJWhuRtQziqmPan1t3j9E
+6MF/pEmZNnmMsIRG2k37wTdJ0YElmJ21sNkN3WrexfCoMPKa41LszqZKEcjUVijY
+Zhn1Y7IsEb2YlyT1fkszkgG606RizOtYiGOq8jNTq2hFZqU/EdKdfnGma7GSJi//
+3mXJmYNmW/KUuU+jptKWjyqxOhCacuECggEBAJ4UfR26SKumYjw/HZj7ZNp31Fuw
++kqO9GuHlxieVk0FAk9Wd1L1r1VZReyUfKUah57JdS94iO5XizLA8xcMQ37vw5Ki
+SgKVX6ONVwkAmkHQSVAC5783k74n1PoEKd36DcBurib2SPxwXp/Yl9Y0744K4iaT
+VQSVWl/wd1NDaDY7xrOFw1keqY+hFVL/2zUozui1pypzwYOMvlmyT+UfLUnR3Kdc
+EaQMoRMLK1+ct3lyBr1CmB0tXaF+rm4yMJNrZhym2AUFUi8jOBCf3zUnJ/1O45HW
+iTir6LZxBwHcCSwJn6/HdcoYIEsLqwsVzoTMdFBGIOpB+eWaRA7/cYkRv7A=
+-----END RSA PRIVATE KEY-----
diff --git a/apkdmverity/Android.bp b/apkdmverity/Android.bp
index 9b53a47..06d4500 100644
--- a/apkdmverity/Android.bp
+++ b/apkdmverity/Android.bp
@@ -13,6 +13,8 @@
"libbitflags",
"libclap",
"libdata_model",
+ "libidsig",
+ "libitertools",
"liblibc",
"libnix",
"libnum_traits",
@@ -30,6 +32,7 @@
rust_binary {
name: "apkdmverity",
defaults: ["apkdmverity.defaults"],
+ bootstrap: true,
}
rust_test {
diff --git a/apkdmverity/src/apksigv4.rs b/apkdmverity/src/apksigv4.rs
deleted file mode 100644
index fef21a5..0000000
--- a/apkdmverity/src/apksigv4.rs
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use anyhow::{anyhow, Context, Result};
-use num_derive::FromPrimitive;
-use num_traits::FromPrimitive;
-use std::io::{Read, Seek};
-
-// `apksigv4` module provides routines to decode the idsig file as defined in [APK signature
-// scheme v4] (https://source.android.com/security/apksigning/v4).
-
-#[derive(Debug)]
-pub struct V4Signature {
- pub version: Version,
- pub hashing_info: HashingInfo,
- pub signing_info: SigningInfo,
- pub merkle_tree_size: u32,
- pub merkle_tree_offset: u64,
-}
-
-#[derive(Debug)]
-pub struct HashingInfo {
- pub hash_algorithm: HashAlgorithm,
- pub log2_blocksize: u8,
- pub salt: Box<[u8]>,
- pub raw_root_hash: Box<[u8]>,
-}
-
-#[derive(Debug)]
-pub struct SigningInfo {
- pub apk_digest: Box<[u8]>,
- pub x509_certificate: Box<[u8]>,
- pub additional_data: Box<[u8]>,
- pub public_key: Box<[u8]>,
- pub signature_algorithm_id: SignatureAlgorithmId,
- pub signature: Box<[u8]>,
-}
-
-#[derive(Debug, PartialEq, FromPrimitive)]
-#[repr(u32)]
-pub enum Version {
- V2 = 2,
-}
-
-impl Version {
- fn from(val: u32) -> Result<Version> {
- Self::from_u32(val).ok_or_else(|| anyhow!("{} is an unsupported version", val))
- }
-}
-
-#[derive(Debug, PartialEq, FromPrimitive)]
-#[repr(u32)]
-pub enum HashAlgorithm {
- SHA256 = 1,
-}
-
-impl HashAlgorithm {
- fn from(val: u32) -> Result<HashAlgorithm> {
- Self::from_u32(val).ok_or_else(|| anyhow!("{} is an unsupported hash algorithm", val))
- }
-}
-
-#[derive(Debug, PartialEq, FromPrimitive)]
-#[allow(non_camel_case_types)]
-#[repr(u32)]
-pub enum SignatureAlgorithmId {
- RSASSA_PSS_SHA2_256 = 0x0101,
- RSASSA_PSS_SHA2_512 = 0x0102,
- RSASSA_PKCS1_SHA2_256 = 0x0103,
- RSASSA_PKCS1_SHA2_512 = 0x0104,
- ECDSA_SHA2_256 = 0x0201,
- ECDSA_SHA2_512 = 0x0202,
- DSA_SHA2_256 = 0x0301,
-}
-
-impl SignatureAlgorithmId {
- fn from(val: u32) -> Result<SignatureAlgorithmId> {
- Self::from_u32(val)
- .with_context(|| format!("{:#06x} is an unsupported signature algorithm", val))
- }
-}
-
-impl V4Signature {
- /// Reads a stream from `r` and then parses it into a `V4Signature` struct.
- pub fn from<T: Read + Seek>(mut r: T) -> Result<V4Signature> {
- Ok(V4Signature {
- version: Version::from(read_le_u32(&mut r)?)?,
- hashing_info: HashingInfo::from(&mut r)?,
- signing_info: SigningInfo::from(&mut r)?,
- merkle_tree_size: read_le_u32(&mut r)?,
- merkle_tree_offset: r.stream_position()?,
- })
- }
-}
-
-impl HashingInfo {
- fn from(mut r: &mut dyn Read) -> Result<HashingInfo> {
- read_le_u32(&mut r)?;
- Ok(HashingInfo {
- hash_algorithm: HashAlgorithm::from(read_le_u32(&mut r)?)?,
- log2_blocksize: read_u8(&mut r)?,
- salt: read_sized_array(&mut r)?,
- raw_root_hash: read_sized_array(&mut r)?,
- })
- }
-}
-
-impl SigningInfo {
- fn from(mut r: &mut dyn Read) -> Result<SigningInfo> {
- read_le_u32(&mut r)?;
- Ok(SigningInfo {
- apk_digest: read_sized_array(&mut r)?,
- x509_certificate: read_sized_array(&mut r)?,
- additional_data: read_sized_array(&mut r)?,
- public_key: read_sized_array(&mut r)?,
- signature_algorithm_id: SignatureAlgorithmId::from(read_le_u32(&mut r)?)?,
- signature: read_sized_array(&mut r)?,
- })
- }
-}
-
-fn read_u8(r: &mut dyn Read) -> Result<u8> {
- let mut byte = [0; 1];
- r.read_exact(&mut byte)?;
- Ok(byte[0])
-}
-
-fn read_le_u32(r: &mut dyn Read) -> Result<u32> {
- let mut bytes = [0; 4];
- r.read_exact(&mut bytes)?;
- Ok(u32::from_le_bytes(bytes))
-}
-
-fn read_sized_array(r: &mut dyn Read) -> Result<Box<[u8]>> {
- let size = read_le_u32(r)?;
- let mut data = vec![0; size as usize];
- r.read_exact(&mut data)?;
- Ok(data.into_boxed_slice())
-}
-
-#[cfg(test)]
-mod tests {
- use crate::util::hexstring_from;
- use crate::*;
- use std::io::Cursor;
-
- #[test]
- fn parse_idsig_file() {
- let idsig = Cursor::new(include_bytes!("../testdata/test.apk.idsig"));
- let parsed = V4Signature::from(idsig).unwrap();
-
- assert_eq!(Version::V2, parsed.version);
-
- let hi = parsed.hashing_info;
- assert_eq!(HashAlgorithm::SHA256, hi.hash_algorithm);
- assert_eq!(12, hi.log2_blocksize);
- assert_eq!("", hexstring_from(hi.salt.as_ref()));
- assert_eq!(
- "ce1194fdb3cb2537daf0ac8cdf4926754adcbce5abeece7945fe25d204a0df6a",
- hexstring_from(hi.raw_root_hash.as_ref())
- );
-
- let si = parsed.signing_info;
- assert_eq!(
- "b5225523a813fb84ed599dd649698c080bcfed4fb19ddb00283a662a2683bc15",
- hexstring_from(si.apk_digest.as_ref())
- );
- assert_eq!("", hexstring_from(si.additional_data.as_ref()));
- assert_eq!(
- "303d021c77304d0f4732a90372bbfce095223e4ba82427ceb381f69bc6762d78021d008b99924\
- a8585c38d7f654835eb219ae9e176b44e86dcb23153e3d9d6",
- hexstring_from(si.signature.as_ref())
- );
- assert_eq!(SignatureAlgorithmId::DSA_SHA2_256, si.signature_algorithm_id);
-
- assert_eq!(36864, parsed.merkle_tree_size);
- assert_eq!(2251, parsed.merkle_tree_offset);
- }
-}
diff --git a/apkdmverity/src/dm.rs b/apkdmverity/src/dm.rs
index 2b44876..4cb24fc 100644
--- a/apkdmverity/src/dm.rs
+++ b/apkdmverity/src/dm.rs
@@ -147,15 +147,15 @@
/// The path to the generated device is "/dev/mapper/<name>".
pub fn create_device(&self, name: &str, target: &DmVerityTarget) -> Result<PathBuf> {
// Step 1: create an empty device
- let mut data = DmIoctl::new(&name)?;
+ let mut data = DmIoctl::new(name)?;
data.set_uuid(&uuid()?)?;
- dm_dev_create(&self, &mut data)
+ dm_dev_create(self, &mut data)
.context(format!("failed to create an empty device with name {}", &name))?;
// Step 2: load table onto the device
let payload_size = size_of::<DmIoctl>() + target.as_slice().len();
- let mut data = DmIoctl::new(&name)?;
+ let mut data = DmIoctl::new(name)?;
data.data_size = payload_size as u32;
data.data_start = size_of::<DmIoctl>() as u32;
data.target_count = 1;
@@ -164,13 +164,13 @@
let mut payload = Vec::with_capacity(payload_size);
payload.extend_from_slice(data.as_slice());
payload.extend_from_slice(target.as_slice());
- dm_table_load(&self, payload.as_mut_ptr() as *mut DmIoctl)
+ dm_table_load(self, payload.as_mut_ptr() as *mut DmIoctl)
.context("failed to load table")?;
// Step 3: activate the device (note: the term 'suspend' might be misleading, but it
// actually activates the table. See include/uapi/linux/dm-ioctl.h
- let mut data = DmIoctl::new(&name)?;
- dm_dev_suspend(&self, &mut data).context("failed to activate")?;
+ let mut data = DmIoctl::new(name)?;
+ dm_dev_suspend(self, &mut data).context("failed to activate")?;
// Step 4: wait unti the device is created and return the device path
let path = Path::new(MAPPER_DEV_ROOT).join(&name);
@@ -181,9 +181,9 @@
/// Removes a mapper device
#[cfg(test)]
pub fn delete_device_deferred(&self, name: &str) -> Result<()> {
- let mut data = DmIoctl::new(&name)?;
+ let mut data = DmIoctl::new(name)?;
data.flags |= Flag::DM_DEFERRED_REMOVE;
- dm_dev_remove(&self, &mut data)
+ dm_dev_remove(self, &mut data)
.context(format!("failed to remove device with name {}", &name))?;
Ok(())
}
diff --git a/apkdmverity/src/main.rs b/apkdmverity/src/main.rs
index f09af79..dbf3131 100644
--- a/apkdmverity/src/main.rs
+++ b/apkdmverity/src/main.rs
@@ -21,15 +21,14 @@
//! system managed by the host Android which is assumed to be compromisable, it is important to
//! keep the integrity of the file "inside" Microdroid.
-mod apksigv4;
mod dm;
mod loopdevice;
mod util;
-use crate::apksigv4::*;
-
use anyhow::{bail, Context, Result};
use clap::{App, Arg};
+use idsig::{HashAlgorithm, V4Signature};
+use itertools::Itertools;
use std::fmt::Debug;
use std::fs;
use std::fs::File;
@@ -39,36 +38,34 @@
fn main() -> Result<()> {
let matches = App::new("apkdmverity")
.about("Creates a dm-verity block device out of APK signed with APK signature scheme V4.")
- .arg(
- Arg::with_name("apk")
- .help("Input APK file. Must be signed using the APK signature scheme V4.")
- .required(true),
- )
- .arg(
- Arg::with_name("idsig")
- .help("The idsig file having the merkle tree and the signing info.")
- .required(true),
- )
- .arg(
- Arg::with_name("name")
- .help(
- "Name of the dm-verity block device. The block device is created at \
- \"/dev/mapper/<name>\".",
- )
- .required(true),
- )
+ .arg(Arg::from_usage(
+ "--apk... <apk_path> <idsig_path> <name> <root_hash> \
+ 'Input APK file, idsig file, name of the block device, and root hash. \
+ The APK file must be signed using the APK signature scheme 4. The \
+ block device is created at \"/dev/mapper/<name>\".' root_hash is \
+ optional; idsig file's root hash will be used if specified as \"none\"."
+ ))
.arg(Arg::with_name("verbose").short("v").long("verbose").help("Shows verbose output"))
.get_matches();
- let apk = matches.value_of("apk").unwrap();
- let idsig = matches.value_of("idsig").unwrap();
- let name = matches.value_of("name").unwrap();
- let ret = enable_verity(apk, idsig, name)?;
- if matches.is_present("verbose") {
- println!(
- "data_device: {:?}, hash_device: {:?}, mapper_device: {:?}",
- ret.data_device, ret.hash_device, ret.mapper_device
- );
+ let apks = matches.values_of("apk").unwrap();
+ assert!(apks.len() % 4 == 0);
+
+ let verbose = matches.is_present("verbose");
+
+ for (apk, idsig, name, roothash) in apks.tuples() {
+ let roothash = if roothash != "none" {
+ Some(util::parse_hexstring(roothash).expect("failed to parse roothash"))
+ } else {
+ None
+ };
+ let ret = enable_verity(apk, idsig, name, roothash.as_deref())?;
+ if verbose {
+ println!(
+ "data_device: {:?}, hash_device: {:?}, mapper_device: {:?}",
+ ret.data_device, ret.hash_device, ret.mapper_device
+ );
+ }
}
Ok(())
}
@@ -82,7 +79,12 @@
const BLOCK_SIZE: u64 = 4096;
// Makes a dm-verity block device out of `apk` and its accompanying `idsig` files.
-fn enable_verity<P: AsRef<Path> + Debug>(apk: P, idsig: P, name: &str) -> Result<VerityResult> {
+fn enable_verity<P: AsRef<Path> + Debug>(
+ apk: P,
+ idsig: P,
+ name: &str,
+ roothash: Option<&[u8]>,
+) -> Result<VerityResult> {
// Attach the apk file to a loop device if the apk file is a regular file. If not (i.e. block
// device), we only need to get the size and use the block device as it is.
let (data_device, apk_size) = if fs::metadata(&apk)?.file_type().is_block_device() {
@@ -110,9 +112,13 @@
let target = dm::DmVerityTargetBuilder::default()
.data_device(&data_device, apk_size)
.hash_device(&hash_device)
- .root_digest(&sig.hashing_info.raw_root_hash)
+ .root_digest(if let Some(roothash) = roothash {
+ roothash
+ } else {
+ &sig.hashing_info.raw_root_hash
+ })
.hash_algorithm(match sig.hashing_info.hash_algorithm {
- apksigv4::HashAlgorithm::SHA256 => dm::DmVerityHashAlgorithm::SHA256,
+ HashAlgorithm::SHA256 => dm::DmVerityHashAlgorithm::SHA256,
})
.salt(&sig.hashing_info.salt)
.build()
@@ -121,7 +127,7 @@
// Actually create a dm-verity block device using the spec.
let dm = dm::DeviceMapper::new()?;
let mapper_device =
- dm.create_device(&name, &target).context("Failed to create dm-verity device")?;
+ dm.create_device(name, &target).context("Failed to create dm-verity device")?;
Ok(VerityResult { data_device, hash_device, mapper_device })
}
@@ -169,14 +175,24 @@
}
fn run_test(apk: &[u8], idsig: &[u8], name: &str, check: fn(TestContext)) {
+ run_test_with_hash(apk, idsig, name, None, check);
+ }
+
+ fn run_test_with_hash(
+ apk: &[u8],
+ idsig: &[u8],
+ name: &str,
+ roothash: Option<&[u8]>,
+ check: fn(TestContext),
+ ) {
if should_skip() {
return;
}
let test_dir = tempfile::TempDir::new().unwrap();
- let (apk_path, idsig_path) = prepare_inputs(&test_dir.path(), apk, idsig);
+ let (apk_path, idsig_path) = prepare_inputs(test_dir.path(), apk, idsig);
// Run the program and register clean-ups.
- let ret = enable_verity(&apk_path, &idsig_path, name).unwrap();
+ let ret = enable_verity(&apk_path, &idsig_path, name, roothash).unwrap();
let ret = scopeguard::guard(ret, |ret| {
loopdevice::detach(ret.data_device).unwrap();
loopdevice::detach(ret.hash_device).unwrap();
@@ -216,8 +232,7 @@
}
run_test(modified_apk.as_slice(), idsig.as_ref(), "incorrect_apk", |ctx| {
- let ret = fs::read(&ctx.result.mapper_device).map_err(|e| e.kind());
- assert_eq!(ret, Err(std::io::ErrorKind::Other));
+ fs::read(&ctx.result.mapper_device).expect_err("Should fail");
});
}
@@ -237,8 +252,7 @@
}
run_test(apk.as_ref(), modified_idsig.as_slice(), "incorrect_merkle_tree", |ctx| {
- let ret = fs::read(&ctx.result.mapper_device).map_err(|e| e.kind());
- assert_eq!(ret, Err(std::io::ErrorKind::Other));
+ fs::read(&ctx.result.mapper_device).expect_err("Should fail");
});
}
@@ -260,9 +274,7 @@
// Read around the modified location causes an error
let f = File::open(&ctx.result.mapper_device).unwrap();
let mut buf = vec![0; 10]; // just read 10 bytes
- let ret = f.read_at(&mut buf, MODIFIED_OFFSET).map_err(|e| e.kind());
- assert!(ret.is_err());
- assert_eq!(ret, Err(std::io::ErrorKind::Other));
+ f.read_at(&mut buf, MODIFIED_OFFSET).expect_err("Should fail");
});
}
@@ -296,7 +308,7 @@
let idsig = include_bytes!("../testdata/test.apk.idsig");
let test_dir = tempfile::TempDir::new().unwrap();
- let (apk_path, idsig_path) = prepare_inputs(&test_dir.path(), apk, idsig);
+ let (apk_path, idsig_path) = prepare_inputs(test_dir.path(), apk, idsig);
// attach the files to loop devices to make them block devices
let apk_size = fs::metadata(&apk_path).unwrap().len();
@@ -314,7 +326,8 @@
let name = "loop_as_input";
// Run the program WITH the loop devices, not the regular files.
- let ret = enable_verity(apk_loop_device.deref(), idsig_loop_device.deref(), &name).unwrap();
+ let ret =
+ enable_verity(apk_loop_device.deref(), idsig_loop_device.deref(), name, None).unwrap();
let ret = scopeguard::guard(ret, |ret| {
loopdevice::detach(ret.data_device).unwrap();
loopdevice::detach(ret.hash_device).unwrap();
@@ -327,4 +340,24 @@
assert_eq!(verity.len(), original.len()); // fail fast
assert_eq!(verity.as_slice(), original.as_slice());
}
+
+ // test with custom roothash
+ #[test]
+ fn correct_custom_roothash() {
+ let apk = include_bytes!("../testdata/test.apk");
+ let idsig = include_bytes!("../testdata/test.apk.idsig");
+ let roothash = V4Signature::from(Cursor::new(&idsig)).unwrap().hashing_info.raw_root_hash;
+ run_test_with_hash(
+ apk.as_ref(),
+ idsig.as_ref(),
+ "correct_custom_roothash",
+ Some(&roothash),
+ |ctx| {
+ let verity = fs::read(&ctx.result.mapper_device).unwrap();
+ let original = fs::read(&ctx.result.data_device).unwrap();
+ assert_eq!(verity.len(), original.len()); // fail fast
+ assert_eq!(verity.as_slice(), original.as_slice());
+ },
+ );
+ }
}
diff --git a/apkdmverity/src/util.rs b/apkdmverity/src/util.rs
index d2bc799..913f827 100644
--- a/apkdmverity/src/util.rs
+++ b/apkdmverity/src/util.rs
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-use anyhow::{bail, Result};
+use anyhow::{anyhow, bail, Result};
use nix::sys::stat::FileStat;
use std::fs::File;
use std::os::unix::fs::FileTypeExt;
@@ -42,6 +42,19 @@
s.iter().map(|byte| format!("{:02x}", byte)).reduce(|i, j| i + &j).unwrap_or_default()
}
+/// Parses a hexadecimal string into a byte array
+pub fn parse_hexstring(s: &str) -> Result<Vec<u8>> {
+ let len = s.len();
+ if len % 2 != 0 {
+ bail!("length {} is not even", len)
+ } else {
+ (0..len)
+ .step_by(2)
+ .map(|i| u8::from_str_radix(&s[i..i + 2], 16).map_err(|e| anyhow!(e)))
+ .collect()
+ }
+}
+
/// fstat that accepts a path rather than FD
pub fn fstat(p: &Path) -> Result<FileStat> {
let f = File::open(p)?;
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 174914f..935ed5c 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -14,12 +14,16 @@
"libandroid_logger",
"libanyhow",
"libauthfs_crypto_bindgen",
+ "libauthfs_fsverity_metadata",
"libbinder_rpc_unstable_bindgen",
"libbinder_rs",
"libcfg_if",
+ "libfsverity_digests_proto_rust",
"libfuse_rust",
"liblibc",
"liblog_rust",
+ "libnix",
+ "libprotobuf",
"libstructopt",
"libthiserror",
],
@@ -59,7 +63,7 @@
rust_test {
name: "authfs_device_test_src_lib",
defaults: ["authfs_defaults"],
- test_suites: ["device-tests"],
+ test_suites: ["general-tests"],
data: [":authfs_test_files"],
}
@@ -68,14 +72,21 @@
srcs: [
"testdata/cert.der",
"testdata/input.4k",
- "testdata/input.4k.fsv_sig",
- "testdata/input.4k.merkle_dump",
+ "testdata/input.4k.fsv_meta",
"testdata/input.4k1",
- "testdata/input.4k1.fsv_sig",
- "testdata/input.4k1.merkle_dump",
+ "testdata/input.4k1.fsv_meta",
"testdata/input.4m",
- "testdata/input.4m.fsv_sig",
- "testdata/input.4m.merkle_dump",
- "testdata/input.4m.merkle_dump.bad",
+ "testdata/input.4m.fsv_meta",
+ "testdata/input.4m.fsv_meta.bad_merkle",
],
}
+
+rust_test {
+ name: "libauthfs_crypto_bindgen_test",
+ srcs: [":libauthfs_crypto_bindgen"],
+ crate_name: "authfs_crypto_bindgen_test",
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ clippy_lints: "none",
+ lints: "none",
+}
diff --git a/authfs/aidl/Android.bp b/authfs/aidl/Android.bp
index 35a3c4a..9504037 100644
--- a/authfs/aidl/Android.bp
+++ b/authfs/aidl/Android.bp
@@ -9,7 +9,10 @@
backend: {
rust: {
enabled: true,
- apex_available: ["com.android.virt"],
+ apex_available: [
+ "com.android.compos",
+ "com.android.virt",
+ ],
},
},
}
diff --git a/authfs/aidl/com/android/virt/fs/AuthFsConfig.aidl b/authfs/aidl/com/android/virt/fs/AuthFsConfig.aidl
new file mode 100644
index 0000000..2e3479c
--- /dev/null
+++ b/authfs/aidl/com/android/virt/fs/AuthFsConfig.aidl
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.virt.fs;
+
+/** @hide */
+parcelable AuthFsConfig {
+ parcelable InputFdAnnotation {
+ /**
+ * File descriptor number to be passed to the program. This is also the same file
+ * descriptor number used in the backend server.
+ */
+ int fd;
+ }
+
+ parcelable OutputFdAnnotation {
+ /**
+ * File descriptor number to be passed to the program. This is also the same file
+ * descriptor number used in the backend server.
+ */
+ int fd;
+ }
+
+ parcelable InputDirFdAnnotation {
+ /**
+ * File descriptor number to be passed to the program. This is also the same file
+ * descriptor number used in the backend server.
+ */
+ int fd;
+
+ /**
+ * A manifest file that includes serialized protobuf of
+ * android.security.fsverity.FSVerityDigests. The path must be accessible to the
+ * IAuthFsService.
+ */
+ String manifestPath;
+
+ /**
+ * Prefix path that should be stripped from the path in the manifest.
+ */
+ String prefix;
+ }
+
+ parcelable OutputDirFdAnnotation {
+ /**
+ * File descriptor number to be passed to the program. This is also the same file
+ * descriptor number used in the backend server.
+ */
+ int fd;
+ }
+
+ /** Port of the filesystem backend. */
+ int port;
+
+ /** Annotation for the remote input file descriptors. */
+ InputFdAnnotation[] inputFdAnnotations;
+
+ /** Annotation for the remote output file descriptors. */
+ OutputFdAnnotation[] outputFdAnnotations;
+
+ /** Annotation for the remote input directory descriptors. */
+ InputDirFdAnnotation[] inputDirFdAnnotations;
+
+ /** Annotation for the remote output directory descriptors. */
+ OutputDirFdAnnotation[] outputDirFdAnnotations;
+}
diff --git a/compos/aidl/com/android/compos/Metadata.aidl b/authfs/aidl/com/android/virt/fs/IAuthFs.aidl
similarity index 63%
copy from compos/aidl/com/android/compos/Metadata.aidl
copy to authfs/aidl/com/android/virt/fs/IAuthFs.aidl
index a15214d..f7b2c8d 100644
--- a/compos/aidl/com/android/compos/Metadata.aidl
+++ b/authfs/aidl/com/android/virt/fs/IAuthFs.aidl
@@ -14,13 +14,15 @@
* limitations under the License.
*/
-package com.android.compos;
+package com.android.virt.fs;
-import com.android.compos.InputFdAnnotation;
-import com.android.compos.OutputFdAnnotation;
+import com.android.virt.fs.AuthFsConfig;
-/** {@hide} */
-parcelable Metadata {
- InputFdAnnotation[] input_fd_annotations;
- OutputFdAnnotation[] output_fd_annotations;
+/** @hide */
+interface IAuthFs {
+ /** Returns a file descriptor given the name of a remote file descriptor. */
+ ParcelFileDescriptor openFile(int remoteFdName, boolean writable);
+
+ /** Returns the mount path of the current IAuthFs instance. */
+ String getMountPoint();
}
diff --git a/compos/aidl/com/android/compos/CompOsKeyData.aidl b/authfs/aidl/com/android/virt/fs/IAuthFsService.aidl
similarity index 61%
rename from compos/aidl/com/android/compos/CompOsKeyData.aidl
rename to authfs/aidl/com/android/virt/fs/IAuthFsService.aidl
index 381ec0d..b349db2 100644
--- a/compos/aidl/com/android/compos/CompOsKeyData.aidl
+++ b/authfs/aidl/com/android/virt/fs/IAuthFsService.aidl
@@ -14,17 +14,16 @@
* limitations under the License.
*/
-package com.android.compos;
+package com.android.virt.fs;
-/** {@hide} */
-parcelable CompOsKeyData {
- /**
- * Self-signed certificate (X.509 DER) containing the public key.
- */
- byte[] certificate;
+import com.android.virt.fs.AuthFsConfig;
+import com.android.virt.fs.IAuthFs;
+/** @hide */
+interface IAuthFsService {
/**
- * Opaque encrypted blob containing the private key and related metadata.
+ * Creates an AuthFS mount given the config. Returns the binder object that represent the AuthFS
+ * instance. The AuthFS setup is deleted once the lifetime of the returned binder object ends.
*/
- byte[] keyBlob;
+ IAuthFs mount(in AuthFsConfig config);
}
diff --git a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
index d3c0979..43dee52 100644
--- a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
+++ b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
@@ -16,42 +16,110 @@
package com.android.virt.fs;
-/** {@hide} */
+/**
+ * A service that works like a file server, where the files and directories are identified by
+ * "remote FD" that may be pre-exchanged or created on request.
+ *
+ * When a binder error is returned and it is a service specific error, the error code is an errno
+ * value which is an int.
+ *
+ * {@hide}
+ */
interface IVirtFdService {
- /** Error when the requesting FD is unknown. */
- const int ERROR_UNKNOWN_FD = 1;
-
- /**
- * Error when I/O fails. This can happen when actual I/O error happens to the backing file,
- * when the given offset or size are invalid, or any problems that can fail a read/write
- * request.
- */
- const int ERROR_IO = 2;
-
/** Maximum content size that the service allows the client to request. */
const int MAX_REQUESTING_DATA = 16384;
/**
- * Returns the content of the given file ID, from the offset, for the amount of requested size
+ * Returns the content of the given remote FD, from the offset, for the amount of requested size
* or until EOF.
*/
- byte[] readFile(int id, long offset, int size);
+ byte[] readFile(int fd, long offset, int size);
/**
- * Returns the content of fs-verity compatible Merkle tree of the given file ID, from the
+ * Returns the content of fs-verity compatible Merkle tree of the given remote FD, from the
* offset, for the amount of requested size or until EOF.
*/
- byte[] readFsverityMerkleTree(int id, long offset, int size);
+ byte[] readFsverityMerkleTree(int fd, long offset, int size);
- /** Returns the fs-verity signature of the given file ID. */
- byte[] readFsveritySignature(int id);
+ /** Returns the fs-verity signature of the given remote FD. */
+ byte[] readFsveritySignature(int fd);
/**
- * Writes the buffer to the given file ID from the file's offset. Returns the number of bytes
+ * Writes the buffer to the given remote FD from the file's offset. Returns the number of bytes
* written.
*/
- int writeFile(int id, in byte[] buf, long offset);
+ int writeFile(int fd, in byte[] buf, long offset);
- /** Resizes the file backed by the given file ID to the new size. */
- void resize(int id, long size);
+ /** Resizes the file backed by the given remote FD to the new size. */
+ void resize(int fd, long size);
+
+ /** Returns the file size. */
+ long getFileSize(int fd);
+
+ /**
+ * Opens a file given the remote directory FD.
+ *
+ * @param pathname The file path to open. Must be a related path.
+ * @return file A remote FD that represents the opened file.
+ */
+ int openFileInDirectory(int dirFd, String pathname);
+
+ /**
+ * Creates a file given the remote directory FD.
+ *
+ * @param basename The file name to create. Must not contain directory separator.
+ * @param mode File mode of the new file. See open(2).
+ * @return file A remote FD that represents the new created file.
+ */
+ int createFileInDirectory(int dirFd, String basename, int mode);
+
+ /**
+ * Creates a directory inside the given remote directory FD.
+ *
+ * @param basename The directory name to create. Must not contain directory separator.
+ * @param mode File mode of the new directory. See mkdir(2).
+ * @return file FD that represents the new created directory.
+ */
+ int createDirectoryInDirectory(int dirFd, String basename, int mode);
+
+ /**
+ * Deletes a file in the given directory.
+ *
+ * @param basename The file name to delete. Must not contain directory separator.
+ */
+ void deleteFile(int dirFd, String basename);
+
+ /**
+ * Deletes a sub-directory in the given directory.
+ *
+ * @param basename The directory name to delete. Must not contain directory separator.
+ */
+ void deleteDirectory(int dirFd, String basename);
+
+ /**
+ * Changes mode of the FD.
+ *
+ * @param fd The FD to change.
+ * @param mode New file mode to pass to chmod(2)/fchmod(2).
+ */
+ void chmod(int fd, int mode);
+
+ /** Filesystem stats that AuthFS is interested in.*/
+ parcelable FsStat {
+ /** Block size of the filesystem */
+ long blockSize;
+ /** Fragment size of the filesystem */
+ long fragmentSize;
+ /** Number of blocks in the filesystem */
+ long blockNumbers;
+ /** Number of free blocks */
+ long blockAvailable;
+ /** Number of free inodes */
+ long inodesAvailable;
+ /** Maximum filename length */
+ long maxFilename;
+ }
+
+ /** Returns relevant filesystem stats. */
+ FsStat statfs();
}
diff --git a/authfs/fd_server/Android.bp b/authfs/fd_server/Android.bp
index 8ddbf69..9499cd2 100644
--- a/authfs/fd_server/Android.bp
+++ b/authfs/fd_server/Android.bp
@@ -9,6 +9,8 @@
"authfs_aidl_interface-rust",
"libandroid_logger",
"libanyhow",
+ "libauthfs_fsverity_metadata",
+ "libbinder_common",
"libbinder_rpc_unstable_bindgen",
"libbinder_rs",
"libclap",
diff --git a/authfs/fd_server/src/aidl.rs b/authfs/fd_server/src/aidl.rs
new file mode 100644
index 0000000..3a3cdb2
--- /dev/null
+++ b/authfs/fd_server/src/aidl.rs
@@ -0,0 +1,435 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::Result;
+use log::error;
+use nix::{
+ errno::Errno, fcntl::openat, fcntl::OFlag, sys::stat::fchmod, sys::stat::mkdirat,
+ sys::stat::mode_t, sys::stat::Mode, sys::statvfs::statvfs, sys::statvfs::Statvfs,
+ unistd::unlinkat, unistd::UnlinkatFlags,
+};
+use std::cmp::min;
+use std::collections::{btree_map, BTreeMap};
+use std::convert::TryInto;
+use std::fs::File;
+use std::io;
+use std::os::unix::fs::FileExt;
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use std::path::{Component, Path, PathBuf, MAIN_SEPARATOR};
+use std::sync::{Arc, RwLock};
+
+use crate::common::OwnedFd;
+use crate::fsverity;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::{
+ BnVirtFdService, FsStat::FsStat, IVirtFdService, MAX_REQUESTING_DATA,
+};
+use authfs_aidl_interface::binder::{
+ BinderFeatures, ExceptionCode, Interface, Result as BinderResult, Status, StatusCode, Strong,
+};
+use authfs_fsverity_metadata::{
+ get_fsverity_metadata_path, parse_fsverity_metadata, FSVerityMetadata,
+};
+use binder_common::{new_binder_exception, new_binder_service_specific_error};
+
+/// Bitflags of forbidden file mode, e.g. setuid, setgid and sticky bit.
+const FORBIDDEN_MODES: Mode = Mode::from_bits_truncate(!0o777);
+
+/// Configuration of a file descriptor to be served/exposed/shared.
+pub enum FdConfig {
+ /// A read-only file to serve by this server. The file is supposed to be verifiable with the
+ /// associated fs-verity metadata.
+ Readonly {
+ /// The file to read from. fs-verity metadata can be retrieved from this file's FD.
+ file: File,
+
+ // Alternative metadata storing merkle tree and signature.
+ alt_metadata: Option<Box<FSVerityMetadata>>,
+ },
+
+ /// A readable/writable file to serve by this server. This backing file should just be a
+ /// regular file and does not have any specific property.
+ ReadWrite(File),
+
+ /// A read-only directory to serve by this server.
+ InputDir(OwnedFd),
+
+ /// A writable directory to serve by this server.
+ OutputDir(OwnedFd),
+}
+
+pub struct FdService {
+ /// A pool of opened files and directories, which can be looked up by the FD number.
+ fd_pool: Arc<RwLock<BTreeMap<i32, FdConfig>>>,
+}
+
+impl FdService {
+ pub fn new_binder(fd_pool: BTreeMap<i32, FdConfig>) -> Strong<dyn IVirtFdService> {
+ BnVirtFdService::new_binder(
+ FdService { fd_pool: Arc::new(RwLock::new(fd_pool)) },
+ BinderFeatures::default(),
+ )
+ }
+
+ /// Handles the requesting file `id` with `handle_fn` if it is in the FD pool. This function
+ /// returns whatever `handle_fn` returns.
+ fn handle_fd<F, R>(&self, id: i32, handle_fn: F) -> BinderResult<R>
+ where
+ F: FnOnce(&FdConfig) -> BinderResult<R>,
+ {
+ let fd_pool = self.fd_pool.read().unwrap();
+ let fd_config = fd_pool.get(&id).ok_or_else(|| new_errno_error(Errno::EBADF))?;
+ handle_fn(fd_config)
+ }
+
+ /// Inserts a new FD and corresponding `FdConfig` created by `create_fn` to the FD pool, then
+ /// returns the new FD number.
+ fn insert_new_fd<F>(&self, fd: i32, create_fn: F) -> BinderResult<i32>
+ where
+ F: FnOnce(&mut FdConfig) -> BinderResult<(i32, FdConfig)>,
+ {
+ let mut fd_pool = self.fd_pool.write().unwrap();
+ let fd_config = fd_pool.get_mut(&fd).ok_or_else(|| new_errno_error(Errno::EBADF))?;
+ let (new_fd, new_fd_config) = create_fn(fd_config)?;
+ if let btree_map::Entry::Vacant(entry) = fd_pool.entry(new_fd) {
+ entry.insert(new_fd_config);
+ Ok(new_fd)
+ } else {
+ Err(new_binder_exception(
+ ExceptionCode::ILLEGAL_STATE,
+ format!("The newly created FD {} is already in the pool unexpectedly", new_fd),
+ ))
+ }
+ }
+}
+
+impl Interface for FdService {}
+
+impl IVirtFdService for FdService {
+ fn readFile(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
+ let size: usize = validate_and_cast_size(size)?;
+ let offset: u64 = validate_and_cast_offset(offset)?;
+
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { file, .. } | FdConfig::ReadWrite(file) => {
+ read_into_buf(file, size, offset).map_err(|e| {
+ error!("readFile: read error: {}", e);
+ new_errno_error(Errno::EIO)
+ })
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn readFsverityMerkleTree(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
+ let size: usize = validate_and_cast_size(size)?;
+ let offset: u64 = validate_and_cast_offset(offset)?;
+
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { file, alt_metadata, .. } => {
+ let mut buf = vec![0; size];
+
+ let s = if let Some(metadata) = &alt_metadata {
+ metadata.read_merkle_tree(offset, &mut buf).map_err(|e| {
+ error!("readFsverityMerkleTree: read error: {}", e);
+ new_errno_error(Errno::EIO)
+ })?
+ } else {
+ fsverity::read_merkle_tree(file.as_raw_fd(), offset, &mut buf).map_err(|e| {
+ error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
+ new_errno_error(Errno::EIO)
+ })?
+ };
+ debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
+ buf.truncate(s);
+ Ok(buf)
+ }
+ FdConfig::ReadWrite(_file) => {
+ // For a writable file, Merkle tree is not expected to be served since Auth FS
+ // doesn't trust it anyway. Auth FS may keep the Merkle tree privately for its own
+ // use.
+ Err(new_errno_error(Errno::ENOSYS))
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn readFsveritySignature(&self, id: i32) -> BinderResult<Vec<u8>> {
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { file, alt_metadata, .. } => {
+ if let Some(metadata) = &alt_metadata {
+ if let Some(signature) = &metadata.signature {
+ Ok(signature.clone())
+ } else {
+ Err(new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ "metadata doesn't contain a signature",
+ ))
+ }
+ } else {
+ let mut buf = vec![0; MAX_REQUESTING_DATA as usize];
+ let s = fsverity::read_signature(file.as_raw_fd(), &mut buf).map_err(|e| {
+ error!("readFsveritySignature: failed to retrieve signature: {}", e);
+ new_errno_error(Errno::EIO)
+ })?;
+ debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
+ buf.truncate(s);
+ Ok(buf)
+ }
+ }
+ FdConfig::ReadWrite(_file) => {
+ // There is no signature for a writable file.
+ Err(new_errno_error(Errno::ENOSYS))
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn writeFile(&self, id: i32, buf: &[u8], offset: i64) -> BinderResult<i32> {
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
+ FdConfig::ReadWrite(file) => {
+ let offset: u64 = offset.try_into().map_err(|_| new_errno_error(Errno::EINVAL))?;
+ // Check buffer size just to make `as i32` safe below.
+ if buf.len() > i32::MAX as usize {
+ return Err(new_errno_error(Errno::EOVERFLOW));
+ }
+ Ok(file.write_at(buf, offset).map_err(|e| {
+ error!("writeFile: write error: {}", e);
+ new_errno_error(Errno::EIO)
+ })? as i32)
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn resize(&self, id: i32, size: i64) -> BinderResult<()> {
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
+ FdConfig::ReadWrite(file) => {
+ if size < 0 {
+ return Err(new_errno_error(Errno::EINVAL));
+ }
+ file.set_len(size as u64).map_err(|e| {
+ error!("resize: set_len error: {}", e);
+ new_errno_error(Errno::EIO)
+ })
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn getFileSize(&self, id: i32) -> BinderResult<i64> {
+ self.handle_fd(id, |config| match config {
+ FdConfig::Readonly { file, .. } => {
+ let size = file
+ .metadata()
+ .map_err(|e| {
+ error!("getFileSize error: {}", e);
+ new_errno_error(Errno::EIO)
+ })?
+ .len();
+ Ok(size.try_into().map_err(|e| {
+ error!("getFileSize: File too large: {}", e);
+ new_errno_error(Errno::EFBIG)
+ })?)
+ }
+ FdConfig::ReadWrite(_file) => {
+ // Content and metadata of a writable file need to be tracked by authfs, since
+ // fd_server isn't considered trusted. So there is no point to support getFileSize
+ // for a writable file.
+ Err(new_errno_error(Errno::ENOSYS))
+ }
+ FdConfig::InputDir(_) | FdConfig::OutputDir(_) => Err(new_errno_error(Errno::EISDIR)),
+ })
+ }
+
+ fn openFileInDirectory(&self, dir_fd: i32, file_path: &str) -> BinderResult<i32> {
+ let path_buf = PathBuf::from(file_path);
+ // Checks if the path is a simple, relative path.
+ if path_buf.components().any(|c| !matches!(c, Component::Normal(_))) {
+ return Err(new_errno_error(Errno::EINVAL));
+ }
+
+ self.insert_new_fd(dir_fd, |config| match config {
+ FdConfig::InputDir(dir) => {
+ let file = open_readonly_at(dir.as_raw_fd(), &path_buf).map_err(new_errno_error)?;
+
+ let metadata_path_buf = get_fsverity_metadata_path(&path_buf);
+ let metadata = open_readonly_at(dir.as_raw_fd(), &metadata_path_buf)
+ .ok()
+ .and_then(|f| parse_fsverity_metadata(f).ok());
+
+ Ok((file.as_raw_fd(), FdConfig::Readonly { file, alt_metadata: metadata }))
+ }
+ FdConfig::OutputDir(_) => {
+ Err(new_errno_error(Errno::ENOSYS)) // TODO: Implement when needed
+ }
+ _ => Err(new_errno_error(Errno::ENOTDIR)),
+ })
+ }
+
+ fn createFileInDirectory(&self, dir_fd: i32, basename: &str, mode: i32) -> BinderResult<i32> {
+ validate_basename(basename)?;
+
+ self.insert_new_fd(dir_fd, |config| match config {
+ FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
+ FdConfig::OutputDir(dir) => {
+ let mode = validate_file_mode(mode)?;
+ let new_fd = openat(
+ dir.as_raw_fd(),
+ basename,
+ // This function is supposed to be only called when FUSE/authfs thinks the file
+ // does not exist. However, if the file does exist from the view of fd_server
+ // (where the execution context is considered untrusted), we prefer to honor
+ // authfs and still allow the create to succeed. Therefore, always use O_TRUNC.
+ OFlag::O_CREAT | OFlag::O_RDWR | OFlag::O_TRUNC,
+ mode,
+ )
+ .map_err(new_errno_error)?;
+ // SAFETY: new_fd is just created and not an error.
+ let new_file = unsafe { File::from_raw_fd(new_fd) };
+ Ok((new_fd, FdConfig::ReadWrite(new_file)))
+ }
+ _ => Err(new_errno_error(Errno::ENOTDIR)),
+ })
+ }
+
+ fn createDirectoryInDirectory(
+ &self,
+ dir_fd: i32,
+ basename: &str,
+ mode: i32,
+ ) -> BinderResult<i32> {
+ validate_basename(basename)?;
+
+ self.insert_new_fd(dir_fd, |config| match config {
+ FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
+ FdConfig::OutputDir(_) => {
+ let mode = validate_file_mode(mode)?;
+ mkdirat(dir_fd, basename, mode).map_err(new_errno_error)?;
+ let new_dir_fd =
+ openat(dir_fd, basename, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())
+ .map_err(new_errno_error)?;
+ // SAFETY: new_dir_fd is just created and not an error.
+ let fd_owner = unsafe { OwnedFd::from_raw_fd(new_dir_fd) };
+ Ok((new_dir_fd, FdConfig::OutputDir(fd_owner)))
+ }
+ _ => Err(new_errno_error(Errno::ENOTDIR)),
+ })
+ }
+
+ fn deleteFile(&self, dir_fd: i32, basename: &str) -> BinderResult<()> {
+ validate_basename(basename)?;
+
+ self.handle_fd(dir_fd, |config| match config {
+ FdConfig::OutputDir(_) => {
+ unlinkat(Some(dir_fd), basename, UnlinkatFlags::NoRemoveDir)
+ .map_err(new_errno_error)?;
+ Ok(())
+ }
+ FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
+ _ => Err(new_errno_error(Errno::ENOTDIR)),
+ })
+ }
+
+ fn deleteDirectory(&self, dir_fd: i32, basename: &str) -> BinderResult<()> {
+ validate_basename(basename)?;
+
+ self.handle_fd(dir_fd, |config| match config {
+ FdConfig::OutputDir(_) => {
+ unlinkat(Some(dir_fd), basename, UnlinkatFlags::RemoveDir)
+ .map_err(new_errno_error)?;
+ Ok(())
+ }
+ FdConfig::InputDir(_) => Err(new_errno_error(Errno::EACCES)),
+ _ => Err(new_errno_error(Errno::ENOTDIR)),
+ })
+ }
+
+ fn chmod(&self, fd: i32, mode: i32) -> BinderResult<()> {
+ self.handle_fd(fd, |config| match config {
+ FdConfig::ReadWrite(_) | FdConfig::OutputDir(_) => {
+ let mode = validate_file_mode(mode)?;
+ fchmod(fd, mode).map_err(new_errno_error)
+ }
+ _ => Err(new_errno_error(Errno::EACCES)),
+ })
+ }
+
+ fn statfs(&self) -> BinderResult<FsStat> {
+ let st = statvfs("/data").map_err(new_errno_error)?;
+ try_into_fs_stat(st).map_err(|_e| new_errno_error(Errno::EINVAL))
+ }
+}
+
+fn try_into_fs_stat(st: Statvfs) -> Result<FsStat, std::num::TryFromIntError> {
+ Ok(FsStat {
+ blockSize: st.block_size().try_into()?,
+ fragmentSize: st.fragment_size().try_into()?,
+ blockNumbers: st.blocks().try_into()?,
+ blockAvailable: st.blocks_available().try_into()?,
+ inodesAvailable: st.files_available().try_into()?,
+ maxFilename: st.name_max().try_into()?,
+ })
+}
+
+fn read_into_buf(file: &File, max_size: usize, offset: u64) -> io::Result<Vec<u8>> {
+ let remaining = file.metadata()?.len().saturating_sub(offset);
+ let buf_size = min(remaining, max_size as u64) as usize;
+ let mut buf = vec![0; buf_size];
+ file.read_exact_at(&mut buf, offset)?;
+ Ok(buf)
+}
+
+fn new_errno_error(errno: Errno) -> Status {
+ new_binder_service_specific_error(errno as i32, errno.desc())
+}
+
+fn open_readonly_at(dir_fd: RawFd, path: &Path) -> nix::Result<File> {
+ let new_fd = openat(dir_fd, path, OFlag::O_RDONLY, Mode::empty())?;
+ // SAFETY: new_fd is just created successfully and not owned.
+ let new_file = unsafe { File::from_raw_fd(new_fd) };
+ Ok(new_file)
+}
+
+fn validate_and_cast_offset(offset: i64) -> Result<u64, Status> {
+ offset.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
+}
+
+fn validate_and_cast_size(size: i32) -> Result<usize, Status> {
+ if size > MAX_REQUESTING_DATA {
+ Err(new_errno_error(Errno::EFBIG))
+ } else {
+ size.try_into().map_err(|_| new_errno_error(Errno::EINVAL))
+ }
+}
+
+fn validate_basename(name: &str) -> BinderResult<()> {
+ if name.contains(MAIN_SEPARATOR) {
+ Err(new_errno_error(Errno::EINVAL))
+ } else {
+ Ok(())
+ }
+}
+
+fn validate_file_mode(mode: i32) -> BinderResult<Mode> {
+ let mode = Mode::from_bits(mode as mode_t).ok_or_else(|| new_errno_error(Errno::EINVAL))?;
+ if mode.intersects(FORBIDDEN_MODES) {
+ Err(new_errno_error(Errno::EPERM))
+ } else {
+ Ok(mode)
+ }
+}
diff --git a/authfs/fd_server/src/common.rs b/authfs/fd_server/src/common.rs
new file mode 100644
index 0000000..f836bac
--- /dev/null
+++ b/authfs/fd_server/src/common.rs
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::fs::File;
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+
+// TODO: Remove if/when std::os::unix::io::OwnedFd is standardized.
+pub struct OwnedFd {
+ owner: File,
+}
+
+impl FromRawFd for OwnedFd {
+ unsafe fn from_raw_fd(fd: RawFd) -> Self {
+ OwnedFd { owner: File::from_raw_fd(fd) }
+ }
+}
+
+impl AsRawFd for OwnedFd {
+ fn as_raw_fd(&self) -> RawFd {
+ self.owner.as_raw_fd()
+ }
+}
diff --git a/authfs/fd_server/src/fsverity.rs b/authfs/fd_server/src/fsverity.rs
index e89bbd0..576f9dd 100644
--- a/authfs/fd_server/src/fsverity.rs
+++ b/authfs/fd_server/src/fsverity.rs
@@ -47,16 +47,7 @@
buf_ptr: buf.as_mut_ptr() as u64,
__reserved: 0,
};
- Ok(unsafe { read_verity_metadata(fd, &mut arg) }.map_err(|e| {
- if let nix::Error::Sys(errno) = e {
- io::Error::from_raw_os_error(errno as i32)
- } else {
- // Document of nix::sys::ioctl indicates the macro-generated function always returns an
- // nix::errno::Errno, which can be converted nix::Error::Sys above. As the result, this
- // branch is unreachable.
- unreachable!();
- }
- })? as usize)
+ Ok(unsafe { read_verity_metadata(fd, &mut arg) }? as usize)
}
/// Read the raw Merkle tree from the fd, if it exists. The API semantics is similar to a regular
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index 5137a2e..a1d09fc 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -14,270 +14,90 @@
* limitations under the License.
*/
-//! This program is a constrained file/FD server to serve file requests through a remote[1] binder
+//! This program is a constrained file/FD server to serve file requests through a remote binder
//! service. The file server is not designed to serve arbitrary file paths in the filesystem. On
//! the contrary, the server should be configured to start with already opened FDs, and serve the
//! client's request against the FDs
//!
//! For example, `exec 9</path/to/file fd_server --ro-fds 9` starts the binder service. A client
//! client can then request the content of file 9 by offset and size.
-//!
-//! [1] Since the remote binder is not ready, this currently implementation uses local binder
-//! first.
+mod aidl;
+mod common;
mod fsverity;
-use std::cmp::min;
+use anyhow::{bail, Result};
+use binder_common::rpc_server::run_rpc_server;
+use log::debug;
+use nix::sys::stat::{umask, Mode};
use std::collections::BTreeMap;
-use std::convert::TryInto;
-use std::ffi::CString;
use std::fs::File;
-use std::io;
-use std::os::unix::fs::FileExt;
-use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::os::unix::io::FromRawFd;
-use anyhow::{bail, Context, Result};
-use binder::unstable_api::AsNative;
-use log::{debug, error};
+use aidl::{FdConfig, FdService};
+use authfs_fsverity_metadata::parse_fsverity_metadata;
-use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::{
- BnVirtFdService, IVirtFdService, ERROR_IO, ERROR_UNKNOWN_FD, MAX_REQUESTING_DATA,
-};
-use authfs_aidl_interface::binder::{
- add_service, BinderFeatures, ExceptionCode, Interface, ProcessState, Result as BinderResult,
- Status, StatusCode, Strong,
-};
-
-const SERVICE_NAME: &str = "authfs_fd_server";
const RPC_SERVICE_PORT: u32 = 3264; // TODO: support dynamic port for multiple fd_server instances
-fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
- Status::new_exception(exception, CString::new(message.as_ref()).as_deref().ok())
-}
-
-fn validate_and_cast_offset(offset: i64) -> Result<u64, Status> {
- offset.try_into().map_err(|_| {
- new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, format!("Invalid offset: {}", offset))
- })
-}
-
-fn validate_and_cast_size(size: i32) -> Result<usize, Status> {
- if size > MAX_REQUESTING_DATA {
- Err(new_binder_exception(
- ExceptionCode::ILLEGAL_ARGUMENT,
- format!("Unexpectedly large size: {}", size),
- ))
- } else {
- size.try_into().map_err(|_| {
- new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, format!("Invalid size: {}", size))
- })
- }
-}
-
-/// Configuration of a file descriptor to be served/exposed/shared.
-enum FdConfig {
- /// A read-only file to serve by this server. The file is supposed to be verifiable with the
- /// associated fs-verity metadata.
- Readonly {
- /// The file to read from. fs-verity metadata can be retrieved from this file's FD.
- file: File,
-
- /// Alternative Merkle tree stored in another file.
- alt_merkle_tree: Option<File>,
-
- /// Alternative signature stored in another file.
- alt_signature: Option<File>,
- },
-
- /// A readable/writable file to serve by this server. This backing file should just be a
- /// regular file and does not have any specific property.
- ReadWrite(File),
-}
-
-struct FdService {
- /// A pool of opened files, may be readonly or read-writable.
- fd_pool: BTreeMap<i32, FdConfig>,
-}
-
-impl FdService {
- pub fn new_binder(fd_pool: BTreeMap<i32, FdConfig>) -> Strong<dyn IVirtFdService> {
- BnVirtFdService::new_binder(FdService { fd_pool }, BinderFeatures::default())
- }
-
- fn get_file_config(&self, id: i32) -> BinderResult<&FdConfig> {
- self.fd_pool.get(&id).ok_or_else(|| Status::from(ERROR_UNKNOWN_FD))
- }
-}
-
-impl Interface for FdService {}
-
-impl IVirtFdService for FdService {
- fn readFile(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
- let size: usize = validate_and_cast_size(size)?;
- let offset: u64 = validate_and_cast_offset(offset)?;
-
- match self.get_file_config(id)? {
- FdConfig::Readonly { file, .. } | FdConfig::ReadWrite(file) => {
- read_into_buf(&file, size, offset).map_err(|e| {
- error!("readFile: read error: {}", e);
- Status::from(ERROR_IO)
- })
- }
- }
- }
-
- fn readFsverityMerkleTree(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
- let size: usize = validate_and_cast_size(size)?;
- let offset: u64 = validate_and_cast_offset(offset)?;
-
- match &self.get_file_config(id)? {
- FdConfig::Readonly { file, alt_merkle_tree, .. } => {
- if let Some(tree_file) = &alt_merkle_tree {
- read_into_buf(&tree_file, size, offset).map_err(|e| {
- error!("readFsverityMerkleTree: read error: {}", e);
- Status::from(ERROR_IO)
- })
- } else {
- let mut buf = vec![0; size];
- let s = fsverity::read_merkle_tree(file.as_raw_fd(), offset, &mut buf)
- .map_err(|e| {
- error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
- Status::from(e.raw_os_error().unwrap_or(ERROR_IO))
- })?;
- debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
- buf.truncate(s);
- Ok(buf)
- }
- }
- FdConfig::ReadWrite(_file) => {
- // For a writable file, Merkle tree is not expected to be served since Auth FS
- // doesn't trust it anyway. Auth FS may keep the Merkle tree privately for its own
- // use.
- Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
- }
- }
- }
-
- fn readFsveritySignature(&self, id: i32) -> BinderResult<Vec<u8>> {
- match &self.get_file_config(id)? {
- FdConfig::Readonly { file, alt_signature, .. } => {
- if let Some(sig_file) = &alt_signature {
- // Supposedly big enough buffer size to store signature.
- let size = MAX_REQUESTING_DATA as usize;
- let offset = 0;
- read_into_buf(&sig_file, size, offset).map_err(|e| {
- error!("readFsveritySignature: read error: {}", e);
- Status::from(ERROR_IO)
- })
- } else {
- let mut buf = vec![0; MAX_REQUESTING_DATA as usize];
- let s = fsverity::read_signature(file.as_raw_fd(), &mut buf).map_err(|e| {
- error!("readFsverityMerkleTree: failed to retrieve merkle tree: {}", e);
- Status::from(e.raw_os_error().unwrap_or(ERROR_IO))
- })?;
- debug_assert!(s <= buf.len(), "Shouldn't return more bytes than asked");
- buf.truncate(s);
- Ok(buf)
- }
- }
- FdConfig::ReadWrite(_file) => {
- // There is no signature for a writable file.
- Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
- }
- }
- }
-
- fn writeFile(&self, id: i32, buf: &[u8], offset: i64) -> BinderResult<i32> {
- match &self.get_file_config(id)? {
- FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
- FdConfig::ReadWrite(file) => {
- let offset: u64 = offset.try_into().map_err(|_| {
- new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, "Invalid offset")
- })?;
- // Check buffer size just to make `as i32` safe below.
- if buf.len() > i32::MAX as usize {
- return Err(new_binder_exception(
- ExceptionCode::ILLEGAL_ARGUMENT,
- "Buffer size is too big",
- ));
- }
- Ok(file.write_at(buf, offset).map_err(|e| {
- error!("writeFile: write error: {}", e);
- Status::from(ERROR_IO)
- })? as i32)
- }
- }
- }
-
- fn resize(&self, id: i32, size: i64) -> BinderResult<()> {
- match &self.get_file_config(id)? {
- FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
- FdConfig::ReadWrite(file) => {
- if size < 0 {
- return Err(new_binder_exception(
- ExceptionCode::ILLEGAL_ARGUMENT,
- "Invalid size to resize to",
- ));
- }
- file.set_len(size as u64).map_err(|e| {
- error!("resize: set_len error: {}", e);
- Status::from(ERROR_IO)
- })
- }
- }
- }
-}
-
-fn read_into_buf(file: &File, max_size: usize, offset: u64) -> io::Result<Vec<u8>> {
- let remaining = file.metadata()?.len().saturating_sub(offset);
- let buf_size = min(remaining, max_size as u64) as usize;
- let mut buf = vec![0; buf_size];
- file.read_exact_at(&mut buf, offset)?;
- Ok(buf)
-}
-
fn is_fd_valid(fd: i32) -> bool {
// SAFETY: a query-only syscall
let retval = unsafe { libc::fcntl(fd, libc::F_GETFD) };
retval >= 0
}
-fn fd_to_file(fd: i32) -> Result<File> {
+fn fd_to_owned<T: FromRawFd>(fd: i32) -> Result<T> {
if !is_fd_valid(fd) {
bail!("Bad FD: {}", fd);
}
// SAFETY: The caller is supposed to provide valid FDs to this process.
- Ok(unsafe { File::from_raw_fd(fd) })
+ Ok(unsafe { T::from_raw_fd(fd) })
}
fn parse_arg_ro_fds(arg: &str) -> Result<(i32, FdConfig)> {
let result: Result<Vec<i32>, _> = arg.split(':').map(|x| x.parse::<i32>()).collect();
let fds = result?;
- if fds.len() > 3 {
+ if fds.len() > 2 {
bail!("Too many options: {}", arg);
}
Ok((
fds[0],
FdConfig::Readonly {
- file: fd_to_file(fds[0])?,
- // Alternative Merkle tree, if provided
- alt_merkle_tree: fds.get(1).map(|fd| fd_to_file(*fd)).transpose()?,
- // Alternative signature, if provided
- alt_signature: fds.get(2).map(|fd| fd_to_file(*fd)).transpose()?,
+ file: fd_to_owned(fds[0])?,
+ // Alternative metadata source, if provided
+ alt_metadata: fds
+ .get(1)
+ .map(|fd| fd_to_owned(*fd))
+ .transpose()?
+ .and_then(|f| parse_fsverity_metadata(f).ok()),
},
))
}
fn parse_arg_rw_fds(arg: &str) -> Result<(i32, FdConfig)> {
let fd = arg.parse::<i32>()?;
- let file = fd_to_file(fd)?;
+ let file = fd_to_owned::<File>(fd)?;
if file.metadata()?.len() > 0 {
bail!("File is expected to be empty");
}
Ok((fd, FdConfig::ReadWrite(file)))
}
-fn parse_args() -> Result<(bool, BTreeMap<i32, FdConfig>)> {
+fn parse_arg_ro_dirs(arg: &str) -> Result<(i32, FdConfig)> {
+ let fd = arg.parse::<i32>()?;
+ Ok((fd, FdConfig::InputDir(fd_to_owned(fd)?)))
+}
+
+fn parse_arg_rw_dirs(arg: &str) -> Result<(i32, FdConfig)> {
+ let fd = arg.parse::<i32>()?;
+ Ok((fd, FdConfig::OutputDir(fd_to_owned(fd)?)))
+}
+
+struct Args {
+ fd_pool: BTreeMap<i32, FdConfig>,
+ ready_fd: Option<File>,
+}
+
+fn parse_args() -> Result<Args> {
#[rustfmt::skip]
let matches = clap::App::new("fd_server")
.arg(clap::Arg::with_name("ro-fds")
@@ -288,8 +108,17 @@
.long("rw-fds")
.multiple(true)
.number_of_values(1))
- .arg(clap::Arg::with_name("rpc-binder")
- .long("rpc-binder"))
+ .arg(clap::Arg::with_name("ro-dirs")
+ .long("ro-dirs")
+ .multiple(true)
+ .number_of_values(1))
+ .arg(clap::Arg::with_name("rw-dirs")
+ .long("rw-dirs")
+ .multiple(true)
+ .number_of_values(1))
+ .arg(clap::Arg::with_name("ready-fd")
+ .long("ready-fd")
+ .takes_value(true))
.get_matches();
let mut fd_pool = BTreeMap::new();
@@ -305,9 +134,25 @@
fd_pool.insert(fd, config);
}
}
-
- let rpc_binder = matches.is_present("rpc-binder");
- Ok((rpc_binder, fd_pool))
+ if let Some(args) = matches.values_of("ro-dirs") {
+ for arg in args {
+ let (fd, config) = parse_arg_ro_dirs(arg)?;
+ fd_pool.insert(fd, config);
+ }
+ }
+ if let Some(args) = matches.values_of("rw-dirs") {
+ for arg in args {
+ let (fd, config) = parse_arg_rw_dirs(arg)?;
+ fd_pool.insert(fd, config);
+ }
+ }
+ let ready_fd = if let Some(arg) = matches.value_of("ready-fd") {
+ let fd = arg.parse::<i32>()?;
+ Some(fd_to_owned(fd)?)
+ } else {
+ None
+ };
+ Ok(Args { fd_pool, ready_fd })
}
fn main() -> Result<()> {
@@ -315,32 +160,27 @@
android_logger::Config::default().with_tag("fd_server").with_min_level(log::Level::Debug),
);
- let (rpc_binder, fd_pool) = parse_args()?;
+ let args = parse_args()?;
- if rpc_binder {
- let mut service = FdService::new_binder(fd_pool).as_binder();
- debug!("fd_server is starting as a rpc service.");
- // SAFETY: Service ownership is transferring to the server and won't be valid afterward.
- // Plus the binder objects are threadsafe.
- let retval = unsafe {
- binder_rpc_unstable_bindgen::RunRpcServer(
- service.as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder,
- RPC_SERVICE_PORT,
- )
- };
- if retval {
- debug!("RPC server has shut down gracefully");
- Ok(())
- } else {
- bail!("Premature termination of RPC server");
- }
+ // Allow open/create/mkdir from authfs to create with the expected mode. It's possible to still
+ // use a custom mask on creation, then report the actual file mode back to authfs. But there
+ // is no demand now.
+ let old_umask = umask(Mode::empty());
+ debug!("Setting umask to 0 (old: {:03o})", old_umask.bits());
+
+ let service = FdService::new_binder(args.fd_pool).as_binder();
+ debug!("fd_server is starting as a rpc service.");
+ let mut ready_fd = args.ready_fd;
+ let retval = run_rpc_server(service, RPC_SERVICE_PORT, || {
+ debug!("fd_server is ready");
+ // Close the ready-fd if we were given one to signal our readiness.
+ drop(ready_fd.take());
+ });
+
+ if retval {
+ debug!("RPC server has shut down gracefully");
+ Ok(())
} else {
- ProcessState::start_thread_pool();
- let service = FdService::new_binder(fd_pool).as_binder();
- add_service(SERVICE_NAME, service)
- .with_context(|| format!("Failed to register service {}", SERVICE_NAME))?;
- debug!("fd_server is running as a local service.");
- ProcessState::join_thread_pool();
- bail!("Unexpected exit after join_thread_pool")
+ bail!("Premature termination of RPC server");
}
}
diff --git a/authfs/service/Android.bp b/authfs/service/Android.bp
new file mode 100644
index 0000000..6c32c67
--- /dev/null
+++ b/authfs/service/Android.bp
@@ -0,0 +1,24 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_binary {
+ name: "authfs_service",
+ srcs: [
+ "src/main.rs",
+ ],
+ edition: "2018",
+ rustlibs: [
+ "authfs_aidl_interface-rust",
+ "libandroid_logger",
+ "libanyhow",
+ "libbinder_common",
+ "libbinder_rs",
+ "liblibc",
+ "liblog_rust",
+ "libnix",
+ "libshared_child",
+ ],
+ prefer_rlib: true,
+ init_rc: ["authfs_service.rc"],
+}
diff --git a/authfs/service/authfs_service.rc b/authfs/service/authfs_service.rc
new file mode 100644
index 0000000..9ad0ce6
--- /dev/null
+++ b/authfs/service/authfs_service.rc
@@ -0,0 +1,2 @@
+service authfs_service /system/bin/authfs_service
+ disabled
diff --git a/authfs/service/src/authfs.rs b/authfs/service/src/authfs.rs
new file mode 100644
index 0000000..c941360
--- /dev/null
+++ b/authfs/service/src/authfs.rs
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::{bail, Context, Result};
+use log::{debug, error, warn};
+use nix::mount::{umount2, MntFlags};
+use nix::sys::statfs::{statfs, FsType};
+use shared_child::SharedChild;
+use std::ffi::{OsStr, OsString};
+use std::fs::{remove_dir, OpenOptions};
+use std::path::PathBuf;
+use std::process::Command;
+use std::thread::sleep;
+use std::time::{Duration, Instant};
+
+use authfs_aidl_interface::aidl::com::android::virt::fs::AuthFsConfig::{
+ AuthFsConfig, InputDirFdAnnotation::InputDirFdAnnotation, InputFdAnnotation::InputFdAnnotation,
+ OutputDirFdAnnotation::OutputDirFdAnnotation, OutputFdAnnotation::OutputFdAnnotation,
+};
+use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFs::{BnAuthFs, IAuthFs};
+use authfs_aidl_interface::binder::{
+ self, BinderFeatures, ExceptionCode, Interface, ParcelFileDescriptor, Strong,
+};
+use binder_common::new_binder_exception;
+
+const AUTHFS_BIN: &str = "/system/bin/authfs";
+const AUTHFS_SETUP_POLL_INTERVAL_MS: Duration = Duration::from_millis(50);
+const AUTHFS_SETUP_TIMEOUT_SEC: Duration = Duration::from_secs(10);
+const FUSE_SUPER_MAGIC: FsType = FsType(0x65735546);
+
+/// An `AuthFs` instance is supposed to be backed by an `authfs` process. When the lifetime of the
+/// instance is over, it should leave no trace on the system: the process should be terminated, the
+/// FUSE should be unmounted, and the mount directory should be deleted.
+pub struct AuthFs {
+ mountpoint: OsString,
+ process: SharedChild,
+}
+
+impl Interface for AuthFs {}
+
+impl IAuthFs for AuthFs {
+ fn openFile(
+ &self,
+ remote_fd_name: i32,
+ writable: bool,
+ ) -> binder::Result<ParcelFileDescriptor> {
+ let mut path = PathBuf::from(&self.mountpoint);
+ path.push(remote_fd_name.to_string());
+ let file = OpenOptions::new().read(true).write(writable).open(&path).map_err(|e| {
+ new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("failed to open {:?} on authfs: {}", &path, e),
+ )
+ })?;
+ Ok(ParcelFileDescriptor::new(file))
+ }
+
+ fn getMountPoint(&self) -> binder::Result<String> {
+ if let Some(s) = self.mountpoint.to_str() {
+ Ok(s.to_string())
+ } else {
+ Err(new_binder_exception(ExceptionCode::SERVICE_SPECIFIC, "Bad string encoding"))
+ }
+ }
+}
+
+impl AuthFs {
+ /// Mount an authfs at `mountpoint` with specified FD annotations.
+ pub fn mount_and_wait(
+ mountpoint: OsString,
+ config: &AuthFsConfig,
+ debuggable: bool,
+ ) -> Result<Strong<dyn IAuthFs>> {
+ let child = run_authfs(
+ &mountpoint,
+ &config.inputFdAnnotations,
+ &config.outputFdAnnotations,
+ &config.inputDirFdAnnotations,
+ &config.outputDirFdAnnotations,
+ debuggable,
+ )?;
+ wait_until_authfs_ready(&child, &mountpoint).map_err(|e| {
+ match child.wait() {
+ Ok(status) => debug!("Wait for authfs: {}", status),
+ Err(e) => warn!("Failed to wait for child: {}", e),
+ }
+ e
+ })?;
+
+ let authfs = AuthFs { mountpoint, process: child };
+ Ok(BnAuthFs::new_binder(authfs, BinderFeatures::default()))
+ }
+}
+
+impl Drop for AuthFs {
+ /// On drop, try to erase all the traces for this authfs mount.
+ fn drop(&mut self) {
+ debug!("Dropping AuthFs instance at mountpoint {:?}", &self.mountpoint);
+ if let Err(e) = self.process.kill() {
+ error!("Failed to kill authfs: {}", e);
+ }
+ match self.process.wait() {
+ Ok(status) => debug!("authfs exit code: {}", status),
+ Err(e) => warn!("Failed to wait for authfs: {}", e),
+ }
+ // The client may still hold the file descriptors that refer to this filesystem. Use
+ // MNT_DETACH to detach the mountpoint, and automatically unmount when there is no more
+ // reference.
+ if let Err(e) = umount2(self.mountpoint.as_os_str(), MntFlags::MNT_DETACH) {
+ error!("Failed to umount authfs at {:?}: {}", &self.mountpoint, e)
+ }
+
+ if let Err(e) = remove_dir(&self.mountpoint) {
+ error!("Failed to clean up mount directory {:?}: {}", &self.mountpoint, e)
+ }
+ }
+}
+
+fn run_authfs(
+ mountpoint: &OsStr,
+ in_file_fds: &[InputFdAnnotation],
+ out_file_fds: &[OutputFdAnnotation],
+ in_dir_fds: &[InputDirFdAnnotation],
+ out_dir_fds: &[OutputDirFdAnnotation],
+ debuggable: bool,
+) -> Result<SharedChild> {
+ let mut args = vec![mountpoint.to_owned(), OsString::from("--cid=2")];
+ args.push(OsString::from("-o"));
+ args.push(OsString::from("fscontext=u:object_r:authfs_fuse:s0"));
+ for conf in in_file_fds {
+ // TODO(b/185178698): Many input files need to be signed and verified.
+ // or can we use debug cert for now, which is better than nothing?
+ args.push(OsString::from("--remote-ro-file-unverified"));
+ args.push(OsString::from(conf.fd.to_string()));
+ }
+ for conf in out_file_fds {
+ args.push(OsString::from("--remote-new-rw-file"));
+ args.push(OsString::from(conf.fd.to_string()));
+ }
+ for conf in in_dir_fds {
+ args.push(OsString::from("--remote-ro-dir"));
+ args.push(OsString::from(format!("{}:{}:{}", conf.fd, conf.manifestPath, conf.prefix)));
+ }
+ for conf in out_dir_fds {
+ args.push(OsString::from("--remote-new-rw-dir"));
+ args.push(OsString::from(conf.fd.to_string()));
+ }
+ if debuggable {
+ args.push(OsString::from("--debug"));
+ }
+
+ let mut command = Command::new(AUTHFS_BIN);
+ command.args(&args);
+ debug!("Spawn authfs: {:?}", command);
+ SharedChild::spawn(&mut command).context("Spawn authfs")
+}
+
+fn wait_until_authfs_ready(child: &SharedChild, mountpoint: &OsStr) -> Result<()> {
+ let start_time = Instant::now();
+ loop {
+ if is_fuse(mountpoint)? {
+ break;
+ }
+ if let Some(exit_status) = child.try_wait()? {
+ // If the child has exited, we will never become ready.
+ bail!("Child has exited: {}", exit_status);
+ }
+ if start_time.elapsed() > AUTHFS_SETUP_TIMEOUT_SEC {
+ let _ = child.kill();
+ bail!("Time out mounting authfs");
+ }
+ sleep(AUTHFS_SETUP_POLL_INTERVAL_MS);
+ }
+ Ok(())
+}
+
+fn is_fuse(path: &OsStr) -> Result<bool> {
+ Ok(statfs(path)?.filesystem_type() == FUSE_SUPER_MAGIC)
+}
diff --git a/authfs/service/src/main.rs b/authfs/service/src/main.rs
new file mode 100644
index 0000000..890e108
--- /dev/null
+++ b/authfs/service/src/main.rs
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! AuthFsService facilitates authfs mounting (which is a privileged operation) for the client. The
+//! client will provide an `AuthFsConfig` which includes the backend address (only port for now) and
+//! the filesystem configuration. It is up to the client to ensure the backend server is running. On
+//! a successful mount, the client receives an `IAuthFs`, and through the binder object, the client
+//! is able to retrieve "remote file descriptors".
+
+mod authfs;
+
+use anyhow::{bail, Context, Result};
+use log::*;
+use std::ffi::OsString;
+use std::fs::{create_dir, read_dir, remove_dir_all, remove_file};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use authfs_aidl_interface::aidl::com::android::virt::fs::AuthFsConfig::AuthFsConfig;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFs::IAuthFs;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFsService::{
+ BnAuthFsService, IAuthFsService,
+};
+use authfs_aidl_interface::binder::{
+ self, add_service, BinderFeatures, ExceptionCode, Interface, ProcessState, Strong,
+};
+use binder_common::new_binder_exception;
+
+const SERVICE_NAME: &str = "authfs_service";
+const SERVICE_ROOT: &str = "/data/misc/authfs";
+
+/// Implementation of `IAuthFsService`.
+pub struct AuthFsService {
+ serial_number: AtomicUsize,
+ debuggable: bool,
+}
+
+impl Interface for AuthFsService {}
+
+impl IAuthFsService for AuthFsService {
+ fn mount(&self, config: &AuthFsConfig) -> binder::Result<Strong<dyn IAuthFs>> {
+ self.validate(config)?;
+
+ let mountpoint = self.get_next_mount_point();
+
+ // The directory is supposed to be deleted when `AuthFs` is dropped.
+ create_dir(&mountpoint).map_err(|e| {
+ new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("Cannot create mount directory {:?}: {:?}", &mountpoint, e),
+ )
+ })?;
+
+ authfs::AuthFs::mount_and_wait(mountpoint, config, self.debuggable).map_err(|e| {
+ new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("mount_and_wait failed: {:?}", e),
+ )
+ })
+ }
+}
+
+impl AuthFsService {
+ fn new_binder(debuggable: bool) -> Strong<dyn IAuthFsService> {
+ let service = AuthFsService { serial_number: AtomicUsize::new(1), debuggable };
+ BnAuthFsService::new_binder(service, BinderFeatures::default())
+ }
+
+ fn validate(&self, config: &AuthFsConfig) -> binder::Result<()> {
+ if config.port < 0 {
+ return Err(new_binder_exception(
+ ExceptionCode::ILLEGAL_ARGUMENT,
+ format!("Invalid port: {}", config.port),
+ ));
+ }
+ Ok(())
+ }
+
+ fn get_next_mount_point(&self) -> OsString {
+ let previous = self.serial_number.fetch_add(1, Ordering::Relaxed);
+ OsString::from(format!("{}/{}", SERVICE_ROOT, previous))
+ }
+}
+
+fn clean_up_working_directory() -> Result<()> {
+ for entry in read_dir(SERVICE_ROOT)? {
+ let entry = entry?;
+ let path = entry.path();
+ if path.is_dir() {
+ remove_dir_all(path)?;
+ } else if path.is_file() {
+ remove_file(path)?;
+ } else {
+ bail!("Unrecognized path type: {:?}", path);
+ }
+ }
+ Ok(())
+}
+
+fn try_main() -> Result<()> {
+ let debuggable = env!("TARGET_BUILD_VARIANT") != "user";
+ let log_level = if debuggable { log::Level::Trace } else { log::Level::Info };
+ android_logger::init_once(
+ android_logger::Config::default().with_tag("authfs_service").with_min_level(log_level),
+ );
+
+ clean_up_working_directory()?;
+
+ ProcessState::start_thread_pool();
+
+ let service = AuthFsService::new_binder(debuggable).as_binder();
+ add_service(SERVICE_NAME, service)
+ .with_context(|| format!("Failed to register service {}", SERVICE_NAME))?;
+ debug!("{} is running", SERVICE_NAME);
+
+ ProcessState::join_thread_pool();
+ bail!("Unexpected exit after join_thread_pool")
+}
+
+fn main() {
+ if let Err(e) = try_main() {
+ error!("failed with {:?}", e);
+ std::process::exit(1);
+ }
+}
diff --git a/authfs/src/auth.rs b/authfs/src/auth.rs
deleted file mode 100644
index 71ad858..0000000
--- a/authfs/src/auth.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use std::io;
-
-// TODO(b/170494765): Implement an authenticator to verify a PKCS#7 signature. We only need to
-// verify the signature, not the full certificate chain.
-
-pub trait Authenticator {
- fn verify(&self, signature: &[u8], signed_data: &[u8]) -> io::Result<bool>;
-}
-
-pub struct FakeAuthenticator {
- should_allow: bool,
-}
-
-#[allow(dead_code)]
-impl FakeAuthenticator {
- pub fn always_succeed() -> Self {
- FakeAuthenticator { should_allow: true }
- }
-
- pub fn always_fail() -> Self {
- FakeAuthenticator { should_allow: false }
- }
-}
-
-impl Authenticator for FakeAuthenticator {
- fn verify(&self, _signature_pem: &[u8], _signed_data: &[u8]) -> io::Result<bool> {
- Ok(self.should_allow)
- }
-}
diff --git a/authfs/src/file.rs b/authfs/src/file.rs
index 44d5000..44e60d8 100644
--- a/authfs/src/file.rs
+++ b/authfs/src/file.rs
@@ -1,34 +1,29 @@
-mod local_file;
+mod attr;
+mod dir;
mod remote_file;
-pub use local_file::LocalFileReader;
+pub use attr::Attr;
+pub use dir::{InMemoryDir, RemoteDirEditor};
pub use remote_file::{RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
use binder::unstable_api::{new_spibinder, AIBinder};
use binder::FromIBinder;
+use std::convert::TryFrom;
use std::io;
+use std::path::{Path, MAIN_SEPARATOR};
-use crate::common::CHUNK_SIZE;
+use crate::common::{divide_roundup, CHUNK_SIZE};
use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::IVirtFdService;
-use authfs_aidl_interface::binder::{get_interface, Strong};
+use authfs_aidl_interface::binder::{Status, Strong};
pub type VirtFdService = Strong<dyn IVirtFdService>;
+pub type VirtFdServiceStatus = Status;
pub type ChunkBuffer = [u8; CHUNK_SIZE as usize];
pub const RPC_SERVICE_PORT: u32 = 3264;
-fn get_local_binder() -> io::Result<VirtFdService> {
- let service_name = "authfs_fd_server";
- get_interface(&service_name).map_err(|e| {
- io::Error::new(
- io::ErrorKind::AddrNotAvailable,
- format!("Cannot reach authfs_fd_server binder service: {}", e),
- )
- })
-}
-
-fn get_rpc_binder(cid: u32) -> io::Result<VirtFdService> {
+pub fn get_rpc_binder_service(cid: u32) -> io::Result<VirtFdService> {
// SAFETY: AIBinder returned by RpcClient has correct reference count, and the ownership can be
// safely taken by new_spibinder.
let ibinder = unsafe {
@@ -46,14 +41,6 @@
}
}
-pub fn get_binder_service(cid: Option<u32>) -> io::Result<VirtFdService> {
- if let Some(cid) = cid {
- get_rpc_binder(cid)
- } else {
- get_local_binder()
- }
-}
-
/// A trait for reading data by chunks. Chunks can be read by specifying the chunk index. Only the
/// last chunk may have incomplete chunk size.
pub trait ReadByChunk {
@@ -88,3 +75,48 @@
/// Resizes the file to the new size.
fn resize(&self, size: u64) -> io::Result<()>;
}
+
+/// Checks whether the path is a simple file name without any directory separator.
+pub fn validate_basename(path: &Path) -> io::Result<()> {
+ if matches!(path.to_str(), Some(path_str) if !path_str.contains(MAIN_SEPARATOR)) {
+ Ok(())
+ } else {
+ Err(io::Error::from_raw_os_error(libc::EINVAL))
+ }
+}
+
+pub struct EagerChunkReader {
+ buffer: Vec<u8>,
+}
+
+impl EagerChunkReader {
+ pub fn new<F: ReadByChunk>(chunked_file: F, file_size: u64) -> io::Result<EagerChunkReader> {
+ let last_index = divide_roundup(file_size, CHUNK_SIZE);
+ let file_size = usize::try_from(file_size).unwrap();
+ let mut buffer = Vec::with_capacity(file_size);
+ let mut chunk_buffer = [0; CHUNK_SIZE as usize];
+ for index in 0..last_index {
+ let size = chunked_file.read_chunk(index, &mut chunk_buffer)?;
+ buffer.extend_from_slice(&chunk_buffer[..size]);
+ }
+ if buffer.len() < file_size {
+ Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("Insufficient data size ({} < {})", buffer.len(), file_size),
+ ))
+ } else {
+ Ok(EagerChunkReader { buffer })
+ }
+ }
+}
+
+impl ReadByChunk for EagerChunkReader {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+ if let Some(chunk) = &self.buffer.chunks(CHUNK_SIZE as usize).nth(chunk_index as usize) {
+ buf[..chunk.len()].copy_from_slice(chunk);
+ Ok(chunk.len())
+ } else {
+ Ok(0) // Read beyond EOF is normal
+ }
+ }
+}
diff --git a/authfs/src/file/attr.rs b/authfs/src/file/attr.rs
new file mode 100644
index 0000000..48084aa
--- /dev/null
+++ b/authfs/src/file/attr.rs
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use log::error;
+use nix::sys::stat::{mode_t, Mode, SFlag};
+use std::io;
+
+use super::VirtFdService;
+
+/// Default/assumed mode of files not created by authfs.
+///
+/// For files that are given to authfs as FDs (i.e. not created through authfs), their mode is
+/// unknown (or untrusted) until it is ever set. The default mode is just to make it
+/// readable/writable to VFS. When the mode is set, the value on fd_server is supposed to become
+/// consistent.
+const DEFAULT_FILE_MODE: Mode =
+ Mode::from_bits_truncate(Mode::S_IRUSR.bits() | Mode::S_IWUSR.bits());
+
+/// Default/assumed mode of directories not created by authfs.
+///
+/// See above.
+const DEFAULT_DIR_MODE: Mode = Mode::S_IRWXU;
+
+/// `Attr` maintains the local truth for attributes (e.g. mode and type) while allowing setting the
+/// remote attribute for the file description.
+pub struct Attr {
+ service: VirtFdService,
+ mode: Mode,
+ remote_fd: i32,
+ is_dir: bool,
+}
+
+impl Attr {
+ pub fn new_file(service: VirtFdService, remote_fd: i32) -> Attr {
+ Attr { service, mode: DEFAULT_FILE_MODE, remote_fd, is_dir: false }
+ }
+
+ pub fn new_dir(service: VirtFdService, remote_fd: i32) -> Attr {
+ Attr { service, mode: DEFAULT_DIR_MODE, remote_fd, is_dir: true }
+ }
+
+ pub fn new_file_with_mode(service: VirtFdService, remote_fd: i32, mode: mode_t) -> Attr {
+ Attr { service, mode: Mode::from_bits_truncate(mode), remote_fd, is_dir: false }
+ }
+
+ pub fn new_dir_with_mode(service: VirtFdService, remote_fd: i32, mode: mode_t) -> Attr {
+ Attr { service, mode: Mode::from_bits_truncate(mode), remote_fd, is_dir: true }
+ }
+
+ pub fn mode(&self) -> u32 {
+ self.mode.bits()
+ }
+
+ /// Sets the file mode.
+ ///
+ /// In addition to the actual file mode, `encoded_mode` also contains information of the file
+ /// type.
+ pub fn set_mode(&mut self, encoded_mode: u32) -> io::Result<()> {
+ let new_sflag = SFlag::from_bits_truncate(encoded_mode);
+ let new_mode = Mode::from_bits_truncate(encoded_mode);
+
+ let type_flag = if self.is_dir { SFlag::S_IFDIR } else { SFlag::S_IFREG };
+ if !type_flag.contains(new_sflag) {
+ return Err(io::Error::from_raw_os_error(libc::EINVAL));
+ }
+
+ // Request for update only if changing.
+ if new_mode != self.mode {
+ self.service.chmod(self.remote_fd, new_mode.bits() as i32).map_err(|e| {
+ error!(
+ "Failed to chmod (fd: {}, mode: {:o}) on fd_server: {:?}",
+ self.remote_fd, new_mode, e
+ );
+ io::Error::from_raw_os_error(libc::EIO)
+ })?;
+ self.mode = new_mode;
+ }
+ Ok(())
+ }
+}
diff --git a/authfs/src/file/dir.rs b/authfs/src/file/dir.rs
new file mode 100644
index 0000000..f3cc6f8
--- /dev/null
+++ b/authfs/src/file/dir.rs
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use log::warn;
+use nix::sys::stat::Mode;
+use std::collections::{hash_map, HashMap};
+use std::ffi::{CString, OsString};
+use std::io;
+use std::os::unix::ffi::OsStringExt;
+use std::path::{Path, PathBuf};
+
+use super::attr::Attr;
+use super::remote_file::RemoteFileEditor;
+use super::{validate_basename, VirtFdService, VirtFdServiceStatus};
+use crate::fsverity::VerifiedFileEditor;
+use crate::fusefs::{AuthFsDirEntry, Inode};
+
+const MAX_ENTRIES: u16 = 100; // Arbitrary limit
+
+struct InodeInfo {
+ inode: Inode,
+
+ // This information is duplicated since it is also available in `AuthFs::inode_table` via the
+ // type system. But it makes it simple to deal with deletion, where otherwise we need to get a
+ // mutable parent directory in the table, and query the table for directory/file type checking
+ // at the same time.
+ is_dir: bool,
+}
+
+/// A remote directory backed by a remote directory FD, where the provider/fd_server is not
+/// trusted.
+///
+/// The directory is assumed empty initially without the trust to the storage. Functionally, when
+/// the backing storage is not clean, the fd_server can fail to create a file or directory when
+/// there is name collision. From RemoteDirEditor's perspective of security, the creation failure
+/// is just one of possible errors that can happen, and what matters is RemoteDirEditor maintains
+/// the integrity itself.
+///
+/// When new files are created through RemoteDirEditor, the file integrity is maintained within the
+/// VM. Similarly, integrity (namely the list of entries) of the directory, or new directories
+/// created within such a directory, are also maintained within the VM. A compromised fd_server or
+/// malicious client can't affect the view to the files and directories within such a directory in
+/// the VM.
+pub struct RemoteDirEditor {
+ service: VirtFdService,
+ remote_dir_fd: i32,
+
+ /// Mapping of entry names to the corresponding inode. The actual file/directory is stored in
+ /// the global pool in fusefs.
+ entries: HashMap<PathBuf, InodeInfo>,
+}
+
+impl RemoteDirEditor {
+ pub fn new(service: VirtFdService, remote_dir_fd: i32) -> Self {
+ RemoteDirEditor { service, remote_dir_fd, entries: HashMap::new() }
+ }
+
+ /// Returns the number of entries created.
+ pub fn number_of_entries(&self) -> u16 {
+ self.entries.len() as u16 // limited to MAX_ENTRIES
+ }
+
+ /// Creates a remote file named `basename` with corresponding `inode` at the current directory.
+ pub fn create_file(
+ &mut self,
+ basename: &Path,
+ inode: Inode,
+ mode: libc::mode_t,
+ ) -> io::Result<(VerifiedFileEditor<RemoteFileEditor>, Attr)> {
+ let mode = self.validate_arguments(basename, mode)?;
+ let basename_str =
+ basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+ let new_fd = self
+ .service
+ .createFileInDirectory(self.remote_dir_fd, basename_str, mode as i32)
+ .map_err(into_io_error)?;
+
+ let new_remote_file =
+ VerifiedFileEditor::new(RemoteFileEditor::new(self.service.clone(), new_fd));
+ self.entries.insert(basename.to_path_buf(), InodeInfo { inode, is_dir: false });
+ let new_attr = Attr::new_file_with_mode(self.service.clone(), new_fd, mode);
+ Ok((new_remote_file, new_attr))
+ }
+
+ /// Creates a remote directory named `basename` with corresponding `inode` at the current
+ /// directory.
+ pub fn mkdir(
+ &mut self,
+ basename: &Path,
+ inode: Inode,
+ mode: libc::mode_t,
+ ) -> io::Result<(RemoteDirEditor, Attr)> {
+ let mode = self.validate_arguments(basename, mode)?;
+ let basename_str =
+ basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+ let new_fd = self
+ .service
+ .createDirectoryInDirectory(self.remote_dir_fd, basename_str, mode as i32)
+ .map_err(into_io_error)?;
+
+ let new_remote_dir = RemoteDirEditor::new(self.service.clone(), new_fd);
+ self.entries.insert(basename.to_path_buf(), InodeInfo { inode, is_dir: true });
+ let new_attr = Attr::new_dir_with_mode(self.service.clone(), new_fd, mode);
+ Ok((new_remote_dir, new_attr))
+ }
+
+ /// Deletes a file
+ pub fn delete_file(&mut self, basename: &Path) -> io::Result<Inode> {
+ let inode = self.force_delete_entry(basename, /* expect_dir */ false)?;
+
+ let basename_str =
+ basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+ if let Err(e) = self.service.deleteFile(self.remote_dir_fd, basename_str) {
+ // Ignore the error to honor the local state.
+ warn!("Deletion on the host is reportedly failed: {:?}", e);
+ }
+ Ok(inode)
+ }
+
+ /// Forces to delete a directory. The caller must only call if `basename` is a directory and
+ /// empty.
+ pub fn force_delete_directory(&mut self, basename: &Path) -> io::Result<Inode> {
+ let inode = self.force_delete_entry(basename, /* expect_dir */ true)?;
+
+ let basename_str =
+ basename.to_str().ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;
+ if let Err(e) = self.service.deleteDirectory(self.remote_dir_fd, basename_str) {
+ // Ignore the error to honor the local state.
+ warn!("Deletion on the host is reportedly failed: {:?}", e);
+ }
+ Ok(inode)
+ }
+
+ /// Returns the inode number of a file or directory named `name` previously created through
+ /// `RemoteDirEditor`.
+ pub fn find_inode(&self, name: &Path) -> io::Result<Inode> {
+ self.entries
+ .get(name)
+ .map(|entry| entry.inode)
+ .ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+ }
+
+ /// Returns whether the directory has an entry of the given name.
+ pub fn has_entry(&self, name: &Path) -> bool {
+ self.entries.contains_key(name)
+ }
+
+ pub fn retrieve_entries(&self) -> io::Result<Vec<AuthFsDirEntry>> {
+ self.entries
+ .iter()
+ .map(|(name, InodeInfo { inode, is_dir })| {
+ Ok(AuthFsDirEntry { inode: *inode, name: path_to_cstring(name)?, is_dir: *is_dir })
+ })
+ .collect::<io::Result<Vec<_>>>()
+ }
+
+ fn force_delete_entry(&mut self, basename: &Path, expect_dir: bool) -> io::Result<Inode> {
+ // Kernel should only give us a basename.
+ debug_assert!(validate_basename(basename).is_ok());
+
+ if let Some(entry) = self.entries.get(basename) {
+ match (expect_dir, entry.is_dir) {
+ (true, false) => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+ (false, true) => Err(io::Error::from_raw_os_error(libc::EISDIR)),
+ _ => {
+ let inode = entry.inode;
+ let _ = self.entries.remove(basename);
+ Ok(inode)
+ }
+ }
+ } else {
+ Err(io::Error::from_raw_os_error(libc::ENOENT))
+ }
+ }
+
+ fn validate_arguments(&self, basename: &Path, mode: u32) -> io::Result<u32> {
+ // Kernel should only give us a basename.
+ debug_assert!(validate_basename(basename).is_ok());
+
+ if self.entries.contains_key(basename) {
+ return Err(io::Error::from_raw_os_error(libc::EEXIST));
+ }
+
+ if self.entries.len() >= MAX_ENTRIES.into() {
+ return Err(io::Error::from_raw_os_error(libc::EMLINK));
+ }
+
+ Ok(Mode::from_bits_truncate(mode).bits())
+ }
+}
+
+/// An in-memory directory representation of a directory structure.
+pub struct InMemoryDir(HashMap<PathBuf, InodeInfo>);
+
+impl InMemoryDir {
+ /// Creates an empty instance of `InMemoryDir`.
+ pub fn new() -> Self {
+ // Hash map is empty since "." and ".." are excluded in entries.
+ InMemoryDir(HashMap::new())
+ }
+
+ /// Returns the number of entries in the directory (not including "." and "..").
+ pub fn number_of_entries(&self) -> u16 {
+ self.0.len() as u16 // limited to MAX_ENTRIES
+ }
+
+ /// Adds a directory name and its inode number to the directory. Fails if already exists. The
+    /// caller is responsible for ensuring inode uniqueness.
+ pub fn add_dir(&mut self, basename: &Path, inode: Inode) -> io::Result<()> {
+ self.add_entry(basename, InodeInfo { inode, is_dir: true })
+ }
+
+ /// Adds a file name and its inode number to the directory. Fails if already exists. The
+    /// caller is responsible for ensuring inode uniqueness.
+ pub fn add_file(&mut self, basename: &Path, inode: Inode) -> io::Result<()> {
+ self.add_entry(basename, InodeInfo { inode, is_dir: false })
+ }
+
+ fn add_entry(&mut self, basename: &Path, dir_entry: InodeInfo) -> io::Result<()> {
+ validate_basename(basename)?;
+ if self.0.len() >= MAX_ENTRIES.into() {
+ return Err(io::Error::from_raw_os_error(libc::EMLINK));
+ }
+
+ if let hash_map::Entry::Vacant(entry) = self.0.entry(basename.to_path_buf()) {
+ entry.insert(dir_entry);
+ Ok(())
+ } else {
+ Err(io::Error::from_raw_os_error(libc::EEXIST))
+ }
+ }
+
+ /// Looks up an entry inode by name. `None` if not found.
+ pub fn lookup_inode(&self, basename: &Path) -> Option<Inode> {
+ self.0.get(basename).map(|entry| entry.inode)
+ }
+
+ pub fn retrieve_entries(&self) -> io::Result<Vec<AuthFsDirEntry>> {
+ self.0
+ .iter()
+ .map(|(name, InodeInfo { inode, is_dir })| {
+ Ok(AuthFsDirEntry { inode: *inode, name: path_to_cstring(name)?, is_dir: *is_dir })
+ })
+ .collect::<io::Result<Vec<_>>>()
+ }
+}
+
+fn path_to_cstring(path: &Path) -> io::Result<CString> {
+ let bytes = OsString::from(path).into_vec();
+ CString::new(bytes).map_err(|_| io::Error::from_raw_os_error(libc::EILSEQ))
+}
+
+fn into_io_error(e: VirtFdServiceStatus) -> io::Error {
+ let maybe_errno = e.service_specific_error();
+ if maybe_errno > 0 {
+ io::Error::from_raw_os_error(maybe_errno)
+ } else {
+ io::Error::new(io::ErrorKind::Other, e.get_description())
+ }
+}
diff --git a/authfs/src/file/local_file.rs b/authfs/src/file/local_file.rs
deleted file mode 100644
index 13c954f..0000000
--- a/authfs/src/file/local_file.rs
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use std::cmp::min;
-use std::fs::File;
-use std::io;
-use std::os::unix::fs::FileExt;
-
-use super::{ChunkBuffer, ReadByChunk};
-use crate::common::CHUNK_SIZE;
-
-/// A read-only file that can be read by chunks.
-pub struct LocalFileReader {
- file: File,
- size: u64,
-}
-
-impl LocalFileReader {
- /// Creates a `LocalFileReader` to read from for the specified `path`.
- pub fn new(file: File) -> io::Result<LocalFileReader> {
- let size = file.metadata()?.len();
- Ok(LocalFileReader { file, size })
- }
-
- pub fn len(&self) -> u64 {
- self.size
- }
-}
-
-impl ReadByChunk for LocalFileReader {
- fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
- let start = chunk_index * CHUNK_SIZE;
- if start >= self.size {
- return Ok(0);
- }
- let end = min(self.size, start + CHUNK_SIZE);
- let read_size = (end - start) as usize;
- debug_assert!(read_size <= buf.len());
- self.file.read_exact_at(&mut buf[..read_size], start)?;
- Ok(read_size)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use std::env::temp_dir;
-
- #[test]
- fn test_read_4k_file() -> io::Result<()> {
- let file_reader = LocalFileReader::new(File::open("testdata/input.4k")?)?;
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(0, &mut buf)?;
- assert_eq!(size, buf.len());
- Ok(())
- }
-
- #[test]
- fn test_read_4k1_file() -> io::Result<()> {
- let file_reader = LocalFileReader::new(File::open("testdata/input.4k1")?)?;
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(0, &mut buf)?;
- assert_eq!(size, buf.len());
- let size = file_reader.read_chunk(1, &mut buf)?;
- assert_eq!(size, 1);
- Ok(())
- }
-
- #[test]
- fn test_read_4m_file() -> io::Result<()> {
- let file_reader = LocalFileReader::new(File::open("testdata/input.4m")?)?;
- for index in 0..file_reader.len() / 4096 {
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(index, &mut buf)?;
- assert_eq!(size, buf.len());
- }
- Ok(())
- }
-
- #[test]
- fn test_read_beyond_file_size() -> io::Result<()> {
- let file_reader = LocalFileReader::new(File::open("testdata/input.4k").unwrap()).unwrap();
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(1u64, &mut buf)?;
- assert_eq!(size, 0);
- Ok(())
- }
-
- #[test]
- fn test_read_empty_file() -> io::Result<()> {
- let mut temp_file = temp_dir();
- temp_file.push("authfs_test_empty_file");
- let file_reader = LocalFileReader::new(File::create(temp_file).unwrap()).unwrap();
- let mut buf = [0u8; 4096];
- let size = file_reader.read_chunk(0, &mut buf)?;
- assert_eq!(size, 0);
- Ok(())
- }
-}
diff --git a/authfs/src/file/remote_file.rs b/authfs/src/file/remote_file.rs
index 037b8ec..039285f 100644
--- a/authfs/src/file/remote_file.rs
+++ b/authfs/src/file/remote_file.rs
@@ -17,6 +17,7 @@
use std::cmp::min;
use std::convert::TryFrom;
use std::io;
+use std::path::Path;
use super::{ChunkBuffer, RandomWrite, ReadByChunk, VirtFdService};
use crate::common::CHUNK_SIZE;
@@ -48,6 +49,29 @@
pub fn new(service: VirtFdService, file_fd: i32) -> Self {
RemoteFileReader { service, file_fd }
}
+
+ pub fn new_by_path(
+ service: VirtFdService,
+ dir_fd: i32,
+ related_path: &Path,
+ ) -> io::Result<Self> {
+ let file_fd =
+ service.openFileInDirectory(dir_fd, related_path.to_str().unwrap()).map_err(|e| {
+ io::Error::new(
+ io::ErrorKind::Other,
+ format!(
+ "Failed to create a remote file reader by path {}: {}",
+ related_path.display(),
+ e.get_description()
+ ),
+ )
+ })?;
+ Ok(RemoteFileReader { service, file_fd })
+ }
+
+ pub fn get_remote_fd(&self) -> i32 {
+ self.file_fd
+ }
}
impl ReadByChunk for RemoteFileReader {
@@ -101,7 +125,7 @@
i64::try_from(offset).map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
let size = self
.service
- .writeFile(self.file_fd, &buf, offset)
+ .writeFile(self.file_fd, buf, offset)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
Ok(size as usize) // within range because size is supposed to <= buf.len(), which is a usize
}
diff --git a/authfs/src/fsstat.rs b/authfs/src/fsstat.rs
new file mode 100644
index 0000000..81eaca1
--- /dev/null
+++ b/authfs/src/fsstat.rs
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use log::error;
+use std::convert::TryInto;
+use std::io;
+
+use crate::file::VirtFdService;
+use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::FsStat::FsStat;
+
+/// Relevant/interesting stats of a remote filesystem.
+pub struct RemoteFsStats {
+ /// Block size of the filesystem
+ pub block_size: u64,
+ /// Fragment size of the filesystem
+ pub fragment_size: u64,
+ /// Number of blocks in the filesystem
+ pub block_numbers: u64,
+ /// Number of free blocks
+ pub block_available: u64,
+ /// Number of free inodes
+ pub inodes_available: u64,
+ /// Maximum filename length
+ pub max_filename: u64,
+}
+
+pub struct RemoteFsStatsReader {
+ service: VirtFdService,
+}
+
+impl RemoteFsStatsReader {
+ pub fn new(service: VirtFdService) -> Self {
+ Self { service }
+ }
+
+ pub fn statfs(&self) -> io::Result<RemoteFsStats> {
+ let st = self.service.statfs().map_err(|e| {
+ error!("Failed to call statfs on fd_server: {:?}", e);
+ io::Error::from_raw_os_error(libc::EIO)
+ })?;
+ try_into_remote_fs_stats(st).map_err(|_| {
+ error!("Received invalid stats from fd_server");
+ io::Error::from_raw_os_error(libc::EIO)
+ })
+ }
+}
+
+fn try_into_remote_fs_stats(st: FsStat) -> Result<RemoteFsStats, std::num::TryFromIntError> {
+ Ok(RemoteFsStats {
+ block_size: st.blockSize.try_into()?,
+ fragment_size: st.fragmentSize.try_into()?,
+ block_numbers: st.blockNumbers.try_into()?,
+ block_available: st.blockAvailable.try_into()?,
+ inodes_available: st.inodesAvailable.try_into()?,
+ max_filename: st.maxFilename.try_into()?,
+ })
+}
diff --git a/authfs/src/fsverity.rs b/authfs/src/fsverity.rs
index 1515574..61ae928 100644
--- a/authfs/src/fsverity.rs
+++ b/authfs/src/fsverity.rs
@@ -20,5 +20,6 @@
mod sys;
mod verifier;
+pub use common::merkle_tree_size;
pub use editor::VerifiedFileEditor;
pub use verifier::VerifiedFileReader;
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
index 1842425..fda47bc 100644
--- a/authfs/src/fsverity/builder.rs
+++ b/authfs/src/fsverity/builder.rs
@@ -248,7 +248,7 @@
let mut tree = MerkleLeaves::new();
for (index, chunk) in test_data.chunks(CHUNK_SIZE as usize).enumerate() {
let hash = Sha256Hasher::new()?
- .update(&chunk)?
+ .update(chunk)?
.update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()])?
.finalize()?;
diff --git a/authfs/src/fsverity/common.rs b/authfs/src/fsverity/common.rs
index 8889f5c..eba379d 100644
--- a/authfs/src/fsverity/common.rs
+++ b/authfs/src/fsverity/common.rs
@@ -24,8 +24,8 @@
#[derive(Error, Debug)]
pub enum FsverityError {
- #[error("Cannot verify a signature")]
- BadSignature,
+ #[error("Invalid digest")]
+ InvalidDigest,
#[error("Insufficient data, only got {0}")]
InsufficientData(usize),
#[error("Cannot verify a block")]
@@ -52,6 +52,18 @@
log128_ceil(hash_pages)
}
+/// Returns the size of Merkle tree for `data_size` bytes amount of data.
+pub fn merkle_tree_size(mut data_size: u64) -> u64 {
+ let mut total = 0;
+ while data_size > CHUNK_SIZE {
+ let hash_size = divide_roundup(data_size, CHUNK_SIZE) * Sha256Hasher::HASH_SIZE as u64;
+ let hash_storage_size = divide_roundup(hash_size, CHUNK_SIZE) * CHUNK_SIZE;
+ total += hash_storage_size;
+ data_size = hash_storage_size;
+ }
+ total
+}
+
pub fn build_fsverity_digest(
root_hash: &Sha256Hash,
file_size: u64,
@@ -75,3 +87,22 @@
.update(&[0u8; 16])? // reserved
.finalize()
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_merkle_tree_size() {
+ // To produce groundtruth:
+ // dd if=/dev/zero of=zeros bs=1 count=524289 && \
+ // fsverity digest --out-merkle-tree=tree zeros && \
+ // du -b tree
+ assert_eq!(merkle_tree_size(0), 0);
+ assert_eq!(merkle_tree_size(1), 0);
+ assert_eq!(merkle_tree_size(4096), 0);
+ assert_eq!(merkle_tree_size(4097), 4096);
+ assert_eq!(merkle_tree_size(524288), 4096);
+ assert_eq!(merkle_tree_size(524289), 12288);
+ }
+}
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index 8468cc9..857c6d9 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -88,13 +88,47 @@
Self { file, merkle_tree: Arc::new(RwLock::new(MerkleLeaves::new())) }
}
+ /// Returns the fs-verity digest size in bytes.
+ pub fn get_fsverity_digest_size(&self) -> usize {
+ Sha256Hasher::HASH_SIZE
+ }
+
/// Calculates the fs-verity digest of the current file.
- #[allow(dead_code)]
pub fn calculate_fsverity_digest(&self) -> io::Result<Sha256Hash> {
let merkle_tree = self.merkle_tree.read().unwrap();
merkle_tree.calculate_fsverity_digest().map_err(|e| io::Error::new(io::ErrorKind::Other, e))
}
+ fn read_backing_chunk_unverified(
+ &self,
+ chunk_index: u64,
+ buf: &mut ChunkBuffer,
+ ) -> io::Result<usize> {
+ self.file.read_chunk(chunk_index, buf)
+ }
+
+ fn read_backing_chunk_verified(
+ &self,
+ chunk_index: u64,
+ buf: &mut ChunkBuffer,
+ merkle_tree_locked: &MerkleLeaves,
+ ) -> io::Result<usize> {
+ debug_assert_usize_is_u64();
+
+ if merkle_tree_locked.is_index_valid(chunk_index as usize) {
+ let size = self.read_backing_chunk_unverified(chunk_index, buf)?;
+
+ // Ensure the returned buffer matches the known hash.
+ let hash = Sha256Hasher::new()?.update(buf)?.finalize()?;
+ if !merkle_tree_locked.is_consistent(chunk_index as usize, &hash) {
+ return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
+ }
+ Ok(size)
+ } else {
+ Ok(0)
+ }
+ }
+
fn new_hash_for_incomplete_write(
&self,
source: &[u8],
@@ -110,7 +144,7 @@
// If previous data exists, read back and verify against the known hash (since the
// storage / remote server is not trusted).
if merkle_tree.is_index_valid(output_chunk_index) {
- self.read_chunk(output_chunk_index as u64, &mut orig_data)?;
+ self.read_backing_chunk_unverified(output_chunk_index as u64, &mut orig_data)?;
// Verify original content
let hash = Sha256Hasher::new()?.update(&orig_data)?.finalize()?;
@@ -206,7 +240,7 @@
// (original) integrity for the file. To matches what write(2) describes for an error
// case (though it's about direct I/O), "Partial data may be written ... should be
// considered inconsistent", an error below is propagated.
- self.file.write_all_at(&source, output_offset)?;
+ self.file.write_all_at(source, output_offset)?;
// Update the hash only after the write succeeds. Note that this only attempts to keep
// the tree consistent to what has been written regardless the actual state beyond the
@@ -235,7 +269,7 @@
let chunk_index = size / CHUNK_SIZE;
if new_tail_size > 0 {
let mut buf: ChunkBuffer = [0; CHUNK_SIZE as usize];
- let s = self.read_chunk(chunk_index, &mut buf)?;
+ let s = self.read_backing_chunk_verified(chunk_index, &mut buf, &merkle_tree)?;
debug_assert!(new_tail_size <= s);
let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
@@ -256,7 +290,8 @@
impl<F: ReadByChunk + RandomWrite> ReadByChunk for VerifiedFileEditor<F> {
fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
- self.file.read_chunk(chunk_index, buf)
+ let merkle_tree = self.merkle_tree.read().unwrap();
+ self.read_backing_chunk_verified(chunk_index, buf, &merkle_tree)
}
}
@@ -290,7 +325,7 @@
if end > self.data.borrow().len() {
self.data.borrow_mut().resize(end, 0);
}
- self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(&buf);
+ self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(buf);
Ok(buf.len())
}
@@ -318,7 +353,7 @@
format!("read_chunk out of bound: index {}", chunk_index),
)
})?;
- buf[..chunk.len()].copy_from_slice(&chunk);
+ buf[..chunk.len()].copy_from_slice(chunk);
Ok(chunk.len())
}
}
diff --git a/authfs/src/fsverity/metadata/Android.bp b/authfs/src/fsverity/metadata/Android.bp
new file mode 100644
index 0000000..af3729f
--- /dev/null
+++ b/authfs/src/fsverity/metadata/Android.bp
@@ -0,0 +1,25 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_bindgen {
+ name: "libauthfs_fsverity_metadata_bindgen",
+ wrapper_src: "metadata.hpp",
+ crate_name: "authfs_fsverity_metadata_bindgen",
+ source_stem: "metadata_bindings",
+ apex_available: ["com.android.virt"],
+}
+
+rust_library {
+ name: "libauthfs_fsverity_metadata",
+ crate_name: "authfs_fsverity_metadata",
+ srcs: [
+ "metadata.rs",
+ ],
+ rustlibs: [
+ "libauthfs_fsverity_metadata_bindgen",
+ "libring",
+ ],
+ edition: "2018",
+ apex_available: ["com.android.virt"],
+}
diff --git a/authfs/src/fsverity/metadata/metadata.hpp b/authfs/src/fsverity/metadata/metadata.hpp
new file mode 100644
index 0000000..f05740e
--- /dev/null
+++ b/authfs/src/fsverity/metadata/metadata.hpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUTHFS_FSVERITY_METADATA_H
+#define AUTHFS_FSVERITY_METADATA_H
+
+// This file contains the format of fs-verity metadata (.fsv_meta).
+//
+// The header format of .fsv_meta is:
+//
+// +-----------+---------------------------------------------+------------+
+// | Address | Description | Size |
+// +-----------+---------------------------------------------+------------+
+// | 0x0000 | 32-bit LE, version of the format | 4 |
+// | | | |
+// | 0x0004 | fsverity_descriptor (see linux/fsverity.h) | 256 |
+// | | | |
+// | 0x0104 | 32-bit LE, type of signature | 4 |
+// | | (0: NONE, 1: PKCS7, 2: RAW) | |
+// | | | |
+// | 0x0108 | 32-bit LE, size of signature | 4 |
+// | | | |
+// | 0x010C | signature | See 0x0108 |
+// +-----------+---------------------------------------------+------------+
+//
+// After the header, merkle tree dump exists at the first 4K boundary. Usually it's 0x1000, but it
+// could be, for example, 0x2000 or 0x3000, depending on the size of header.
+//
+// TODO(b/193113326): sync with build/make/tools/releasetools/fsverity_metadata_generator.py
+
+#include <stddef.h>
+#include <stdint.h>
+#include <linux/fsverity.h>
+
+const uint64_t CHUNK_SIZE = 4096;
+
+// Give the macro value a name to export.
+const uint8_t FSVERITY_HASH_ALG_SHA256 = FS_VERITY_HASH_ALG_SHA256;
+
+enum class FSVERITY_SIGNATURE_TYPE : __le32 {
+ NONE = 0,
+ PKCS7 = 1,
+ RAW = 2,
+};
+
+struct fsverity_metadata_header {
+ __le32 version;
+ fsverity_descriptor descriptor;
+ FSVERITY_SIGNATURE_TYPE signature_type;
+ __le32 signature_size;
+} __attribute__((packed));
+
+#endif // AUTHFS_FSVERITY_METADATA_H
diff --git a/authfs/src/fsverity/metadata/metadata.rs b/authfs/src/fsverity/metadata/metadata.rs
new file mode 100644
index 0000000..8bc0617
--- /dev/null
+++ b/authfs/src/fsverity/metadata/metadata.rs
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Rust bindgen interface for FSVerity Metadata file (.fsv_meta)
+use authfs_fsverity_metadata_bindgen::{
+ fsverity_descriptor, fsverity_metadata_header, FSVERITY_HASH_ALG_SHA256,
+ FSVERITY_SIGNATURE_TYPE_NONE, FSVERITY_SIGNATURE_TYPE_PKCS7, FSVERITY_SIGNATURE_TYPE_RAW,
+};
+
+use ring::digest::{Context, SHA256};
+use std::cmp::min;
+use std::ffi::OsString;
+use std::fs::File;
+use std::io::{self, Read, Seek};
+use std::mem::{size_of, zeroed};
+use std::os::unix::fs::{FileExt, MetadataExt};
+use std::path::{Path, PathBuf};
+use std::slice::from_raw_parts_mut;
+
+/// Offset of `descriptor` in `struct fsverity_metadata_header`.
+const DESCRIPTOR_OFFSET: usize = 4;
+
+/// Structure for parsed metadata.
+pub struct FSVerityMetadata {
+ /// Header for the metadata.
+ pub header: fsverity_metadata_header,
+
+ /// fs-verity digest of the file, with hash algorithm defined in the fs-verity descriptor.
+ pub digest: Vec<u8>,
+
+ /// Optional signature for the metadata.
+ pub signature: Option<Vec<u8>>,
+
+ metadata_file: File,
+
+ merkle_tree_offset: u64,
+}
+
+impl FSVerityMetadata {
+ /// Read the raw Merkle tree from the metadata, if it exists. The API semantics is similar to a
+ /// regular pread(2), and may not return the full requested buffer.
+ pub fn read_merkle_tree(&self, offset: u64, buf: &mut [u8]) -> io::Result<usize> {
+ let file_size = self.metadata_file.metadata()?.size();
+ let start = self.merkle_tree_offset + offset;
+ let end = min(file_size, start + buf.len() as u64);
+ let read_size = (end - start) as usize;
+ debug_assert!(read_size <= buf.len());
+ if read_size == 0 {
+ Ok(0)
+ } else {
+ self.metadata_file.read_exact_at(&mut buf[..read_size], start)?;
+ Ok(read_size)
+ }
+ }
+}
+
+/// Common block and page size in Linux.
+pub const CHUNK_SIZE: u64 = authfs_fsverity_metadata_bindgen::CHUNK_SIZE;
+
+/// Derive a path of metadata for a given path.
+/// e.g. "system/framework/foo.jar" -> "system/framework/foo.jar.fsv_meta"
+pub fn get_fsverity_metadata_path(path: &Path) -> PathBuf {
+ let mut os_string: OsString = path.into();
+ os_string.push(".fsv_meta");
+ os_string.into()
+}
+
+/// Parse metadata from given file, and returns a structure for the metadata.
+pub fn parse_fsverity_metadata(mut metadata_file: File) -> io::Result<Box<FSVerityMetadata>> {
+ let (header, digest) = {
+ // SAFETY: The header doesn't include any pointers.
+ let mut header: fsverity_metadata_header = unsafe { zeroed() };
+
+ // SAFETY: fsverity_metadata_header is packed, so reading/write from/to the back_buffer
+ // won't overflow.
+ let back_buffer = unsafe {
+ from_raw_parts_mut(
+ &mut header as *mut fsverity_metadata_header as *mut u8,
+ size_of::<fsverity_metadata_header>(),
+ )
+ };
+ metadata_file.read_exact(back_buffer)?;
+
+ // Digest needs to be calculated with the raw value (without changing the endianness).
+ let digest = match header.descriptor.hash_algorithm {
+ FSVERITY_HASH_ALG_SHA256 => {
+ let mut context = Context::new(&SHA256);
+ context.update(
+ &back_buffer
+ [DESCRIPTOR_OFFSET..DESCRIPTOR_OFFSET + size_of::<fsverity_descriptor>()],
+ );
+ Ok(context.finish().as_ref().to_owned())
+ }
+ alg => Err(io::Error::new(
+ io::ErrorKind::Other,
+ format!("Unsupported hash algorithm {}, continue (likely failing soon)", alg),
+ )),
+ }?;
+
+ // TODO(inseob): This doesn't seem ideal. Maybe we can consider nom?
+ header.version = u32::from_le(header.version);
+ header.descriptor.data_size = u64::from_le(header.descriptor.data_size);
+ header.signature_type = u32::from_le(header.signature_type);
+ header.signature_size = u32::from_le(header.signature_size);
+ (header, digest)
+ };
+
+ if header.version != 1 {
+ return Err(io::Error::new(io::ErrorKind::Other, "unsupported metadata version"));
+ }
+
+ let signature = match header.signature_type {
+ FSVERITY_SIGNATURE_TYPE_NONE => None,
+ FSVERITY_SIGNATURE_TYPE_PKCS7 | FSVERITY_SIGNATURE_TYPE_RAW => {
+ // TODO: unpad pkcs7?
+ let mut buf = vec![0u8; header.signature_size as usize];
+ metadata_file.read_exact(&mut buf)?;
+ Some(buf)
+ }
+ _ => return Err(io::Error::new(io::ErrorKind::Other, "unknown signature type")),
+ };
+
+ // merkle tree is at the next 4K boundary
+ let merkle_tree_offset =
+ (metadata_file.stream_position()? + CHUNK_SIZE - 1) / CHUNK_SIZE * CHUNK_SIZE;
+
+ Ok(Box::new(FSVerityMetadata { header, digest, signature, metadata_file, merkle_tree_offset }))
+}
diff --git a/authfs/src/fsverity/sys.rs b/authfs/src/fsverity/sys.rs
index b3222db..51e10a5 100644
--- a/authfs/src/fsverity/sys.rs
+++ b/authfs/src/fsverity/sys.rs
@@ -14,9 +14,6 @@
* limitations under the License.
*/
-/// Magic used in fs-verity digest
-pub const FS_VERITY_MAGIC: &[u8; 8] = b"FSVerity";
-
/// fs-verity version that we are using
pub const FS_VERITY_VERSION: u8 = 1;
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index 13de42a..aaf4bf7 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -18,22 +18,17 @@
use std::io;
use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
-use super::sys::{FS_VERITY_HASH_ALG_SHA256, FS_VERITY_MAGIC};
-use crate::auth::Authenticator;
use crate::common::{divide_roundup, CHUNK_SIZE};
use crate::crypto::{CryptoError, Sha256Hasher};
use crate::file::{ChunkBuffer, ReadByChunk};
const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
-// The size of `struct fsverity_formatted_digest` in Linux with SHA-256.
-const SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256: usize = 12 + Sha256Hasher::HASH_SIZE;
-
type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];
fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
let padding_size = pad_to - chunk.len();
- Sha256Hasher::new()?.update(&chunk)?.update(&ZEROS[..padding_size])?.finalize()
+ Sha256Hasher::new()?.update(chunk)?.update(&ZEROS[..padding_size])?.finalize()
}
fn verity_check<T: ReadByChunk>(
@@ -47,7 +42,13 @@
// beyond the file size, including empty file.
assert_ne!(file_size, 0);
- let chunk_hash = hash_with_padding(&chunk, CHUNK_SIZE as usize)?;
+ let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize)?;
+
+ // When the file is smaller or equal to CHUNK_SIZE, the root of Merkle tree is defined as the
+ // hash of the file content, plus padding.
+ if file_size <= CHUNK_SIZE {
+ return Ok(chunk_hash);
+ }
fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
chunk_hash,
@@ -110,48 +111,36 @@
}))
}
-fn build_fsverity_formatted_digest(
- root_hash: &HashBuffer,
- file_size: u64,
-) -> Result<[u8; SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256], CryptoError> {
- let digest = build_fsverity_digest(root_hash, file_size)?;
- // Little-endian byte representation of fsverity_formatted_digest from linux/fsverity.h
- // Not FFI-ed as it seems easier to deal with the raw bytes manually.
- let mut formatted_digest = [0u8; SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256];
- formatted_digest[0..8].copy_from_slice(FS_VERITY_MAGIC);
- formatted_digest[8..10].copy_from_slice(&(FS_VERITY_HASH_ALG_SHA256 as u16).to_le_bytes());
- formatted_digest[10..12].copy_from_slice(&(Sha256Hasher::HASH_SIZE as u16).to_le_bytes());
- formatted_digest[12..].copy_from_slice(&digest);
- Ok(formatted_digest)
-}
-
pub struct VerifiedFileReader<F: ReadByChunk, M: ReadByChunk> {
+ pub file_size: u64,
chunked_file: F,
- file_size: u64,
merkle_tree: M,
root_hash: HashBuffer,
}
impl<F: ReadByChunk, M: ReadByChunk> VerifiedFileReader<F, M> {
- pub fn new<A: Authenticator>(
- authenticator: &A,
+ pub fn new(
chunked_file: F,
file_size: u64,
- sig: Vec<u8>,
+ expected_digest: &[u8],
merkle_tree: M,
) -> Result<VerifiedFileReader<F, M>, FsverityError> {
let mut buf = [0u8; CHUNK_SIZE as usize];
- let size = merkle_tree.read_chunk(0, &mut buf)?;
- if buf.len() != size {
- return Err(FsverityError::InsufficientData(size));
+ if file_size <= CHUNK_SIZE {
+ let _size = chunked_file.read_chunk(0, &mut buf)?;
+ // The rest of buffer is 0-padded.
+ } else {
+ let size = merkle_tree.read_chunk(0, &mut buf)?;
+ if buf.len() != size {
+ return Err(FsverityError::InsufficientData(size));
+ }
}
let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
- let formatted_digest = build_fsverity_formatted_digest(&root_hash, file_size)?;
- let valid = authenticator.verify(&sig, &formatted_digest)?;
- if valid {
+ if expected_digest == build_fsverity_digest(&root_hash, file_size)? {
+ // Once verified, use the root_hash for verification going forward.
Ok(VerifiedFileReader { chunked_file, file_size, merkle_tree, root_hash })
} else {
- Err(FsverityError::BadSignature)
+ Err(FsverityError::InvalidDigest)
}
}
}
@@ -172,13 +161,54 @@
#[cfg(test)]
mod tests {
use super::*;
- use crate::auth::FakeAuthenticator;
- use crate::file::{LocalFileReader, ReadByChunk};
+ use crate::file::ReadByChunk;
use anyhow::Result;
- use std::fs::{self, File};
- use std::io::Read;
+ use authfs_fsverity_metadata::{parse_fsverity_metadata, FSVerityMetadata};
+ use std::cmp::min;
+ use std::fs::File;
+ use std::os::unix::fs::FileExt;
- type LocalVerifiedFileReader = VerifiedFileReader<LocalFileReader, LocalFileReader>;
+ struct LocalFileReader {
+ file: File,
+ size: u64,
+ }
+
+ impl LocalFileReader {
+ fn new(file: File) -> io::Result<LocalFileReader> {
+ let size = file.metadata()?.len();
+ Ok(LocalFileReader { file, size })
+ }
+
+ fn len(&self) -> u64 {
+ self.size
+ }
+ }
+
+ impl ReadByChunk for LocalFileReader {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+ let start = chunk_index * CHUNK_SIZE;
+ if start >= self.size {
+ return Ok(0);
+ }
+ let end = min(self.size, start + CHUNK_SIZE);
+ let read_size = (end - start) as usize;
+ debug_assert!(read_size <= buf.len());
+ self.file.read_exact_at(&mut buf[..read_size], start)?;
+ Ok(read_size)
+ }
+ }
+
+ type LocalVerifiedFileReader = VerifiedFileReader<LocalFileReader, MerkleTreeReader>;
+
+ pub struct MerkleTreeReader {
+ metadata: Box<FSVerityMetadata>,
+ }
+
+ impl ReadByChunk for MerkleTreeReader {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+ self.metadata.read_merkle_tree(chunk_index * CHUNK_SIZE, buf)
+ }
+ }
fn total_chunk_number(file_size: u64) -> u64 {
(file_size + 4095) / 4096
@@ -187,28 +217,26 @@
// Returns a reader with fs-verity verification and the file size.
fn new_reader_with_fsverity(
content_path: &str,
- merkle_tree_path: &str,
- signature_path: &str,
+ metadata_path: &str,
) -> Result<(LocalVerifiedFileReader, u64)> {
let file_reader = LocalFileReader::new(File::open(content_path)?)?;
let file_size = file_reader.len();
- let merkle_tree = LocalFileReader::new(File::open(merkle_tree_path)?)?;
- let mut sig = Vec::new();
- let _ = File::open(signature_path)?.read_to_end(&mut sig)?;
- let authenticator = FakeAuthenticator::always_succeed();
+ let metadata = parse_fsverity_metadata(File::open(metadata_path)?)?;
Ok((
- VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree)?,
+ VerifiedFileReader::new(
+ file_reader,
+ file_size,
+ &metadata.digest.clone(),
+ MerkleTreeReader { metadata },
+ )?,
file_size,
))
}
#[test]
fn fsverity_verify_full_read_4k() -> Result<()> {
- let (file_reader, file_size) = new_reader_with_fsverity(
- "testdata/input.4k",
- "testdata/input.4k.merkle_dump",
- "testdata/input.4k.fsv_sig",
- )?;
+ let (file_reader, file_size) =
+ new_reader_with_fsverity("testdata/input.4k", "testdata/input.4k.fsv_meta")?;
for i in 0..total_chunk_number(file_size) {
let mut buf = [0u8; 4096];
@@ -219,11 +247,8 @@
#[test]
fn fsverity_verify_full_read_4k1() -> Result<()> {
- let (file_reader, file_size) = new_reader_with_fsverity(
- "testdata/input.4k1",
- "testdata/input.4k1.merkle_dump",
- "testdata/input.4k1.fsv_sig",
- )?;
+ let (file_reader, file_size) =
+ new_reader_with_fsverity("testdata/input.4k1", "testdata/input.4k1.fsv_meta")?;
for i in 0..total_chunk_number(file_size) {
let mut buf = [0u8; 4096];
@@ -234,11 +259,8 @@
#[test]
fn fsverity_verify_full_read_4m() -> Result<()> {
- let (file_reader, file_size) = new_reader_with_fsverity(
- "testdata/input.4m",
- "testdata/input.4m.merkle_dump",
- "testdata/input.4m.fsv_sig",
- )?;
+ let (file_reader, file_size) =
+ new_reader_with_fsverity("testdata/input.4m", "testdata/input.4m.fsv_meta")?;
for i in 0..total_chunk_number(file_size) {
let mut buf = [0u8; 4096];
@@ -251,8 +273,7 @@
fn fsverity_verify_bad_merkle_tree() -> Result<()> {
let (file_reader, _) = new_reader_with_fsverity(
"testdata/input.4m",
- "testdata/input.4m.merkle_dump.bad", // First leaf node is corrupted.
- "testdata/input.4m.fsv_sig",
+ "testdata/input.4m.fsv_meta.bad_merkle", // First leaf node is corrupted.
)?;
// A lowest broken node (a 4K chunk that contains 128 sha256 hashes) will fail the read
@@ -266,16 +287,4 @@
assert!(file_reader.read_chunk(last_index, &mut buf).is_ok());
Ok(())
}
-
- #[test]
- fn invalid_signature() -> Result<()> {
- let authenticator = FakeAuthenticator::always_fail();
- let file_reader = LocalFileReader::new(File::open("testdata/input.4m")?)?;
- let file_size = file_reader.len();
- let merkle_tree = LocalFileReader::new(File::open("testdata/input.4m.merkle_dump")?)?;
- let sig = fs::read("testdata/input.4m.fsv_sig")?;
- assert!(VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree)
- .is_err());
- Ok(())
- }
}
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index d2948c7..511db68 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -14,80 +14,367 @@
* limitations under the License.
*/
-use anyhow::Result;
-use log::{debug, warn};
-use std::collections::BTreeMap;
-use std::convert::TryFrom;
-use std::ffi::CStr;
-use std::fs::OpenOptions;
-use std::io;
-use std::mem::MaybeUninit;
-use std::option::Option;
-use std::os::unix::io::AsRawFd;
-use std::path::Path;
-use std::time::Duration;
+mod file;
+mod mount;
+use anyhow::{anyhow, bail, Result};
use fuse::filesystem::{
- Context, DirEntry, DirectoryIterator, Entry, FileSystem, FsOptions, SetattrValid,
- ZeroCopyReader, ZeroCopyWriter,
+ Context, DirEntry, DirectoryIterator, Entry, FileSystem, FsOptions, GetxattrReply,
+ SetattrValid, ZeroCopyReader, ZeroCopyWriter,
};
-use fuse::mount::MountOption;
+use fuse::sys::OpenOptions as FuseOpenOptions;
+use log::{debug, error, warn};
+use std::collections::{btree_map, BTreeMap};
+use std::convert::{TryFrom, TryInto};
+use std::ffi::{CStr, CString, OsStr};
+use std::io;
+use std::mem::{zeroed, MaybeUninit};
+use std::option::Option;
+use std::os::unix::ffi::OsStrExt;
+use std::path::{Component, Path, PathBuf};
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::{Arc, RwLock};
+use std::time::Duration;
use crate::common::{divide_roundup, ChunkedSizeIter, CHUNK_SIZE};
use crate::file::{
- LocalFileReader, RandomWrite, ReadByChunk, RemoteFileEditor, RemoteFileReader,
- RemoteMerkleTreeReader,
+ validate_basename, Attr, InMemoryDir, RandomWrite, ReadByChunk, RemoteDirEditor,
+ RemoteFileEditor, RemoteFileReader,
};
-use crate::fsverity::{VerifiedFileEditor, VerifiedFileReader};
+use crate::fsstat::RemoteFsStatsReader;
+use crate::fsverity::VerifiedFileEditor;
-const DEFAULT_METADATA_TIMEOUT: std::time::Duration = Duration::from_secs(5);
+pub use self::file::LazyVerifiedReadonlyFile;
+pub use self::mount::mount_and_enter_message_loop;
+use self::mount::MAX_WRITE_BYTES;
pub type Inode = u64;
type Handle = u64;
-/// `FileConfig` defines the file type supported by AuthFS.
-pub enum FileConfig {
- /// A file type that is verified against fs-verity signature (thus read-only). The file is
- /// backed by a local file. Debug only.
- LocalVerifiedReadonlyFile {
- reader: VerifiedFileReader<LocalFileReader, LocalFileReader>,
- file_size: u64,
- },
- /// A file type that is a read-only passthrough from a local file. Debug only.
- LocalUnverifiedReadonlyFile { reader: LocalFileReader, file_size: u64 },
+/// Maximum time for a file's metadata to be cached by the kernel. Since any file and directory
+/// changes (if not read-only) has to go through AuthFS to be trusted, the timeout can be maximum.
+const DEFAULT_METADATA_TIMEOUT: Duration = Duration::MAX;
+
+const ROOT_INODE: Inode = 1;
+
+/// `AuthFsEntry` defines the filesystem entry type supported by AuthFS.
+pub enum AuthFsEntry {
+ /// A read-only directory (writable during initialization). Root directory is an example.
+ ReadonlyDirectory { dir: InMemoryDir },
/// A file type that is verified against fs-verity signature (thus read-only). The file is
/// served from a remote server.
- RemoteVerifiedReadonlyFile {
- reader: VerifiedFileReader<RemoteFileReader, RemoteMerkleTreeReader>,
- file_size: u64,
- },
- /// A file type that is a read-only passthrough from a file on a remote serrver.
- RemoteUnverifiedReadonlyFile { reader: RemoteFileReader, file_size: u64 },
+ VerifiedReadonly { reader: LazyVerifiedReadonlyFile },
+ /// A file type that is a read-only passthrough from a file on a remote server.
+ UnverifiedReadonly { reader: RemoteFileReader, file_size: u64 },
/// A file type that is initially empty, and the content is stored on a remote server. File
/// integrity is guaranteed with private Merkle tree.
- RemoteVerifiedNewFile { editor: VerifiedFileEditor<RemoteFileEditor> },
+ VerifiedNew { editor: VerifiedFileEditor<RemoteFileEditor>, attr: Attr },
+ /// A directory type that is initially empty. One can create new file (`VerifiedNew`) and new
+ /// directory (`VerifiedNewDirectory` itself) with integrity guaranteed within the VM.
+ VerifiedNewDirectory { dir: RemoteDirEditor, attr: Attr },
}
-struct AuthFs {
- /// Store `FileConfig`s using the `Inode` number as the search index.
+impl AuthFsEntry {
+ fn expect_empty_deletable_directory(&self) -> io::Result<()> {
+ match self {
+ AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+ if dir.number_of_entries() == 0 {
+ Ok(())
+ } else {
+ Err(io::Error::from_raw_os_error(libc::ENOTEMPTY))
+ }
+ }
+ AuthFsEntry::ReadonlyDirectory { .. } => {
+ Err(io::Error::from_raw_os_error(libc::EACCES))
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+ }
+ }
+}
+
+struct InodeState {
+ /// Actual inode entry.
+ entry: AuthFsEntry,
+
+ /// Number of `Handle`s (i.e. file descriptors) that are currently referring to this inode.
///
- /// For further optimization to minimize the search cost, since Inode is integer, we may
- /// consider storing them in a Vec if we can guarantee that the numbers are small and
- /// consecutive.
- file_pool: BTreeMap<Inode, FileConfig>,
+ /// Technically, this does not matter to readonly entries, since they live forever. The
+ /// reference count is only needed for managing the lifetime of writable entries like `VerifiedNew`
+ /// and `VerifiedNewDirectory`. That is, when an entry is deleted, the actual entry needs to
+ /// stay alive until the reference count reaches zero.
+ ///
+ /// Note: This is not to be confused with hardlinks, which AuthFS doesn't currently implement.
+ handle_ref_count: AtomicU64,
- /// Maximum bytes in the write transaction to the FUSE device. This limits the maximum size to
- /// a read request (including FUSE protocol overhead).
- max_write: u32,
+ /// Whether the inode is already unlinked, i.e. should be removed, once `handle_ref_count` is
+ /// down to zero.
+ unlinked: bool,
}
-impl AuthFs {
- pub fn new(file_pool: BTreeMap<Inode, FileConfig>, max_write: u32) -> AuthFs {
- AuthFs { file_pool, max_write }
+impl InodeState {
+ fn new(entry: AuthFsEntry) -> Self {
+ InodeState { entry, handle_ref_count: AtomicU64::new(0), unlinked: false }
}
- fn get_file_config(&self, inode: &Inode) -> io::Result<&FileConfig> {
- self.file_pool.get(&inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+ fn new_with_ref_count(entry: AuthFsEntry, handle_ref_count: u64) -> Self {
+ InodeState { entry, handle_ref_count: AtomicU64::new(handle_ref_count), unlinked: false }
+ }
+}
+
+/// Data type that a directory implementation should be able to present its entry to `AuthFs`.
+#[derive(Clone)]
+pub struct AuthFsDirEntry {
+ pub inode: Inode,
+ pub name: CString,
+ pub is_dir: bool,
+}
+
+/// A snapshot of a directory's entries for supporting the `readdir` operation.
+///
+/// The `readdir` implementation is required by FUSE to not return any entries that have been
+/// returned previously (while it's fine to not return new entries). Snapshot is the easiest way to
+/// be compliant. See `fuse::filesystem::readdir` for more details.
+///
+/// A `DirEntriesSnapshot` is created on `opendir`, and is associated with the returned
+/// `Handle`/FD. The snapshot is deleted when the handle is released in `releasedir`.
+type DirEntriesSnapshot = Vec<AuthFsDirEntry>;
+
+/// An iterator for reading from `DirEntriesSnapshot`.
+pub struct DirEntriesSnapshotIterator {
+ /// A reference to the `DirEntriesSnapshot` in `AuthFs`.
+ snapshot: Arc<DirEntriesSnapshot>,
+
+ /// A value determined by `Self` to identify the last entry. 0 is a reserved value by FUSE to
+ /// mean reading from the beginning.
+ prev_offset: usize,
+}
+
+impl<'a> DirectoryIterator for DirEntriesSnapshotIterator {
+ fn next(&mut self) -> Option<DirEntry> {
+ // This iterator should not be the only reference to the snapshot. The snapshot should
+ // still be held in `dir_handle_table`, i.e. when the FD is not yet closed.
+ //
+ // This code is unreachable when `readdir` is called with a closed FD. Only when the FD is
+ // not yet closed, `DirEntriesSnapshotIterator` can be created (but still short-lived
+ // during `readdir`).
+ debug_assert!(Arc::strong_count(&self.snapshot) >= 2);
+
+ // Since 0 is reserved, let's use 1-based index for the offset. This allows us to
+ // resume from the previous read in the snapshot easily.
+ let current_offset = if self.prev_offset == 0 {
+ 1 // first element in the vector
+ } else {
+ self.prev_offset + 1 // next element in the vector
+ };
+ if current_offset > self.snapshot.len() {
+ None
+ } else {
+ let AuthFsDirEntry { inode, name, is_dir } = &self.snapshot[current_offset - 1];
+ let entry = DirEntry {
+ offset: current_offset as u64,
+ ino: *inode,
+ name,
+ type_: if *is_dir { libc::DT_DIR.into() } else { libc::DT_REG.into() },
+ };
+ self.prev_offset = current_offset;
+ Some(entry)
+ }
+ }
+}
+
+type DirHandleTable = BTreeMap<Handle, Arc<DirEntriesSnapshot>>;
+
+// AuthFS needs to be `Sync` to be accepted by fuse::worker::start_message_loop as a `FileSystem`.
+pub struct AuthFs {
+ /// Table for `Inode` to `InodeState` lookup. This needs to be `Sync` to be used in
+ /// `fuse::worker::start_message_loop`.
+ inode_table: RwLock<BTreeMap<Inode, InodeState>>,
+
+ /// The next available inode number.
+ next_inode: AtomicU64,
+
+ /// Table for `Handle` to `Arc<DirEntriesSnapshot>` lookup. On `opendir`, a new directory handle
+ /// is created and the snapshot of the current directory is created. This is not super
+ /// efficient, but is the simplest way to be compliant to the FUSE contract (see
+ /// `fuse::filesystem::readdir`).
+ ///
+ /// Currently, no code locks `dir_handle_table` and `inode_table` at the same time to avoid
+ /// deadlock.
+ dir_handle_table: RwLock<DirHandleTable>,
+
+ /// The next available handle number.
+ next_handle: AtomicU64,
+
+ /// A reader to access the remote filesystem stats, which is supposed to be of "the" output
+ /// directory. We assume all output are stored in the same partition.
+ remote_fs_stats_reader: RemoteFsStatsReader,
+}
+
+// Implementation for preparing an `AuthFs` instance, before starting to serve.
+// TODO(victorhsieh): Consider implementing a builder to separate the mutable initialization from the
+// immutable / interiorly mutable serving phase.
+impl AuthFs {
+ pub fn new(remote_fs_stats_reader: RemoteFsStatsReader) -> AuthFs {
+ let mut inode_table = BTreeMap::new();
+ inode_table.insert(
+ ROOT_INODE,
+ InodeState::new(AuthFsEntry::ReadonlyDirectory { dir: InMemoryDir::new() }),
+ );
+
+ AuthFs {
+ inode_table: RwLock::new(inode_table),
+ next_inode: AtomicU64::new(ROOT_INODE + 1),
+ dir_handle_table: RwLock::new(BTreeMap::new()),
+ next_handle: AtomicU64::new(1),
+ remote_fs_stats_reader,
+ }
+ }
+
+ /// Add an `AuthFsEntry` as `basename` to the filesystem root.
+ pub fn add_entry_at_root_dir(
+ &mut self,
+ basename: PathBuf,
+ entry: AuthFsEntry,
+ ) -> Result<Inode> {
+ validate_basename(&basename)?;
+ self.add_entry_at_ro_dir_by_path(ROOT_INODE, &basename, entry)
+ }
+
+ /// Add an `AuthFsEntry` by path from the `ReadonlyDirectory` represented by `dir_inode`. The
+ /// path must be a relative path. If some ancestor directories do not exist, they will be
+ /// created (also as `ReadonlyDirectory`) automatically.
+ pub fn add_entry_at_ro_dir_by_path(
+ &mut self,
+ dir_inode: Inode,
+ path: &Path,
+ entry: AuthFsEntry,
+ ) -> Result<Inode> {
+ // 1. Make sure the parent directories all exist. Derive the entry's parent inode.
+ let parent_path =
+ path.parent().ok_or_else(|| anyhow!("No parent directory: {:?}", path))?;
+ let parent_inode =
+ parent_path.components().try_fold(dir_inode, |current_dir_inode, path_component| {
+ match path_component {
+ Component::RootDir => bail!("Absolute path is not supported"),
+ Component::Normal(name) => {
+ let inode_table = self.inode_table.get_mut().unwrap();
+ // Locate the internal directory structure.
+ let current_dir_entry = &mut inode_table
+ .get_mut(&current_dir_inode)
+ .ok_or_else(|| {
+ anyhow!("Unknown directory inode {}", current_dir_inode)
+ })?
+ .entry;
+ let dir = match current_dir_entry {
+ AuthFsEntry::ReadonlyDirectory { dir } => dir,
+ _ => unreachable!("Not a ReadonlyDirectory"),
+ };
+ // Return directory inode. Create first if not exists.
+ if let Some(existing_inode) = dir.lookup_inode(name.as_ref()) {
+ Ok(existing_inode)
+ } else {
+ let new_inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
+ let new_dir_entry =
+ AuthFsEntry::ReadonlyDirectory { dir: InMemoryDir::new() };
+
+ // Actually update the tables.
+ dir.add_dir(name.as_ref(), new_inode)?;
+ if inode_table
+ .insert(new_inode, InodeState::new(new_dir_entry))
+ .is_some()
+ {
+ bail!("Unexpected to find a duplicated inode");
+ }
+ Ok(new_inode)
+ }
+ }
+ _ => Err(anyhow!("Path is not canonical: {:?}", path)),
+ }
+ })?;
+
+ // 2. Insert the entry to the parent directory, as well as the inode table.
+ let inode_table = self.inode_table.get_mut().unwrap();
+ let inode_state = inode_table.get_mut(&parent_inode).expect("previously returned inode");
+ match &mut inode_state.entry {
+ AuthFsEntry::ReadonlyDirectory { dir } => {
+ let basename =
+ path.file_name().ok_or_else(|| anyhow!("Bad file name: {:?}", path))?;
+ let new_inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
+
+ // Actually update the tables.
+ dir.add_file(basename.as_ref(), new_inode)?;
+ if inode_table.insert(new_inode, InodeState::new(entry)).is_some() {
+ bail!("Unexpected to find a duplicated inode");
+ }
+ Ok(new_inode)
+ }
+ _ => unreachable!("Not a ReadonlyDirectory"),
+ }
+ }
+}
+
+// Implementation for serving requests.
+impl AuthFs {
+ /// Handles the file associated with `inode` if found. This function returns whatever
+ /// `handle_fn` returns.
+ fn handle_inode<F, R>(&self, inode: &Inode, handle_fn: F) -> io::Result<R>
+ where
+ F: FnOnce(&AuthFsEntry) -> io::Result<R>,
+ {
+ let inode_table = self.inode_table.read().unwrap();
+ handle_inode_locked(&inode_table, inode, |inode_state| handle_fn(&inode_state.entry))
+ }
+
+ /// Adds a new entry `name` created by `create_fn` at `parent_inode`, with an initial ref count
+ /// of one.
+ ///
+ /// The operation involves two updates: adding the name with a new allocated inode to the
+ /// parent directory, and insert the new inode and the actual `AuthFsEntry` to the global inode
+ /// table.
+ ///
+ /// `create_fn` receives the parent directory, through which it can create the new entry and
+ /// register the new inode. Its returned entry is then added to the inode table.
+ fn create_new_entry_with_ref_count<F>(
+ &self,
+ parent_inode: Inode,
+ name: &CStr,
+ create_fn: F,
+ ) -> io::Result<Inode>
+ where
+ F: FnOnce(&mut AuthFsEntry, &Path, Inode) -> io::Result<AuthFsEntry>,
+ {
+ let mut inode_table = self.inode_table.write().unwrap();
+ let (new_inode, new_file_entry) = handle_inode_mut_locked(
+ &mut inode_table,
+ &parent_inode,
+ |InodeState { entry, .. }| {
+ let new_inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
+ let basename: &Path = cstr_to_path(name);
+ let new_file_entry = create_fn(entry, basename, new_inode)?;
+ Ok((new_inode, new_file_entry))
+ },
+ )?;
+
+ if let btree_map::Entry::Vacant(entry) = inode_table.entry(new_inode) {
+ entry.insert(InodeState::new_with_ref_count(new_file_entry, 1));
+ Ok(new_inode)
+ } else {
+ unreachable!("Unexpected duplication of inode {}", new_inode);
+ }
+ }
+
+ fn open_dir_store_snapshot(
+ &self,
+ dir_entries: Vec<AuthFsDirEntry>,
+ ) -> io::Result<(Option<Handle>, FuseOpenOptions)> {
+ let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
+ let mut dir_handle_table = self.dir_handle_table.write().unwrap();
+ if let btree_map::Entry::Vacant(value) = dir_handle_table.entry(handle) {
+ value.insert(Arc::new(dir_entries));
+ Ok((Some(handle), FuseOpenOptions::empty()))
+ } else {
+ unreachable!("Unexpected to see new handle {} to existing in the table", handle);
+ }
}
}
@@ -107,25 +394,31 @@
}
}
-enum FileMode {
+#[allow(clippy::enum_variant_names)]
+enum AccessMode {
ReadOnly,
- ReadWrite,
+ Variable(u32),
}
-fn create_stat(ino: libc::ino_t, file_size: u64, file_mode: FileMode) -> io::Result<libc::stat64> {
+fn create_stat(
+ ino: libc::ino_t,
+ file_size: u64,
+ access_mode: AccessMode,
+) -> io::Result<libc::stat64> {
+ // SAFETY: stat64 is a plain C struct without pointers.
let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
st.st_ino = ino;
- st.st_mode = match file_mode {
- // Until needed, let's just grant the owner access.
- FileMode::ReadOnly => libc::S_IFREG | libc::S_IRUSR,
- FileMode::ReadWrite => libc::S_IFREG | libc::S_IRUSR | libc::S_IWUSR,
+ st.st_mode = match access_mode {
+ AccessMode::ReadOnly => {
+ // Until needed, let's just grant the owner access.
+ libc::S_IFREG | libc::S_IRUSR
+ }
+ AccessMode::Variable(mode) => libc::S_IFREG | mode,
};
- st.st_dev = 0;
st.st_nlink = 1;
st.st_uid = 0;
st.st_gid = 0;
- st.st_rdev = 0;
st.st_size = libc::off64_t::try_from(file_size)
.map_err(|_| io::Error::from_raw_os_error(libc::EFBIG))?;
st.st_blksize = blk_size();
@@ -135,6 +428,34 @@
Ok(st)
}
+fn create_dir_stat(
+ ino: libc::ino_t,
+ file_number: u16,
+ access_mode: AccessMode,
+) -> io::Result<libc::stat64> {
+ // SAFETY: stat64 is a plain C struct without pointers.
+ let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
+
+ st.st_ino = ino;
+ st.st_mode = match access_mode {
+ AccessMode::ReadOnly => {
+ // Until needed, let's just grant the owner access and search to group and others.
+ libc::S_IFDIR | libc::S_IXUSR | libc::S_IRUSR | libc::S_IXGRP | libc::S_IXOTH
+ }
+ AccessMode::Variable(mode) => libc::S_IFDIR | mode,
+ };
+
+ // 2 extra for . and ..
+ st.st_nlink = file_number
+ .checked_add(2)
+ .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?
+ .into();
+
+ st.st_uid = 0;
+ st.st_gid = 0;
+ Ok(st)
+}
+
fn offset_to_chunk_index(offset: u64) -> u64 {
offset / CHUNK_SIZE
}
@@ -174,22 +495,13 @@
Ok(total)
}
-// No need to support enumerating directory entries.
-struct EmptyDirectoryIterator {}
-
-impl DirectoryIterator for EmptyDirectoryIterator {
- fn next(&mut self) -> Option<DirEntry> {
- None
- }
-}
-
impl FileSystem for AuthFs {
type Inode = Inode;
type Handle = Handle;
- type DirIter = EmptyDirectoryIterator;
+ type DirIter = DirEntriesSnapshotIterator;
fn max_buffer_size(&self) -> u32 {
- self.max_write
+ MAX_WRITE_BYTES
}
fn init(&self, _capable: FsOptions) -> io::Result<FsOptions> {
@@ -198,25 +510,54 @@
Ok(FsOptions::WRITEBACK_CACHE)
}
- fn lookup(&self, _ctx: Context, _parent: Inode, name: &CStr) -> io::Result<Entry> {
- // Only accept file name that looks like an integrer. Files in the pool are simply exposed
- // by their inode number. Also, there is currently no directory structure.
- let num = name.to_str().map_err(|_| io::Error::from_raw_os_error(libc::EINVAL))?;
- // Normally, `lookup` is required to increase a reference count for the inode (while
- // `forget` will decrease it). It is not necessary here since the files are configured to
- // be static.
- let inode = num.parse::<Inode>().map_err(|_| io::Error::from_raw_os_error(libc::ENOENT))?;
- let st = match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonlyFile { file_size, .. }
- | FileConfig::LocalUnverifiedReadonlyFile { file_size, .. }
- | FileConfig::RemoteUnverifiedReadonlyFile { file_size, .. }
- | FileConfig::RemoteVerifiedReadonlyFile { file_size, .. } => {
- create_stat(inode, *file_size, FileMode::ReadOnly)?
- }
- FileConfig::RemoteVerifiedNewFile { editor } => {
- create_stat(inode, editor.size(), FileMode::ReadWrite)?
- }
- };
+ fn lookup(&self, _ctx: Context, parent: Inode, name: &CStr) -> io::Result<Entry> {
+ let inode_table = self.inode_table.read().unwrap();
+
+ // Look up the entry's inode number in parent directory.
+ let inode =
+ handle_inode_locked(&inode_table, &parent, |inode_state| match &inode_state.entry {
+ AuthFsEntry::ReadonlyDirectory { dir } => {
+ let path = cstr_to_path(name);
+ dir.lookup_inode(path).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+ }
+ AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+ let path = cstr_to_path(name);
+ dir.find_inode(path)
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+ })?;
+
+ // Create the entry's stat if found.
+ let st = handle_inode_locked(
+ &inode_table,
+ &inode,
+ |InodeState { entry, handle_ref_count, .. }| {
+ let st = match entry {
+ AuthFsEntry::ReadonlyDirectory { dir } => {
+ create_dir_stat(inode, dir.number_of_entries(), AccessMode::ReadOnly)
+ }
+ AuthFsEntry::UnverifiedReadonly { file_size, .. } => {
+ create_stat(inode, *file_size, AccessMode::ReadOnly)
+ }
+ AuthFsEntry::VerifiedReadonly { reader } => {
+ create_stat(inode, reader.file_size()?, AccessMode::ReadOnly)
+ }
+ AuthFsEntry::VerifiedNew { editor, attr, .. } => {
+ create_stat(inode, editor.size(), AccessMode::Variable(attr.mode()))
+ }
+ AuthFsEntry::VerifiedNewDirectory { dir, attr } => create_dir_stat(
+ inode,
+ dir.number_of_entries(),
+ AccessMode::Variable(attr.mode()),
+ ),
+ }?;
+ if handle_ref_count.fetch_add(1, Ordering::Relaxed) == u64::MAX {
+ panic!("Handle reference count overflow");
+ }
+ Ok(st)
+ },
+ )?;
+
Ok(Entry {
inode,
generation: 0,
@@ -226,26 +567,69 @@
})
}
+ fn forget(&self, _ctx: Context, inode: Self::Inode, count: u64) {
+ let mut inode_table = self.inode_table.write().unwrap();
+ let delete_now = handle_inode_mut_locked(
+ &mut inode_table,
+ &inode,
+ |InodeState { handle_ref_count, unlinked, .. }| {
+ let current = handle_ref_count.get_mut();
+ if count > *current {
+ error!(
+ "Trying to decrease refcount of inode {} by {} (> current {})",
+ inode, count, *current
+ );
+ panic!(); // log to logcat with error!
+ }
+ *current -= count;
+ Ok(*unlinked && *current == 0)
+ },
+ );
+
+ match delete_now {
+ Ok(true) => {
+ let _ = inode_table.remove(&inode).expect("Removed an existing entry");
+ }
+ Ok(false) => { /* Let the inode stay */ }
+ Err(e) => {
+ warn!(
+ "Unexpected failure when tries to forget an inode {} by refcount {}: {:?}",
+ inode, count, e
+ );
+ }
+ }
+ }
+
fn getattr(
&self,
_ctx: Context,
inode: Inode,
_handle: Option<Handle>,
) -> io::Result<(libc::stat64, Duration)> {
- Ok((
- match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonlyFile { file_size, .. }
- | FileConfig::LocalUnverifiedReadonlyFile { file_size, .. }
- | FileConfig::RemoteUnverifiedReadonlyFile { file_size, .. }
- | FileConfig::RemoteVerifiedReadonlyFile { file_size, .. } => {
- create_stat(inode, *file_size, FileMode::ReadOnly)?
- }
- FileConfig::RemoteVerifiedNewFile { editor } => {
- create_stat(inode, editor.size(), FileMode::ReadWrite)?
- }
- },
- DEFAULT_METADATA_TIMEOUT,
- ))
+ self.handle_inode(&inode, |config| {
+ Ok((
+ match config {
+ AuthFsEntry::ReadonlyDirectory { dir } => {
+ create_dir_stat(inode, dir.number_of_entries(), AccessMode::ReadOnly)
+ }
+ AuthFsEntry::UnverifiedReadonly { file_size, .. } => {
+ create_stat(inode, *file_size, AccessMode::ReadOnly)
+ }
+ AuthFsEntry::VerifiedReadonly { reader } => {
+ create_stat(inode, reader.file_size()?, AccessMode::ReadOnly)
+ }
+ AuthFsEntry::VerifiedNew { editor, attr, .. } => {
+ create_stat(inode, editor.size(), AccessMode::Variable(attr.mode()))
+ }
+ AuthFsEntry::VerifiedNewDirectory { dir, attr } => create_dir_stat(
+ inode,
+ dir.number_of_entries(),
+ AccessMode::Variable(attr.mode()),
+ ),
+ }?,
+ DEFAULT_METADATA_TIMEOUT,
+ ))
+ })
}
fn open(
@@ -253,24 +637,67 @@
_ctx: Context,
inode: Self::Inode,
flags: u32,
- ) -> io::Result<(Option<Self::Handle>, fuse::sys::OpenOptions)> {
+ ) -> io::Result<(Option<Self::Handle>, FuseOpenOptions)> {
// Since file handle is not really used in later operations (which use Inode directly),
// return None as the handle.
- match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonlyFile { .. }
- | FileConfig::LocalUnverifiedReadonlyFile { .. }
- | FileConfig::RemoteVerifiedReadonlyFile { .. }
- | FileConfig::RemoteUnverifiedReadonlyFile { .. } => {
- check_access_mode(flags, libc::O_RDONLY)?;
+ self.handle_inode(&inode, |config| {
+ match config {
+ AuthFsEntry::VerifiedReadonly { .. } | AuthFsEntry::UnverifiedReadonly { .. } => {
+ check_access_mode(flags, libc::O_RDONLY)?;
+ }
+ AuthFsEntry::VerifiedNew { .. } => {
+ // TODO(victorhsieh): Implement ACL check using the attr and ctx. Always allow
+ // for now.
+ }
+ AuthFsEntry::ReadonlyDirectory { .. }
+ | AuthFsEntry::VerifiedNewDirectory { .. } => {
+ // TODO(victorhsieh): implement when needed.
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
}
- FileConfig::RemoteVerifiedNewFile { .. } => {
- // No need to check access modes since all the modes are allowed to the
- // read-writable file.
- }
- }
- // Always cache the file content. There is currently no need to support direct I/O or avoid
- // the cache buffer. Memory mapping is only possible with cache enabled.
- Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
+ // Always cache the file content. There is currently no need to support direct I/O or
+ // avoid the cache buffer. Memory mapping is only possible with cache enabled.
+ Ok((None, FuseOpenOptions::KEEP_CACHE))
+ })
+ }
+
+ fn create(
+ &self,
+ _ctx: Context,
+ parent: Self::Inode,
+ name: &CStr,
+ mode: u32,
+ _flags: u32,
+ umask: u32,
+ ) -> io::Result<(Entry, Option<Self::Handle>, FuseOpenOptions)> {
+ let new_inode = self.create_new_entry_with_ref_count(
+ parent,
+ name,
+ |parent_entry, basename, new_inode| match parent_entry {
+ AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+ if dir.has_entry(basename) {
+ return Err(io::Error::from_raw_os_error(libc::EEXIST));
+ }
+ let mode = mode & !umask;
+ let (new_file, new_attr) = dir.create_file(basename, new_inode, mode)?;
+ Ok(AuthFsEntry::VerifiedNew { editor: new_file, attr: new_attr })
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+ },
+ )?;
+
+ Ok((
+ Entry {
+ inode: new_inode,
+ generation: 0,
+ attr: create_stat(new_inode, /* file_size */ 0, AccessMode::Variable(mode))?,
+ entry_timeout: DEFAULT_METADATA_TIMEOUT,
+ attr_timeout: DEFAULT_METADATA_TIMEOUT,
+ },
+ // See also `open`.
+ /* handle */ None,
+ FuseOpenOptions::KEEP_CACHE,
+ ))
}
fn read<W: io::Write + ZeroCopyWriter>(
@@ -284,25 +711,25 @@
_lock_owner: Option<u64>,
_flags: u32,
) -> io::Result<usize> {
- match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonlyFile { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
+ self.handle_inode(&inode, |config| {
+ match config {
+ AuthFsEntry::VerifiedReadonly { reader } => {
+ read_chunks(w, reader, reader.file_size()?, offset, size)
+ }
+ AuthFsEntry::UnverifiedReadonly { reader, file_size } => {
+ read_chunks(w, reader, *file_size, offset, size)
+ }
+ AuthFsEntry::VerifiedNew { editor, .. } => {
+ // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
+ // request a read even if the file is open with O_WRONLY.
+ read_chunks(w, editor, editor.size(), offset, size)
+ }
+ AuthFsEntry::ReadonlyDirectory { .. }
+ | AuthFsEntry::VerifiedNewDirectory { .. } => {
+ Err(io::Error::from_raw_os_error(libc::EISDIR))
+ }
}
- FileConfig::LocalUnverifiedReadonlyFile { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
- }
- FileConfig::RemoteVerifiedReadonlyFile { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
- }
- FileConfig::RemoteUnverifiedReadonlyFile { reader, file_size } => {
- read_chunks(w, reader, *file_size, offset, size)
- }
- FileConfig::RemoteVerifiedNewFile { editor } => {
- // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
- // request a read even if the file is open with O_WRONLY.
- read_chunks(w, editor, editor.size(), offset, size)
- }
- }
+ })
}
fn write<R: io::Read + ZeroCopyReader>(
@@ -317,97 +744,330 @@
_delayed_write: bool,
_flags: u32,
) -> io::Result<usize> {
- match self.get_file_config(&inode)? {
- FileConfig::RemoteVerifiedNewFile { editor } => {
+ self.handle_inode(&inode, |config| match config {
+ AuthFsEntry::VerifiedNew { editor, .. } => {
let mut buf = vec![0; size as usize];
r.read_exact(&mut buf)?;
editor.write_at(&buf, offset)
}
- _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
- }
+ AuthFsEntry::VerifiedReadonly { .. } | AuthFsEntry::UnverifiedReadonly { .. } => {
+ Err(io::Error::from_raw_os_error(libc::EPERM))
+ }
+ AuthFsEntry::ReadonlyDirectory { .. } | AuthFsEntry::VerifiedNewDirectory { .. } => {
+ Err(io::Error::from_raw_os_error(libc::EISDIR))
+ }
+ })
}
fn setattr(
&self,
_ctx: Context,
inode: Inode,
- attr: libc::stat64,
+ in_attr: libc::stat64,
_handle: Option<Handle>,
valid: SetattrValid,
) -> io::Result<(libc::stat64, Duration)> {
- match self.get_file_config(&inode)? {
- FileConfig::RemoteVerifiedNewFile { editor } => {
+ let mut inode_table = self.inode_table.write().unwrap();
+ handle_inode_mut_locked(&mut inode_table, &inode, |InodeState { entry, .. }| match entry {
+ AuthFsEntry::VerifiedNew { editor, attr } => {
+ check_unsupported_setattr_request(valid)?;
+
// Initialize the default stat.
- let mut new_attr = create_stat(inode, editor.size(), FileMode::ReadWrite)?;
+ let mut new_attr =
+ create_stat(inode, editor.size(), AccessMode::Variable(attr.mode()))?;
// `valid` indicates what fields in `attr` are valid. Update to return correctly.
if valid.contains(SetattrValid::SIZE) {
// st_size is i64, but the cast should be safe since kernel should not give a
// negative size.
- debug_assert!(attr.st_size >= 0);
- new_attr.st_size = attr.st_size;
- editor.resize(attr.st_size as u64)?;
+ debug_assert!(in_attr.st_size >= 0);
+ new_attr.st_size = in_attr.st_size;
+ editor.resize(in_attr.st_size as u64)?;
}
-
if valid.contains(SetattrValid::MODE) {
- warn!("Changing st_mode is not currently supported");
- return Err(io::Error::from_raw_os_error(libc::ENOSYS));
- }
- if valid.contains(SetattrValid::UID) {
- warn!("Changing st_uid is not currently supported");
- return Err(io::Error::from_raw_os_error(libc::ENOSYS));
- }
- if valid.contains(SetattrValid::GID) {
- warn!("Changing st_gid is not currently supported");
- return Err(io::Error::from_raw_os_error(libc::ENOSYS));
- }
- if valid.contains(SetattrValid::CTIME) {
- debug!("Ignoring ctime change as authfs does not maintain timestamp currently");
- }
- if valid.intersects(SetattrValid::ATIME | SetattrValid::ATIME_NOW) {
- debug!("Ignoring atime change as authfs does not maintain timestamp currently");
- }
- if valid.intersects(SetattrValid::MTIME | SetattrValid::MTIME_NOW) {
- debug!("Ignoring mtime change as authfs does not maintain timestamp currently");
+ attr.set_mode(in_attr.st_mode)?;
+ new_attr.st_mode = in_attr.st_mode;
}
Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
}
- _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+ AuthFsEntry::VerifiedNewDirectory { dir, attr } => {
+ check_unsupported_setattr_request(valid)?;
+ if valid.contains(SetattrValid::SIZE) {
+ return Err(io::Error::from_raw_os_error(libc::EISDIR));
+ }
+
+ // Initialize the default stat.
+ let mut new_attr = create_dir_stat(
+ inode,
+ dir.number_of_entries(),
+ AccessMode::Variable(attr.mode()),
+ )?;
+ if valid.contains(SetattrValid::MODE) {
+ attr.set_mode(in_attr.st_mode)?;
+ new_attr.st_mode = in_attr.st_mode;
+ }
+ Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::EPERM)),
+ })
+ }
+
+ fn getxattr(
+ &self,
+ _ctx: Context,
+ inode: Self::Inode,
+ name: &CStr,
+ size: u32,
+ ) -> io::Result<GetxattrReply> {
+ self.handle_inode(&inode, |config| {
+ match config {
+ AuthFsEntry::VerifiedNew { editor, .. } => {
+ // FUSE ioctl is limited, thus we can't implement fs-verity ioctls without a kernel
+ // change (see b/196635431). Until it's possible, use xattr to expose what we need
+ // as an authfs specific API.
+ if name != CStr::from_bytes_with_nul(b"authfs.fsverity.digest\0").unwrap() {
+ return Err(io::Error::from_raw_os_error(libc::ENODATA));
+ }
+
+ if size == 0 {
+ // Per protocol, when size is 0, return the value size.
+ Ok(GetxattrReply::Count(editor.get_fsverity_digest_size() as u32))
+ } else {
+ let digest = editor.calculate_fsverity_digest()?;
+ if digest.len() > size as usize {
+ Err(io::Error::from_raw_os_error(libc::ERANGE))
+ } else {
+ Ok(GetxattrReply::Value(digest.to_vec()))
+ }
+ }
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::ENODATA)),
+ }
+ })
+ }
+
+ fn mkdir(
+ &self,
+ _ctx: Context,
+ parent: Self::Inode,
+ name: &CStr,
+ mode: u32,
+ umask: u32,
+ ) -> io::Result<Entry> {
+ let new_inode = self.create_new_entry_with_ref_count(
+ parent,
+ name,
+ |parent_entry, basename, new_inode| match parent_entry {
+ AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+ if dir.has_entry(basename) {
+ return Err(io::Error::from_raw_os_error(libc::EEXIST));
+ }
+ let mode = mode & !umask;
+ let (new_dir, new_attr) = dir.mkdir(basename, new_inode, mode)?;
+ Ok(AuthFsEntry::VerifiedNewDirectory { dir: new_dir, attr: new_attr })
+ }
+ AuthFsEntry::ReadonlyDirectory { .. } => {
+ Err(io::Error::from_raw_os_error(libc::EACCES))
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+ },
+ )?;
+
+ Ok(Entry {
+ inode: new_inode,
+ generation: 0,
+ attr: create_dir_stat(new_inode, /* file_number */ 0, AccessMode::Variable(mode))?,
+ entry_timeout: DEFAULT_METADATA_TIMEOUT,
+ attr_timeout: DEFAULT_METADATA_TIMEOUT,
+ })
+ }
+
+ fn unlink(&self, _ctx: Context, parent: Self::Inode, name: &CStr) -> io::Result<()> {
+ let mut inode_table = self.inode_table.write().unwrap();
+ handle_inode_mut_locked(
+ &mut inode_table,
+ &parent,
+ |InodeState { entry, unlinked, .. }| match entry {
+ AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+ let basename: &Path = cstr_to_path(name);
+ // Delete the file in both the local and remote directories.
+ let _inode = dir.delete_file(basename)?;
+ *unlinked = true;
+ Ok(())
+ }
+ AuthFsEntry::ReadonlyDirectory { .. } => {
+ Err(io::Error::from_raw_os_error(libc::EACCES))
+ }
+ AuthFsEntry::VerifiedNew { .. } => {
+ // Deleting an entry in the filesystem root is not currently supported.
+ Err(io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+ AuthFsEntry::UnverifiedReadonly { .. } | AuthFsEntry::VerifiedReadonly { .. } => {
+ Err(io::Error::from_raw_os_error(libc::ENOTDIR))
+ }
+ },
+ )
+ }
+
+ fn rmdir(&self, _ctx: Context, parent: Self::Inode, name: &CStr) -> io::Result<()> {
+ let mut inode_table = self.inode_table.write().unwrap();
+
+ // Check before actual removal, with readonly borrow.
+ handle_inode_locked(&inode_table, &parent, |inode_state| match &inode_state.entry {
+ AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+ let basename: &Path = cstr_to_path(name);
+ let existing_inode = dir.find_inode(basename)?;
+ handle_inode_locked(&inode_table, &existing_inode, |inode_state| {
+ inode_state.entry.expect_empty_deletable_directory()
+ })
+ }
+ AuthFsEntry::ReadonlyDirectory { .. } => {
+ Err(io::Error::from_raw_os_error(libc::EACCES))
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+ })?;
+
+ // Look up again, this time with mutable borrow. This needs to be done separately because
+ // the previous lookup needs to borrow multiple entry references in the table.
+ handle_inode_mut_locked(
+ &mut inode_table,
+ &parent,
+ |InodeState { entry, unlinked, .. }| match entry {
+ AuthFsEntry::VerifiedNewDirectory { dir, .. } => {
+ let basename: &Path = cstr_to_path(name);
+ let _inode = dir.force_delete_directory(basename)?;
+ *unlinked = true;
+ Ok(())
+ }
+ _ => unreachable!("Mismatched entry type that is just checked"),
+ },
+ )
+ }
+
+ fn opendir(
+ &self,
+ _ctx: Context,
+ inode: Self::Inode,
+ _flags: u32,
+ ) -> io::Result<(Option<Self::Handle>, FuseOpenOptions)> {
+ let entries = self.handle_inode(&inode, |config| match config {
+ AuthFsEntry::VerifiedNewDirectory { dir, .. } => dir.retrieve_entries(),
+ AuthFsEntry::ReadonlyDirectory { dir } => dir.retrieve_entries(),
+ _ => Err(io::Error::from_raw_os_error(libc::ENOTDIR)),
+ })?;
+ self.open_dir_store_snapshot(entries)
+ }
+
+ fn readdir(
+ &self,
+ _ctx: Context,
+ _inode: Self::Inode,
+ handle: Self::Handle,
+ _size: u32,
+ offset: u64,
+ ) -> io::Result<Self::DirIter> {
+ let dir_handle_table = self.dir_handle_table.read().unwrap();
+ if let Some(entry) = dir_handle_table.get(&handle) {
+ Ok(DirEntriesSnapshotIterator {
+ snapshot: entry.clone(),
+ prev_offset: offset.try_into().unwrap(),
+ })
+ } else {
+ Err(io::Error::from_raw_os_error(libc::EBADF))
}
}
+
+ fn releasedir(
+ &self,
+ _ctx: Context,
+ inode: Self::Inode,
+ _flags: u32,
+ handle: Self::Handle,
+ ) -> io::Result<()> {
+ let mut dir_handle_table = self.dir_handle_table.write().unwrap();
+ if dir_handle_table.remove(&handle).is_none() {
+ unreachable!("Unknown directory handle {}, inode {}", handle, inode);
+ }
+ Ok(())
+ }
+
+ fn statfs(&self, _ctx: Context, _inode: Self::Inode) -> io::Result<libc::statvfs64> {
+ let remote_stat = self.remote_fs_stats_reader.statfs()?;
+
+ // Safe because we are zero-initializing a struct with only POD fields. Not all fields
+ // matter to FUSE. See also:
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/fuse/inode.c?h=v5.15#n460
+ let mut st: libc::statvfs64 = unsafe { zeroed() };
+
+ // Use the remote stat as a template, since it'd matter the most to consider the writable
+ // files/directories that are written to the remote.
+ st.f_bsize = remote_stat.block_size;
+ st.f_frsize = remote_stat.fragment_size;
+ st.f_blocks = remote_stat.block_numbers;
+ st.f_bavail = remote_stat.block_available;
+ st.f_favail = remote_stat.inodes_available;
+ st.f_namemax = remote_stat.max_filename;
+ // Assuming we are not privileged to use all free spaces on the remote server, set the free
+ // blocks/fragment to the same available amount.
+ st.f_bfree = st.f_bavail;
+ st.f_ffree = st.f_favail;
+ // Number of inodes on the filesystem
+ st.f_files = self.inode_table.read().unwrap().len() as u64;
+
+ Ok(st)
+ }
}
-/// Mount and start the FUSE instance. This requires CAP_SYS_ADMIN.
-pub fn loop_forever(
- file_pool: BTreeMap<Inode, FileConfig>,
- mountpoint: &Path,
-) -> Result<(), fuse::Error> {
- let max_read: u32 = 65536;
- let max_write: u32 = 65536;
- let dev_fuse = OpenOptions::new()
- .read(true)
- .write(true)
- .open("/dev/fuse")
- .expect("Failed to open /dev/fuse");
+fn handle_inode_locked<F, R>(
+ inode_table: &BTreeMap<Inode, InodeState>,
+ inode: &Inode,
+ handle_fn: F,
+) -> io::Result<R>
+where
+ F: FnOnce(&InodeState) -> io::Result<R>,
+{
+ if let Some(inode_state) = inode_table.get(inode) {
+ handle_fn(inode_state)
+ } else {
+ Err(io::Error::from_raw_os_error(libc::ENOENT))
+ }
+}
- fuse::mount(
- mountpoint,
- "authfs",
- libc::MS_NOSUID | libc::MS_NODEV,
- &[
- MountOption::FD(dev_fuse.as_raw_fd()),
- MountOption::RootMode(libc::S_IFDIR | libc::S_IXUSR | libc::S_IXGRP | libc::S_IXOTH),
- MountOption::AllowOther,
- MountOption::UserId(0),
- MountOption::GroupId(0),
- MountOption::MaxRead(max_read),
- ],
- )
- .expect("Failed to mount fuse");
+fn handle_inode_mut_locked<F, R>(
+ inode_table: &mut BTreeMap<Inode, InodeState>,
+ inode: &Inode,
+ handle_fn: F,
+) -> io::Result<R>
+where
+ F: FnOnce(&mut InodeState) -> io::Result<R>,
+{
+ if let Some(inode_state) = inode_table.get_mut(inode) {
+ handle_fn(inode_state)
+ } else {
+ Err(io::Error::from_raw_os_error(libc::ENOENT))
+ }
+}
- fuse::worker::start_message_loop(
- dev_fuse,
- max_write,
- max_read,
- AuthFs::new(file_pool, max_write),
- )
+fn check_unsupported_setattr_request(valid: SetattrValid) -> io::Result<()> {
+ if valid.contains(SetattrValid::UID) {
+ warn!("Changing st_uid is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::GID) {
+ warn!("Changing st_gid is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.intersects(
+ SetattrValid::CTIME
+ | SetattrValid::ATIME
+ | SetattrValid::ATIME_NOW
+ | SetattrValid::MTIME
+ | SetattrValid::MTIME_NOW,
+ ) {
+ debug!("Ignoring ctime/atime/mtime change as authfs does not maintain timestamp currently");
+ }
+ Ok(())
+}
+
+fn cstr_to_path(cstr: &CStr) -> &Path {
+ OsStr::from_bytes(cstr.to_bytes()).as_ref()
}
diff --git a/authfs/src/fusefs/file.rs b/authfs/src/fusefs/file.rs
new file mode 100644
index 0000000..8c02281
--- /dev/null
+++ b/authfs/src/fusefs/file.rs
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use log::error;
+use std::convert::TryInto;
+use std::io;
+use std::path::PathBuf;
+use std::sync::Mutex;
+
+use crate::file::{
+ ChunkBuffer, EagerChunkReader, ReadByChunk, RemoteFileReader, RemoteMerkleTreeReader,
+ VirtFdService,
+};
+use crate::fsverity::{merkle_tree_size, VerifiedFileReader};
+
+enum FileInfo {
+ ByPathUnderDirFd(i32, PathBuf),
+ ByFd(i32),
+}
+
+type Reader = VerifiedFileReader<RemoteFileReader, EagerChunkReader>;
+
+/// A lazily created read-only file that is verified against the given fs-verity digest.
+///
+/// The main purpose of this struct is to wrap and construct `VerifiedFileReader` lazily.
+pub struct LazyVerifiedReadonlyFile {
+ expected_digest: Vec<u8>,
+
+ service: VirtFdService,
+ file_info: FileInfo,
+
+ /// A lazily instantiated reader.
+ reader: Mutex<Option<Reader>>,
+}
+
+impl LazyVerifiedReadonlyFile {
+ /// Prepare the file by a remote path, related to a remote directory FD.
+ pub fn prepare_by_path(
+ service: VirtFdService,
+ remote_dir_fd: i32,
+ remote_path: PathBuf,
+ expected_digest: Vec<u8>,
+ ) -> Self {
+ LazyVerifiedReadonlyFile {
+ service,
+ file_info: FileInfo::ByPathUnderDirFd(remote_dir_fd, remote_path),
+ expected_digest,
+ reader: Mutex::new(None),
+ }
+ }
+
+ /// Prepare the file by a remote file FD.
+ pub fn prepare_by_fd(service: VirtFdService, remote_fd: i32, expected_digest: Vec<u8>) -> Self {
+ LazyVerifiedReadonlyFile {
+ service,
+ file_info: FileInfo::ByFd(remote_fd),
+ expected_digest,
+ reader: Mutex::new(None),
+ }
+ }
+
+ fn ensure_init_then<F, T>(&self, callback: F) -> io::Result<T>
+ where
+ F: FnOnce(&Reader) -> io::Result<T>,
+ {
+ let mut reader = self.reader.lock().unwrap();
+ if reader.is_none() {
+ let remote_file = match &self.file_info {
+ FileInfo::ByPathUnderDirFd(dir_fd, related_path) => {
+ RemoteFileReader::new_by_path(self.service.clone(), *dir_fd, related_path)?
+ }
+ FileInfo::ByFd(file_fd) => RemoteFileReader::new(self.service.clone(), *file_fd),
+ };
+ let remote_fd = remote_file.get_remote_fd();
+ let file_size = self
+ .service
+ .getFileSize(remote_fd)
+ .map_err(|e| {
+ error!("Failed to get file size of remote fd {}: {}", remote_fd, e);
+ io::Error::from_raw_os_error(libc::EIO)
+ })?
+ .try_into()
+ .map_err(|e| {
+ error!("Failed convert file size: {}", e);
+ io::Error::from_raw_os_error(libc::EIO)
+ })?;
+ let instance = VerifiedFileReader::new(
+ remote_file,
+ file_size,
+ &self.expected_digest,
+ EagerChunkReader::new(
+ RemoteMerkleTreeReader::new(self.service.clone(), remote_fd),
+ merkle_tree_size(file_size),
+ )?,
+ )
+ .map_err(|e| {
+ error!("Failed instantiate a verified file reader: {}", e);
+ io::Error::from_raw_os_error(libc::EIO)
+ })?;
+ *reader = Some(instance);
+ }
+ callback(reader.as_ref().unwrap())
+ }
+
+ pub fn file_size(&self) -> io::Result<u64> {
+ self.ensure_init_then(|reader| Ok(reader.file_size))
+ }
+}
+
+impl ReadByChunk for LazyVerifiedReadonlyFile {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+ self.ensure_init_then(|reader| reader.read_chunk(chunk_index, buf))
+ }
+}
diff --git a/authfs/src/fusefs/mount.rs b/authfs/src/fusefs/mount.rs
new file mode 100644
index 0000000..294c6b1
--- /dev/null
+++ b/authfs/src/fusefs/mount.rs
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use fuse::mount::MountOption;
+use std::fs::OpenOptions;
+use std::os::unix::io::AsRawFd;
+use std::path::Path;
+
+use super::AuthFs;
+
+/// Maximum bytes in the write transaction to the FUSE device. This limits the maximum buffer
+/// size in a read request (including FUSE protocol overhead) that the filesystem writes to.
+pub const MAX_WRITE_BYTES: u32 = 65536;
+
+/// Maximum bytes in a read operation.
+/// TODO(victorhsieh): This option is deprecated by FUSE. Figure out if we can remove this.
+const MAX_READ_BYTES: u32 = 65536;
+
+/// Mount and start the FUSE instance to handle messages. This requires CAP_SYS_ADMIN.
+pub fn mount_and_enter_message_loop(
+ authfs: AuthFs,
+ mountpoint: &Path,
+ extra_options: &Option<String>,
+) -> Result<(), fuse::Error> {
+ let dev_fuse = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .open("/dev/fuse")
+ .expect("Failed to open /dev/fuse");
+
+ let mut mount_options = vec![
+ MountOption::FD(dev_fuse.as_raw_fd()),
+ MountOption::RootMode(libc::S_IFDIR | libc::S_IXUSR | libc::S_IXGRP | libc::S_IXOTH),
+ MountOption::AllowOther,
+ MountOption::UserId(0),
+ MountOption::GroupId(0),
+ MountOption::MaxRead(MAX_READ_BYTES),
+ ];
+ if let Some(value) = extra_options {
+ mount_options.push(MountOption::Extra(value));
+ }
+
+ fuse::mount(
+ mountpoint,
+ "authfs",
+ libc::MS_NOSUID | libc::MS_NODEV | libc::MS_NOEXEC,
+ &mount_options,
+ )
+ .expect("Failed to mount fuse");
+
+ fuse::worker::start_message_loop(dev_fuse, MAX_WRITE_BYTES, MAX_READ_BYTES, authfs)
+}
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 9d36c3f..bdca5b4 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -17,34 +17,41 @@
//! This crate implements AuthFS, a FUSE-based, non-generic filesystem where file access is
//! authenticated. This filesystem assumes the underlying layer is not trusted, e.g. file may be
//! provided by an untrusted host/VM, so that the content can't be simply trusted. However, with a
-//! public key from a trusted party, this filesystem can still verify a (read-only) file signed by
-//! the trusted party even if the host/VM as the blob provider is malicious. With the Merkle tree,
-//! each read of file block can be verified individually only when needed.
+//! known file hash from a trusted party, this filesystem can still verify a (read-only) file even if
+//! the host/VM as the blob provider is malicious. With the Merkle tree, each read of file block can
+//! be verified individually only when needed.
//!
-//! AuthFS only serve files that are specifically configured. A file configuration may include the
-//! source (e.g. local file or remote file server), verification method (e.g. certificate for
-//! fs-verity verification, or no verification if expected to mount over dm-verity), and file ID.
-//! Regardless of the actual file name, the exposed file names through AuthFS are currently integer,
-//! e.g. /mountpoint/42.
+//! AuthFS only serves files that are specifically configured. Each remote file can be configured to
+//! appear as a local file at the mount point. A file configuration may include its remote file
+//! identifier and its verification method (e.g. by known digest).
+//!
+//! AuthFS also supports remote directories. A remote directory may be defined by a manifest file,
+//! which contains file paths and their corresponding digests.
+//!
+//! AuthFS can also be configured for write, in which case the remote file server is treated as a
+//! (untrusted) storage. The file/directory integrity is maintained in memory in the VM. Currently,
+//! the state is not persistent, thus only new files/directories are supported.
-use anyhow::{bail, Context, Result};
-use std::collections::BTreeMap;
+use anyhow::{anyhow, bail, Result};
+use log::error;
+use protobuf::Message;
+use std::convert::TryInto;
use std::fs::File;
-use std::io::Read;
use std::path::{Path, PathBuf};
use structopt::StructOpt;
-mod auth;
mod common;
mod crypto;
mod file;
+mod fsstat;
mod fsverity;
mod fusefs;
-use auth::FakeAuthenticator;
-use file::{LocalFileReader, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
-use fsverity::{VerifiedFileEditor, VerifiedFileReader};
-use fusefs::{FileConfig, Inode};
+use file::{Attr, InMemoryDir, RemoteDirEditor, RemoteFileEditor, RemoteFileReader};
+use fsstat::RemoteFsStatsReader;
+use fsverity::VerifiedFileEditor;
+use fsverity_digests_proto::fsverity_digests::FSVerityDigests;
+use fusefs::{AuthFs, AuthFsEntry, LazyVerifiedReadonlyFile};
#[derive(StructOpt)]
struct Args {
@@ -54,291 +61,279 @@
/// CID of the VM where the service runs.
#[structopt(long)]
- cid: Option<u32>,
+ cid: u32,
+
+ /// Extra options to FUSE
+ #[structopt(short = "o")]
+ extra_options: Option<String>,
/// A read-only remote file with integrity check. Can be multiple.
///
- /// For example, `--remote-verified-file 5:10:1234:/path/to/cert` tells the filesystem to
- /// associate entry 5 with a remote file 10 of size 1234 bytes, and need to be verified against
- /// the /path/to/cert.
+ /// For example, `--remote-ro-file 5:sha256-1234abcd` tells the filesystem to associate the
+ /// file $MOUNTPOINT/5 with a remote FD 5, and has a fs-verity digest with sha256 of the hex
+ /// value 1234abcd.
#[structopt(long, parse(try_from_str = parse_remote_ro_file_option))]
remote_ro_file: Vec<OptionRemoteRoFile>,
/// A read-only remote file without integrity check. Can be multiple.
///
- /// For example, `--remote-unverified-file 5:10:1234` tells the filesystem to associate entry 5
- /// with a remote file 10 of size 1234 bytes.
- #[structopt(long, parse(try_from_str = parse_remote_ro_file_unverified_option))]
- remote_ro_file_unverified: Vec<OptionRemoteRoFileUnverified>,
+ /// For example, `--remote-ro-file-unverified 5` tells the filesystem to associate the file
+ /// $MOUNTPOINT/5 with a remote FD 5.
+ #[structopt(long)]
+ remote_ro_file_unverified: Vec<i32>,
/// A new read-writable remote file with integrity check. Can be multiple.
///
- /// For example, `--remote-new-verified-file 12:34` tells the filesystem to associate entry 12
- /// with a remote file 34.
- #[structopt(long, parse(try_from_str = parse_remote_new_rw_file_option))]
- remote_new_rw_file: Vec<OptionRemoteRwFile>,
+ /// For example, `--remote-new-rw-file 5` tells the filesystem to associate the file
+ /// $MOUNTPOINT/5 with a remote FD 5.
+ #[structopt(long)]
+ remote_new_rw_file: Vec<i32>,
- /// Debug only. A read-only local file with integrity check. Can be multiple.
- #[structopt(long, parse(try_from_str = parse_local_file_ro_option))]
- local_ro_file: Vec<OptionLocalFileRo>,
+ /// A read-only directory that represents a remote directory. The directory view is constructed
+ /// and finalized during the filesystem initialization based on the provided mapping file
+ /// (which is a serialized protobuf of android.security.fsverity.FSVerityDigests, which
+ /// essentially provides <file path, fs-verity digest> mappings of exported files). The mapping
+ /// file is supposed to come from a trusted location in order to provide a trusted view as well
+ /// as verified access of included files with their fs-verity digest. Not all files on the
+ /// remote host may be included in the mapping file, so the directory view may be partial. The
+ /// directory structure won't change throughout the filesystem lifetime.
+ ///
+ /// For example, `--remote-ro-dir 5:/path/to/mapping:prefix/` tells the filesystem to
+ /// construct a directory structure defined in the mapping file at $MOUNTPOINT/5, which may
+ /// include a file like /5/system/framework/framework.jar. "prefix/" tells the filesystem to
+ /// strip the path (e.g. "system/") from the mount point to match the expected location of the
+ /// remote FD (e.g. a directory FD of "/system" in the remote).
+ #[structopt(long, parse(try_from_str = parse_remote_new_ro_dir_option))]
+ remote_ro_dir: Vec<OptionRemoteRoDir>,
- /// Debug only. A read-only local file without integrity check. Can be multiple.
- #[structopt(long, parse(try_from_str = parse_local_ro_file_unverified_ro_option))]
- local_ro_file_unverified: Vec<OptionLocalRoFileUnverified>,
+ /// A new directory that is assumed empty in the backing filesystem. New files created in this
+ /// directory are integrity-protected in the same way as --remote-new-rw-file. Can be
+ /// multiple.
+ ///
+ /// For example, `--remote-new-rw-dir 5` tells the filesystem to associate $MOUNTPOINT/5
+ /// with a remote dir FD 5.
+ #[structopt(long)]
+ remote_new_rw_dir: Vec<i32>,
/// Enable debugging features.
#[structopt(long)]
debug: bool,
}
-impl Args {
- fn has_remote_files(&self) -> bool {
- !self.remote_ro_file.is_empty()
- || !self.remote_ro_file_unverified.is_empty()
- || !self.remote_new_rw_file.is_empty()
- }
-}
-
struct OptionRemoteRoFile {
- ino: Inode,
-
/// ID to refer to the remote file.
- remote_id: i32,
+ remote_fd: i32,
- /// Expected size of the remote file. Necessary for signature check and Merkle tree
- /// verification.
- file_size: u64,
-
- /// Certificate to verify the authenticity of the file's fs-verity signature.
- /// TODO(170494765): Implement PKCS#7 signature verification.
- _certificate_path: PathBuf,
+ /// Expected fs-verity digest (with sha256) for the remote file.
+ digest: String,
}
-struct OptionRemoteRoFileUnverified {
- ino: Inode,
+struct OptionRemoteRoDir {
+ /// ID to refer to the remote dir.
+ remote_dir_fd: i32,
- /// ID to refer to the remote file.
- remote_id: i32,
+ /// A mapping file that describes the expecting file/directory structure and integrity metadata
+ /// in the remote directory. The file contains serialized protobuf of
+ /// android.security.fsverity.FSVerityDigests.
+ mapping_file_path: PathBuf,
- /// Expected size of the remote file.
- file_size: u64,
-}
-
-struct OptionRemoteRwFile {
- ino: Inode,
-
- /// ID to refer to the remote file.
- remote_id: i32,
-}
-
-struct OptionLocalFileRo {
- ino: Inode,
-
- /// Local path of the backing file.
- file_path: PathBuf,
-
- /// Local path of the backing file's fs-verity Merkle tree dump.
- merkle_tree_dump_path: PathBuf,
-
- /// Local path of fs-verity signature for the backing file.
- signature_path: PathBuf,
-
- /// Certificate to verify the authenticity of the file's fs-verity signature.
- /// TODO(170494765): Implement PKCS#7 signature verification.
- _certificate_path: PathBuf,
-}
-
-struct OptionLocalRoFileUnverified {
- ino: Inode,
-
- /// Local path of the backing file.
- file_path: PathBuf,
+ prefix: String,
}
fn parse_remote_ro_file_option(option: &str) -> Result<OptionRemoteRoFile> {
let strs: Vec<&str> = option.split(':').collect();
- if strs.len() != 4 {
+ if strs.len() != 2 {
bail!("Invalid option: {}", option);
}
- Ok(OptionRemoteRoFile {
- ino: strs[0].parse::<Inode>()?,
- remote_id: strs[1].parse::<i32>()?,
- file_size: strs[2].parse::<u64>()?,
- _certificate_path: PathBuf::from(strs[3]),
- })
+ if let Some(digest) = strs[1].strip_prefix("sha256-") {
+ Ok(OptionRemoteRoFile { remote_fd: strs[0].parse::<i32>()?, digest: String::from(digest) })
+ } else {
+ bail!("Unsupported hash algorithm or invalid format: {}", strs[1]);
+ }
}
-fn parse_remote_ro_file_unverified_option(option: &str) -> Result<OptionRemoteRoFileUnverified> {
+fn parse_remote_new_ro_dir_option(option: &str) -> Result<OptionRemoteRoDir> {
let strs: Vec<&str> = option.split(':').collect();
if strs.len() != 3 {
bail!("Invalid option: {}", option);
}
- Ok(OptionRemoteRoFileUnverified {
- ino: strs[0].parse::<Inode>()?,
- remote_id: strs[1].parse::<i32>()?,
- file_size: strs[2].parse::<u64>()?,
+ Ok(OptionRemoteRoDir {
+ remote_dir_fd: strs[0].parse::<i32>().unwrap(),
+ mapping_file_path: PathBuf::from(strs[1]),
+ prefix: String::from(strs[2]),
})
}
-fn parse_remote_new_rw_file_option(option: &str) -> Result<OptionRemoteRwFile> {
- let strs: Vec<&str> = option.split(':').collect();
- if strs.len() != 2 {
- bail!("Invalid option: {}", option);
+fn from_hex_string(s: &str) -> Result<Vec<u8>> {
+ if s.len() % 2 == 1 {
+ bail!("Incomplete hex string: {}", s);
+ } else {
+ let results = (0..s.len())
+ .step_by(2)
+ .map(|i| {
+ u8::from_str_radix(&s[i..i + 2], 16)
+ .map_err(|e| anyhow!("Cannot parse hex {}: {}", &s[i..i + 2], e))
+ })
+ .collect::<Result<Vec<_>>>();
+ Ok(results?)
}
- Ok(OptionRemoteRwFile {
- ino: strs[0].parse::<Inode>().unwrap(),
- remote_id: strs[1].parse::<i32>().unwrap(),
- })
}
-fn parse_local_file_ro_option(option: &str) -> Result<OptionLocalFileRo> {
- let strs: Vec<&str> = option.split(':').collect();
- if strs.len() != 5 {
- bail!("Invalid option: {}", option);
- }
- Ok(OptionLocalFileRo {
- ino: strs[0].parse::<Inode>()?,
- file_path: PathBuf::from(strs[1]),
- merkle_tree_dump_path: PathBuf::from(strs[2]),
- signature_path: PathBuf::from(strs[3]),
- _certificate_path: PathBuf::from(strs[4]),
- })
-}
-
-fn parse_local_ro_file_unverified_ro_option(option: &str) -> Result<OptionLocalRoFileUnverified> {
- let strs: Vec<&str> = option.split(':').collect();
- if strs.len() != 2 {
- bail!("Invalid option: {}", option);
- }
- Ok(OptionLocalRoFileUnverified {
- ino: strs[0].parse::<Inode>()?,
- file_path: PathBuf::from(strs[1]),
- })
-}
-
-fn new_config_remote_verified_file(
+fn new_remote_verified_file_entry(
service: file::VirtFdService,
- remote_id: i32,
+ remote_fd: i32,
+ expected_digest: &str,
+) -> Result<AuthFsEntry> {
+ Ok(AuthFsEntry::VerifiedReadonly {
+ reader: LazyVerifiedReadonlyFile::prepare_by_fd(
+ service,
+ remote_fd,
+ from_hex_string(expected_digest)?,
+ ),
+ })
+}
+
+fn new_remote_unverified_file_entry(
+ service: file::VirtFdService,
+ remote_fd: i32,
file_size: u64,
-) -> Result<FileConfig> {
- let signature = service.readFsveritySignature(remote_id).context("Failed to read signature")?;
+) -> Result<AuthFsEntry> {
+ let reader = RemoteFileReader::new(service, remote_fd);
+ Ok(AuthFsEntry::UnverifiedReadonly { reader, file_size })
+}
- let authenticator = FakeAuthenticator::always_succeed();
- Ok(FileConfig::RemoteVerifiedReadonlyFile {
- reader: VerifiedFileReader::new(
- &authenticator,
- RemoteFileReader::new(service.clone(), remote_id),
- file_size,
- signature,
- RemoteMerkleTreeReader::new(service.clone(), remote_id),
- )?,
- file_size,
+fn new_remote_new_verified_file_entry(
+ service: file::VirtFdService,
+ remote_fd: i32,
+) -> Result<AuthFsEntry> {
+ let remote_file = RemoteFileEditor::new(service.clone(), remote_fd);
+ Ok(AuthFsEntry::VerifiedNew {
+ editor: VerifiedFileEditor::new(remote_file),
+ attr: Attr::new_file(service, remote_fd),
})
}
-fn new_config_remote_unverified_file(
+fn new_remote_new_verified_dir_entry(
service: file::VirtFdService,
- remote_id: i32,
- file_size: u64,
-) -> Result<FileConfig> {
- let reader = RemoteFileReader::new(service, remote_id);
- Ok(FileConfig::RemoteUnverifiedReadonlyFile { reader, file_size })
+ remote_fd: i32,
+) -> Result<AuthFsEntry> {
+ let dir = RemoteDirEditor::new(service.clone(), remote_fd);
+ let attr = Attr::new_dir(service, remote_fd);
+ Ok(AuthFsEntry::VerifiedNewDirectory { dir, attr })
}
-fn new_config_local_ro_file(
- protected_file: &Path,
- merkle_tree_dump: &Path,
- signature: &Path,
-) -> Result<FileConfig> {
- let file = File::open(&protected_file)?;
- let file_size = file.metadata()?.len();
- let file_reader = LocalFileReader::new(file)?;
- let merkle_tree_reader = LocalFileReader::new(File::open(merkle_tree_dump)?)?;
- let authenticator = FakeAuthenticator::always_succeed();
- let mut sig = Vec::new();
- let _ = File::open(signature)?.read_to_end(&mut sig)?;
- let reader =
- VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree_reader)?;
- Ok(FileConfig::LocalVerifiedReadonlyFile { reader, file_size })
-}
-
-fn new_config_local_ro_file_unverified(file_path: &Path) -> Result<FileConfig> {
- let reader = LocalFileReader::new(File::open(file_path)?)?;
- let file_size = reader.len();
- Ok(FileConfig::LocalUnverifiedReadonlyFile { reader, file_size })
-}
-
-fn new_config_remote_new_verified_file(
+fn prepare_root_dir_entries(
service: file::VirtFdService,
- remote_id: i32,
-) -> Result<FileConfig> {
- let remote_file = RemoteFileEditor::new(service, remote_id);
- Ok(FileConfig::RemoteVerifiedNewFile { editor: VerifiedFileEditor::new(remote_file) })
-}
-
-fn prepare_file_pool(args: &Args) -> Result<BTreeMap<Inode, FileConfig>> {
- let mut file_pool = BTreeMap::new();
-
- if args.has_remote_files() {
- let service = file::get_binder_service(args.cid)?;
-
- for config in &args.remote_ro_file {
- file_pool.insert(
- config.ino,
- new_config_remote_verified_file(
- service.clone(),
- config.remote_id,
- config.file_size,
- )?,
- );
- }
-
- for config in &args.remote_ro_file_unverified {
- file_pool.insert(
- config.ino,
- new_config_remote_unverified_file(
- service.clone(),
- config.remote_id,
- config.file_size,
- )?,
- );
- }
-
- for config in &args.remote_new_rw_file {
- file_pool.insert(
- config.ino,
- new_config_remote_new_verified_file(service.clone(), config.remote_id)?,
- );
- }
+ authfs: &mut AuthFs,
+ args: &Args,
+) -> Result<()> {
+ for config in &args.remote_ro_file {
+ authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(config.remote_fd),
+ new_remote_verified_file_entry(service.clone(), config.remote_fd, &config.digest)?,
+ )?;
}
- for config in &args.local_ro_file {
- file_pool.insert(
- config.ino,
- new_config_local_ro_file(
- &config.file_path,
- &config.merkle_tree_dump_path,
- &config.signature_path,
+ for remote_fd in &args.remote_ro_file_unverified {
+ let remote_fd = *remote_fd;
+ authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(remote_fd),
+ new_remote_unverified_file_entry(
+ service.clone(),
+ remote_fd,
+ service.getFileSize(remote_fd)?.try_into()?,
)?,
- );
+ )?;
}
- for config in &args.local_ro_file_unverified {
- file_pool.insert(config.ino, new_config_local_ro_file_unverified(&config.file_path)?);
+ for remote_fd in &args.remote_new_rw_file {
+ let remote_fd = *remote_fd;
+ authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(remote_fd),
+ new_remote_new_verified_file_entry(service.clone(), remote_fd)?,
+ )?;
}
- Ok(file_pool)
+ for remote_fd in &args.remote_new_rw_dir {
+ let remote_fd = *remote_fd;
+ authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(remote_fd),
+ new_remote_new_verified_dir_entry(service.clone(), remote_fd)?,
+ )?;
+ }
+
+ for config in &args.remote_ro_dir {
+ let dir_root_inode = authfs.add_entry_at_root_dir(
+ remote_fd_to_path_buf(config.remote_dir_fd),
+ AuthFsEntry::ReadonlyDirectory { dir: InMemoryDir::new() },
+ )?;
+
+ // Build the directory tree based on the mapping file.
+ let mut reader = File::open(&config.mapping_file_path)?;
+ let proto = FSVerityDigests::parse_from_reader(&mut reader)?;
+ for (path_str, digest) in &proto.digests {
+ if digest.hash_alg != "sha256" {
+ bail!("Unsupported hash algorithm: {}", digest.hash_alg);
+ }
+
+ let file_entry = {
+ let remote_path_str = path_str.strip_prefix(&config.prefix).ok_or_else(|| {
+ anyhow!("Expect path {} to match prefix {}", path_str, config.prefix)
+ })?;
+ AuthFsEntry::VerifiedReadonly {
+ reader: LazyVerifiedReadonlyFile::prepare_by_path(
+ service.clone(),
+ config.remote_dir_fd,
+ PathBuf::from(remote_path_str),
+ digest.digest.clone(),
+ ),
+ }
+ };
+ authfs.add_entry_at_ro_dir_by_path(dir_root_inode, Path::new(path_str), file_entry)?;
+ }
+ }
+
+ Ok(())
}
-fn main() -> Result<()> {
- let args = Args::from_args();
+fn remote_fd_to_path_buf(fd: i32) -> PathBuf {
+ PathBuf::from(fd.to_string())
+}
+
+fn try_main() -> Result<()> {
+ let args = Args::from_args_safe()?;
let log_level = if args.debug { log::Level::Debug } else { log::Level::Info };
android_logger::init_once(
android_logger::Config::default().with_tag("authfs").with_min_level(log_level),
);
- let file_pool = prepare_file_pool(&args)?;
- fusefs::loop_forever(file_pool, &args.mount_point)?;
+ let service = file::get_rpc_binder_service(args.cid)?;
+ let mut authfs = AuthFs::new(RemoteFsStatsReader::new(service.clone()));
+ prepare_root_dir_entries(service, &mut authfs, &args)?;
+
+ fusefs::mount_and_enter_message_loop(authfs, &args.mount_point, &args.extra_options)?;
bail!("Unexpected exit after the handler loop")
}
+
+fn main() {
+ if let Err(e) = try_main() {
+ error!("failed with {:?}", e);
+ std::process::exit(1);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn parse_hex_string() {
+ assert_eq!(from_hex_string("deadbeef").unwrap(), vec![0xde, 0xad, 0xbe, 0xef]);
+ assert_eq!(from_hex_string("DEADBEEF").unwrap(), vec![0xde, 0xad, 0xbe, 0xef]);
+ assert_eq!(from_hex_string("").unwrap(), Vec::<u8>::new());
+
+ assert!(from_hex_string("deadbee").is_err());
+ assert!(from_hex_string("X").is_err());
+ }
+}
diff --git a/authfs/testdata/README.md b/authfs/testdata/README.md
index 113fe62..cf641a9 100644
--- a/authfs/testdata/README.md
+++ b/authfs/testdata/README.md
@@ -1,9 +1,10 @@
fs-verity signing
=================
With a key pair, fs-verity signature can be generated by simply running
-`fsverity` command line tool from
+`fsverity_metadata_generator` command line tool, which uses
[fsverity-util](https://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/fsverity-utils.git).
```
-fsverity sign test.data test.data.fsv_sig --key=key.pem --cert=cert.pem
+fsverity_metadata_generator --fsverity-path {fsverity_path} --key key.pem --key-format pem \
+ --cert cert.pem --signature pkcs7 --output test.data.fsv_meta test.data
```
diff --git a/authfs/testdata/input.4k.fsv_meta b/authfs/testdata/input.4k.fsv_meta
new file mode 100644
index 0000000..218f038
--- /dev/null
+++ b/authfs/testdata/input.4k.fsv_meta
Binary files differ
diff --git a/authfs/testdata/input.4k.fsv_sig b/authfs/testdata/input.4k.fsv_sig
deleted file mode 100644
index 247a297..0000000
--- a/authfs/testdata/input.4k.fsv_sig
+++ /dev/null
Binary files differ
diff --git a/authfs/testdata/input.4k.merkle_dump b/authfs/testdata/input.4k.merkle_dump
deleted file mode 100644
index d93cd33..0000000
--- a/authfs/testdata/input.4k.merkle_dump
+++ /dev/null
Binary files differ
diff --git a/authfs/testdata/input.4k1.fsv_meta b/authfs/testdata/input.4k1.fsv_meta
new file mode 100644
index 0000000..acbbb40
--- /dev/null
+++ b/authfs/testdata/input.4k1.fsv_meta
Binary files differ
diff --git a/authfs/testdata/input.4k1.fsv_sig b/authfs/testdata/input.4k1.fsv_sig
deleted file mode 100644
index 02f0056..0000000
--- a/authfs/testdata/input.4k1.fsv_sig
+++ /dev/null
Binary files differ
diff --git a/authfs/testdata/input.4k1.merkle_dump b/authfs/testdata/input.4k1.merkle_dump
deleted file mode 100644
index 4ebdc3c..0000000
--- a/authfs/testdata/input.4k1.merkle_dump
+++ /dev/null
Binary files differ
diff --git a/authfs/testdata/input.4m.merkle_dump b/authfs/testdata/input.4m.fsv_meta
similarity index 89%
rename from authfs/testdata/input.4m.merkle_dump
rename to authfs/testdata/input.4m.fsv_meta
index b369bab..447a780 100644
--- a/authfs/testdata/input.4m.merkle_dump
+++ b/authfs/testdata/input.4m.fsv_meta
Binary files differ
diff --git a/authfs/testdata/input.4m.merkle_dump b/authfs/testdata/input.4m.fsv_meta.bad_merkle
similarity index 89%
copy from authfs/testdata/input.4m.merkle_dump
copy to authfs/testdata/input.4m.fsv_meta.bad_merkle
index b369bab..fd61c3e 100644
--- a/authfs/testdata/input.4m.merkle_dump
+++ b/authfs/testdata/input.4m.fsv_meta.bad_merkle
Binary files differ
diff --git a/authfs/testdata/input.4m.fsv_sig b/authfs/testdata/input.4m.fsv_sig
deleted file mode 100644
index 12adca3..0000000
--- a/authfs/testdata/input.4m.fsv_sig
+++ /dev/null
Binary files differ
diff --git a/authfs/testdata/input.4m.merkle_dump.bad b/authfs/testdata/input.4m.merkle_dump.bad
deleted file mode 100644
index eec67ca..0000000
--- a/authfs/testdata/input.4m.merkle_dump.bad
+++ /dev/null
Binary files differ
diff --git a/authfs/tests/Android.bp b/authfs/tests/Android.bp
index 8061c56..1b5cf09 100644
--- a/authfs/tests/Android.bp
+++ b/authfs/tests/Android.bp
@@ -14,8 +14,27 @@
"VirtualizationTestHelper",
],
test_suites: ["general-tests"],
+ data_device_bins: ["open_then_run"],
+ per_testcase_directory: true,
data: [
":authfs_test_files",
- ":MicrodroidTestApp.signed",
+ ":MicrodroidTestApp",
],
}
+
+rust_test {
+ name: "open_then_run",
+ crate_name: "open_then_run",
+ srcs: ["open_then_run.rs"],
+ edition: "2018",
+ rustlibs: [
+ "libandroid_logger",
+ "libanyhow",
+ "libclap",
+ "libcommand_fds",
+ "liblog_rust",
+ "libnix",
+ ],
+ test_suites: ["general-tests"],
+ test_harness: false,
+}
diff --git a/authfs/tests/AndroidTest.xml b/authfs/tests/AndroidTest.xml
index 6100ab9..cc358f2 100644
--- a/authfs/tests/AndroidTest.xml
+++ b/authfs/tests/AndroidTest.xml
@@ -23,7 +23,7 @@
<target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
<option name="throw-if-cmd-fail" value="true" />
- <!-- Prepare test directory. -->
+ <!-- Prepare test directories. -->
<option name="run-command" value="mkdir -p /data/local/tmp/authfs/mnt" />
<option name="teardown-command" value="rm -rf /data/local/tmp/authfs" />
</target_preparer>
@@ -31,24 +31,23 @@
<target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
<option name="cleanup" value="true" />
<option name="abort-on-push-failure" value="true" />
+
+ <!-- Test executable -->
+ <option name="push-file" key="open_then_run" value="/data/local/tmp/open_then_run" />
+
+ <!-- Test data files -->
<option name="push-file" key="cert.der" value="/data/local/tmp/authfs/cert.der" />
<option name="push-file" key="input.4m" value="/data/local/tmp/authfs/input.4m" />
<option name="push-file" key="input.4k1" value="/data/local/tmp/authfs/input.4k1" />
<option name="push-file" key="input.4k" value="/data/local/tmp/authfs/input.4k" />
- <option name="push-file" key="input.4m.fsv_sig"
- value="/data/local/tmp/authfs/input.4m.fsv_sig" />
- <option name="push-file" key="input.4k1.fsv_sig"
- value="/data/local/tmp/authfs/input.4k1.fsv_sig" />
- <option name="push-file" key="input.4k.fsv_sig"
- value="/data/local/tmp/authfs/input.4k.fsv_sig" />
- <option name="push-file" key="input.4m.merkle_dump"
- value="/data/local/tmp/authfs/input.4m.merkle_dump" />
- <option name="push-file" key="input.4m.merkle_dump.bad"
- value="/data/local/tmp/authfs/input.4m.merkle_dump.bad" />
- <option name="push-file" key="input.4k1.merkle_dump"
- value="/data/local/tmp/authfs/input.4k1.merkle_dump" />
- <option name="push-file" key="input.4k.merkle_dump"
- value="/data/local/tmp/authfs/input.4k.merkle_dump" />
+ <option name="push-file" key="input.4m.fsv_meta"
+ value="/data/local/tmp/authfs/input.4m.fsv_meta" />
+ <option name="push-file" key="input.4k1.fsv_meta"
+ value="/data/local/tmp/authfs/input.4k1.fsv_meta" />
+ <option name="push-file" key="input.4k.fsv_meta"
+ value="/data/local/tmp/authfs/input.4k.fsv_meta" />
+ <option name="push-file" key="input.4m.fsv_meta.bad_merkle"
+ value="/data/local/tmp/authfs/input.4m.fsv_meta.bad_merkle" />
</target_preparer>
<test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
diff --git a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
index 6e1c890..5d36f16 100644
--- a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
+++ b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
@@ -16,6 +16,8 @@
package com.android.virt.fs;
+import static com.android.tradefed.testtype.DeviceJUnit4ClassRunner.TestLogData;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
@@ -35,15 +37,20 @@
import com.android.tradefed.testtype.junit4.AfterClassWithInfo;
import com.android.tradefed.testtype.junit4.BeforeClassWithInfo;
import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.CommandStatus;
import org.junit.After;
import org.junit.AssumptionViolatedException;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.TestName;
import org.junit.runner.RunWith;
+import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
@RootPermissionTest
@RunWith(DeviceJUnit4ClassRunner.class)
@@ -52,6 +59,12 @@
/** Test directory on Android where data are located */
private static final String TEST_DIR = "/data/local/tmp/authfs";
+ /** Output directory where the test can generate output on Android */
+ private static final String TEST_OUTPUT_DIR = "/data/local/tmp/authfs/output_dir";
+
+ /** Path to open_then_run on Android */
+ private static final String OPEN_THEN_RUN_BIN = "/data/local/tmp/open_then_run";
+
/** Mount point of authfs on Microdroid during the test */
private static final String MOUNT_DIR = "/data/local/tmp";
@@ -61,21 +74,41 @@
/** Path to authfs on Microdroid */
private static final String AUTHFS_BIN = "/system/bin/authfs";
+ /** Idsig paths to be created for each APK in the "extra_apks" of vm_config_extra_apk.json. */
+ private static final String[] EXTRA_IDSIG_PATHS = new String[] {
+ TEST_DIR + "BuildManifest.apk.idsig",
+ };
+
+ /** Build manifest path in the VM. 0 is the index of extra_apks in vm_config_extra_apk.json. */
+ private static final String BUILD_MANIFEST_PATH = "/mnt/extra-apk/0/assets/build_manifest.pb";
+
/** Plenty of time for authfs to get ready */
private static final int AUTHFS_INIT_TIMEOUT_MS = 3000;
/** FUSE's magic from statfs(2) */
private static final String FUSE_SUPER_MAGIC_HEX = "65735546";
+ // fs-verity digest (sha256) of testdata/input.{4k, 4k1, 4m}
+ private static final String DIGEST_4K =
+ "sha256-9828cd65f4744d6adda216d3a63d8205375be485bfa261b3b8153d3358f5a576";
+ private static final String DIGEST_4K1 =
+ "sha256-3c70dcd4685ed256ebf1ef116c12e472f35b5017eaca422c0483dadd7d0b5a9f";
+ private static final String DIGEST_4M =
+ "sha256-f18a268d565348fb4bbf11f10480b198f98f2922eb711de149857b3cecf98a8d";
+
+ private static final int VMADDR_CID_HOST = 2;
+
private static CommandRunner sAndroid;
private static String sCid;
private static boolean sAssumptionFailed;
private ExecutorService mThreadPool = Executors.newCachedThreadPool();
+ @Rule public TestLogData mTestLogs = new TestLogData();
+ @Rule public TestName mTestName = new TestName();
+
@BeforeClassWithInfo
- public static void beforeClassWithDevice(TestInformation testInfo)
- throws DeviceNotAvailableException {
+ public static void beforeClassWithDevice(TestInformation testInfo) throws Exception {
assertNotNull(testInfo.getDevice());
ITestDevice androidDevice = testInfo.getDevice();
sAndroid = new CommandRunner(androidDevice);
@@ -100,15 +133,19 @@
CLog.i("Starting the shared VM");
final String apkName = "MicrodroidTestApp.apk";
final String packageName = "com.android.microdroid.test";
- final String configPath = "assets/vm_config.json"; // path inside the APK
+ final String configPath = "assets/vm_config_extra_apk.json"; // path inside the APK
sCid =
startMicrodroid(
androidDevice,
testInfo.getBuildInfo(),
apkName,
packageName,
+ EXTRA_IDSIG_PATHS,
configPath,
- /* debug */ false);
+ /* debug */ true,
+ /* use default memoryMib */ 0,
+ Optional.empty(),
+ Optional.empty());
adbConnectToMicrodroid(androidDevice, sCid);
// Root because authfs (started from shell in this test) currently require root to open
@@ -132,91 +169,97 @@
}
@Before
- public void setUp() {
+ public void setUp() throws Exception {
assumeFalse(sAssumptionFailed);
+ sAndroid.run("mkdir " + TEST_OUTPUT_DIR);
}
@After
- public void tearDown() throws DeviceNotAvailableException {
+ public void tearDown() throws Exception {
sAndroid.tryRun("killall fd_server");
- sAndroid.tryRun("rm -f " + TEST_DIR + "/output");
-
tryRunOnMicrodroid("killall authfs");
tryRunOnMicrodroid("umount " + MOUNT_DIR);
+
+ // Even though we only run one VM for the whole class, and could have collected the VM log
+ // after all tests are done, TestLogData doesn't seem to work at class level. Hence,
+ // collect recent logs manually for each test method.
+ String vmRecentLog = TEST_OUTPUT_DIR + "/vm_recent.log";
+ sAndroid.tryRun("tail -n 50 " + LOG_PATH + " > " + vmRecentLog);
+ archiveLogThenDelete(mTestLogs, getDevice(), vmRecentLog,
+ "vm_recent.log-" + mTestName.getMethodName());
+
+ sAndroid.run("rm -rf " + TEST_OUTPUT_DIR);
}
@Test
- public void testReadWithFsverityVerification_RemoteFile()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testReadWithFsverityVerification_RemoteFile() throws Exception {
// Setup
runFdServerOnAndroid(
- "3<input.4m 4<input.4m.merkle_dump 5<input.4m.fsv_sig 6<input.4m",
- "--ro-fds 3:4:5 --ro-fds 6 --rpc-binder");
+ "--open-ro 3:input.4m --open-ro 4:input.4m.fsv_meta --open-ro 6:input.4m",
+ "--ro-fds 3:4 --ro-fds 6");
runAuthFsOnMicrodroid(
- "--remote-ro-file-unverified 10:6:4194304 --remote-ro-file 11:3:4194304:cert.der"
- + " --cid 2");
+ "--remote-ro-file-unverified 6 --remote-ro-file 3:" + DIGEST_4M + " --cid "
+ + VMADDR_CID_HOST);
// Action
- String actualHashUnverified4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/10");
- String actualHash4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/11");
+ String actualHashUnverified4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/6");
+ String actualHash4m = computeFileHashOnMicrodroid(MOUNT_DIR + "/3");
// Verify
String expectedHash4m = computeFileHashOnAndroid(TEST_DIR + "/input.4m");
- assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4m, actualHashUnverified4m);
- assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4m, actualHash4m);
+ assertEquals("Inconsistent hash from /authfs/6: ", expectedHash4m, actualHashUnverified4m);
+ assertEquals("Inconsistent hash from /authfs/3: ", expectedHash4m, actualHash4m);
}
// Separate the test from the above simply because exec in shell does not allow open too many
// files.
@Test
- public void testReadWithFsverityVerification_RemoteSmallerFile()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testReadWithFsverityVerification_RemoteSmallerFile() throws Exception {
// Setup
runFdServerOnAndroid(
- "3<input.4k 4<input.4k.merkle_dump 5<input.4k.fsv_sig"
- + " 6<input.4k1 7<input.4k1.merkle_dump 8<input.4k1.fsv_sig",
- "--ro-fds 3:4:5 --ro-fds 6:7:8 --rpc-binder");
+ "--open-ro 3:input.4k --open-ro 4:input.4k.fsv_meta --open-ro"
+ + " 6:input.4k1 --open-ro 7:input.4k1.fsv_meta",
+ "--ro-fds 3:4 --ro-fds 6:7");
runAuthFsOnMicrodroid(
- "--remote-ro-file 10:3:4096:cert.der --remote-ro-file 11:6:4097:cert.der --cid 2");
+ "--remote-ro-file 3:" + DIGEST_4K + " --remote-ro-file 6:" + DIGEST_4K1 + " --cid "
+ + VMADDR_CID_HOST);
// Action
- String actualHash4k = computeFileHashOnMicrodroid(MOUNT_DIR + "/10");
- String actualHash4k1 = computeFileHashOnMicrodroid(MOUNT_DIR + "/11");
+ String actualHash4k = computeFileHashOnMicrodroid(MOUNT_DIR + "/3");
+ String actualHash4k1 = computeFileHashOnMicrodroid(MOUNT_DIR + "/6");
// Verify
String expectedHash4k = computeFileHashOnAndroid(TEST_DIR + "/input.4k");
String expectedHash4k1 = computeFileHashOnAndroid(TEST_DIR + "/input.4k1");
- assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4k, actualHash4k);
- assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4k1, actualHash4k1);
+ assertEquals("Inconsistent hash from /authfs/3: ", expectedHash4k, actualHash4k);
+ assertEquals("Inconsistent hash from /authfs/6: ", expectedHash4k1, actualHash4k1);
}
@Test
- public void testReadWithFsverityVerification_TamperedMerkleTree()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testReadWithFsverityVerification_TamperedMerkleTree() throws Exception {
// Setup
runFdServerOnAndroid(
- "3<input.4m 4<input.4m.merkle_dump.bad 5<input.4m.fsv_sig",
- "--ro-fds 3:4:5 --rpc-binder");
- runAuthFsOnMicrodroid("--remote-ro-file 10:3:4096:cert.der --cid 2");
+ "--open-ro 3:input.4m --open-ro 4:input.4m.fsv_meta.bad_merkle",
+ "--ro-fds 3:4");
+ runAuthFsOnMicrodroid("--remote-ro-file 3:" + DIGEST_4M + " --cid " + VMADDR_CID_HOST);
// Verify
- assertFalse(copyFileOnMicrodroid(MOUNT_DIR + "/10", "/dev/null"));
+ assertFalse(copyFileOnMicrodroid(MOUNT_DIR + "/3", "/dev/null"));
}
@Test
- public void testWriteThroughCorrectly()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testWriteThroughCorrectly() throws Exception {
// Setup
- runFdServerOnAndroid("3<>output", "--rw-fds 3 --rpc-binder");
- runAuthFsOnMicrodroid("--remote-new-rw-file 20:3 --cid 2");
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
// Action
String srcPath = "/system/bin/linker64";
- String destPath = MOUNT_DIR + "/20";
- String backendPath = TEST_DIR + "/output";
+ String destPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
assertTrue(copyFileOnMicrodroid(srcPath, destPath));
// Verify
@@ -225,61 +268,110 @@
}
@Test
- public void testWriteFailedIfDetectsTampering()
- throws DeviceNotAvailableException, InterruptedException {
+ public void testWriteFailedIfDetectsTampering() throws Exception {
// Setup
- runFdServerOnAndroid("3<>output", "--rw-fds 3 --rpc-binder");
- runAuthFsOnMicrodroid("--remote-new-rw-file 20:3 --cid 2");
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
String srcPath = "/system/bin/linker64";
- String destPath = MOUNT_DIR + "/20";
- String backendPath = TEST_DIR + "/output";
+ String destPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
assertTrue(copyFileOnMicrodroid(srcPath, destPath));
// Action
- // Tampering with the first 2 4K block of the backing file.
- sAndroid.run("dd if=/dev/zero of=" + backendPath + " bs=1 count=8192");
+ // Tampering with the first 2 4K-blocks of the backing file.
+ zeroizeFileOnAndroid(backendPath, /* size */ 8192, /* offset */ 0);
// Verify
// Write to a block partially requires a read back to calculate the new hash. It should fail
// when the content is inconsistent to the known hash. Use direct I/O to avoid simply
// writing to the filesystem cache.
- assertEquals(
- tryRunOnMicrodroid("dd if=/dev/zero of=" + destPath + " bs=1 count=1024 direct"),
- null);
+ assertFalse(
+ writeZerosAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 0, /* number */ 1024, /* writeThrough */ true));
// A full 4K write does not require to read back, so write can succeed even if the backing
// block has already been tampered.
- runOnMicrodroid("dd if=/dev/zero of=" + destPath + " bs=1 count=4096 skip=4096");
+ assertTrue(
+ writeZerosAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 4096, /* number */ 4096, /* writeThrough */ false));
// Otherwise, a partial write with correct backing file should still succeed.
- runOnMicrodroid("dd if=/dev/zero of=" + destPath + " bs=1 count=1024 skip=8192");
+ assertTrue(
+ writeZerosAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 8192, /* number */ 1024, /* writeThrough */ false));
}
@Test
- public void testFileResize() throws DeviceNotAvailableException, InterruptedException {
+ public void testReadFailedIfDetectsTampering() throws Exception {
// Setup
- runFdServerOnAndroid("3<>output", "--rw-fds 3 --rpc-binder");
- runAuthFsOnMicrodroid("--remote-new-rw-file 20:3 --cid 2");
- String outputPath = MOUNT_DIR + "/20";
- String backendPath = TEST_DIR + "/output";
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
+
+ String srcPath = "/system/bin/linker64";
+ String destPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
+ assertTrue(copyFileOnMicrodroid(srcPath, destPath));
+
+ // Action
+ // Tampering with the first 4K-block of the backing file.
+ zeroizeFileOnAndroid(backendPath, /* size */ 4096, /* offset */ 0);
+
+ // Verify
+ // Force dropping the page cache, so that the next read can be validated.
+ runOnMicrodroid("echo 1 > /proc/sys/vm/drop_caches");
+ // A read will fail if the backing data has been tampered with.
+ assertFalse(checkReadAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 0, /* number */ 4096));
+ assertTrue(checkReadAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 4096, /* number */ 4096));
+ }
+
+ @Test
+ public void testResizeFailedIfDetectsTampering() throws Exception {
+ // Setup
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
+
+ String outputPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
+ createFileWithOnesOnMicrodroid(outputPath, 8192);
+
+ // Action
+ // Tampering with the last 4K-block of the backing file.
+ zeroizeFileOnAndroid(backendPath, /* size */ 1, /* offset */ 4096);
+
+ // Verify
+ // A resize (to a non-multiple of 4K) will fail if the last backing chunk has been
+ // tampered. The original data is necessary (and has to be verified) to calculate the new
+ // hash with shorter data.
+ assertFalse(resizeFileOnMicrodroid(outputPath, 8000));
+ }
+
+ @Test
+ public void testFileResize() throws Exception {
+ // Setup
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
+ String outputPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
// Action & Verify
- runOnMicrodroid("yes $'\\x01' | tr -d '\\n' | dd bs=1 count=10000 of=" + outputPath);
+ createFileWithOnesOnMicrodroid(outputPath, 10000);
assertEquals(getFileSizeInBytesOnMicrodroid(outputPath), 10000);
expectBackingFileConsistency(
outputPath,
backendPath,
"684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
- resizeFileOnMicrodroid(outputPath, 15000);
+ assertTrue(resizeFileOnMicrodroid(outputPath, 15000));
assertEquals(getFileSizeInBytesOnMicrodroid(outputPath), 15000);
expectBackingFileConsistency(
outputPath,
backendPath,
"567c89f62586e0d33369157afdfe99a2fa36cdffb01e91dcdc0b7355262d610d");
- resizeFileOnMicrodroid(outputPath, 5000);
+ assertTrue(resizeFileOnMicrodroid(outputPath, 5000));
assertEquals(getFileSizeInBytesOnMicrodroid(outputPath), 5000);
expectBackingFileConsistency(
outputPath,
@@ -287,6 +379,335 @@
"e53130831c13dabff71d5d1797e3aaa467b4b7d32b3b8782c4ff03d76976f2aa");
}
+ @Test
+ public void testOutputDirectory_WriteNewFiles() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Action & Verify
+ // Can create a new file to write.
+ String expectedAndroidPath = androidOutputDir + "/file";
+ String authfsPath = authfsOutputDir + "/file";
+ createFileWithOnesOnMicrodroid(authfsPath, 10000);
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsPath), 10000);
+ expectBackingFileConsistency(
+ authfsPath,
+ expectedAndroidPath,
+ "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+
+ // Regular file operations work, e.g. resize.
+ assertTrue(resizeFileOnMicrodroid(authfsPath, 15000));
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsPath), 15000);
+ expectBackingFileConsistency(
+ authfsPath,
+ expectedAndroidPath,
+ "567c89f62586e0d33369157afdfe99a2fa36cdffb01e91dcdc0b7355262d610d");
+ }
+
+ @Test
+ public void testOutputDirectory_MkdirAndWriteFile() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Action
+ // Can create nested directories and can create a file in one.
+ runOnMicrodroid("mkdir " + authfsOutputDir + "/new_dir");
+ runOnMicrodroid("mkdir -p " + authfsOutputDir + "/we/need/to/go/deeper");
+ createFileWithOnesOnMicrodroid(authfsOutputDir + "/new_dir/file1", 10000);
+ createFileWithOnesOnMicrodroid(authfsOutputDir + "/we/need/file2", 10000);
+
+ // Verify
+ // Directories show up in Android.
+ sAndroid.run("test -d " + androidOutputDir + "/new_dir");
+ sAndroid.run("test -d " + androidOutputDir + "/we/need/to/go/deeper");
+ // Files exist in Android. Hashes on Microdroid and Android are consistent.
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/new_dir/file1"), 10000);
+ expectBackingFileConsistency(
+ authfsOutputDir + "/new_dir/file1",
+ androidOutputDir + "/new_dir/file1",
+ "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+ // Same to file in a nested directory.
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/we/need/file2"), 10000);
+ expectBackingFileConsistency(
+ authfsOutputDir + "/we/need/file2",
+ androidOutputDir + "/we/need/file2",
+ "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+ }
+
+ @Test
+ public void testOutputDirectory_CreateAndTruncateExistingFile() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Action & Verify
+ runOnMicrodroid("echo -n foo > " + authfsOutputDir + "/file");
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/file"), 3);
+ // Can override a file and write normally.
+ createFileWithOnesOnMicrodroid(authfsOutputDir + "/file", 10000);
+ assertEquals(getFileSizeInBytesOnMicrodroid(authfsOutputDir + "/file"), 10000);
+ expectBackingFileConsistency(
+ authfsOutputDir + "/file",
+ androidOutputDir + "/file",
+ "684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
+ }
+
+ @Test
+ public void testOutputDirectory_CanDeleteFile() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ runOnMicrodroid("echo -n foo > " + authfsOutputDir + "/file");
+ runOnMicrodroid("test -f " + authfsOutputDir + "/file");
+ sAndroid.run("test -f " + androidOutputDir + "/file");
+
+ // Action & Verify
+ runOnMicrodroid("rm " + authfsOutputDir + "/file");
+ runOnMicrodroid("test ! -f " + authfsOutputDir + "/file");
+ sAndroid.run("test ! -f " + androidOutputDir + "/file");
+ }
+
+ @Test
+ public void testOutputDirectory_CanDeleteDirectoryOnlyIfEmpty() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ runOnMicrodroid("mkdir -p " + authfsOutputDir + "/dir/dir2");
+ runOnMicrodroid("echo -n foo > " + authfsOutputDir + "/dir/file");
+ sAndroid.run("test -d " + androidOutputDir + "/dir/dir2");
+
+ // Action & Verify
+ runOnMicrodroid("rmdir " + authfsOutputDir + "/dir/dir2");
+ runOnMicrodroid("test ! -d " + authfsOutputDir + "/dir/dir2");
+ sAndroid.run("test ! -d " + androidOutputDir + "/dir/dir2");
+ // Can only delete a directory if empty
+ assertFailedOnMicrodroid("rmdir " + authfsOutputDir + "/dir");
+ runOnMicrodroid("test -d " + authfsOutputDir + "/dir"); // still there
+ runOnMicrodroid("rm " + authfsOutputDir + "/dir/file");
+ runOnMicrodroid("rmdir " + authfsOutputDir + "/dir");
+ runOnMicrodroid("test ! -d " + authfsOutputDir + "/dir");
+ sAndroid.run("test ! -d " + androidOutputDir + "/dir");
+ }
+
+ @Test
+ public void testOutputDirectory_CannotRecreateDirectoryIfNameExists() throws Exception {
+ // Setup
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ runOnMicrodroid("touch " + authfsOutputDir + "/some_file");
+ runOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir");
+ runOnMicrodroid("touch " + authfsOutputDir + "/some_dir/file");
+ runOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir/dir");
+
+ // Action & Verify
+ // Cannot create directory if an entry with the same name already exists.
+ assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_file");
+ assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir");
+ assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir/file");
+ assertFailedOnMicrodroid("mkdir " + authfsOutputDir + "/some_dir/dir");
+ }
+
+ @Test
+ public void testOutputDirectory_WriteToFdOfDeletedFile() throws Exception {
+ // Setup
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ String androidOutputDir = TEST_OUTPUT_DIR + "/dir";
+ sAndroid.run("mkdir " + androidOutputDir);
+ runFdServerOnAndroid("--open-dir 3:" + androidOutputDir, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Create a file with some data. Test the existence.
+ String outputPath = authfsOutputDir + "/out";
+ String androidOutputPath = androidOutputDir + "/out";
+ runOnMicrodroid("echo -n 123 > " + outputPath);
+ runOnMicrodroid("test -f " + outputPath);
+ sAndroid.run("test -f " + androidOutputPath);
+
+ // Action
+ String output = runOnMicrodroid(
+ // Open the file for append and read
+ "exec 4>>" + outputPath + " 5<" + outputPath + "; "
+ // Delete the file from the directory
+ + "rm " + outputPath + "; "
+ // Append more data to the file descriptor
+ + "echo -n 456 >&4; "
+ // Print the whole file from the file descriptor
+ + "cat <&5");
+
+ // Verify
+ // Output contains all written data, while the files are deleted.
+ assertEquals("123456", output);
+ runOnMicrodroid("test ! -f " + outputPath);
+ sAndroid.run("test ! -f " + androidOutputDir + "/out");
+ }
+
+ @Test
+ public void testInputDirectory_CanReadFile() throws Exception {
+ // Setup
+ String authfsInputDir = MOUNT_DIR + "/3";
+ runFdServerOnAndroid("--open-dir 3:/system", "--ro-dirs 3");
+ runAuthFsOnMicrodroid("--remote-ro-dir 3:" + BUILD_MANIFEST_PATH + ":system/ --cid "
+ + VMADDR_CID_HOST);
+
+ // Action
+ String actualHash =
+ computeFileHashOnMicrodroid(authfsInputDir + "/system/framework/framework.jar");
+
+ // Verify
+ String expectedHash = computeFileHashOnAndroid("/system/framework/framework.jar");
+ assertEquals("Expect consistent hash through /authfs/3: ", expectedHash, actualHash);
+ }
+
+ @Test
+ public void testInputDirectory_OnlyAllowlistedFilesExist() throws Exception {
+ // Setup
+ String authfsInputDir = MOUNT_DIR + "/3";
+ runFdServerOnAndroid("--open-dir 3:/system", "--ro-dirs 3");
+ runAuthFsOnMicrodroid("--remote-ro-dir 3:" + BUILD_MANIFEST_PATH + ":system/ --cid "
+ + VMADDR_CID_HOST);
+
+ // Verify
+ runOnMicrodroid("test -f " + authfsInputDir + "/system/framework/services.jar");
+ assertFailedOnMicrodroid("test -f " + authfsInputDir + "/system/bin/sh");
+ }
+
+ @Test
+ public void testReadOutputDirectory() throws Exception {
+ // Setup
+ runFdServerOnAndroid("--open-dir 3:" + TEST_OUTPUT_DIR, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Action
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ runOnMicrodroid("mkdir -p " + authfsOutputDir + "/dir/dir2/dir3");
+ runOnMicrodroid("touch " + authfsOutputDir + "/dir/dir2/dir3/file1");
+ runOnMicrodroid("touch " + authfsOutputDir + "/dir/dir2/dir3/file2");
+ runOnMicrodroid("touch " + authfsOutputDir + "/dir/dir2/dir3/file3");
+ runOnMicrodroid("touch " + authfsOutputDir + "/file");
+
+ // Verify
+ String[] actual = runOnMicrodroid("cd " + authfsOutputDir + "; find |sort").split("\n");
+ String[] expected = new String[] {
+ ".",
+ "./dir",
+ "./dir/dir2",
+ "./dir/dir2/dir3",
+ "./dir/dir2/dir3/file1",
+ "./dir/dir2/dir3/file2",
+ "./dir/dir2/dir3/file3",
+ "./file"};
+ assertEquals(expected, actual);
+
+ // Add more entries.
+ runOnMicrodroid("mkdir -p " + authfsOutputDir + "/dir2");
+ runOnMicrodroid("touch " + authfsOutputDir + "/file2");
+ // Check new entries. Also check that the types are correct.
+ actual = runOnMicrodroid(
+ "cd " + authfsOutputDir + "; find -maxdepth 1 -type f |sort").split("\n");
+ expected = new String[] {"./file", "./file2"};
+ assertEquals(expected, actual);
+ actual = runOnMicrodroid(
+ "cd " + authfsOutputDir + "; find -maxdepth 1 -type d |sort").split("\n");
+ expected = new String[] {".", "./dir", "./dir2"};
+ assertEquals(expected, actual);
+ }
+
+ @Test
+ public void testChmod_File() throws Exception {
+ // Setup
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
+
+ // Action & Verify
+ // Change mode
+ runOnMicrodroid("chmod 321 " + MOUNT_DIR + "/3");
+ expectFileMode("--wx-w---x", MOUNT_DIR + "/3", TEST_OUTPUT_DIR + "/file");
+ // Can't set the disallowed bits
+ assertFailedOnMicrodroid("chmod +s " + MOUNT_DIR + "/3");
+ assertFailedOnMicrodroid("chmod +t " + MOUNT_DIR + "/3");
+ }
+
+ @Test
+ public void testChmod_Dir() throws Exception {
+ // Setup
+ runFdServerOnAndroid("--open-dir 3:" + TEST_OUTPUT_DIR, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Action & Verify
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ // Create with umask
+ runOnMicrodroid("umask 000; mkdir " + authfsOutputDir + "/dir");
+ runOnMicrodroid("umask 022; mkdir " + authfsOutputDir + "/dir/dir2");
+ expectFileMode("drwxrwxrwx", authfsOutputDir + "/dir", TEST_OUTPUT_DIR + "/dir");
+ expectFileMode("drwxr-xr-x", authfsOutputDir + "/dir/dir2", TEST_OUTPUT_DIR + "/dir/dir2");
+ // Change mode
+ runOnMicrodroid("chmod -w " + authfsOutputDir + "/dir/dir2");
+ expectFileMode("dr-xr-xr-x", authfsOutputDir + "/dir/dir2", TEST_OUTPUT_DIR + "/dir/dir2");
+ runOnMicrodroid("chmod 321 " + authfsOutputDir + "/dir");
+ expectFileMode("d-wx-w---x", authfsOutputDir + "/dir", TEST_OUTPUT_DIR + "/dir");
+ // Can't set the disallowed bits
+ assertFailedOnMicrodroid("chmod +s " + authfsOutputDir + "/dir/dir2");
+ assertFailedOnMicrodroid("chmod +t " + authfsOutputDir + "/dir");
+ }
+
+ @Test
+ public void testChmod_FileInOutputDirectory() throws Exception {
+ // Setup
+ runFdServerOnAndroid("--open-dir 3:" + TEST_OUTPUT_DIR, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Action & Verify
+ String authfsOutputDir = MOUNT_DIR + "/3";
+ // Create with umask
+ runOnMicrodroid("umask 000; echo -n foo > " + authfsOutputDir + "/file");
+ runOnMicrodroid("umask 022; echo -n foo > " + authfsOutputDir + "/file2");
+ expectFileMode("-rw-rw-rw-", authfsOutputDir + "/file", TEST_OUTPUT_DIR + "/file");
+ expectFileMode("-rw-r--r--", authfsOutputDir + "/file2", TEST_OUTPUT_DIR + "/file2");
+ // Change mode
+ runOnMicrodroid("chmod -w " + authfsOutputDir + "/file");
+ expectFileMode("-r--r--r--", authfsOutputDir + "/file", TEST_OUTPUT_DIR + "/file");
+ runOnMicrodroid("chmod 321 " + authfsOutputDir + "/file2");
+ expectFileMode("--wx-w---x", authfsOutputDir + "/file2", TEST_OUTPUT_DIR + "/file2");
+ // Can't set the disallowed bits
+ assertFailedOnMicrodroid("chmod +s " + authfsOutputDir + "/file");
+ assertFailedOnMicrodroid("chmod +t " + authfsOutputDir + "/file2");
+ }
+
+ @Test
+ public void testStatfs() throws Exception {
+ // Setup
+ runFdServerOnAndroid("--open-dir 3:" + TEST_OUTPUT_DIR, "--rw-dirs 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-dir 3 --cid " + VMADDR_CID_HOST);
+
+ // Verify
+ // Magic matches. Has only 2 inodes (root and "/3").
+ assertEquals(
+ FUSE_SUPER_MAGIC_HEX + " 2", runOnMicrodroid("stat -f -c '%t %c' " + MOUNT_DIR));
+ }
+
private void expectBackingFileConsistency(
String authFsPath, String backendPath, String expectedHash)
throws DeviceNotAvailableException {
@@ -328,22 +749,71 @@
}
}
- private void resizeFileOnMicrodroid(String path, long size) {
- runOnMicrodroid("truncate -c -s " + size + " " + path);
+ private void expectFileMode(String expected, String microdroidPath, String androidPath)
+ throws DeviceNotAvailableException {
+ String actual = runOnMicrodroid("stat -c '%A' " + microdroidPath);
+ assertEquals("Inconsistent mode for " + microdroidPath, expected, actual);
+
+ actual = sAndroid.run("stat -c '%A' " + androidPath);
+ assertEquals("Inconsistent mode for " + androidPath + " (android)", expected, actual);
+ }
+
+ private boolean resizeFileOnMicrodroid(String path, long size) {
+ CommandResult result = runOnMicrodroidForResult("truncate -c -s " + size + " " + path);
+ return result.getStatus() == CommandStatus.SUCCESS;
}
private long getFileSizeInBytesOnMicrodroid(String path) {
return Long.parseLong(runOnMicrodroid("stat -c '%s' " + path));
}
+ private void createFileWithOnesOnMicrodroid(String filePath, long numberOfOnes) {
+ runOnMicrodroid(
+ "yes $'\\x01' | tr -d '\\n' | dd bs=1 count=" + numberOfOnes + " of=" + filePath);
+ }
+
+ private boolean checkReadAtFileOffsetOnMicrodroid(String filePath, long offset, long size) {
+ String cmd = "dd if=" + filePath + " of=/dev/null bs=1 count=" + size;
+ if (offset > 0) {
+ cmd += " skip=" + offset;
+ }
+ CommandResult result = runOnMicrodroidForResult(cmd);
+ return result.getStatus() == CommandStatus.SUCCESS;
+ }
+
+ private boolean writeZerosAtFileOffsetOnMicrodroid(
+ String filePath, long offset, long numberOfZeros, boolean writeThrough) {
+ String cmd = "dd if=/dev/zero of=" + filePath + " bs=1 count=" + numberOfZeros
+ + " conv=notrunc";
+ if (offset > 0) {
+ cmd += " seek=" + offset;
+ }
+ if (writeThrough) {
+ cmd += " direct";
+ }
+ CommandResult result = runOnMicrodroidForResult(cmd);
+ return result.getStatus() == CommandStatus.SUCCESS;
+ }
+
+ private void zeroizeFileOnAndroid(String filePath, long size, long offset)
+ throws DeviceNotAvailableException {
+ sAndroid.run("dd if=/dev/zero of=" + filePath + " bs=1 count=" + size + " conv=notrunc"
+ + " seek=" + offset);
+ }
+
private void runAuthFsOnMicrodroid(String flags) {
String cmd = AUTHFS_BIN + " " + MOUNT_DIR + " " + flags;
+ AtomicBoolean starting = new AtomicBoolean(true);
mThreadPool.submit(
() -> {
- CLog.i("Starting authfs");
- CommandResult result = runOnMicrodroidForResult(cmd);
- CLog.w("authfs has stopped: " + result);
+ // authfs may fail to start if fd_server is not yet listening on the vsock
+ // ("Error: Invalid raw AIBinder"). Just restart if that happens.
+ while (starting.get()) {
+ CLog.i("Starting authfs");
+ CommandResult result = runOnMicrodroidForResult(cmd);
+ CLog.w("authfs has stopped: " + result);
+ }
});
try {
PollingCheck.waitFor(
@@ -353,13 +823,25 @@
// methods. waitFor throws Exception because the callback, Callable#call(), has a
// signature to throw an Exception.
throw new RuntimeException(e);
+ } finally {
+ starting.set(false);
}
}
- private void runFdServerOnAndroid(String execParamsForOpeningFds, String flags)
+ private void runFdServerOnAndroid(String helperFlags, String fdServerFlags)
throws DeviceNotAvailableException {
- String cmd = "cd " + TEST_DIR + " && exec " + execParamsForOpeningFds + " " + FD_SERVER_BIN
- + " " + flags;
+ String cmd =
+ "cd "
+ + TEST_DIR
+ + " && "
+ + OPEN_THEN_RUN_BIN
+ + " "
+ + helperFlags
+ + " -- "
+ + FD_SERVER_BIN
+ + " "
+ + fdServerFlags;
+
mThreadPool.submit(
() -> {
try {
diff --git a/authfs/tests/open_then_run.rs b/authfs/tests/open_then_run.rs
new file mode 100644
index 0000000..a540f9d
--- /dev/null
+++ b/authfs/tests/open_then_run.rs
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! This is a test helper program that opens files and/or directories, then passes the file
+//! descriptors to the specified command. When passing the file descriptors, they are mapped to the
+//! specified numbers in the child process.
+
+use anyhow::{bail, Context, Result};
+use clap::{App, Arg, Values};
+use command_fds::{CommandFdExt, FdMapping};
+use log::{debug, error};
+use nix::{dir::Dir, fcntl::OFlag, sys::stat::Mode};
+use std::fs::{File, OpenOptions};
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::process::Command;
+
+// `PseudoRawFd` is just an integer and not necessarily backed by a real FD. It is used to denote
+// the expected FD number when setting up the FD mapping in the child process. The intention
+// with this alias is to improve readability by distinguishing it from an actual RawFd.
+type PseudoRawFd = RawFd;
+
+struct FileMapping<T: AsRawFd> {
+ file: T,
+ target_fd: PseudoRawFd,
+}
+
+impl<T: AsRawFd> FileMapping<T> {
+ fn as_fd_mapping(&self) -> FdMapping {
+ FdMapping { parent_fd: self.file.as_raw_fd(), child_fd: self.target_fd }
+ }
+}
+
+struct Args {
+ ro_files: Vec<FileMapping<File>>,
+ rw_files: Vec<FileMapping<File>>,
+ dir_files: Vec<FileMapping<Dir>>,
+ cmdline_args: Vec<String>,
+}
+
+fn parse_and_create_file_mapping<F, T>(
+ values: Option<Values<'_>>,
+ opener: F,
+) -> Result<Vec<FileMapping<T>>>
+where
+ F: Fn(&str) -> Result<T>,
+ T: AsRawFd,
+{
+ if let Some(options) = values {
+ options
+ .map(|option| {
+ // Example option: 10:/some/path
+ let strs: Vec<&str> = option.split(':').collect();
+ if strs.len() != 2 {
+ bail!("Invalid option: {}", option);
+ }
+ let fd = strs[0].parse::<PseudoRawFd>().context("Invalid FD format")?;
+ let path = strs[1];
+ Ok(FileMapping { target_fd: fd, file: opener(path)? })
+ })
+ .collect::<Result<_>>()
+ } else {
+ Ok(Vec::new())
+ }
+}
+
+fn parse_args() -> Result<Args> {
+ #[rustfmt::skip]
+ let matches = App::new("open_then_run")
+ .arg(Arg::with_name("open-ro")
+ .long("open-ro")
+ .value_name("FD:PATH")
+ .help("Open <PATH> read-only to pass as fd <FD>")
+ .multiple(true)
+ .number_of_values(1))
+ .arg(Arg::with_name("open-rw")
+ .long("open-rw")
+ .value_name("FD:PATH")
+ .help("Open/create <PATH> read-write to pass as fd <FD>")
+ .multiple(true)
+ .number_of_values(1))
+ .arg(Arg::with_name("open-dir")
+ .long("open-dir")
+ .value_name("FD:DIR")
+ .help("Open <DIR> to pass as fd <FD>")
+ .multiple(true)
+ .number_of_values(1))
+ .arg(Arg::with_name("args")
+ .help("Command line to execute with pre-opened FD inherited")
+ .last(true)
+ .required(true)
+ .multiple(true))
+ .get_matches();
+
+ let ro_files = parse_and_create_file_mapping(matches.values_of("open-ro"), |path| {
+ OpenOptions::new().read(true).open(path).with_context(|| format!("Open {} read-only", path))
+ })?;
+
+ let rw_files = parse_and_create_file_mapping(matches.values_of("open-rw"), |path| {
+ OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .with_context(|| format!("Open {} read-write", path))
+ })?;
+
+ let dir_files = parse_and_create_file_mapping(matches.values_of("open-dir"), |path| {
+ Dir::open(path, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::S_IRWXU)
+ .with_context(|| format!("Open {} directory", path))
+ })?;
+
+ let cmdline_args: Vec<_> = matches.values_of("args").unwrap().map(|s| s.to_string()).collect();
+
+ Ok(Args { ro_files, rw_files, dir_files, cmdline_args })
+}
+
+fn try_main() -> Result<()> {
+ let args = parse_args()?;
+
+ let mut command = Command::new(&args.cmdline_args[0]);
+ command.args(&args.cmdline_args[1..]);
+
+ // Set up FD mappings in the child process.
+ let mut fd_mappings = Vec::new();
+ fd_mappings.extend(args.ro_files.iter().map(FileMapping::as_fd_mapping));
+ fd_mappings.extend(args.rw_files.iter().map(FileMapping::as_fd_mapping));
+ fd_mappings.extend(args.dir_files.iter().map(FileMapping::as_fd_mapping));
+ command.fd_mappings(fd_mappings)?;
+
+ debug!("Spawning {:?}", command);
+ command.spawn()?;
+ Ok(())
+}
+
+fn main() {
+ android_logger::init_once(
+ android_logger::Config::default()
+ .with_tag("open_then_run")
+ .with_min_level(log::Level::Debug),
+ );
+
+ if let Err(e) = try_main() {
+ error!("Failed with {:?}", e);
+ std::process::exit(1);
+ }
+}
diff --git a/compos/Android.bp b/compos/Android.bp
index ec3f67f..69b22d6 100644
--- a/compos/Android.bp
+++ b/compos/Android.bp
@@ -2,90 +2,41 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-rust_binary {
- name: "pvm_exec",
- srcs: ["src/pvm_exec.rs"],
+rust_defaults {
+ name: "compsvc_defaults",
+ srcs: ["src/compsvc_main.rs"],
rustlibs: [
+ "android.system.virtualmachineservice-rust",
+ "authfs_aidl_interface-rust",
"compos_aidl_interface-rust",
+ "libandroid_logger",
"libanyhow",
+ "libbinder_common",
"libbinder_rpc_unstable_bindgen",
"libbinder_rs",
"libclap",
+ "libcompos_common",
"liblibc",
"liblog_rust",
"libminijail_rust",
"libnix",
+ "libodsign_proto_rust",
+ "libprotobuf",
+ "libregex",
+ "librustutils",
"libscopeguard",
],
prefer_rlib: true,
shared_libs: [
"libbinder_rpc_unstable",
- ],
- apex_available: [
- "com.android.compos",
+ "libcrypto",
],
}
rust_binary {
name: "compsvc",
- srcs: ["src/compsvc_main.rs"],
- rustlibs: [
- "compos_aidl_interface-rust",
- "libandroid_logger",
- "libanyhow",
- "libbinder_rpc_unstable_bindgen",
- "libbinder_rs",
- "libclap",
- "liblog_rust",
- "libminijail_rust",
- ],
- prefer_rlib: true,
- shared_libs: [
- "libbinder_rpc_unstable",
- ],
+ defaults: ["compsvc_defaults"],
apex_available: [
"com.android.compos",
],
}
-
-rust_binary {
- name: "compsvc_worker",
- srcs: ["src/compsvc_worker.rs"],
- rustlibs: [
- "libandroid_logger",
- "libanyhow",
- "libclap",
- "liblog_rust",
- "libminijail_rust",
- "libnix",
- ],
- prefer_rlib: true,
- apex_available: [
- "com.android.compos",
- ],
-}
-
-rust_binary {
- name: "compos_key_main",
- srcs: ["src/compos_key_main.rs"],
- edition: "2018",
- rustlibs: [
- "compos_aidl_interface-rust",
- "android.system.keystore2-V1-rust",
- "android.hardware.security.keymint-V1-rust",
- "libandroid_logger",
- "libanyhow",
- "libbinder_rs",
- "libbinder_rpc_unstable_bindgen",
- "libclap",
- "liblog_rust",
- "libminijail_rust",
- "libring",
- "libscopeguard",
- ],
- prefer_rlib: true,
- shared_libs: [
- "libbinder_rpc_unstable",
- ],
- apex_available: ["com.android.compos"],
-}
diff --git a/compos/aidl/Android.bp b/compos/aidl/Android.bp
index 4d36d3d..7036511 100644
--- a/compos/aidl/Android.bp
+++ b/compos/aidl/Android.bp
@@ -9,6 +9,11 @@
"com/android/compos/*.aidl",
],
backend: {
+ java: {
+ apex_available: [
+ "com.android.compos",
+ ],
+ },
rust: {
enabled: true,
apex_available: [
diff --git a/compos/aidl/com/android/compos/ICompOsKeyService.aidl b/compos/aidl/com/android/compos/ICompOsKeyService.aidl
deleted file mode 100644
index eb2caa7..0000000
--- a/compos/aidl/com/android/compos/ICompOsKeyService.aidl
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.compos;
-
-import com.android.compos.CompOsKeyData;
-import com.android.compos.ICompService;
-
-/** {@hide} */
-interface ICompOsKeyService {
- /**
- * Generate a new public/private key pair suitable for signing CompOs output files.
- *
- * @return a certificate for the public key and the encrypted private key
- */
- CompOsKeyData generateSigningKey();
-
- /**
- * Check that the supplied encrypted private key is valid for signing CompOs output files, and
- * corresponds to the public key.
- *
- * @param keyBlob The encrypted blob containing the private key, as returned by
- * generateSigningKey().
- * @param publicKey The public key, as a DER encoded RSAPublicKey (RFC 3447 Appendix-A.1.1).
- * @return whether the inputs are valid and correspond to each other.
- */
- boolean verifySigningKey(in byte[] keyBlob, in byte[] publicKey);
-
- /**
- * Use the supplied encrypted private key to sign some data.
- *
- * @param keyBlob The encrypted blob containing the private key, as returned by
- * generateSigningKey().
- * @param data The data to be signed. (Large data sizes may cause failure.)
- * @return the signature.
- */
- // STOPSHIP(b/193241041): We must not expose this from the PVM.
- byte[] sign(in byte[] keyBlob, in byte[] data);
-
- /**
- * Return an instance of ICompService that will sign output files with a given encrypted
- * private key.
- *
- * @param keyBlob The encrypted blob containing the private key, as returned by
- * generateSigningKey().
- */
- ICompService getCompService(in byte[] keyBlob);
-}
diff --git a/compos/aidl/com/android/compos/ICompOsService.aidl b/compos/aidl/com/android/compos/ICompOsService.aidl
new file mode 100644
index 0000000..ef48ccf
--- /dev/null
+++ b/compos/aidl/com/android/compos/ICompOsService.aidl
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.compos;
+
+/** {@hide} */
+interface ICompOsService {
+ /**
+ * What type of compilation to perform.
+ */
+ @Backing(type="int")
+ enum CompilationMode {
+ /** Compile artifacts required by the current set of APEXes for use on reboot. */
+ NORMAL_COMPILE = 0,
+ /** Compile a full set of artifacts for test purposes. */
+ TEST_COMPILE = 1,
+ }
+
+ /**
+ * Run odrefresh in the VM context.
+ *
+ * The execution is based on the VM's APEX mounts, files on Android's /system (by accessing
+ * through systemDirFd over AuthFS), and *CLASSPATH derived in the VM, to generate the same
+ * odrefresh output artifacts to the output directory (through outputDirFd).
+ *
+ * @param compilation_mode The type of compilation to be performed
+ * @param systemDirFd An fd referring to /system
+ * @param outputDirFd An fd referring to the output directory, ART_APEX_DATA
+ * @param stagingDirFd An fd referring to the staging directory, e.g. ART_APEX_DATA/staging
+ * @param targetDirName The sub-directory of the output directory to which artifacts are to be
+ * written (e.g. dalvik-cache)
+ * @param zygoteArch The zygote architecture (ro.zygote)
+ * @param systemServerCompilerFilter The compiler filter used to compile system server
+ * @return odrefresh exit code
+ */
+ byte odrefresh(CompilationMode compilation_mode, int systemDirFd, int outputDirFd,
+ int stagingDirFd, String targetDirName, String zygoteArch,
+ String systemServerCompilerFilter);
+
+ /**
+ * Returns the current VM's signing key, as an Ed25519 public key
+ * (https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5).
+ */
+ byte[] getPublicKey();
+}
diff --git a/compos/aidl/com/android/compos/ICompService.aidl b/compos/aidl/com/android/compos/ICompService.aidl
deleted file mode 100644
index 0e18442..0000000
--- a/compos/aidl/com/android/compos/ICompService.aidl
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.compos;
-
-import com.android.compos.Metadata;
-
-/** {@hide} */
-interface ICompService {
- /**
- * Execute a command composed of the args, in a context that may be specified in the Metadata,
- * e.g. with file descriptors pre-opened. The service is responsible to decide what executables
- * it may run.
- *
- * @param args The command line arguments to run. The 0-th args is normally the program name,
- * which may not be used by the service. The service may be configured to always use
- * a fixed executable, or possibly use the 0-th args are the executable lookup hint.
- * @param metadata Additional information of the execution
- * @return exit code of the program
- */
- byte execute(in String[] args, in Metadata metadata);
-}
diff --git a/compos/aidl/com/android/compos/InputFdAnnotation.aidl b/compos/aidl/com/android/compos/InputFdAnnotation.aidl
deleted file mode 100644
index 44a5591..0000000
--- a/compos/aidl/com/android/compos/InputFdAnnotation.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.compos;
-
-/** {@hide} */
-parcelable InputFdAnnotation {
- /**
- * File descriptor number to be passed to the program. This is also the same file descriptor
- * number used in the backend server.
- */
- int fd;
-
- /** The actual file size in bytes of the backing file to be read. */
- long file_size;
-}
diff --git a/compos/aidl/com/android/compos/OutputFdAnnotation.aidl b/compos/aidl/com/android/compos/OutputFdAnnotation.aidl
deleted file mode 100644
index 95ce425..0000000
--- a/compos/aidl/com/android/compos/OutputFdAnnotation.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.compos;
-
-/** {@hide} */
-parcelable OutputFdAnnotation {
- /**
- * File descriptor number to be passed to the program. This is currently assumed to be same as
- * the file descriptor number used in the backend server.
- */
- int fd;
-}
diff --git a/compos/apex/Android.bp b/compos/apex/Android.bp
index 061c362..4ff0635 100644
--- a/compos/apex/Android.bp
+++ b/compos/apex/Android.bp
@@ -33,23 +33,44 @@
key: "com.android.compos.key",
certificate: ":com.android.compos.certificate",
- // TODO(victorhsieh): make it updatable
+ // TODO(b/206618706): make it updatable
updatable: false,
+ future_updatable: true,
platform_apis: true,
+ system_ext_specific: true,
+
binaries: [
- "compos_key_cmd",
- "compos_key_main",
+ // Used in Android
+ "compos_verify",
+ "composd",
+ "composd_cmd",
+
+ // Used in VM
+ "compos_key_helper",
"compsvc",
- "compsvc_worker",
- "pvm_exec",
],
+ systemserverclasspath_fragments: ["com.android.compos-systemserverclasspath-fragment"],
+
apps: [
"CompOSPayloadApp",
],
prebuilts: [
- "CompOSPayloadApp.apk.idsig",
+ "com.android.compos.init.rc",
],
}
+
+systemserverclasspath_fragment {
+ name: "com.android.compos-systemserverclasspath-fragment",
+ contents: ["service-compos"],
+ apex_available: ["com.android.compos"],
+}
+
+prebuilt_etc {
+ name: "com.android.compos.init.rc",
+ src: "composd.rc",
+ filename: "init.rc",
+ installable: false,
+}
diff --git a/compos/apex/composd.rc b/compos/apex/composd.rc
new file mode 100644
index 0000000..3e2efb1
--- /dev/null
+++ b/compos/apex/composd.rc
@@ -0,0 +1,21 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+service composd /apex/com.android.compos/bin/composd
+ class main
+ user root
+ group system
+ interface aidl android.system.composd
+ disabled
+ oneshot
diff --git a/compos/apk/Android.bp b/compos/apk/Android.bp
index 3a68b8e..c6192b9 100644
--- a/compos/apk/Android.bp
+++ b/compos/apk/Android.bp
@@ -3,42 +3,7 @@
}
android_app {
- name: "CompOSPayloadApp.unsigned",
+ name: "CompOSPayloadApp",
sdk_version: "current",
apex_available: ["com.android.compos"],
}
-
-// TODO(b/190409306) this is temporary until we have a solid way to pass merkle tree
-java_genrule {
- name: "CompOSPayloadApp.signing",
- out: [
- "CompOSPayloadApp.apk",
- "CompOSPayloadApp.apk.idsig",
- ],
- srcs: [":CompOSPayloadApp.unsigned"],
- tools: ["apksigner"],
- tool_files: ["test.keystore"],
- cmd: "$(location apksigner) sign " +
- "--ks $(location test.keystore) " +
- "--ks-pass=pass:testkey --key-pass=pass:testkey " +
- "--in $(in) " +
- "--out $(genDir)/CompOSPayloadApp.apk",
- // $(genDir)/CompOSPayloadApp.apk.idsig is generated implicitly
-}
-
-android_app_import {
- name: "CompOSPayloadApp",
- // Make sure the build system doesn't try to resign the APK
- dex_preopt: {
- enabled: false,
- },
- apk: ":CompOSPayloadApp.signing{CompOSPayloadApp.apk}",
- presigned: true,
- filename: "CompOSPayloadApp.apk",
- apex_available: ["com.android.compos"],
-}
-
-prebuilt_etc {
- name: "CompOSPayloadApp.apk.idsig",
- src: ":CompOSPayloadApp.signing{CompOSPayloadApp.apk.idsig}",
-}
diff --git a/compos/apk/assets/key_service_vm_config.json b/compos/apk/assets/key_service_vm_config.json
deleted file mode 100644
index 3b6b88c..0000000
--- a/compos/apk/assets/key_service_vm_config.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "version": 1,
- "os": {
- "name": "microdroid"
- },
- "task": {
- "type": "executable",
- "command": "/apex/com.android.compos/bin/compos_key_main",
- "args": [
- "--rpc-binder"
- ]
- },
- "apexes": [
- {
- "name": "com.android.compos"
- }
- ]
-}
\ No newline at end of file
diff --git a/compos/apk/assets/vm_config.json b/compos/apk/assets/vm_config.json
index f9f1f90..b93c3f7 100644
--- a/compos/apk/assets/vm_config.json
+++ b/compos/apk/assets/vm_config.json
@@ -5,18 +5,25 @@
},
"task": {
"type": "executable",
- "command": "/apex/com.android.compos/bin/compsvc",
- "args": [
- "--rpc-binder",
- "/apex/com.android.art/bin/dex2oat64"
- ]
+ "command": "/apex/com.android.compos/bin/compsvc"
},
+ "extra_apks": [
+ {
+ "path": "/system/etc/security/fsverity/BuildManifest.apk"
+ }
+ ],
"apexes": [
{
"name": "com.android.art"
},
{
"name": "com.android.compos"
+ },
+ {
+ "name": "com.android.sdkext"
+ },
+ {
+ "name": "{CLASSPATH}"
}
]
-}
\ No newline at end of file
+}
diff --git a/compos/apk/assets/vm_config_staged.json b/compos/apk/assets/vm_config_staged.json
new file mode 100644
index 0000000..83fa6eb
--- /dev/null
+++ b/compos/apk/assets/vm_config_staged.json
@@ -0,0 +1,30 @@
+{
+ "version": 1,
+ "os": {
+ "name": "microdroid"
+ },
+ "task": {
+ "type": "executable",
+ "command": "/apex/com.android.compos/bin/compsvc"
+ },
+ "prefer_staged": true,
+ "extra_apks": [
+ {
+ "path": "/system/etc/security/fsverity/BuildManifest.apk"
+ }
+ ],
+ "apexes": [
+ {
+ "name": "com.android.art"
+ },
+ {
+ "name": "com.android.compos"
+ },
+ {
+ "name": "com.android.sdkext"
+ },
+ {
+ "name": "{CLASSPATH}"
+ }
+ ]
+}
diff --git a/compos/apk/test.keystore b/compos/apk/test.keystore
deleted file mode 100644
index 2f024d8..0000000
--- a/compos/apk/test.keystore
+++ /dev/null
Binary files differ
diff --git a/compos/common/Android.bp b/compos/common/Android.bp
new file mode 100644
index 0000000..39e7c0a
--- /dev/null
+++ b/compos/common/Android.bp
@@ -0,0 +1,28 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library {
+ name: "libcompos_common",
+ crate_name: "compos_common",
+ srcs: ["lib.rs"],
+ edition: "2018",
+ rustlibs: [
+ "android.system.virtualizationservice-rust",
+ "compos_aidl_interface-rust",
+ "libanyhow",
+ "libbinder_common",
+ "libbinder_rpc_unstable_bindgen",
+ "libbinder_rs",
+ "liblog_rust",
+ "libnum_traits",
+ "librustutils",
+ ],
+ proc_macros: ["libnum_derive"],
+ shared_libs: [
+ "libbinder_rpc_unstable",
+ ],
+ apex_available: [
+ "com.android.compos",
+ ],
+}
diff --git a/compos/common/binder.rs b/compos/common/binder.rs
new file mode 100644
index 0000000..ae857e0
--- /dev/null
+++ b/compos/common/binder.rs
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Helper for converting Error types to what Binder expects
+
+use anyhow::Result;
+use binder::{ExceptionCode, Result as BinderResult};
+use binder_common::new_binder_exception;
+use log::warn;
+use std::fmt::Debug;
+
+/// Convert a Result<T, E> to BinderResult<T> to allow it to be returned from a binder RPC,
+/// preserving the content as far as possible.
+/// Also log the error if there is one.
+pub fn to_binder_result<T, E: Debug>(result: Result<T, E>) -> BinderResult<T> {
+ result.map_err(|e| {
+ let message = format!("{:?}", e);
+ warn!("Returning binder error: {}", &message);
+ new_binder_exception(ExceptionCode::SERVICE_SPECIFIC, message)
+ })
+}
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
new file mode 100644
index 0000000..072b90b
--- /dev/null
+++ b/compos/common/compos_client.rs
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Support for starting CompOS in a VM and connecting to the service
+
+use crate::timeouts::timeouts;
+use crate::{COMPOS_APEX_ROOT, COMPOS_DATA_ROOT, COMPOS_VSOCK_PORT, DEFAULT_VM_CONFIG_PATH};
+use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
+ DeathReason::DeathReason,
+ IVirtualMachine::IVirtualMachine,
+ IVirtualMachineCallback::{BnVirtualMachineCallback, IVirtualMachineCallback},
+ IVirtualizationService::IVirtualizationService,
+ VirtualMachineAppConfig::{DebugLevel::DebugLevel, VirtualMachineAppConfig},
+ VirtualMachineConfig::VirtualMachineConfig,
+};
+use android_system_virtualizationservice::binder::{
+ wait_for_interface, BinderFeatures, DeathRecipient, IBinder, Interface, ParcelFileDescriptor,
+ Result as BinderResult, Strong,
+};
+use anyhow::{anyhow, bail, Context, Result};
+use binder::{
+ unstable_api::{new_spibinder, AIBinder},
+ FromIBinder,
+};
+use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
+use log::{info, warn};
+use rustutils::system_properties;
+use std::fs::File;
+use std::io::{BufRead, BufReader};
+use std::num::NonZeroU32;
+use std::os::raw;
+use std::os::unix::io::IntoRawFd;
+use std::path::Path;
+use std::sync::{Arc, Condvar, Mutex};
+use std::thread;
+
+/// This owns an instance of the CompOS VM.
+pub struct VmInstance {
+ #[allow(dead_code)] // Keeps the VM alive even if we don't touch it
+ vm: Strong<dyn IVirtualMachine>,
+ cid: i32,
+}
+
+/// Parameters to be used when creating a virtual machine instance.
+#[derive(Default, Debug, Clone)]
+pub struct VmParameters {
+ /// Whether the VM should be debuggable.
+ pub debug_mode: bool,
+ /// Number of vCPUs to have in the VM. If None, defaults to 1.
+ pub cpus: Option<NonZeroU32>,
+ /// Comma separated list of host CPUs where vCPUs are assigned to. If None, any host CPU can be
+ /// used to run any vCPU.
+ pub cpu_set: Option<String>,
+ /// If present, overrides the path to the VM config JSON file
+ pub config_path: Option<String>,
+ /// If present, overrides the amount of RAM to give the VM
+ pub memory_mib: Option<i32>,
+}
+
+impl VmInstance {
+ /// Return a new connection to the Virtualization Service binder interface. This will start the
+ /// service if necessary.
+ pub fn connect_to_virtualization_service() -> Result<Strong<dyn IVirtualizationService>> {
+ wait_for_interface::<dyn IVirtualizationService>("android.system.virtualizationservice")
+ .context("Failed to find VirtualizationService")
+ }
+
+ /// Start a new CompOS VM instance using the specified instance image file and parameters.
+ pub fn start(
+ service: &dyn IVirtualizationService,
+ instance_image: File,
+ idsig: &Path,
+ idsig_manifest_apk: &Path,
+ parameters: &VmParameters,
+ ) -> Result<VmInstance> {
+ let protected_vm = want_protected_vm()?;
+
+ let instance_fd = ParcelFileDescriptor::new(instance_image);
+
+ let apex_dir = Path::new(COMPOS_APEX_ROOT);
+ let data_dir = Path::new(COMPOS_DATA_ROOT);
+
+ let apk_fd = File::open(apex_dir.join("app/CompOSPayloadApp/CompOSPayloadApp.apk"))
+ .context("Failed to open config APK file")?;
+ let apk_fd = ParcelFileDescriptor::new(apk_fd);
+ let idsig_fd = prepare_idsig(service, &apk_fd, idsig)?;
+
+ let manifest_apk_fd = File::open("/system/etc/security/fsverity/BuildManifest.apk")
+ .context("Failed to open build manifest APK file")?;
+ let manifest_apk_fd = ParcelFileDescriptor::new(manifest_apk_fd);
+ let idsig_manifest_apk_fd = prepare_idsig(service, &manifest_apk_fd, idsig_manifest_apk)?;
+
+ let (console_fd, log_fd, debug_level) = if parameters.debug_mode {
+ // Console output and the system log output from the VM are redirected to file.
+ let console_fd = File::create(data_dir.join("vm_console.log"))
+ .context("Failed to create console log file")?;
+ let log_fd = File::create(data_dir.join("vm.log"))
+ .context("Failed to create system log file")?;
+ let console_fd = ParcelFileDescriptor::new(console_fd);
+ let log_fd = ParcelFileDescriptor::new(log_fd);
+ info!("Running in debug mode");
+ (Some(console_fd), Some(log_fd), DebugLevel::FULL)
+ } else {
+ (None, None, DebugLevel::NONE)
+ };
+
+ let config_path = parameters.config_path.as_deref().unwrap_or(DEFAULT_VM_CONFIG_PATH);
+ let config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
+ apk: Some(apk_fd),
+ idsig: Some(idsig_fd),
+ instanceImage: Some(instance_fd),
+ configPath: config_path.to_owned(),
+ debugLevel: debug_level,
+ extraIdsigs: vec![idsig_manifest_apk_fd],
+ protectedVm: protected_vm,
+ memoryMib: parameters.memory_mib.unwrap_or(0), // 0 means use the default
+ numCpus: parameters.cpus.map_or(1, NonZeroU32::get) as i32,
+ cpuAffinity: parameters.cpu_set.clone(),
+ });
+
+ let vm = service
+ .createVm(&config, console_fd.as_ref(), log_fd.as_ref())
+ .context("Failed to create VM")?;
+ let vm_state = Arc::new(VmStateMonitor::default());
+
+ let vm_state_clone = Arc::clone(&vm_state);
+ let mut death_recipient = DeathRecipient::new(move || {
+ vm_state_clone.set_died();
+ log::error!("VirtualizationService died");
+ });
+ // Note that dropping death_recipient cancels this, so we can't use a temporary here.
+ vm.as_binder().link_to_death(&mut death_recipient)?;
+
+ let vm_state_clone = Arc::clone(&vm_state);
+ let callback = BnVirtualMachineCallback::new_binder(
+ VmCallback(vm_state_clone),
+ BinderFeatures::default(),
+ );
+ vm.registerCallback(&callback)?;
+
+ vm.start()?;
+
+ let cid = vm_state.wait_until_ready()?;
+
+ Ok(VmInstance { vm, cid })
+ }
+
+ /// Create and return an RPC Binder connection to the CompOS service in the VM.
+ pub fn get_service(&self) -> Result<Strong<dyn ICompOsService>> {
+ let mut vsock_factory = VsockFactory::new(&*self.vm);
+
+ let ibinder = vsock_factory
+ .connect_rpc_client()
+ .ok_or_else(|| anyhow!("Failed to connect to CompOS service"))?;
+
+ FromIBinder::try_from(ibinder).context("Connecting to CompOS service")
+ }
+
+ /// Return the CID of the VM.
+ pub fn cid(&self) -> i32 {
+ // TODO: Do we actually need/use this?
+ self.cid
+ }
+}
+
+fn prepare_idsig(
+ service: &dyn IVirtualizationService,
+ apk_fd: &ParcelFileDescriptor,
+ idsig_path: &Path,
+) -> Result<ParcelFileDescriptor> {
+ if !idsig_path.exists() {
+ // Prepare idsig file via VirtualizationService
+ let idsig_file = File::create(idsig_path).context("Failed to create idsig file")?;
+ let idsig_fd = ParcelFileDescriptor::new(idsig_file);
+ service
+ .createOrUpdateIdsigFile(apk_fd, &idsig_fd)
+ .context("Failed to update idsig file")?;
+ }
+
+ // Open idsig as read-only
+ let idsig_file = File::open(idsig_path).context("Failed to open idsig file")?;
+ let idsig_fd = ParcelFileDescriptor::new(idsig_file);
+ Ok(idsig_fd)
+}
+
+fn want_protected_vm() -> Result<bool> {
+ let have_protected_vm =
+ system_properties::read_bool("ro.boot.hypervisor.protected_vm.supported", false)?;
+ if have_protected_vm {
+ info!("Starting protected VM");
+ return Ok(true);
+ }
+
+ let is_debug_build = system_properties::read("ro.debuggable")?.as_deref().unwrap_or("0") == "1";
+ if !is_debug_build {
+ bail!("Protected VM not supported, unable to start VM");
+ }
+
+ let have_unprotected_vm =
+ system_properties::read_bool("ro.boot.hypervisor.vm.supported", false)?;
+ if have_unprotected_vm {
+ warn!("Protected VM not supported, falling back to unprotected on debuggable build");
+ return Ok(false);
+ }
+
+ bail!("No VM support available")
+}
+
+struct VsockFactory<'a> {
+ vm: &'a dyn IVirtualMachine,
+}
+
+impl<'a> VsockFactory<'a> {
+ fn new(vm: &'a dyn IVirtualMachine) -> Self {
+ Self { vm }
+ }
+
+ fn connect_rpc_client(&mut self) -> Option<binder::SpIBinder> {
+ let param = self.as_void_ptr();
+
+ unsafe {
+ // SAFETY: AIBinder returned by RpcPreconnectedClient has correct reference count, and
+ // the ownership can be safely taken by new_spibinder.
+ // RpcPreconnectedClient does not take ownership of param, only passing it to
+ // request_fd.
+ let binder =
+ binder_rpc_unstable_bindgen::RpcPreconnectedClient(Some(Self::request_fd), param)
+ as *mut AIBinder;
+ new_spibinder(binder)
+ }
+ }
+
+ fn as_void_ptr(&mut self) -> *mut raw::c_void {
+ self as *mut _ as *mut raw::c_void
+ }
+
+ fn try_new_vsock_fd(&self) -> Result<i32> {
+ let vsock = self.vm.connectVsock(COMPOS_VSOCK_PORT as i32)?;
+ // Ownership of the fd is transferred to binder
+ Ok(vsock.into_raw_fd())
+ }
+
+ fn new_vsock_fd(&self) -> i32 {
+ self.try_new_vsock_fd().unwrap_or_else(|e| {
+ warn!("Connecting vsock failed: {}", e);
+ -1_i32
+ })
+ }
+
+ unsafe extern "C" fn request_fd(param: *mut raw::c_void) -> raw::c_int {
+ // SAFETY: This is only ever called by RpcPreconnectedClient, within the lifetime of the
+ // VsockFactory, with param taking the value returned by as_void_ptr (so a properly aligned
+ // non-null pointer to an initialized instance).
+ let vsock_factory = param as *mut Self;
+ vsock_factory.as_ref().unwrap().new_vsock_fd()
+ }
+}
+
+#[derive(Debug, Default)]
+struct VmState {
+ has_died: bool,
+ cid: Option<i32>,
+}
+
+#[derive(Debug)]
+struct VmStateMonitor {
+ mutex: Mutex<VmState>,
+ state_ready: Condvar,
+}
+
+impl Default for VmStateMonitor {
+ fn default() -> Self {
+ Self { mutex: Mutex::new(Default::default()), state_ready: Condvar::new() }
+ }
+}
+
+impl VmStateMonitor {
+ fn set_died(&self) {
+ let mut state = self.mutex.lock().unwrap();
+ state.has_died = true;
+ state.cid = None;
+ drop(state); // Unlock the mutex prior to notifying
+ self.state_ready.notify_all();
+ }
+
+ fn set_ready(&self, cid: i32) {
+ let mut state = self.mutex.lock().unwrap();
+ if state.has_died {
+ return;
+ }
+ state.cid = Some(cid);
+ drop(state); // Unlock the mutex prior to notifying
+ self.state_ready.notify_all();
+ }
+
+ fn wait_until_ready(&self) -> Result<i32> {
+ let (state, result) = self
+ .state_ready
+ .wait_timeout_while(
+ self.mutex.lock().unwrap(),
+ timeouts()?.vm_max_time_to_ready,
+ |state| state.cid.is_none() && !state.has_died,
+ )
+ .unwrap();
+ if result.timed_out() {
+ bail!("Timed out waiting for VM")
+ }
+ state.cid.ok_or_else(|| anyhow!("VM died"))
+ }
+}
+
+#[derive(Debug)]
+struct VmCallback(Arc<VmStateMonitor>);
+
+impl Interface for VmCallback {}
+
+impl IVirtualMachineCallback for VmCallback {
+ fn onDied(&self, cid: i32, reason: DeathReason) -> BinderResult<()> {
+ self.0.set_died();
+ log::warn!("VM died, cid = {}, reason = {:?}", cid, reason);
+ Ok(())
+ }
+
+ fn onPayloadStarted(
+ &self,
+ cid: i32,
+ stream: Option<&ParcelFileDescriptor>,
+ ) -> BinderResult<()> {
+ if let Some(pfd) = stream {
+ if let Err(e) = start_logging(pfd) {
+ warn!("Can't log vm output: {}", e);
+ };
+ }
+ log::info!("VM payload started, cid = {}", cid);
+ Ok(())
+ }
+
+ fn onPayloadReady(&self, cid: i32) -> BinderResult<()> {
+ self.0.set_ready(cid);
+ log::info!("VM payload ready, cid = {}", cid);
+ Ok(())
+ }
+
+ fn onPayloadFinished(&self, cid: i32, exit_code: i32) -> BinderResult<()> {
+ // This should probably never happen in our case, but if it does it means our VM is no
+ // longer running
+ self.0.set_died();
+ log::warn!("VM payload finished, cid = {}, exit code = {}", cid, exit_code);
+ Ok(())
+ }
+
+ fn onError(&self, cid: i32, error_code: i32, message: &str) -> BinderResult<()> {
+ self.0.set_died();
+ log::warn!("VM error, cid = {}, error code = {}, message = {}", cid, error_code, message,);
+ Ok(())
+ }
+}
+
+fn start_logging(pfd: &ParcelFileDescriptor) -> Result<()> {
+ let reader = BufReader::new(pfd.as_ref().try_clone().context("Cloning fd failed")?);
+ thread::spawn(move || {
+ for line in reader.lines() {
+ match line {
+ Ok(line) => info!("VM: {}", line),
+ Err(e) => {
+ warn!("Reading VM output failed: {}", e);
+ break;
+ }
+ }
+ }
+ });
+ Ok(())
+}
diff --git a/compos/common/lib.rs b/compos/common/lib.rs
new file mode 100644
index 0000000..efbde06
--- /dev/null
+++ b/compos/common/lib.rs
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Common items used by CompOS server and/or clients
+
+pub mod binder;
+pub mod compos_client;
+pub mod odrefresh;
+pub mod timeouts;
+
+/// Special CID indicating "any".
+pub const VMADDR_CID_ANY: u32 = -1i32 as u32;
+
+/// VSock port that the CompOS server listens on for RPC binder connections. This should be
+/// outside any port range that microdroid might reserve for system components in the future.
+pub const COMPOS_VSOCK_PORT: u32 = 6432;
+
+/// The root directory where the CompOS APEX is mounted (read only).
+pub const COMPOS_APEX_ROOT: &str = "/apex/com.android.compos";
+
+/// The root of the data directory available for private use by the CompOS APEX.
+pub const COMPOS_DATA_ROOT: &str = "/data/misc/apexdata/com.android.compos";
+
+/// The sub-directory where we store information relating to the instance of CompOS used for
+/// real compilation.
+pub const CURRENT_INSTANCE_DIR: &str = "current";
+
+/// The sub-directory where we store information relating to the instance of CompOS used for
+/// tests.
+pub const TEST_INSTANCE_DIR: &str = "test";
+
+/// The file that holds the instance image for a CompOS instance.
+pub const INSTANCE_IMAGE_FILE: &str = "instance.img";
+
+/// The file that holds the idsig for the CompOS Payload APK.
+pub const IDSIG_FILE: &str = "idsig";
+
+/// The file that holds the idsig for the build manifest APK (that makes enumerated files from
+/// /system available in CompOS).
+pub const IDSIG_MANIFEST_APK_FILE: &str = "idsig_manifest_apk";
+
+/// The path within our config APK of our default VM configuration file, used at boot time.
+pub const DEFAULT_VM_CONFIG_PATH: &str = "assets/vm_config.json";
+
+/// The path within our config APK of the VM configuration file we use when compiling staged
+/// APEXes before reboot.
+pub const PREFER_STAGED_VM_CONFIG_PATH: &str = "assets/vm_config_staged.json";
+
+/// Number of CPUs to run dex2oat (actually the entire compos VM) with
+pub const DEX2OAT_THREADS_PROP_NAME: &str = "dalvik.vm.boot-dex2oat-threads";
+
+/// Set of host-side CPUs to run dex2oat (actually the entire compos VM) on
+pub const DEX2OAT_CPU_SET_PROP_NAME: &str = "dalvik.vm.boot-dex2oat-cpu-set";
diff --git a/compos/common/odrefresh.rs b/compos/common/odrefresh.rs
new file mode 100644
index 0000000..390e50c
--- /dev/null
+++ b/compos/common/odrefresh.rs
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Helpers for running odrefresh
+
+use anyhow::{anyhow, Result};
+use num_derive::FromPrimitive;
+use num_traits::FromPrimitive;
+
+/// The path to the odrefresh binary
+pub const ODREFRESH_PATH: &str = "/apex/com.android.art/bin/odrefresh";
+
+/// The path under which odrefresh writes compiled artifacts
+pub const ODREFRESH_OUTPUT_ROOT_DIR: &str = "/data/misc/apexdata/com.android.art";
+
+/// The directory under ODREFRESH_OUTPUT_ROOT_DIR where pending artifacts are written
+pub const PENDING_ARTIFACTS_SUBDIR: &str = "compos-pending";
+
+/// The directory under ODREFRESH_OUTPUT_ROOT_DIR where test artifacts are written
+pub const TEST_ARTIFACTS_SUBDIR: &str = "test-artifacts";
+
+/// The directory under ODREFRESH_OUTPUT_ROOT_DIR where the current (active) artifacts are stored
+pub const CURRENT_ARTIFACTS_SUBDIR: &str = "dalvik-cache";
+
+// The highest "standard" exit code defined in sysexits.h (as EX__MAX); odrefresh error codes
+// start above here to avoid clashing.
+// TODO: What if this changes?
+const EX_MAX: i8 = 78;
+
+/// The defined odrefresh exit codes - see art/odrefresh/include/odrefresh/odrefresh.h
+#[derive(Debug, PartialEq, Eq, FromPrimitive)]
+#[repr(i8)]
+pub enum ExitCode {
+ /// No compilation required, all artifacts look good
+ Okay = 0,
+ /// Compilation required
+ CompilationRequired = EX_MAX + 1,
+ /// New artifacts successfully generated
+ CompilationSuccess = EX_MAX + 2,
+ /// Compilation failed
+ CompilationFailed = EX_MAX + 3,
+ /// Removal of existing invalid artifacts failed
+ CleanupFailed = EX_MAX + 4,
+}
+
+impl ExitCode {
+ /// Map an integer to the corresponding ExitCode enum, if there is one
+ pub fn from_i32(exit_code: i32) -> Result<Self> {
+ FromPrimitive::from_i32(exit_code)
+ .ok_or_else(|| anyhow!("Unexpected odrefresh exit code: {}", exit_code))
+ }
+}
diff --git a/compos/common/timeouts.rs b/compos/common/timeouts.rs
new file mode 100644
index 0000000..bdabb1e
--- /dev/null
+++ b/compos/common/timeouts.rs
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Timeouts for common situations, with support for longer timeouts when using nested
+//! virtualization.
+
+use anyhow::Result;
+use rustutils::system_properties;
+use std::time::Duration;
+
+/// Holder for the various timeouts we use.
+#[derive(Debug, Copy, Clone)]
+pub struct Timeouts {
+ /// Total time that odrefresh may take to perform compilation
+ pub odrefresh_max_execution_time: Duration,
+ /// Time allowed for the CompOS VM to start up and become ready.
+ pub vm_max_time_to_ready: Duration,
+}
+
+/// Whether the current platform requires extra time for operations inside a VM.
+pub fn need_extra_time() -> Result<bool> {
+ // Nested virtualization is slow. Check if we are running on vsoc as a proxy for this.
+ if let Some(value) = system_properties::read("ro.build.product")? {
+ Ok(value == "vsoc_x86_64" || value == "vsoc_x86")
+ } else {
+ Ok(false)
+ }
+}
+
+/// Return the timeouts that are appropriate on the current platform.
+pub fn timeouts() -> Result<&'static Timeouts> {
+ if need_extra_time()? {
+ Ok(&EXTENDED_TIMEOUTS)
+ } else {
+ Ok(&NORMAL_TIMEOUTS)
+ }
+}
+
+/// The timeouts that we use normally.
+pub const NORMAL_TIMEOUTS: Timeouts = Timeouts {
+ // Note: the source of truth for these odrefresh timeouts is art/odrefresh/odr_config.h.
+ odrefresh_max_execution_time: Duration::from_secs(300),
+ vm_max_time_to_ready: Duration::from_secs(15),
+};
+
+/// The timeouts that we use when need_extra_time() returns true.
+pub const EXTENDED_TIMEOUTS: Timeouts = Timeouts {
+ odrefresh_max_execution_time: Duration::from_secs(480),
+ vm_max_time_to_ready: Duration::from_secs(120),
+};
diff --git a/compos/compos_key_cmd/Android.bp b/compos/compos_key_cmd/Android.bp
deleted file mode 100644
index 00d1035..0000000
--- a/compos/compos_key_cmd/Android.bp
+++ /dev/null
@@ -1,23 +0,0 @@
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-cc_binary {
- name: "compos_key_cmd",
- srcs: ["compos_key_cmd.cpp"],
- apex_available: ["com.android.compos"],
-
- static_libs: [
- "lib_compos_proto",
- ],
-
- shared_libs: [
- "compos_aidl_interface-ndk_platform",
- "libbase",
- "libbinder_rpc_unstable",
- "libbinder_ndk",
- "libcrypto",
- "libfsverity",
- "libprotobuf-cpp-lite",
- ],
-}
diff --git a/compos/compos_key_cmd/compos_key_cmd.cpp b/compos/compos_key_cmd/compos_key_cmd.cpp
deleted file mode 100644
index bee9de1..0000000
--- a/compos/compos_key_cmd/compos_key_cmd.cpp
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <aidl/com/android/compos/ICompOsKeyService.h>
-#include <android-base/file.h>
-#include <android-base/result.h>
-#include <android-base/unique_fd.h>
-#include <android/binder_auto_utils.h>
-#include <android/binder_manager.h>
-#include <asm/byteorder.h>
-#include <libfsverity.h>
-#include <linux/fsverity.h>
-#include <openssl/evp.h>
-#include <openssl/mem.h>
-#include <openssl/sha.h>
-#include <openssl/x509.h>
-
-#include <filesystem>
-#include <iostream>
-#include <string>
-#include <string_view>
-
-#include "compos_signature.pb.h"
-
-// From frameworks/native/libs/binder/rust/src/binder_rpc_unstable.hpp
-extern "C" {
-AIBinder* RpcClient(unsigned int cid, unsigned int port);
-}
-
-using namespace std::literals;
-
-using aidl::com::android::compos::CompOsKeyData;
-using aidl::com::android::compos::ICompOsKeyService;
-using android::base::ErrnoError;
-using android::base::Error;
-using android::base::Result;
-using android::base::unique_fd;
-using compos::proto::Signature;
-
-const unsigned int kRpcPort = 3142;
-
-static bool writeBytesToFile(const std::vector<uint8_t>& bytes, const std::string& path) {
- std::string str(bytes.begin(), bytes.end());
- return android::base::WriteStringToFile(str, path);
-}
-
-static Result<std::vector<uint8_t>> readBytesFromFile(const std::string& path) {
- std::string str;
- if (!android::base::ReadFileToString(path, &str)) {
- return Error() << "Failed to read " << path;
- }
- return std::vector<uint8_t>(str.begin(), str.end());
-}
-
-static std::shared_ptr<ICompOsKeyService> getService(int cid) {
- ndk::SpAIBinder binder(cid == 0 ? AServiceManager_getService("android.system.composkeyservice")
- : RpcClient(cid, kRpcPort));
- return ICompOsKeyService::fromBinder(binder);
-}
-
-static Result<std::vector<uint8_t>> extractRsaPublicKey(
- const std::vector<uint8_t>& der_certificate) {
- auto data = der_certificate.data();
- bssl::UniquePtr<X509> x509(d2i_X509(nullptr, &data, der_certificate.size()));
- if (!x509) {
- return Error() << "Failed to parse certificate";
- }
- if (data != der_certificate.data() + der_certificate.size()) {
- return Error() << "Certificate has unexpected trailing data";
- }
-
- bssl::UniquePtr<EVP_PKEY> pkey(X509_get_pubkey(x509.get()));
- if (EVP_PKEY_base_id(pkey.get()) != EVP_PKEY_RSA) {
- return Error() << "Subject key is not RSA";
- }
- RSA* rsa = EVP_PKEY_get0_RSA(pkey.get());
- if (!rsa) {
- return Error() << "Failed to extract RSA key";
- }
-
- uint8_t* out = nullptr;
- int size = i2d_RSAPublicKey(rsa, &out);
- if (size < 0 || !out) {
- return Error() << "Failed to convert to RSAPublicKey";
- }
-
- bssl::UniquePtr<uint8_t> buffer(out);
- std::vector<uint8_t> result(out, out + size);
- return result;
-}
-
-static Result<void> generate(int cid, const std::string& blob_file,
- const std::string& public_key_file) {
- auto service = getService(cid);
- if (!service) {
- return Error() << "No service";
- }
-
- CompOsKeyData key_data;
- auto status = service->generateSigningKey(&key_data);
- if (!status.isOk()) {
- return Error() << "Failed to generate key: " << status.getDescription();
- }
-
- auto public_key = extractRsaPublicKey(key_data.certificate);
- if (!public_key.ok()) {
- return Error() << "Failed to extract public key from cert: " << public_key.error();
- }
- if (!writeBytesToFile(key_data.keyBlob, blob_file)) {
- return Error() << "Failed to write keyBlob to " << blob_file;
- }
-
- if (!writeBytesToFile(public_key.value(), public_key_file)) {
- return Error() << "Failed to write public key to " << public_key_file;
- }
-
- return {};
-}
-
-static Result<bool> verify(int cid, const std::string& blob_file,
- const std::string& public_key_file) {
- auto service = getService(cid);
- if (!service) {
- return Error() << "No service";
- }
-
- auto blob = readBytesFromFile(blob_file);
- if (!blob.ok()) {
- return blob.error();
- }
-
- auto public_key = readBytesFromFile(public_key_file);
- if (!public_key.ok()) {
- return public_key.error();
- }
-
- bool result = false;
- auto status = service->verifySigningKey(blob.value(), public_key.value(), &result);
- if (!status.isOk()) {
- return Error() << "Failed to verify key: " << status.getDescription();
- }
-
- return result;
-}
-
-static Result<void> signFile(ICompOsKeyService* service, const std::vector<uint8_t>& key_blob,
- const std::string& file) {
- unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
- if (!fd.ok()) {
- return ErrnoError() << "Failed to open";
- }
-
- std::filesystem::path signature_path{file};
- signature_path += ".signature";
- unique_fd out_fd(TEMP_FAILURE_RETRY(open(signature_path.c_str(),
- O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC,
- S_IRUSR | S_IWUSR | S_IRGRP)));
- if (!out_fd.ok()) {
- return ErrnoError() << "Unable to create signature file";
- }
-
- struct stat filestat;
- if (fstat(fd, &filestat) != 0) {
- return ErrnoError() << "Failed to fstat";
- }
-
- struct libfsverity_merkle_tree_params params = {
- .version = 1,
- .hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
- .file_size = static_cast<uint64_t>(filestat.st_size),
- .block_size = 4096,
- };
-
- auto read_callback = [](void* file, void* buf, size_t count) {
- int* fd = static_cast<int*>(file);
- if (TEMP_FAILURE_RETRY(read(*fd, buf, count)) < 0) return -errno;
- return 0;
- };
-
- struct libfsverity_digest* digest;
- int ret = libfsverity_compute_digest(&fd, read_callback, ¶ms, &digest);
- if (ret < 0) {
- return Error(-ret) << "Failed to compute fs-verity digest";
- }
- std::unique_ptr<libfsverity_digest, decltype(&std::free)> digestOwner{digest, std::free};
-
- std::vector<uint8_t> buffer(sizeof(fsverity_formatted_digest) + digest->digest_size);
- auto to_be_signed = new (buffer.data()) fsverity_formatted_digest;
- memcpy(to_be_signed->magic, "FSVerity", sizeof(to_be_signed->magic));
- to_be_signed->digest_algorithm = __cpu_to_le16(digest->digest_algorithm);
- to_be_signed->digest_size = __cpu_to_le16(digest->digest_size);
- memcpy(to_be_signed->digest, digest->digest, digest->digest_size);
-
- std::vector<uint8_t> signature;
- auto status = service->sign(key_blob, buffer, &signature);
- if (!status.isOk()) {
- return Error() << "Failed to sign: " << status.getDescription();
- }
-
- Signature compos_signature;
- compos_signature.set_digest(digest->digest, digest->digest_size);
- compos_signature.set_signature(signature.data(), signature.size());
- if (!compos_signature.SerializeToFileDescriptor(out_fd.get())) {
- return Error() << "Failed to write signature";
- }
- if (close(out_fd.release()) != 0) {
- return ErrnoError() << "Failed to close signature file";
- }
-
- return {};
-}
-
-static Result<void> sign(int cid, const std::string& blob_file,
- const std::vector<std::string>& files) {
- auto service = getService(cid);
- if (!service) {
- return Error() << "No service";
- }
-
- auto blob = readBytesFromFile(blob_file);
- if (!blob.ok()) {
- return blob.error();
- }
-
- for (auto& file : files) {
- auto result = signFile(service.get(), blob.value(), file);
- if (!result.ok()) {
- return Error() << result.error() << ": " << file;
- }
- }
- return {};
-}
-
-int main(int argc, char** argv) {
- // Restrict access to our outputs to the current user.
- umask(077);
-
- int cid = 0;
- if (argc >= 3 && argv[1] == "--cid"sv) {
- cid = atoi(argv[2]);
- if (cid == 0) {
- std::cerr << "Invalid cid\n";
- return 1;
- }
- argc -= 2;
- argv += 2;
- }
-
- if (argc == 4 && argv[1] == "generate"sv) {
- auto result = generate(cid, argv[2], argv[3]);
- if (result.ok()) {
- return 0;
- } else {
- std::cerr << result.error() << '\n';
- }
- } else if (argc == 4 && argv[1] == "verify"sv) {
- auto result = verify(cid, argv[2], argv[3]);
- if (result.ok()) {
- if (result.value()) {
- std::cerr << "Key files are valid.\n";
- return 0;
- } else {
- std::cerr << "Key files are not valid.\n";
- }
- } else {
- std::cerr << result.error() << '\n';
- }
- } else if (argc >= 4 && argv[1] == "sign"sv) {
- const std::vector<std::string> files{&argv[3], &argv[argc]};
- auto result = sign(cid, argv[2], files);
- if (result.ok()) {
- std::cerr << "All signatures generated.\n";
- return 0;
- } else {
- std::cerr << result.error() << '\n';
- }
- } else {
- std::cerr << "Usage: compos_key_cmd [--cid <cid>] generate|verify|sign\n"
- << " generate <blob file> <public key file> Generate new key pair and "
- "write\n"
- << " the private key blob and public key to the specified files.\n "
- << " verify <blob file> <public key file> Verify that the content of the\n"
- << " specified private key blob and public key files are valid.\n "
- << " sign <blob file> <files to be signed> Generate signatures for one or\n"
- << " more files using the supplied private key blob.\n"
- << "Specify --cid to connect to a VM rather than the host\n";
- }
- return 1;
-}
diff --git a/compos/compos_key_helper/Android.bp b/compos/compos_key_helper/Android.bp
new file mode 100644
index 0000000..a932b40
--- /dev/null
+++ b/compos/compos_key_helper/Android.bp
@@ -0,0 +1,44 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+cc_defaults {
+ name: "compos_key_defaults",
+ apex_available: ["com.android.compos"],
+
+ shared_libs: [
+ "libbase",
+ "libcrypto",
+ ],
+}
+
+cc_library {
+ name: "libcompos_key",
+ defaults: ["compos_key_defaults"],
+ srcs: ["compos_key.cpp"],
+ export_include_dirs: ["."],
+}
+
+cc_binary {
+ name: "compos_key_helper",
+ defaults: ["compos_key_defaults"],
+ srcs: ["compos_key_main.cpp"],
+
+ static_libs: ["libcompos_key"],
+ shared_libs: [
+ "android.hardware.security.dice-V1-ndk",
+ "android.security.dice-ndk",
+ "libbinder_ndk",
+ ],
+}
+
+cc_test {
+ name: "compos_key_tests",
+ defaults: ["compos_key_defaults"],
+ test_suites: [
+ "general-tests",
+ ],
+
+ srcs: ["compos_key_test.cpp"],
+ static_libs: ["libcompos_key"],
+}
diff --git a/compos/compos_key_helper/compos_key.cpp b/compos/compos_key_helper/compos_key.cpp
new file mode 100644
index 0000000..2e3252c
--- /dev/null
+++ b/compos/compos_key_helper/compos_key.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compos_key.h"
+
+#include <openssl/digest.h>
+#include <openssl/hkdf.h>
+#include <openssl/mem.h>
+
+using android::base::ErrnoError;
+using android::base::Error;
+using android::base::Result;
+using compos_key::Ed25519KeyPair;
+using compos_key::Signature;
+
+// Used to ensure the key we derive is distinct from any other.
+constexpr const char* kSigningKeyInfo = "CompOS signing key";
+
+namespace compos_key {
+Result<Ed25519KeyPair> deriveKeyFromSecret(const uint8_t* secret, size_t secret_size) {
+ // Ed25519 private keys are derived from a 32 byte seed:
+ // https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5
+ std::array<uint8_t, 32> seed;
+
+ // We derive the seed from the secret using HKDF - see
+ // https://datatracker.ietf.org/doc/html/rfc5869#section-2.
+ if (!HKDF(seed.data(), seed.size(), EVP_sha256(), secret, secret_size, /*salt=*/nullptr,
+ /*salt_len=*/0, reinterpret_cast<const uint8_t*>(kSigningKeyInfo),
+ strlen(kSigningKeyInfo))) {
+ return Error() << "HKDF failed";
+ }
+
+ Ed25519KeyPair result;
+ ED25519_keypair_from_seed(result.public_key.data(), result.private_key.data(), seed.data());
+ return result;
+}
+
+Result<Signature> sign(const PrivateKey& private_key, const uint8_t* data, size_t data_size) {
+ Signature result;
+ if (!ED25519_sign(result.data(), data, data_size, private_key.data())) {
+ return Error() << "Failed to sign";
+ }
+ return result;
+}
+
+bool verify(const PublicKey& public_key, const Signature& signature, const uint8_t* data,
+ size_t data_size) {
+ return ED25519_verify(data, data_size, signature.data(), public_key.data()) == 1;
+}
+} // namespace compos_key
diff --git a/compos/compos_key_helper/compos_key.h b/compos/compos_key_helper/compos_key.h
new file mode 100644
index 0000000..e9c6061
--- /dev/null
+++ b/compos/compos_key_helper/compos_key.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/result.h>
+#include <openssl/curve25519.h>
+
+#include <array>
+
+namespace compos_key {
+using PrivateKey = std::array<uint8_t, ED25519_PRIVATE_KEY_LEN>;
+using PublicKey = std::array<uint8_t, ED25519_PUBLIC_KEY_LEN>;
+using Signature = std::array<uint8_t, ED25519_SIGNATURE_LEN>;
+
+struct Ed25519KeyPair {
+ PrivateKey private_key;
+ PublicKey public_key;
+};
+
+android::base::Result<Ed25519KeyPair> deriveKeyFromSecret(const uint8_t* secret,
+ size_t secret_size);
+
+android::base::Result<Signature> sign(const PrivateKey& private_key, const uint8_t* data,
+ size_t data_size);
+
+bool verify(const PublicKey& public_key, const Signature& signature, const uint8_t* data,
+ size_t data_size);
+} // namespace compos_key
diff --git a/compos/compos_key_helper/compos_key_main.cpp b/compos/compos_key_helper/compos_key_main.cpp
new file mode 100644
index 0000000..a0d0b18
--- /dev/null
+++ b/compos/compos_key_helper/compos_key_main.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aidl/android/security/dice/IDiceNode.h>
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <android/binder_manager.h>
+#include <unistd.h>
+
+#include <string_view>
+
+#include "compos_key.h"
+
+using aidl::android::hardware::security::dice::BccHandover;
+using aidl::android::hardware::security::dice::InputValues;
+using aidl::android::security::dice::IDiceNode;
+using android::base::Error;
+using android::base::ReadFdToString;
+using android::base::Result;
+using android::base::WriteFully;
+using namespace std::literals;
+using compos_key::Ed25519KeyPair;
+
+namespace {
+Result<Ed25519KeyPair> deriveKeyFromDice() {
+ ndk::SpAIBinder binder{AServiceManager_getService("android.security.dice.IDiceNode")};
+ auto dice_node = IDiceNode::fromBinder(binder);
+ if (!dice_node) {
+ return Error() << "Unable to connect to IDiceNode";
+ }
+
+ const std::vector<InputValues> empty_input_values;
+ BccHandover bcc;
+ auto status = dice_node->derive(empty_input_values, &bcc);
+ if (!status.isOk()) {
+ return Error() << "Derive failed: " << status.getDescription();
+ }
+
+ // We use the sealing CDI because we want stability - the key needs to be the same
+ // for any instance of the "same" VM.
+ return compos_key::deriveKeyFromSecret(bcc.cdiSeal.data(), bcc.cdiSeal.size());
+}
+
+int write_public_key() {
+ auto key_pair = deriveKeyFromDice();
+ if (!key_pair.ok()) {
+ LOG(ERROR) << key_pair.error();
+ return 1;
+ }
+ if (!WriteFully(STDOUT_FILENO, key_pair->public_key.data(), key_pair->public_key.size())) {
+ PLOG(ERROR) << "Write failed";
+ return 1;
+ }
+ return 0;
+}
+
+int sign_input() {
+ std::string to_sign;
+ if (!ReadFdToString(STDIN_FILENO, &to_sign)) {
+ PLOG(ERROR) << "Read failed";
+ return 1;
+ }
+
+ auto key_pair = deriveKeyFromDice();
+ if (!key_pair.ok()) {
+ LOG(ERROR) << key_pair.error();
+ return 1;
+ }
+
+ auto signature =
+ compos_key::sign(key_pair->private_key,
+ reinterpret_cast<const uint8_t*>(to_sign.data()), to_sign.size());
+ if (!signature.ok()) {
+ LOG(ERROR) << signature.error();
+ return 1;
+ }
+
+ if (!WriteFully(STDOUT_FILENO, signature->data(), signature->size())) {
+ PLOG(ERROR) << "Write failed";
+ return 1;
+ }
+ return 0;
+}
+} // namespace
+
+int main(int argc, char** argv) {
+ android::base::InitLogging(argv, android::base::LogdLogger(android::base::SYSTEM));
+
+ if (argc == 2) {
+ if (argv[1] == "public_key"sv) {
+ return write_public_key();
+ } else if (argv[1] == "sign"sv) {
+ return sign_input();
+ }
+ }
+
+ LOG(INFO) << "Usage: compos_key_helper <command>. Available commands are:\n"
+ "public_key Write current public key to stdout\n"
+ "sign Consume stdin, sign it and write signature to stdout\n";
+ return 1;
+}
diff --git a/compos/compos_key_helper/compos_key_test.cpp b/compos/compos_key_helper/compos_key_test.cpp
new file mode 100644
index 0000000..e4c3e8a
--- /dev/null
+++ b/compos/compos_key_helper/compos_key_test.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compos_key.h"
+
+#include <vector>
+
+#include "gtest/gtest.h"
+
+using namespace compos_key;
+
+const std::vector<uint8_t> secret = {1, 2, 3};
+const std::vector<uint8_t> other_secret = {3, 2, 3};
+const std::vector<uint8_t> data = {42, 180, 65, 0};
+
+struct ComposKeyTest : public testing::Test {
+ Ed25519KeyPair key_pair;
+
+ void SetUp() override {
+ auto key_pair = deriveKeyFromSecret(secret.data(), secret.size());
+ ASSERT_TRUE(key_pair.ok()) << key_pair.error();
+ this->key_pair = *key_pair;
+ }
+};
+
+TEST_F(ComposKeyTest, SameSecretSameKey) {
+ auto other_key_pair = deriveKeyFromSecret(secret.data(), secret.size());
+ ASSERT_TRUE(other_key_pair.ok()) << other_key_pair.error();
+
+ ASSERT_EQ(key_pair.private_key, other_key_pair->private_key);
+ ASSERT_EQ(key_pair.public_key, other_key_pair->public_key);
+}
+
+TEST_F(ComposKeyTest, DifferentSecretDifferentKey) {
+ auto other_key_pair = deriveKeyFromSecret(other_secret.data(), other_secret.size());
+ ASSERT_TRUE(other_key_pair.ok()) << other_key_pair.error();
+
+ ASSERT_NE(key_pair.private_key, other_key_pair->private_key);
+ ASSERT_NE(key_pair.public_key, other_key_pair->public_key);
+}
+
+TEST_F(ComposKeyTest, CanVerifyValidSignature) {
+ auto signature = sign(key_pair.private_key, data.data(), data.size());
+ ASSERT_TRUE(signature.ok()) << signature.error();
+
+ bool verified = verify(key_pair.public_key, *signature, data.data(), data.size());
+ ASSERT_TRUE(verified);
+}
+
+TEST_F(ComposKeyTest, WrongSignatureDoesNotVerify) {
+ auto signature = sign(key_pair.private_key, data.data(), data.size());
+ ASSERT_TRUE(signature.ok()) << signature.error();
+
+ (*signature)[0] ^= 1;
+
+ bool verified = verify(key_pair.public_key, *signature, data.data(), data.size());
+ ASSERT_FALSE(verified);
+}
+
+TEST_F(ComposKeyTest, WrongDataDoesNotVerify) {
+ auto signature = sign(key_pair.private_key, data.data(), data.size());
+ ASSERT_TRUE(signature.ok()) << signature.error();
+
+ auto other_data = data;
+ other_data[0] ^= 1;
+
+ bool verified = verify(key_pair.public_key, *signature, other_data.data(), other_data.size());
+ ASSERT_FALSE(verified);
+}
+
+TEST_F(ComposKeyTest, WrongKeyDoesNotVerify) {
+ auto signature = sign(key_pair.private_key, data.data(), data.size());
+
+ auto other_key_pair = deriveKeyFromSecret(other_secret.data(), other_secret.size());
+ ASSERT_TRUE(other_key_pair.ok()) << other_key_pair.error();
+
+ bool verified = verify(other_key_pair->public_key, *signature, data.data(), data.size());
+ ASSERT_FALSE(verified);
+}
diff --git a/compos/compos_key_helper/tests/AndroidTest.xml b/compos/compos_key_helper/tests/AndroidTest.xml
new file mode 100644
index 0000000..3c1c657
--- /dev/null
+++ b/compos/compos_key_helper/tests/AndroidTest.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2022 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Tests for compos_key">
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="module-name" value="compos_key_tests" />
+ </test>
+</configuration>
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
new file mode 100644
index 0000000..55a3107
--- /dev/null
+++ b/compos/composd/Android.bp
@@ -0,0 +1,31 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_binary {
+ name: "composd",
+ srcs: ["src/composd_main.rs"],
+ edition: "2018",
+ prefer_rlib: true,
+ rustlibs: [
+ "android.system.composd-rust",
+ "android.system.virtualizationservice-rust",
+ "compos_aidl_interface-rust",
+ "libandroid_logger",
+ "libanyhow",
+ "libbinder_common",
+ "libbinder_rs",
+ "libcompos_common",
+ "libcomposd_native_rust",
+ "libminijail_rust",
+ "libnum_cpus",
+ "libnix",
+ "liblibc",
+ "liblog_rust",
+ "librustutils",
+ "libshared_child",
+ ],
+ apex_available: [
+ "com.android.compos",
+ ],
+}
diff --git a/compos/composd/aidl/Android.bp b/compos/composd/aidl/Android.bp
new file mode 100644
index 0000000..56b0b60
--- /dev/null
+++ b/compos/composd/aidl/Android.bp
@@ -0,0 +1,21 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+aidl_interface {
+ name: "android.system.composd",
+ srcs: ["android/system/composd/*.aidl"],
+ // TODO: Make this stable when the APEX becomes updatable.
+ unstable: true,
+ backend: {
+ java: {
+ apex_available: ["com.android.compos"],
+ },
+ rust: {
+ enabled: true,
+ apex_available: [
+ "com.android.compos",
+ ],
+ },
+ },
+}
diff --git a/compos/aidl/com/android/compos/CompOsKeyData.aidl b/compos/composd/aidl/android/system/composd/ICompilationTask.aidl
similarity index 60%
copy from compos/aidl/com/android/compos/CompOsKeyData.aidl
copy to compos/composd/aidl/android/system/composd/ICompilationTask.aidl
index 381ec0d..c1da0a5 100644
--- a/compos/aidl/com/android/compos/CompOsKeyData.aidl
+++ b/compos/composd/aidl/android/system/composd/ICompilationTask.aidl
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021 The Android Open Source Project
+ * Copyright 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,18 +13,15 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+package android.system.composd;
-package com.android.compos;
-
-/** {@hide} */
-parcelable CompOsKeyData {
+/**
+ * Represents a compilation in process.
+ */
+interface ICompilationTask {
/**
- * Self-signed certificate (X.509 DER) containing the public key.
+ * Attempt to cancel compilation. If successful, compilation will end and no further success or
+ * failure callbacks will be received (although any in flight may still be delivered).
*/
- byte[] certificate;
-
- /**
- * Opaque encrypted blob containing the private key and related metadata.
- */
- byte[] keyBlob;
+ oneway void cancel();
}
diff --git a/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
new file mode 100644
index 0000000..b334d8b
--- /dev/null
+++ b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.composd;
+
+/**
+ * Interface to be implemented by clients of IIsolatedCompilationService to be notified when a
+ * requested compilation task completes.
+ */
+oneway interface ICompilationTaskCallback {
+ /**
+ * Called if a compilation task has ended successfully, generating all the required artifacts.
+ */
+ void onSuccess();
+
+ /**
+ * Called if a compilation task has ended unsuccessfully.
+ */
+ void onFailure();
+}
diff --git a/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl b/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
new file mode 100644
index 0000000..dde75e1
--- /dev/null
+++ b/compos/composd/aidl/android/system/composd/IIsolatedCompilationService.aidl
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.composd;
+
+import android.system.composd.ICompilationTask;
+import android.system.composd.ICompilationTaskCallback;
+
+interface IIsolatedCompilationService {
+ enum ApexSource {
+ /** Only use the activated APEXes */
+ NoStaged,
+ /** Prefer any staged APEXes, otherwise use the activated ones */
+ PreferStaged,
+ }
+
+ /**
+ * Compile BCP extensions and system server, using any staged APEXes that are present in
+ * preference to active APEXes, writing the results to the pending artifacts directory to be
+ * verified by odsign on next boot.
+ *
+ * Compilation continues in the background, and success/failure is reported via the supplied
+ * callback, unless the returned ICompilationTask is cancelled. The caller should maintain
+ * a reference to the ICompilationTask until compilation completes or is cancelled.
+ */
+ ICompilationTask startStagedApexCompile(ICompilationTaskCallback callback);
+
+ /**
+ * Run odrefresh in a test instance of CompOS until completed or failed.
+ *
+ * This compiles BCP extensions and system server, even if the system artifacts are up to date,
+ * and writes the results to a test directory to avoid disrupting any real artifacts in
+ * existence.
+ *
+ * Compilation continues in the background, and success/failure is reported via the supplied
+ * callback, unless the returned ICompilationTask is cancelled. The caller should maintain
+ * a reference to the ICompilationTask until compilation completes or is cancelled.
+ */
+ ICompilationTask startTestCompile(ApexSource apexSource, ICompilationTaskCallback callback);
+}
diff --git a/compos/composd/native/Android.bp b/compos/composd/native/Android.bp
new file mode 100644
index 0000000..ccd8651
--- /dev/null
+++ b/compos/composd/native/Android.bp
@@ -0,0 +1,17 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library {
+ name: "libcomposd_native_rust",
+ crate_name: "composd_native",
+ srcs: ["lib.rs"],
+ rustlibs: [
+ "libanyhow",
+ "liblibc",
+ ],
+ shared_libs: [
+ "libartpalette-system",
+ ],
+ apex_available: ["com.android.compos"],
+}
diff --git a/compos/composd/native/lib.rs b/compos/composd/native/lib.rs
new file mode 100644
index 0000000..042eb2a
--- /dev/null
+++ b/compos/composd/native/lib.rs
@@ -0,0 +1,52 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Native helpers for composd.
+
+pub use art::*;
+
+mod art {
+ use anyhow::{anyhow, Result};
+ use libc::c_char;
+ use std::ffi::{CStr, OsStr};
+ use std::io::Error;
+ use std::os::unix::ffi::OsStrExt;
+ use std::path::Path;
+ use std::ptr::null;
+
+ // From libartpalette(-system)
+ extern "C" {
+ fn PaletteCreateOdrefreshStagingDirectory(out_staging_dir: *mut *const c_char) -> i32;
+ }
+ const PALETTE_STATUS_OK: i32 = 0;
+ const PALETTE_STATUS_CHECK_ERRNO: i32 = 1;
+
+ /// Creates and returns the staging directory for odrefresh.
+ pub fn palette_create_odrefresh_staging_directory() -> Result<&'static Path> {
+ let mut staging_dir: *const c_char = null();
+ // SAFETY: The C function always returns a non-null C string (after creating the directory).
+ let status = unsafe { PaletteCreateOdrefreshStagingDirectory(&mut staging_dir) };
+ match status {
+ PALETTE_STATUS_OK => {
+ // SAFETY: The previously returned `*const c_char` should point to a legitimate C
+ // string.
+ let cstr = unsafe { CStr::from_ptr(staging_dir) };
+ let path = OsStr::from_bytes(cstr.to_bytes()).as_ref();
+ Ok(path)
+ }
+ PALETTE_STATUS_CHECK_ERRNO => Err(anyhow!(Error::last_os_error().to_string())),
+ _ => Err(anyhow!("Failed with palette status {}", status)),
+ }
+ }
+}
diff --git a/compos/composd/src/composd_main.rs b/compos/composd/src/composd_main.rs
new file mode 100644
index 0000000..d1b711d
--- /dev/null
+++ b/compos/composd/src/composd_main.rs
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Exposes an on-demand binder service to perform system compilation tasks using CompOS. It is
+//! responsible for managing the lifecycle of the CompOS VM instances, providing key management for
+//! them, and orchestrating trusted compilation.
+
+mod fd_server_helper;
+mod instance_manager;
+mod instance_starter;
+mod odrefresh_task;
+mod service;
+
+use crate::instance_manager::InstanceManager;
+use android_system_composd::binder::{register_lazy_service, ProcessState};
+use anyhow::{Context, Result};
+use compos_common::compos_client::VmInstance;
+use log::{error, info};
+use std::panic;
+use std::sync::Arc;
+
+fn try_main() -> Result<()> {
+ let debuggable = env!("TARGET_BUILD_VARIANT") != "user";
+ let log_level = if debuggable { log::Level::Debug } else { log::Level::Info };
+ android_logger::init_once(
+ android_logger::Config::default().with_tag("composd").with_min_level(log_level),
+ );
+
+ // Redirect panic messages to logcat.
+ panic::set_hook(Box::new(|panic_info| {
+ log::error!("{}", panic_info);
+ }));
+
+ ProcessState::start_thread_pool();
+
+ let virtualization_service = VmInstance::connect_to_virtualization_service()?;
+ let instance_manager = Arc::new(InstanceManager::new(virtualization_service));
+ let composd_service = service::new_binder(instance_manager);
+ register_lazy_service("android.system.composd", composd_service.as_binder())
+ .context("Registering composd service")?;
+
+ info!("Registered services, joining threadpool");
+ ProcessState::join_thread_pool();
+
+ info!("Exiting");
+ Ok(())
+}
+
+fn main() {
+ if let Err(e) = try_main() {
+ error!("{:?}", e);
+ std::process::exit(1)
+ }
+}
diff --git a/compos/composd/src/fd_server_helper.rs b/compos/composd/src/fd_server_helper.rs
new file mode 100644
index 0000000..24dc9e7
--- /dev/null
+++ b/compos/composd/src/fd_server_helper.rs
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! A helper library to start a fd_server.
+
+use anyhow::{Context, Result};
+use log::{debug, warn};
+use minijail::Minijail;
+use nix::fcntl::OFlag;
+use nix::unistd::pipe2;
+use std::fs::File;
+use std::io::Read;
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use std::path::Path;
+
+const FD_SERVER_BIN: &str = "/apex/com.android.virt/bin/fd_server";
+
+/// Config for starting a `FdServer`
+#[derive(Default)]
+pub struct FdServerConfig {
+ /// List of file FDs exposed for read-only operations.
+ pub ro_file_fds: Vec<RawFd>,
+ /// List of file FDs exposed for read-write operations.
+ pub rw_file_fds: Vec<RawFd>,
+ /// List of directory FDs exposed for read-only operations.
+ pub ro_dir_fds: Vec<RawFd>,
+ /// List of directory FDs exposed for read-write operations.
+ pub rw_dir_fds: Vec<RawFd>,
+}
+
+impl FdServerConfig {
+ /// Creates a `FdServer` based on the current config.
+ pub fn into_fd_server(self) -> Result<FdServer> {
+ let (ready_read_fd, ready_write_fd) = create_pipe()?;
+ let fd_server_jail = self.do_spawn_fd_server(ready_write_fd)?;
+ wait_for_fd_server_ready(ready_read_fd)?;
+ Ok(FdServer { jailed_process: fd_server_jail })
+ }
+
+ fn do_spawn_fd_server(self, ready_file: File) -> Result<Minijail> {
+ let mut inheritable_fds = Vec::new();
+ let mut args = vec![FD_SERVER_BIN.to_string()];
+ for fd in self.ro_file_fds {
+ args.push("--ro-fds".to_string());
+ args.push(fd.to_string());
+ inheritable_fds.push(fd);
+ }
+ for fd in self.rw_file_fds {
+ args.push("--rw-fds".to_string());
+ args.push(fd.to_string());
+ inheritable_fds.push(fd);
+ }
+ for fd in self.ro_dir_fds {
+ args.push("--ro-dirs".to_string());
+ args.push(fd.to_string());
+ inheritable_fds.push(fd);
+ }
+ for fd in self.rw_dir_fds {
+ args.push("--rw-dirs".to_string());
+ args.push(fd.to_string());
+ inheritable_fds.push(fd);
+ }
+ let ready_fd = ready_file.as_raw_fd();
+ args.push("--ready-fd".to_string());
+ args.push(ready_fd.to_string());
+ inheritable_fds.push(ready_fd);
+
+ debug!("Spawn fd_server {:?} (inheriting FDs: {:?})", args, inheritable_fds);
+ let jail = Minijail::new()?;
+ let _pid = jail.run(Path::new(FD_SERVER_BIN), &inheritable_fds, &args)?;
+ Ok(jail)
+ }
+}
+
+/// `FdServer` represents a running `fd_server` process. The process lifetime is associated with
+/// the instance lifetime.
+pub struct FdServer {
+ jailed_process: Minijail,
+}
+
+impl Drop for FdServer {
+ fn drop(&mut self) {
+ if let Err(e) = self.jailed_process.kill() {
+ if !matches!(e, minijail::Error::Killed(_)) {
+ warn!("Failed to kill fd_server: {}", e);
+ }
+ }
+ }
+}
+
+fn create_pipe() -> Result<(File, File)> {
+ let (raw_read, raw_write) = pipe2(OFlag::O_CLOEXEC)?;
+ // SAFETY: We are the sole owners of these fds as they were just created.
+ let read_fd = unsafe { File::from_raw_fd(raw_read) };
+ let write_fd = unsafe { File::from_raw_fd(raw_write) };
+ Ok((read_fd, write_fd))
+}
+
+fn wait_for_fd_server_ready(mut ready_fd: File) -> Result<()> {
+ let mut buffer = [0];
+ // When fd_server is ready it closes its end of the pipe. And if it exits, the pipe is also
+ // closed. Either way this read will return 0 bytes at that point, and there's no point waiting
+ // any longer.
+ let _ = ready_fd.read(&mut buffer).context("Waiting for fd_server to be ready")?;
+ debug!("fd_server is ready");
+ Ok(())
+}
diff --git a/compos/composd/src/instance_manager.rs b/compos/composd/src/instance_manager.rs
new file mode 100644
index 0000000..587314c
--- /dev/null
+++ b/compos/composd/src/instance_manager.rs
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Manages running instances of the CompOS VM. At most one instance should be running at
+//! a time, started on demand.
+
+use crate::instance_starter::{CompOsInstance, InstanceStarter};
+use android_system_virtualizationservice::aidl::android::system::virtualizationservice;
+use anyhow::{bail, Result};
+use compos_aidl_interface::binder::Strong;
+use compos_common::compos_client::VmParameters;
+use compos_common::{
+ CURRENT_INSTANCE_DIR, DEX2OAT_CPU_SET_PROP_NAME, DEX2OAT_THREADS_PROP_NAME,
+ PREFER_STAGED_VM_CONFIG_PATH, TEST_INSTANCE_DIR,
+};
+use rustutils::system_properties;
+use std::num::NonZeroU32;
+use std::str::FromStr;
+use std::sync::{Arc, Mutex, Weak};
+use virtualizationservice::IVirtualizationService::IVirtualizationService;
+
+// Enough memory to complete odrefresh in the VM.
+const VM_MEMORY_MIB: i32 = 1024;
+
+pub struct InstanceManager {
+ service: Strong<dyn IVirtualizationService>,
+ state: Mutex<State>,
+}
+
+impl InstanceManager {
+ pub fn new(service: Strong<dyn IVirtualizationService>) -> Self {
+ Self { service, state: Default::default() }
+ }
+
+ pub fn start_current_instance(&self) -> Result<Arc<CompOsInstance>> {
+ let mut vm_parameters = new_vm_parameters()?;
+ vm_parameters.config_path = Some(PREFER_STAGED_VM_CONFIG_PATH.to_owned());
+ self.start_instance(CURRENT_INSTANCE_DIR, vm_parameters)
+ }
+
+ pub fn start_test_instance(&self, prefer_staged: bool) -> Result<Arc<CompOsInstance>> {
+ let mut vm_parameters = new_vm_parameters()?;
+ vm_parameters.debug_mode = true;
+ if prefer_staged {
+ vm_parameters.config_path = Some(PREFER_STAGED_VM_CONFIG_PATH.to_owned());
+ }
+ self.start_instance(TEST_INSTANCE_DIR, vm_parameters)
+ }
+
+ fn start_instance(
+ &self,
+ instance_name: &str,
+ vm_parameters: VmParameters,
+ ) -> Result<Arc<CompOsInstance>> {
+ let mut state = self.state.lock().unwrap();
+ state.mark_starting()?;
+ // Don't hold the lock while we start the instance to avoid blocking other callers.
+ drop(state);
+
+ let instance_starter = InstanceStarter::new(instance_name, vm_parameters);
+ let instance = self.try_start_instance(instance_starter);
+
+ let mut state = self.state.lock().unwrap();
+ if let Ok(ref instance) = instance {
+ state.mark_started(instance)?;
+ } else {
+ state.mark_stopped();
+ }
+ instance
+ }
+
+ fn try_start_instance(&self, instance_starter: InstanceStarter) -> Result<Arc<CompOsInstance>> {
+ let compos_instance = instance_starter.start_new_instance(&*self.service)?;
+ Ok(Arc::new(compos_instance))
+ }
+}
+
+fn new_vm_parameters() -> Result<VmParameters> {
+ let cpus = match system_properties::read(DEX2OAT_THREADS_PROP_NAME)? {
+ Some(s) => Some(NonZeroU32::from_str(&s)?),
+ None => {
+ // dex2oat uses all CPUs by default. To match the behavior, give the VM all CPUs by
+ // default.
+ NonZeroU32::new(num_cpus::get() as u32)
+ }
+ };
+ let cpu_set = system_properties::read(DEX2OAT_CPU_SET_PROP_NAME)?;
+ Ok(VmParameters { cpus, cpu_set, memory_mib: Some(VM_MEMORY_MIB), ..Default::default() })
+}
+
+// Ensures we only run one instance at a time.
+// Valid states:
+// Starting: is_starting is true, running_instance is None.
+// Started: is_starting is false, running_instance is Some(x) and there is a strong ref to x.
+// Stopped: is_starting is false and running_instance is None or a weak ref to a dropped instance.
+// The panic calls here should never happen, unless the code above in InstanceManager is buggy.
+// In particular nothing the client does should be able to trigger them.
+#[derive(Default)]
+struct State {
+ running_instance: Option<Weak<CompOsInstance>>,
+ is_starting: bool,
+}
+
+impl State {
+ // Move to Starting iff we are Stopped.
+ fn mark_starting(&mut self) -> Result<()> {
+ if self.is_starting {
+ bail!("An instance is already starting");
+ }
+ if let Some(weak) = &self.running_instance {
+ if weak.strong_count() != 0 {
+ bail!("An instance is already running");
+ }
+ }
+ self.running_instance = None;
+ self.is_starting = true;
+ Ok(())
+ }
+
+ // Move from Starting to Stopped.
+ fn mark_stopped(&mut self) {
+ if !self.is_starting || self.running_instance.is_some() {
+ panic!("Tried to mark stopped when not starting");
+ }
+ self.is_starting = false;
+ }
+
+ // Move from Starting to Started.
+ fn mark_started(&mut self, instance: &Arc<CompOsInstance>) -> Result<()> {
+ if !self.is_starting {
+ panic!("Tried to mark started when not starting")
+ }
+ if self.running_instance.is_some() {
+ panic!("Attempted to mark started when already started");
+ }
+ self.is_starting = false;
+ self.running_instance = Some(Arc::downgrade(instance));
+ Ok(())
+ }
+}
diff --git a/compos/composd/src/instance_starter.rs b/compos/composd/src/instance_starter.rs
new file mode 100644
index 0000000..4873d7a
--- /dev/null
+++ b/compos/composd/src/instance_starter.rs
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Responsible for validating and starting an existing instance of the CompOS VM, or creating and
+//! starting a new instance if necessary.
+
+use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
+ IVirtualizationService::IVirtualizationService, PartitionType::PartitionType,
+};
+use anyhow::{Context, Result};
+use binder_common::lazy_service::LazyServiceGuard;
+use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
+use compos_aidl_interface::binder::{ParcelFileDescriptor, Strong};
+use compos_common::compos_client::{VmInstance, VmParameters};
+use compos_common::{COMPOS_DATA_ROOT, IDSIG_FILE, IDSIG_MANIFEST_APK_FILE, INSTANCE_IMAGE_FILE};
+use log::info;
+use std::fs;
+use std::path::{Path, PathBuf};
+
+pub struct CompOsInstance {
+ service: Strong<dyn ICompOsService>,
+ #[allow(dead_code)] // Keeps VirtualizationService & the VM alive
+ vm_instance: VmInstance,
+ #[allow(dead_code)] // Keeps composd process alive
+ lazy_service_guard: LazyServiceGuard,
+}
+
+impl CompOsInstance {
+ pub fn get_service(&self) -> Strong<dyn ICompOsService> {
+ self.service.clone()
+ }
+}
+
+pub struct InstanceStarter {
+ instance_name: String,
+ instance_root: PathBuf,
+ instance_image: PathBuf,
+ idsig: PathBuf,
+ idsig_manifest_apk: PathBuf,
+ vm_parameters: VmParameters,
+}
+
+impl InstanceStarter {
+ pub fn new(instance_name: &str, vm_parameters: VmParameters) -> Self {
+ let instance_root = Path::new(COMPOS_DATA_ROOT).join(instance_name);
+ let instance_root_path = instance_root.as_path();
+ let instance_image = instance_root_path.join(INSTANCE_IMAGE_FILE);
+ let idsig = instance_root_path.join(IDSIG_FILE);
+ let idsig_manifest_apk = instance_root_path.join(IDSIG_MANIFEST_APK_FILE);
+ Self {
+ instance_name: instance_name.to_owned(),
+ instance_root,
+ instance_image,
+ idsig,
+ idsig_manifest_apk,
+ vm_parameters,
+ }
+ }
+
+ pub fn start_new_instance(
+ &self,
+ virtualization_service: &dyn IVirtualizationService,
+ ) -> Result<CompOsInstance> {
+ info!("Creating {} CompOs instance", self.instance_name);
+
+ // Ignore failure here - the directory may already exist.
+ let _ = fs::create_dir(&self.instance_root);
+
+ // Overwrite any existing instance - it's unlikely to be valid with the current set
+ // of APEXes, and finding out it isn't is much more expensive than creating a new one.
+ self.create_instance_image(virtualization_service)?;
+
+ // Delete existing idsig files. Ignore error in case idsig doesn't exist.
+ let _ = fs::remove_file(&self.idsig);
+ let _ = fs::remove_file(&self.idsig_manifest_apk);
+
+ self.start_vm(virtualization_service)
+ }
+
+ fn start_vm(
+ &self,
+ virtualization_service: &dyn IVirtualizationService,
+ ) -> Result<CompOsInstance> {
+ let instance_image = fs::OpenOptions::new()
+ .read(true)
+ .write(true)
+ .open(&self.instance_image)
+ .context("Failed to open instance image")?;
+ let vm_instance = VmInstance::start(
+ virtualization_service,
+ instance_image,
+ &self.idsig,
+ &self.idsig_manifest_apk,
+ &self.vm_parameters,
+ )
+ .context("Starting VM")?;
+ let service = vm_instance.get_service().context("Connecting to CompOS")?;
+ Ok(CompOsInstance { vm_instance, service, lazy_service_guard: Default::default() })
+ }
+
+ fn create_instance_image(
+ &self,
+ virtualization_service: &dyn IVirtualizationService,
+ ) -> Result<()> {
+ let instance_image = fs::OpenOptions::new()
+ .create(true)
+ .truncate(true)
+ .read(true)
+ .write(true)
+ .open(&self.instance_image)
+ .context("Creating instance image file")?;
+ let instance_image = ParcelFileDescriptor::new(instance_image);
+ // TODO: Where does this number come from?
+ let size = 10 * 1024 * 1024;
+ virtualization_service
+ .initializeWritablePartition(&instance_image, size, PartitionType::ANDROID_VM_INSTANCE)
+ .context("Writing instance image file")?;
+ Ok(())
+ }
+}
diff --git a/compos/composd/src/odrefresh_task.rs b/compos/composd/src/odrefresh_task.rs
new file mode 100644
index 0000000..9dec1c1
--- /dev/null
+++ b/compos/composd/src/odrefresh_task.rs
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Handle running odrefresh in the VM, with an async interface to allow cancellation
+
+use crate::fd_server_helper::FdServerConfig;
+use crate::instance_starter::CompOsInstance;
+use android_system_composd::aidl::android::system::composd::{
+ ICompilationTask::ICompilationTask, ICompilationTaskCallback::ICompilationTaskCallback,
+};
+use android_system_composd::binder::{Interface, Result as BinderResult, Strong};
+use anyhow::{Context, Result};
+use compos_aidl_interface::aidl::com::android::compos::ICompOsService::{
+ CompilationMode::CompilationMode, ICompOsService,
+};
+use compos_common::odrefresh::{ExitCode, ODREFRESH_OUTPUT_ROOT_DIR};
+use log::{error, info, warn};
+use rustutils::system_properties;
+use std::fs::{remove_dir_all, File, OpenOptions};
+use std::os::unix::fs::OpenOptionsExt;
+use std::os::unix::io::AsRawFd;
+use std::path::Path;
+use std::sync::{Arc, Mutex};
+use std::thread;
+
+#[derive(Clone)]
+pub struct OdrefreshTask {
+ running_task: Arc<Mutex<Option<RunningTask>>>,
+}
+
+impl Interface for OdrefreshTask {}
+
+impl ICompilationTask for OdrefreshTask {
+ fn cancel(&self) -> BinderResult<()> {
+ let task = self.take();
+ // Drop the VM, which should end compilation - and cause our thread to exit
+ drop(task);
+ Ok(())
+ }
+}
+
+struct RunningTask {
+ callback: Strong<dyn ICompilationTaskCallback>,
+ #[allow(dead_code)] // Keeps the CompOS VM alive
+ comp_os: Arc<CompOsInstance>,
+}
+
+impl OdrefreshTask {
+ /// Return the current running task, if any, removing it from this CompilationTask.
+ /// Once removed, meaning the task has ended or been canceled, further calls will always return
+ /// None.
+ fn take(&self) -> Option<RunningTask> {
+ self.running_task.lock().unwrap().take()
+ }
+
+ pub fn start(
+ comp_os: Arc<CompOsInstance>,
+ compilation_mode: CompilationMode,
+ target_dir_name: String,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> Result<OdrefreshTask> {
+ let service = comp_os.get_service();
+ let task = RunningTask { comp_os, callback: callback.clone() };
+ let task = OdrefreshTask { running_task: Arc::new(Mutex::new(Some(task))) };
+
+ task.clone().start_thread(service, compilation_mode, target_dir_name);
+
+ Ok(task)
+ }
+
+ fn start_thread(
+ self,
+ service: Strong<dyn ICompOsService>,
+ compilation_mode: CompilationMode,
+ target_dir_name: String,
+ ) {
+ thread::spawn(move || {
+ let exit_code = run_in_vm(service, compilation_mode, &target_dir_name);
+
+ let task = self.take();
+ // We don't do the callback if cancel has already happened.
+ if let Some(task) = task {
+ let result = match exit_code {
+ Ok(ExitCode::CompilationSuccess) => {
+ info!("CompilationSuccess");
+ task.callback.onSuccess()
+ }
+ Ok(exit_code) => {
+ error!("Unexpected odrefresh result: {:?}", exit_code);
+ task.callback.onFailure()
+ }
+ Err(e) => {
+ error!("Running odrefresh failed: {:?}", e);
+ task.callback.onFailure()
+ }
+ };
+ if let Err(e) = result {
+ warn!("Failed to deliver callback: {:?}", e);
+ }
+ }
+ });
+ }
+}
+
+fn run_in_vm(
+ service: Strong<dyn ICompOsService>,
+ compilation_mode: CompilationMode,
+ target_dir_name: &str,
+) -> Result<ExitCode> {
+ let output_root = Path::new(ODREFRESH_OUTPUT_ROOT_DIR);
+
+ // We need to remove the target directory because odrefresh running in compos will create it
+ // (and can't see the existing one, since authfs doesn't show existing files in an output
+ // directory).
+ let target_path = output_root.join(target_dir_name);
+ if target_path.exists() {
+ remove_dir_all(&target_path)
+ .with_context(|| format!("Failed to delete {}", target_path.display()))?;
+ }
+
+ let staging_dir = open_dir(composd_native::palette_create_odrefresh_staging_directory()?)?;
+ let system_dir = open_dir(Path::new("/system"))?;
+ let output_dir = open_dir(output_root)?;
+
+ // Spawn a fd_server to serve the FDs.
+ let fd_server_config = FdServerConfig {
+ ro_dir_fds: vec![system_dir.as_raw_fd()],
+ rw_dir_fds: vec![staging_dir.as_raw_fd(), output_dir.as_raw_fd()],
+ ..Default::default()
+ };
+ let fd_server_raii = fd_server_config.into_fd_server()?;
+
+ let zygote_arch = system_properties::read("ro.zygote")?.context("ro.zygote not set")?;
+ let system_server_compiler_filter =
+ system_properties::read("dalvik.vm.systemservercompilerfilter")?.unwrap_or_default();
+ let exit_code = service.odrefresh(
+ compilation_mode,
+ system_dir.as_raw_fd(),
+ output_dir.as_raw_fd(),
+ staging_dir.as_raw_fd(),
+ target_dir_name,
+ &zygote_arch,
+ &system_server_compiler_filter,
+ )?;
+
+ drop(fd_server_raii);
+ ExitCode::from_i32(exit_code.into())
+}
+
+/// Returns an owned FD of the directory. It currently returns a `File` as a FD owner, but
+/// it's better to use `std::os::unix::io::OwnedFd` once/if it becomes standard.
+fn open_dir(path: &Path) -> Result<File> {
+ OpenOptions::new()
+ .custom_flags(libc::O_DIRECTORY)
+ .read(true) // O_DIRECTORY can only be opened with read
+ .open(path)
+ .with_context(|| format!("Failed to open {:?} directory as path fd", path))
+}
diff --git a/compos/composd/src/service.rs b/compos/composd/src/service.rs
new file mode 100644
index 0000000..a9b8202
--- /dev/null
+++ b/compos/composd/src/service.rs
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Implementation of IIsolatedCompilationService, called from system server when compilation is
+//! desired.
+
+use crate::instance_manager::InstanceManager;
+use crate::odrefresh_task::OdrefreshTask;
+use android_system_composd::aidl::android::system::composd::{
+ ICompilationTask::{BnCompilationTask, ICompilationTask},
+ ICompilationTaskCallback::ICompilationTaskCallback,
+ IIsolatedCompilationService::{
+ ApexSource::ApexSource, BnIsolatedCompilationService, IIsolatedCompilationService,
+ },
+};
+use android_system_composd::binder::{
+ self, BinderFeatures, ExceptionCode, Interface, Status, Strong, ThreadState,
+};
+use anyhow::{Context, Result};
+use compos_aidl_interface::aidl::com::android::compos::ICompOsService::CompilationMode::CompilationMode;
+use compos_common::binder::to_binder_result;
+use compos_common::odrefresh::{PENDING_ARTIFACTS_SUBDIR, TEST_ARTIFACTS_SUBDIR};
+use rustutils::{users::AID_ROOT, users::AID_SYSTEM};
+use std::sync::Arc;
+
+pub struct IsolatedCompilationService {
+ instance_manager: Arc<InstanceManager>,
+}
+
+pub fn new_binder(
+ instance_manager: Arc<InstanceManager>,
+) -> Strong<dyn IIsolatedCompilationService> {
+ let service = IsolatedCompilationService { instance_manager };
+ BnIsolatedCompilationService::new_binder(service, BinderFeatures::default())
+}
+
+impl Interface for IsolatedCompilationService {}
+
+impl IIsolatedCompilationService for IsolatedCompilationService {
+ fn startStagedApexCompile(
+ &self,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> binder::Result<Strong<dyn ICompilationTask>> {
+ check_permissions()?;
+ to_binder_result(self.do_start_staged_apex_compile(callback))
+ }
+
+ fn startTestCompile(
+ &self,
+ apex_source: ApexSource,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> binder::Result<Strong<dyn ICompilationTask>> {
+ check_permissions()?;
+ let prefer_staged = match apex_source {
+ ApexSource::NoStaged => false,
+ ApexSource::PreferStaged => true,
+ _ => unreachable!("Invalid ApexSource {:?}", apex_source),
+ };
+ to_binder_result(self.do_start_test_compile(prefer_staged, callback))
+ }
+}
+
+impl IsolatedCompilationService {
+ fn do_start_staged_apex_compile(
+ &self,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> Result<Strong<dyn ICompilationTask>> {
+ let comp_os = self.instance_manager.start_current_instance().context("Starting CompOS")?;
+
+ let target_dir_name = PENDING_ARTIFACTS_SUBDIR.to_owned();
+ let task = OdrefreshTask::start(
+ comp_os,
+ CompilationMode::NORMAL_COMPILE,
+ target_dir_name,
+ callback,
+ )?;
+
+ Ok(BnCompilationTask::new_binder(task, BinderFeatures::default()))
+ }
+
+ fn do_start_test_compile(
+ &self,
+ prefer_staged: bool,
+ callback: &Strong<dyn ICompilationTaskCallback>,
+ ) -> Result<Strong<dyn ICompilationTask>> {
+ let comp_os =
+ self.instance_manager.start_test_instance(prefer_staged).context("Starting CompOS")?;
+
+ let target_dir_name = TEST_ARTIFACTS_SUBDIR.to_owned();
+ let task = OdrefreshTask::start(
+ comp_os,
+ CompilationMode::TEST_COMPILE,
+ target_dir_name,
+ callback,
+ )?;
+
+ Ok(BnCompilationTask::new_binder(task, BinderFeatures::default()))
+ }
+}
+
+fn check_permissions() -> binder::Result<()> {
+ let calling_uid = ThreadState::get_calling_uid();
+ // This should only be called by system server, or root while testing
+ if calling_uid != AID_SYSTEM && calling_uid != AID_ROOT {
+ Err(Status::new_exception(ExceptionCode::SECURITY, None))
+ } else {
+ Ok(())
+ }
+}
diff --git a/compos/composd_cmd/Android.bp b/compos/composd_cmd/Android.bp
new file mode 100644
index 0000000..c230e13
--- /dev/null
+++ b/compos/composd_cmd/Android.bp
@@ -0,0 +1,20 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_binary {
+ name: "composd_cmd",
+ srcs: ["composd_cmd.rs"],
+ edition: "2018",
+ rustlibs: [
+ "android.system.composd-rust",
+ "libanyhow",
+ "libbinder_rs",
+ "libclap",
+ "libcompos_common",
+ ],
+ prefer_rlib: true,
+ apex_available: [
+ "com.android.compos",
+ ],
+}
diff --git a/compos/composd_cmd/composd_cmd.rs b/compos/composd_cmd/composd_cmd.rs
new file mode 100644
index 0000000..9f535d5
--- /dev/null
+++ b/compos/composd_cmd/composd_cmd.rs
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Simple command-line tool to drive composd for testing and debugging.
+
+use android_system_composd::{
+ aidl::android::system::composd::{
+ ICompilationTask::ICompilationTask,
+ ICompilationTaskCallback::{BnCompilationTaskCallback, ICompilationTaskCallback},
+ IIsolatedCompilationService::ApexSource::ApexSource,
+ IIsolatedCompilationService::IIsolatedCompilationService,
+ },
+ binder::{
+ wait_for_interface, BinderFeatures, DeathRecipient, IBinder, Interface, ProcessState,
+ Result as BinderResult, Strong,
+ },
+};
+use anyhow::{bail, Context, Result};
+use compos_common::timeouts::timeouts;
+use std::sync::{Arc, Condvar, Mutex};
+use std::time::Duration;
+
+fn main() -> Result<()> {
+ #[rustfmt::skip]
+ let app = clap::App::new("composd_cmd")
+ .subcommand(
+ clap::SubCommand::with_name("staged-apex-compile"))
+ .subcommand(
+ clap::SubCommand::with_name("test-compile")
+ .arg(clap::Arg::with_name("prefer-staged").long("prefer-staged")),
+ );
+ let args = app.get_matches();
+
+ ProcessState::start_thread_pool();
+
+ match args.subcommand() {
+ ("staged-apex-compile", _) => run_staged_apex_compile()?,
+ ("test-compile", Some(sub_matches)) => {
+ let prefer_staged = sub_matches.is_present("prefer-staged");
+ run_test_compile(prefer_staged)?;
+ }
+ _ => panic!("Unrecognized subcommand"),
+ }
+
+ println!("All Ok!");
+
+ Ok(())
+}
+
+struct Callback(Arc<State>);
+
+#[derive(Default)]
+struct State {
+ mutex: Mutex<Option<Outcome>>,
+ completed: Condvar,
+}
+
+#[derive(Copy, Clone)]
+enum Outcome {
+ Succeeded,
+ Failed,
+}
+
+impl Interface for Callback {}
+
+impl ICompilationTaskCallback for Callback {
+ fn onSuccess(&self) -> BinderResult<()> {
+ self.0.set_outcome(Outcome::Succeeded);
+ Ok(())
+ }
+
+ fn onFailure(&self) -> BinderResult<()> {
+ self.0.set_outcome(Outcome::Failed);
+ Ok(())
+ }
+}
+
+impl State {
+ fn set_outcome(&self, outcome: Outcome) {
+ let mut guard = self.mutex.lock().unwrap();
+ *guard = Some(outcome);
+ drop(guard);
+ self.completed.notify_all();
+ }
+
+ fn wait(&self, duration: Duration) -> Result<Outcome> {
+ let (outcome, result) = self
+ .completed
+ .wait_timeout_while(self.mutex.lock().unwrap(), duration, |outcome| outcome.is_none())
+ .unwrap();
+ if result.timed_out() {
+ bail!("Timed out waiting for compilation")
+ }
+ Ok(outcome.unwrap())
+ }
+}
+
+fn run_staged_apex_compile() -> Result<()> {
+ run_async_compilation(|service, callback| service.startStagedApexCompile(callback))
+}
+
+fn run_test_compile(prefer_staged: bool) -> Result<()> {
+ let apex_source = if prefer_staged { ApexSource::PreferStaged } else { ApexSource::NoStaged };
+ run_async_compilation(|service, callback| service.startTestCompile(apex_source, callback))
+}
+
+fn run_async_compilation<F>(start_compile_fn: F) -> Result<()>
+where
+ F: FnOnce(
+ &dyn IIsolatedCompilationService,
+ &Strong<dyn ICompilationTaskCallback>,
+ ) -> BinderResult<Strong<dyn ICompilationTask>>,
+{
+ let service = wait_for_interface::<dyn IIsolatedCompilationService>("android.system.composd")
+ .context("Failed to connect to composd service")?;
+
+ let state = Arc::new(State::default());
+ let callback = Callback(state.clone());
+ let callback = BnCompilationTaskCallback::new_binder(callback, BinderFeatures::default());
+ let task = start_compile_fn(&*service, &callback).context("Compilation failed")?;
+
+ // Make sure composd keeps going even if we don't hold a reference to its service.
+ drop(service);
+
+ let state_clone = state.clone();
+ let mut death_recipient = DeathRecipient::new(move || {
+ eprintln!("CompilationTask died");
+ state_clone.set_outcome(Outcome::Failed);
+ });
+ // Note that dropping death_recipient cancels this, so we can't use a temporary here.
+ task.as_binder().link_to_death(&mut death_recipient)?;
+
+ println!("Waiting");
+
+ match state.wait(timeouts()?.odrefresh_max_execution_time) {
+ Ok(Outcome::Succeeded) => Ok(()),
+ Ok(Outcome::Failed) => bail!("Compilation failed"),
+ Err(e) => {
+ if let Err(e) = task.cancel() {
+ eprintln!("Failed to cancel compilation: {:?}", e);
+ }
+ Err(e)
+ }
+ }
+}
diff --git a/compos/service/Android.bp b/compos/service/Android.bp
new file mode 100644
index 0000000..336ae9b
--- /dev/null
+++ b/compos/service/Android.bp
@@ -0,0 +1,41 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+java_library {
+ name: "service-compos",
+ srcs: [
+ "java/**/*.java",
+ ],
+ defaults: ["framework-system-server-module-defaults"],
+ permitted_packages: [
+ "com.android.server.compos",
+ "com.android.compos",
+ "android.system.composd",
+ ],
+ static_libs: [
+ "android.system.composd-java",
+ ],
+ apex_available: [
+ "com.android.compos",
+ ],
+ // Access to SystemService, ServiceManager#waitForService etc
+ libs: ["services"],
+ sdk_version: "",
+ platform_apis: true,
+ installable: true,
+}
diff --git a/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java b/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
new file mode 100644
index 0000000..75f5334
--- /dev/null
+++ b/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.server.compos;
+
+import static java.util.Objects.requireNonNull;
+
+import android.app.job.JobInfo;
+import android.app.job.JobParameters;
+import android.app.job.JobScheduler;
+import android.app.job.JobService;
+import android.content.ComponentName;
+import android.os.IBinder;
+import android.os.RemoteException;
+import android.os.ServiceManager;
+import android.system.composd.ICompilationTask;
+import android.system.composd.ICompilationTaskCallback;
+import android.system.composd.IIsolatedCompilationService;
+import android.util.Log;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * A job scheduler service responsible for performing Isolated Compilation when scheduled.
+ *
+ * @hide
+ */
+public class IsolatedCompilationJobService extends JobService {
+ private static final String TAG = IsolatedCompilationJobService.class.getName();
+ private static final int DAILY_JOB_ID = 5132250;
+ private static final int STAGED_APEX_JOB_ID = 5132251;
+
+ private final AtomicReference<CompilationJob> mCurrentJob = new AtomicReference<>();
+
+ static void scheduleDailyJob(JobScheduler scheduler) {
+ // TODO(b/205296305) Remove this
+ ComponentName serviceName =
+ new ComponentName("android", IsolatedCompilationJobService.class.getName());
+
+ int result = scheduler.schedule(new JobInfo.Builder(DAILY_JOB_ID, serviceName)
+ .setRequiresDeviceIdle(true)
+ .setRequiresCharging(true)
+ .setPeriodic(TimeUnit.DAYS.toMillis(1))
+ .build());
+ if (result != JobScheduler.RESULT_SUCCESS) {
+ Log.e(TAG, "Failed to schedule daily job");
+ }
+ }
+
+ static void scheduleStagedApexJob(JobScheduler scheduler) {
+ ComponentName serviceName =
+ new ComponentName("android", IsolatedCompilationJobService.class.getName());
+
+ int result = scheduler.schedule(new JobInfo.Builder(STAGED_APEX_JOB_ID, serviceName)
+ // Wait in case more APEXes are staged
+ .setMinimumLatency(TimeUnit.MINUTES.toMillis(60))
+ // We consume CPU, power, and storage
+ .setRequiresDeviceIdle(true)
+ .setRequiresCharging(true)
+ .setRequiresStorageNotLow(true)
+ .build());
+ if (result != JobScheduler.RESULT_SUCCESS) {
+ Log.e(TAG, "Failed to schedule staged APEX job");
+ }
+ }
+
+ static boolean isStagedApexJobScheduled(JobScheduler scheduler) {
+ return scheduler.getPendingJob(STAGED_APEX_JOB_ID) != null;
+ }
+
+ @Override
+ public boolean onStartJob(JobParameters params) {
+ int jobId = params.getJobId();
+
+ Log.i(TAG, "Starting job " + jobId);
+
+ // This function (and onStopJob) is only ever called on the main thread, so we don't have
+ // to worry about two starts at once, or start and stop happening at once. But onCompletion
+ // can be called on any thread, so we need to be careful with that.
+
+ CompilationJob oldJob = mCurrentJob.get();
+ if (oldJob != null) {
+ // We're already running a job, give up on this one
+ Log.w(TAG, "Another job is in progress, skipping");
+ return false; // Already finished
+ }
+
+ CompilationJob newJob = new CompilationJob(IsolatedCompilationJobService.this::onCompletion,
+ params);
+ mCurrentJob.set(newJob);
+
+ // This can take some time - we need to start up a VM - so we do it on a separate
+ // thread. This thread exits as soon as the compilation Task has been started (or
+ // there's a failure), and then compilation continues in composd and the VM.
+ new Thread("IsolatedCompilationJob_starter") {
+ @Override
+ public void run() {
+ try {
+ newJob.start(jobId);
+ } catch (RuntimeException e) {
+ Log.e(TAG, "Starting CompilationJob failed", e);
+ mCurrentJob.set(null);
+ newJob.stop(); // Just in case it managed to start before failure
+ jobFinished(params, /*wantReschedule=*/ false);
+ }
+ }
+ }.start();
+ return true; // Job is running in the background
+ }
+
+ @Override
+ public boolean onStopJob(JobParameters params) {
+ CompilationJob job = mCurrentJob.getAndSet(null);
+ if (job == null) {
+ return false; // No need to reschedule, we'd finished
+ } else {
+ job.stop();
+ return true; // We didn't get to finish, please re-schedule
+ }
+ }
+
+ void onCompletion(JobParameters params, boolean succeeded) {
+ Log.i(TAG, "onCompletion, succeeded=" + succeeded);
+
+ CompilationJob job = mCurrentJob.getAndSet(null);
+ if (job == null) {
+ // No need to call jobFinished if we've been told to stop.
+ return;
+ }
+ // On success we don't need to reschedule.
+ // On failure we could reschedule, but that could just use a lot of resources and still
+ // fail; instead we just let odsign do compilation on reboot if necessary.
+ jobFinished(params, /*wantReschedule=*/ false);
+ }
+
+ interface CompilationCallback {
+ void onCompletion(JobParameters params, boolean succeeded);
+ }
+
+ static class CompilationJob extends ICompilationTaskCallback.Stub
+ implements IBinder.DeathRecipient {
+ private final AtomicReference<ICompilationTask> mTask = new AtomicReference<>();
+ private final CompilationCallback mCallback;
+ private final JobParameters mParams;
+ private volatile boolean mStopRequested = false;
+ private volatile boolean mCanceled = false;
+
+ CompilationJob(CompilationCallback callback, JobParameters params) {
+ mCallback = requireNonNull(callback);
+ mParams = params;
+ }
+
+ void start(int jobId) {
+ IBinder binder = ServiceManager.waitForService("android.system.composd");
+ IIsolatedCompilationService composd =
+ IIsolatedCompilationService.Stub.asInterface(binder);
+
+ if (composd == null) {
+ throw new IllegalStateException("Unable to find composd service");
+ }
+
+ try {
+ ICompilationTask composTask;
+ if (jobId == DAILY_JOB_ID) {
+ composTask = composd.startTestCompile(
+ IIsolatedCompilationService.ApexSource.NoStaged, this);
+ } else {
+ composTask = composd.startStagedApexCompile(this);
+ }
+ mTask.set(composTask);
+ composTask.asBinder().linkToDeath(this, 0);
+ } catch (RemoteException e) {
+ throw e.rethrowAsRuntimeException();
+ }
+
+ if (mStopRequested) {
+ // We were asked to stop while we were starting the task. We need to
+ // cancel it now, since we couldn't before.
+ cancelTask();
+ }
+ }
+
+ void stop() {
+ mStopRequested = true;
+ cancelTask();
+ }
+
+ private void cancelTask() {
+ ICompilationTask task = mTask.getAndSet(null);
+ if (task != null) {
+ mCanceled = true;
+ Log.i(TAG, "Cancelling task");
+ try {
+ task.cancel();
+ } catch (RuntimeException | RemoteException e) {
+ // If canceling failed we'll assume it means that the task has already failed;
+ // there's nothing else we can do anyway.
+ Log.w(TAG, "Failed to cancel CompilationTask", e);
+ }
+ }
+ }
+
+ @Override
+ public void binderDied() {
+ onFailure();
+ }
+
+ @Override
+ public void onSuccess() {
+ onCompletion(true);
+ }
+
+ @Override
+ public void onFailure() {
+ onCompletion(false);
+ }
+
+ private void onCompletion(boolean succeeded) {
+ mTask.set(null);
+ if (!mCanceled) {
+ mCallback.onCompletion(mParams, succeeded);
+ }
+ }
+ }
+}
diff --git a/compos/service/java/com/android/server/compos/IsolatedCompilationService.java b/compos/service/java/com/android/server/compos/IsolatedCompilationService.java
new file mode 100644
index 0000000..11e3743
--- /dev/null
+++ b/compos/service/java/com/android/server/compos/IsolatedCompilationService.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.server.compos;
+
+import static android.os.Build.isDebuggable;
+
+import android.annotation.NonNull;
+import android.app.job.JobScheduler;
+import android.content.Context;
+import android.content.pm.ApexStagedEvent;
+import android.content.pm.IPackageManagerNative;
+import android.content.pm.IStagedApexObserver;
+import android.content.pm.StagedApexInfo;
+import android.os.RemoteException;
+import android.os.ServiceManager;
+import android.sysprop.HypervisorProperties;
+import android.util.Log;
+
+import com.android.server.SystemService;
+
+/**
+ * A system service responsible for performing Isolated Compilation (compiling boot & system server
+ * classpath JARs in a protected VM) when appropriate.
+ *
+ * @hide
+ */
+public class IsolatedCompilationService extends SystemService {
+ private static final String TAG = IsolatedCompilationService.class.getName();
+
+ public IsolatedCompilationService(@NonNull Context context) {
+ super(context);
+ }
+
+ @Override
+ public void onStart() {
+ // Note that our binder service is exposed directly from native code in composd, so
+ // we don't need to do anything here.
+ }
+
+ @Override
+ public void onBootPhase(/* @BootPhase */ int phase) {
+ if (phase != PHASE_BOOT_COMPLETED) return;
+
+ if (!isIsolatedCompilationSupported()) {
+ Log.i(TAG, "Isolated compilation not supported, not scheduling job");
+ return;
+ }
+
+
+ JobScheduler scheduler = getContext().getSystemService(JobScheduler.class);
+ if (scheduler == null) {
+ Log.e(TAG, "No scheduler");
+ return;
+ }
+
+ IsolatedCompilationJobService.scheduleDailyJob(scheduler);
+ StagedApexObserver.registerForStagedApexUpdates(scheduler);
+ }
+
+ private static boolean isIsolatedCompilationSupported() {
+ // The CompOS APEX is present or we wouldn't be here. So just check that the device
+ // has a suitably capable hypervisor.
+
+ // We really want a protected VM
+ if (HypervisorProperties.hypervisor_protected_vm_supported().orElse(false)) {
+ return true;
+ }
+
+ // But can use a non-protected VM on a debug build
+ if (isDebuggable()) {
+ return HypervisorProperties.hypervisor_vm_supported().orElse(false);
+ }
+
+ return false;
+ }
+
+ private static class StagedApexObserver extends IStagedApexObserver.Stub {
+ private final JobScheduler mScheduler;
+ private final IPackageManagerNative mPackageNative;
+
+ static void registerForStagedApexUpdates(JobScheduler scheduler) {
+ final IPackageManagerNative packageNative = IPackageManagerNative.Stub.asInterface(
+ ServiceManager.getService("package_native"));
+ if (packageNative == null) {
+ Log.e(TAG, "No IPackageManagerNative");
+ return;
+ }
+
+ StagedApexObserver observer = new StagedApexObserver(scheduler, packageNative);
+ try {
+ packageNative.registerStagedApexObserver(observer);
+ // In the unlikely event that an APEX has been staged before we get here, we may
+ // have to schedule compilation immediately.
+ observer.checkModules(packageNative.getStagedApexModuleNames());
+ } catch (RemoteException e) {
+ Log.e(TAG, "Failed to initialize observer", e);
+ }
+ }
+
+ private StagedApexObserver(JobScheduler scheduler,
+ IPackageManagerNative packageNative) {
+ mScheduler = scheduler;
+ mPackageNative = packageNative;
+ }
+
+ @Override
+ public void onApexStaged(ApexStagedEvent event) {
+ Log.d(TAG, "onApexStaged");
+ checkModules(event.stagedApexModuleNames);
+ }
+
+ void checkModules(String[] moduleNames) {
+ if (IsolatedCompilationJobService.isStagedApexJobScheduled(mScheduler)) {
+ Log.d(TAG, "Job already scheduled");
+ // We're going to run anyway, we don't need to check this update
+ return;
+ }
+ boolean needCompilation = false;
+ for (String moduleName : moduleNames) {
+ try {
+ StagedApexInfo apexInfo = mPackageNative.getStagedApexInfo(moduleName);
+ if (apexInfo != null && apexInfo.hasClassPathJars) {
+ Log.i(TAG, "Classpath affecting module updated: " + moduleName);
+ needCompilation = true;
+ break;
+ }
+ } catch (RemoteException e) {
+ Log.w(TAG, "Failed to get getStagedApexInfo for " + moduleName);
+ }
+ }
+ if (needCompilation) {
+ IsolatedCompilationJobService.scheduleStagedApexJob(mScheduler);
+ }
+ }
+ }
+}
diff --git a/compos/src/artifact_signer.rs b/compos/src/artifact_signer.rs
new file mode 100644
index 0000000..a15df28
--- /dev/null
+++ b/compos/src/artifact_signer.rs
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Support for generating and signing an info file listing names and digests of generated
+//! artifacts.
+
+use crate::compos_key;
+use crate::fsverity;
+use anyhow::{anyhow, Context, Result};
+use odsign_proto::odsign_info::OdsignInfo;
+use protobuf::Message;
+use std::fs::File;
+use std::io::Write;
+use std::os::unix::io::AsRawFd;
+use std::path::Path;
+
+const TARGET_DIRECTORY: &str = "/data/misc/apexdata/com.android.art/dalvik-cache";
+const SIGNATURE_EXTENSION: &str = ".signature";
+
+/// Accumulates and then signs information about generated artifacts.
+pub struct ArtifactSigner<'a> {
+ base_directory: &'a Path,
+ file_digests: Vec<(String, String)>, // (File name, digest in hex)
+}
+
+impl<'a> ArtifactSigner<'a> {
+ /// base_directory specifies the directory under which the artifacts are currently located;
+ /// they will eventually be moved under TARGET_DIRECTORY once they are verified and activated.
+ pub fn new(base_directory: &'a Path) -> Self {
+ Self { base_directory, file_digests: Vec::new() }
+ }
+
+ pub fn add_artifact(&mut self, path: &Path) -> Result<()> {
+ // The path we store is where the file will be when it is verified, not where it is now.
+ let suffix = path
+ .strip_prefix(&self.base_directory)
+ .context("Artifacts must be under base directory")?;
+ let target_path = Path::new(TARGET_DIRECTORY).join(suffix);
+ let target_path = target_path.to_str().ok_or_else(|| anyhow!("Invalid path"))?;
+
+ let file = File::open(path).with_context(|| format!("Opening {}", path.display()))?;
+ let digest = fsverity::measure(file.as_raw_fd())?;
+ let digest = to_hex_string(&digest);
+
+ self.file_digests.push((target_path.to_owned(), digest));
+ Ok(())
+ }
+
+ /// Consume this ArtifactSigner and write details of all its artifacts to the given path,
+ /// with accompanying signature file.
+ pub fn write_info_and_signature(self, info_path: &Path) -> Result<()> {
+ let mut info = OdsignInfo::new();
+ info.mut_file_hashes().extend(self.file_digests.into_iter());
+ let bytes = info.write_to_bytes()?;
+
+ let signature = compos_key::sign(&bytes)?;
+
+ let mut file =
+ File::create(info_path).with_context(|| format!("Creating {}", info_path.display()))?;
+ file.write_all(&bytes)?;
+
+ let mut signature_name = info_path.file_name().unwrap().to_owned();
+ signature_name.push(SIGNATURE_EXTENSION);
+ let signature_path = info_path.with_file_name(&signature_name);
+ let mut signature_file = File::create(&signature_path)
+ .with_context(|| format!("Creating {}", signature_path.display()))?;
+ signature_file.write_all(&signature)?;
+
+ Ok(())
+ }
+}
+
+fn to_hex_string(buf: &[u8]) -> String {
+ buf.iter().map(|b| format!("{:02x}", b)).collect()
+}
diff --git a/compos/src/authfs.rs b/compos/src/authfs.rs
deleted file mode 100644
index ce9aaf8..0000000
--- a/compos/src/authfs.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use anyhow::{bail, Context, Result};
-use log::warn;
-use minijail::Minijail;
-use nix::sys::statfs::{statfs, FsType};
-use std::fs::{File, OpenOptions};
-use std::path::Path;
-use std::thread::sleep;
-use std::time::{Duration, Instant};
-
-const AUTHFS_BIN: &str = "/system/bin/authfs";
-const AUTHFS_SETUP_POLL_INTERVAL_MS: Duration = Duration::from_millis(50);
-const AUTHFS_SETUP_TIMEOUT_SEC: Duration = Duration::from_secs(10);
-const FUSE_SUPER_MAGIC: FsType = FsType(0x65735546);
-
-/// The number that hints the future file descriptor. These are not really file descriptor, but
-/// represents the file descriptor number to pass to the task.
-pub type PseudoRawFd = i32;
-
-/// Annotation of input file descriptor.
-#[derive(Debug)]
-pub struct InFdAnnotation {
- /// A number/file descriptor that is supposed to represent a remote file.
- pub fd: PseudoRawFd,
-
- /// The file size of the remote file. Remote input files are supposed to be immutable and
- /// to be verified with fs-verity by authfs.
- pub file_size: u64,
-}
-
-/// Annotation of output file descriptor.
-#[derive(Debug)]
-pub struct OutFdAnnotation {
- /// A number/file descriptor that is supposed to represent a remote file.
- pub fd: PseudoRawFd,
-}
-
-/// An `AuthFs` instance is supposed to be backed by the `authfs` process. When the lifetime of the
-/// instance is over, the process is terminated and the FUSE is unmounted.
-pub struct AuthFs {
- mountpoint: String,
- jail: Minijail,
-}
-
-impl AuthFs {
- /// Mount an authfs at `mountpoint` with specified FD annotations.
- pub fn mount_and_wait(
- mountpoint: &str,
- in_fds: &[InFdAnnotation],
- out_fds: &[OutFdAnnotation],
- debuggable: bool,
- ) -> Result<AuthFs> {
- let jail = jail_authfs(mountpoint, in_fds, out_fds, debuggable)?;
- wait_until_authfs_ready(mountpoint)?;
- Ok(AuthFs { mountpoint: mountpoint.to_string(), jail })
- }
-
- /// Open a file at authfs' root directory.
- pub fn open_file(&self, basename: PseudoRawFd, writable: bool) -> Result<File> {
- OpenOptions::new()
- .read(true)
- .write(writable)
- .open(format!("{}/{}", self.mountpoint, basename))
- .with_context(|| format!("open authfs file {}", basename))
- }
-}
-
-impl Drop for AuthFs {
- fn drop(&mut self) {
- if let Err(e) = self.jail.kill() {
- if !matches!(e, minijail::Error::Killed(_)) {
- warn!("Failed to kill authfs: {}", e);
- }
- }
- }
-}
-
-fn jail_authfs(
- mountpoint: &str,
- in_fds: &[InFdAnnotation],
- out_fds: &[OutFdAnnotation],
- debuggable: bool,
-) -> Result<Minijail> {
- // TODO(b/185175567): Run in a more restricted sandbox.
- let jail = Minijail::new()?;
-
- let mut args = vec![
- AUTHFS_BIN.to_string(),
- mountpoint.to_string(),
- "--cid=2".to_string(), // Always use host unless we need to support other cases
- ];
- for conf in in_fds {
- // TODO(b/185178698): Many input files need to be signed and verified.
- // or can we use debug cert for now, which is better than nothing?
- args.push("--remote-ro-file-unverified".to_string());
- args.push(format!("{}:{}:{}", conf.fd, conf.fd, conf.file_size));
- }
- for conf in out_fds {
- args.push("--remote-new-rw-file".to_string());
- args.push(format!("{}:{}", conf.fd, conf.fd));
- }
-
- let preserve_fds = if debuggable {
- vec![1, 2] // inherit/redirect stdout/stderr for debugging
- } else {
- vec![]
- };
-
- let _pid = jail.run(Path::new(AUTHFS_BIN), &preserve_fds, &args)?;
- Ok(jail)
-}
-
-fn wait_until_authfs_ready(mountpoint: &str) -> Result<()> {
- let start_time = Instant::now();
- loop {
- if is_fuse(mountpoint)? {
- break;
- }
- if start_time.elapsed() > AUTHFS_SETUP_TIMEOUT_SEC {
- bail!("Time out mounting authfs");
- }
- sleep(AUTHFS_SETUP_POLL_INTERVAL_MS);
- }
- Ok(())
-}
-
-fn is_fuse(path: &str) -> Result<bool> {
- Ok(statfs(path)?.filesystem_type() == FUSE_SUPER_MAGIC)
-}
diff --git a/compos/src/common.rs b/compos/src/common.rs
deleted file mode 100644
index 6cad63a..0000000
--- a/compos/src/common.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/// Port to listen. This should be out of future port range (if happens) that microdroid may
-/// reserve for system components.
-pub const VSOCK_PORT: u32 = 6432;
-
-/// Service name of local binder. Used only for debugging purpose.
-pub const SERVICE_NAME: &str = "compsvc";
diff --git a/compos/src/compilation.rs b/compos/src/compilation.rs
new file mode 100644
index 0000000..e14cd94
--- /dev/null
+++ b/compos/src/compilation.rs
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::{anyhow, bail, Context, Result};
+use log::{debug, info, warn};
+use minijail::{self, Minijail};
+use regex::Regex;
+use rustutils::system_properties;
+use std::collections::HashMap;
+use std::env;
+use std::ffi::OsString;
+use std::path::{self, Path, PathBuf};
+use std::process::Command;
+
+use authfs_aidl_interface::aidl::com::android::virt::fs::{
+ AuthFsConfig::{
+ AuthFsConfig, InputDirFdAnnotation::InputDirFdAnnotation,
+ OutputDirFdAnnotation::OutputDirFdAnnotation,
+ },
+ IAuthFsService::IAuthFsService,
+};
+use authfs_aidl_interface::binder::Strong;
+use compos_aidl_interface::aidl::com::android::compos::ICompOsService::CompilationMode::CompilationMode;
+use compos_common::odrefresh::ExitCode;
+
+const FD_SERVER_PORT: i32 = 3264; // TODO: support dynamic port
+
+pub struct OdrefreshContext<'a> {
+ compilation_mode: CompilationMode,
+ system_dir_fd: i32,
+ output_dir_fd: i32,
+ staging_dir_fd: i32,
+ target_dir_name: &'a str,
+ zygote_arch: &'a str,
+ system_server_compiler_filter: &'a str,
+}
+
+impl<'a> OdrefreshContext<'a> {
+ pub fn new(
+ compilation_mode: CompilationMode,
+ system_dir_fd: i32,
+ output_dir_fd: i32,
+ staging_dir_fd: i32,
+ target_dir_name: &'a str,
+ zygote_arch: &'a str,
+ system_server_compiler_filter: &'a str,
+ ) -> Result<Self> {
+ if compilation_mode != CompilationMode::NORMAL_COMPILE {
+ // Conservatively check debuggability.
+ let debuggable =
+ system_properties::read_bool("ro.boot.microdroid.app_debuggable", false)
+ .unwrap_or(false);
+ if !debuggable {
+ bail!("Requested compilation mode only available in debuggable VMs");
+ }
+ }
+
+ if system_dir_fd < 0 || output_dir_fd < 0 || staging_dir_fd < 0 {
+ bail!("The remote FDs are expected to be non-negative");
+ }
+ if !matches!(zygote_arch, "zygote64" | "zygote64_32") {
+ bail!("Invalid zygote arch");
+ }
+ // Disallow any sort of path traversal
+ if target_dir_name.contains(path::MAIN_SEPARATOR) {
+ bail!("Invalid target directory {}", target_dir_name);
+ }
+
+ // We're not validating/allowlisting the compiler filter, and just assume the compiler will
+ // reject an invalid string. We need to accept "verify" filter anyway, and potential
+ // performance degradation by the attacker is not currently in scope. This also allows ART to
+ // specify new compiler filter and configure through system property without change to
+ // CompOS.
+
+ Ok(Self {
+ compilation_mode,
+ system_dir_fd,
+ output_dir_fd,
+ staging_dir_fd,
+ target_dir_name,
+ zygote_arch,
+ system_server_compiler_filter,
+ })
+ }
+}
+
+pub fn odrefresh<F>(
+ odrefresh_path: &Path,
+ context: OdrefreshContext,
+ authfs_service: Strong<dyn IAuthFsService>,
+ success_fn: F,
+) -> Result<ExitCode>
+where
+ F: FnOnce(PathBuf) -> Result<()>,
+{
+ // Mount authfs (via authfs_service). The authfs instance unmounts once the `authfs` variable
+ // is out of scope.
+ let authfs_config = AuthFsConfig {
+ port: FD_SERVER_PORT,
+ inputDirFdAnnotations: vec![InputDirFdAnnotation {
+ fd: context.system_dir_fd,
+ // 0 is the index of extra_apks in vm_config_extra_apk.json
+ manifestPath: "/mnt/extra-apk/0/assets/build_manifest.pb".to_string(),
+ prefix: "system/".to_string(),
+ }],
+ outputDirFdAnnotations: vec![
+ OutputDirFdAnnotation { fd: context.output_dir_fd },
+ OutputDirFdAnnotation { fd: context.staging_dir_fd },
+ ],
+ ..Default::default()
+ };
+ let authfs = authfs_service.mount(&authfs_config)?;
+ let mountpoint = PathBuf::from(authfs.getMountPoint()?);
+
+ // Make a copy of our environment as the basis of the one we will give odrefresh
+ let mut odrefresh_vars = EnvMap::from_current_env();
+
+ let mut android_root = mountpoint.clone();
+ android_root.push(context.system_dir_fd.to_string());
+ android_root.push("system");
+ odrefresh_vars.set("ANDROID_ROOT", path_to_str(&android_root)?);
+ debug!("ANDROID_ROOT={:?}", &android_root);
+
+ let art_apex_data = mountpoint.join(context.output_dir_fd.to_string());
+ odrefresh_vars.set("ART_APEX_DATA", path_to_str(&art_apex_data)?);
+ debug!("ART_APEX_DATA={:?}", &art_apex_data);
+
+ let staging_dir = mountpoint.join(context.staging_dir_fd.to_string());
+
+ set_classpaths(&mut odrefresh_vars, &android_root)?;
+
+ let mut args = vec![
+ "odrefresh".to_string(),
+ "--compilation-os-mode".to_string(),
+ format!("--zygote-arch={}", context.zygote_arch),
+ format!("--dalvik-cache={}", context.target_dir_name),
+ format!("--staging-dir={}", staging_dir.display()),
+ "--no-refresh".to_string(),
+ ];
+
+ if !context.system_server_compiler_filter.is_empty() {
+ args.push(format!(
+ "--system-server-compiler-filter={}",
+ context.system_server_compiler_filter
+ ));
+ }
+
+ let compile_flag = match context.compilation_mode {
+ CompilationMode::NORMAL_COMPILE => "--compile",
+ CompilationMode::TEST_COMPILE => "--force-compile",
+ other => bail!("Unknown compilation mode {:?}", other),
+ };
+ args.push(compile_flag.to_string());
+
+ debug!("Running odrefresh with args: {:?}", &args);
+ let jail = spawn_jailed_task(odrefresh_path, &args, &odrefresh_vars.into_env())
+ .context("Spawn odrefresh")?;
+ let exit_code = match jail.wait() {
+ Ok(_) => 0,
+ Err(minijail::Error::ReturnCode(exit_code)) => exit_code,
+ Err(e) => bail!("Unexpected minijail error: {}", e),
+ };
+
+ let exit_code = ExitCode::from_i32(exit_code.into())?;
+ info!("odrefresh exited with {:?}", exit_code);
+
+ if exit_code == ExitCode::CompilationSuccess {
+ let target_dir = art_apex_data.join(context.target_dir_name);
+ success_fn(target_dir)?;
+ }
+
+ Ok(exit_code)
+}
+
+fn path_to_str(path: &Path) -> Result<&str> {
+ path.to_str().ok_or_else(|| anyhow!("Bad path {:?}", path))
+}
+
+fn set_classpaths(odrefresh_vars: &mut EnvMap, android_root: &Path) -> Result<()> {
+ let export_lines = run_derive_classpath(android_root)?;
+ load_classpath_vars(odrefresh_vars, &export_lines)
+}
+
+fn run_derive_classpath(android_root: &Path) -> Result<String> {
+ let classpaths_root = android_root.join("etc/classpaths");
+
+ let mut bootclasspath_arg = OsString::new();
+ bootclasspath_arg.push("--bootclasspath-fragment=");
+ bootclasspath_arg.push(classpaths_root.join("bootclasspath.pb"));
+
+ let mut systemserverclasspath_arg = OsString::new();
+ systemserverclasspath_arg.push("--systemserverclasspath-fragment=");
+ systemserverclasspath_arg.push(classpaths_root.join("systemserverclasspath.pb"));
+
+ let result = Command::new("/apex/com.android.sdkext/bin/derive_classpath")
+ .arg(bootclasspath_arg)
+ .arg(systemserverclasspath_arg)
+ .arg("/proc/self/fd/1")
+ .output()
+ .context("Failed to run derive_classpath")?;
+
+ if !result.status.success() {
+ bail!("derive_classpath returned {}", result.status);
+ }
+
+ String::from_utf8(result.stdout).context("Converting derive_classpath output")
+}
+
+fn load_classpath_vars(odrefresh_vars: &mut EnvMap, export_lines: &str) -> Result<()> {
+ // Each line should be in the format "export <var name> <value>"
+ let pattern = Regex::new(r"^export ([^ ]+) ([^ ]+)$").context("Failed to construct Regex")?;
+ for line in export_lines.lines() {
+ if let Some(captures) = pattern.captures(line) {
+ let name = &captures[1];
+ let value = &captures[2];
+ odrefresh_vars.set(name, value);
+ } else {
+ warn!("Malformed line from derive_classpath: {}", line);
+ }
+ }
+
+ Ok(())
+}
+
+fn spawn_jailed_task(executable: &Path, args: &[String], env_vars: &[String]) -> Result<Minijail> {
+ // TODO(b/185175567): Run in a more restricted sandbox.
+ let jail = Minijail::new()?;
+ let keep_fds = [];
+ let command = minijail::Command::new_for_path(executable, &keep_fds, args, Some(env_vars))?;
+ let _pid = jail.run_command(command)?;
+ Ok(jail)
+}
+
+struct EnvMap(HashMap<String, String>);
+
+impl EnvMap {
+ fn from_current_env() -> Self {
+ Self(env::vars().collect())
+ }
+
+ fn set(&mut self, key: &str, value: &str) {
+ self.0.insert(key.to_owned(), value.to_owned());
+ }
+
+ fn into_env(self) -> Vec<String> {
+ // execve() expects an array of "k=v" strings, rather than a list of (k, v) pairs.
+ self.0.into_iter().map(|(k, v)| k + "=" + &v).collect()
+ }
+}
diff --git a/compos/src/compos_key.rs b/compos/src/compos_key.rs
new file mode 100644
index 0000000..eb6248f
--- /dev/null
+++ b/compos/src/compos_key.rs
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::{bail, Context, Result};
+use std::io::Write;
+use std::process::{Command, Stdio};
+
+const COMPOS_KEY_HELPER_PATH: &str = "/apex/com.android.compos/bin/compos_key_helper";
+
+pub fn get_public_key() -> Result<Vec<u8>> {
+ let child = Command::new(COMPOS_KEY_HELPER_PATH)
+ .arg("public_key")
+ .stdin(Stdio::null())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .spawn()?;
+ let result = child.wait_with_output()?;
+ if !result.status.success() {
+ bail!("Helper failed: {:?}", result);
+ }
+ Ok(result.stdout)
+}
+
+pub fn sign(data: &[u8]) -> Result<Vec<u8>> {
+ let mut child = Command::new(COMPOS_KEY_HELPER_PATH)
+ .arg("sign")
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .spawn()?;
+
+ // No output is written until the entire input is consumed, so this shouldn't deadlock.
+ let result =
+ child.stdin.take().unwrap().write_all(data).context("Failed to write data to be signed");
+ if result.is_ok() {
+ let result = child.wait_with_output()?;
+ if !result.status.success() {
+ bail!("Helper failed: {}", result.status);
+ }
+ return Ok(result.stdout);
+ }
+
+ // The child may have exited already, but if it hasn't then we need to make sure it does.
+ let _ignored = child.kill();
+
+ let result = result.with_context(|| match child.wait() {
+ Ok(exit_status) => format!("Child exited: {}", exit_status),
+ Err(wait_err) => format!("Wait for child failed: {:?}", wait_err),
+ });
+ Err(result.unwrap_err())
+}
diff --git a/compos/src/compos_key_main.rs b/compos/src/compos_key_main.rs
deleted file mode 100644
index 9d57e4d..0000000
--- a/compos/src/compos_key_main.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Run the CompOS key management service, either in the host using normal Binder or in the
-//! VM using RPC Binder.
-
-mod compos_key_service;
-mod compsvc;
-mod signer;
-
-use crate::compos_key_service::KeystoreNamespace;
-use anyhow::{bail, Context, Result};
-use binder::unstable_api::AsNative;
-use compos_aidl_interface::binder::{add_service, ProcessState};
-use log::{info, Level};
-
-const LOG_TAG: &str = "CompOsKeyService";
-const OUR_SERVICE_NAME: &str = "android.system.composkeyservice";
-const OUR_VSOCK_PORT: u32 = 3142;
-
-fn main() -> Result<()> {
- android_logger::init_once(
- android_logger::Config::default().with_tag(LOG_TAG).with_min_level(Level::Info),
- );
-
- let matches = clap::App::new("compos_key_main")
- .arg(clap::Arg::with_name("rpc_binder").long("rpc-binder"))
- .get_matches();
-
- let rpc_binder = matches.is_present("rpc_binder");
-
- let key_namespace =
- if rpc_binder { KeystoreNamespace::VmPayload } else { KeystoreNamespace::Odsign };
- let mut service = compos_key_service::new(key_namespace)?.as_binder();
-
- if rpc_binder {
- info!("Starting RPC service");
- // SAFETY: Service ownership is transferring to the server and won't be valid afterward.
- // Plus the binder objects are threadsafe.
- let retval = unsafe {
- binder_rpc_unstable_bindgen::RunRpcServer(
- service.as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder,
- OUR_VSOCK_PORT,
- )
- };
- if retval {
- info!("RPC server has shut down gracefully");
- } else {
- bail!("Premature termination of RPC server");
- }
- } else {
- info!("Starting binder service");
- add_service(OUR_SERVICE_NAME, service).context("Adding service failed")?;
- info!("It's alive!");
-
- ProcessState::join_thread_pool();
- }
-
- Ok(())
-}
diff --git a/compos/src/compos_key_service.rs b/compos/src/compos_key_service.rs
deleted file mode 100644
index 40d0f48..0000000
--- a/compos/src/compos_key_service.rs
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Provides a binder service for key generation & verification for CompOs. We assume we have
-//! access to Keystore in the VM, but not persistent storage; instead the host stores the key
-//! on our behalf via this service.
-
-use crate::compsvc;
-use crate::signer::Signer;
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- Algorithm::Algorithm, Digest::Digest, KeyParameter::KeyParameter,
- KeyParameterValue::KeyParameterValue, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
- SecurityLevel::SecurityLevel, Tag::Tag,
-};
-use android_system_keystore2::aidl::android::system::keystore2::{
- Domain::Domain, IKeystoreSecurityLevel::IKeystoreSecurityLevel,
- IKeystoreService::IKeystoreService, KeyDescriptor::KeyDescriptor,
-};
-use anyhow::{anyhow, Context, Result};
-use compos_aidl_interface::aidl::com::android::compos::{
- CompOsKeyData::CompOsKeyData,
- ICompOsKeyService::{BnCompOsKeyService, ICompOsKeyService},
- ICompService::ICompService,
-};
-use compos_aidl_interface::binder::{
- self, wait_for_interface, BinderFeatures, ExceptionCode, Interface, Status, Strong,
-};
-use log::warn;
-use ring::rand::{SecureRandom, SystemRandom};
-use ring::signature;
-use scopeguard::ScopeGuard;
-use std::ffi::CString;
-
-/// Keystore2 namespace IDs, used for access control to keys.
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum KeystoreNamespace {
- /// In the host we re-use the ID assigned to odsign. See system/sepolicy/private/keystore2_key_contexts.
- // TODO(alanstokes): Remove this.
- Odsign = 101,
- /// In a VM we can use the generic ID allocated for payloads. See microdroid's keystore2_key_contexts.
- VmPayload = 140,
-}
-
-/// Constructs a binder object that implements ICompOsKeyService. namespace is the Keystore2 namespace to
-/// use for the keys.
-pub fn new(namespace: KeystoreNamespace) -> Result<Strong<dyn ICompOsKeyService>> {
- let keystore_service = wait_for_interface::<dyn IKeystoreService>(KEYSTORE_SERVICE_NAME)
- .context("No Keystore service")?;
-
- let service = CompOsKeyService {
- namespace,
- random: SystemRandom::new(),
- security_level: keystore_service
- .getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT)
- .context("Getting SecurityLevel failed")?,
- };
-
- Ok(BnCompOsKeyService::new_binder(service, BinderFeatures::default()))
-}
-
-const KEYSTORE_SERVICE_NAME: &str = "android.system.keystore2.IKeystoreService/default";
-const PURPOSE_SIGN: KeyParameter =
- KeyParameter { tag: Tag::PURPOSE, value: KeyParameterValue::KeyPurpose(KeyPurpose::SIGN) };
-const ALGORITHM: KeyParameter =
- KeyParameter { tag: Tag::ALGORITHM, value: KeyParameterValue::Algorithm(Algorithm::RSA) };
-const PADDING: KeyParameter = KeyParameter {
- tag: Tag::PADDING,
- value: KeyParameterValue::PaddingMode(PaddingMode::RSA_PKCS1_1_5_SIGN),
-};
-const DIGEST: KeyParameter =
- KeyParameter { tag: Tag::DIGEST, value: KeyParameterValue::Digest(Digest::SHA_2_256) };
-const KEY_SIZE: KeyParameter =
- KeyParameter { tag: Tag::KEY_SIZE, value: KeyParameterValue::Integer(2048) };
-const EXPONENT: KeyParameter =
- KeyParameter { tag: Tag::RSA_PUBLIC_EXPONENT, value: KeyParameterValue::LongInteger(65537) };
-const NO_AUTH_REQUIRED: KeyParameter =
- KeyParameter { tag: Tag::NO_AUTH_REQUIRED, value: KeyParameterValue::BoolValue(true) };
-
-const BLOB_KEY_DESCRIPTOR: KeyDescriptor =
- KeyDescriptor { domain: Domain::BLOB, nspace: 0, alias: None, blob: None };
-
-#[derive(Clone)]
-struct CompOsKeyService {
- namespace: KeystoreNamespace,
- random: SystemRandom,
- security_level: Strong<dyn IKeystoreSecurityLevel>,
-}
-
-impl Interface for CompOsKeyService {}
-
-impl ICompOsKeyService for CompOsKeyService {
- fn generateSigningKey(&self) -> binder::Result<CompOsKeyData> {
- self.do_generate()
- .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
- }
-
- fn verifySigningKey(&self, key_blob: &[u8], public_key: &[u8]) -> binder::Result<bool> {
- Ok(if let Err(e) = self.do_verify(key_blob, public_key) {
- warn!("Signing key verification failed: {}", e.to_string());
- false
- } else {
- true
- })
- }
-
- fn sign(&self, key_blob: &[u8], data: &[u8]) -> binder::Result<Vec<u8>> {
- self.do_sign(key_blob, data)
- .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))
- }
-
- fn getCompService(&self, key_blob: &[u8]) -> binder::Result<Strong<dyn ICompService>> {
- let signer =
- Box::new(CompOsSigner { key_blob: key_blob.to_owned(), key_service: self.clone() });
- let debuggable = true;
- Ok(compsvc::new_binder(
- "/apex/com.android.art/bin/dex2oat64".to_owned(),
- debuggable,
- Some(signer),
- ))
- }
-}
-
-/// Constructs a new Binder error `Status` with the given `ExceptionCode` and message.
-fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
- Status::new_exception(exception, CString::new(message.as_ref()).ok().as_deref())
-}
-
-struct CompOsSigner {
- key_blob: Vec<u8>,
- key_service: CompOsKeyService,
-}
-
-impl Signer for CompOsSigner {
- fn sign(&self, data: &[u8]) -> Result<Vec<u8>> {
- self.key_service.do_sign(&self.key_blob, data)
- }
-}
-
-impl CompOsKeyService {
- fn do_generate(&self) -> Result<CompOsKeyData> {
- let key_descriptor = KeyDescriptor { nspace: self.namespace as i64, ..BLOB_KEY_DESCRIPTOR };
- let key_parameters =
- [PURPOSE_SIGN, ALGORITHM, PADDING, DIGEST, KEY_SIZE, EXPONENT, NO_AUTH_REQUIRED];
- let attestation_key = None;
- let flags = 0;
- let entropy = [];
-
- let key_metadata = self
- .security_level
- .generateKey(&key_descriptor, attestation_key, &key_parameters, flags, &entropy)
- .context("Generating key failed")?;
-
- if let (Some(certificate), Some(blob)) = (key_metadata.certificate, key_metadata.key.blob) {
- Ok(CompOsKeyData { certificate, keyBlob: blob })
- } else {
- Err(anyhow!("Missing cert or blob"))
- }
- }
-
- fn do_verify(&self, key_blob: &[u8], public_key: &[u8]) -> Result<()> {
- let mut data = [0u8; 32];
- self.random.fill(&mut data).context("No random data")?;
-
- let signature = self.do_sign(key_blob, &data)?;
-
- let public_key =
- signature::UnparsedPublicKey::new(&signature::RSA_PKCS1_2048_8192_SHA256, public_key);
- public_key.verify(&data, &signature).context("Signature verification failed")?;
-
- Ok(())
- }
-
- fn do_sign(&self, key_blob: &[u8], data: &[u8]) -> Result<Vec<u8>> {
- let key_descriptor = KeyDescriptor {
- nspace: self.namespace as i64,
- blob: Some(key_blob.to_vec()),
- ..BLOB_KEY_DESCRIPTOR
- };
- let operation_parameters = [PURPOSE_SIGN, ALGORITHM, PADDING, DIGEST];
- let forced = false;
-
- let response = self
- .security_level
- .createOperation(&key_descriptor, &operation_parameters, forced)
- .context("Creating key failed")?;
- let operation = scopeguard::guard(
- response.iOperation.ok_or_else(|| anyhow!("No operation created"))?,
- |op| op.abort().unwrap_or_default(),
- );
-
- if response.operationChallenge.is_some() {
- return Err(anyhow!("Key requires user authorization"));
- }
-
- let signature = operation.finish(Some(&data), None).context("Signing failed")?;
- // Operation has finished, we're no longer responsible for aborting it
- ScopeGuard::into_inner(operation);
-
- signature.ok_or_else(|| anyhow!("No signature returned"))
- }
-}
diff --git a/compos/src/compsvc.rs b/compos/src/compsvc.rs
index 24e52f5..3a794ee 100644
--- a/compos/src/compsvc.rs
+++ b/compos/src/compsvc.rs
@@ -14,121 +14,94 @@
* limitations under the License.
*/
-//! compsvc is a service to run computational tasks in a PVM upon request. It is able to set up
-//! file descriptors backed by fd_server and pass the file descriptors to the actual tasks for
-//! read/write. The service also attempts to sandbox the execution so that one task cannot leak or
-//! impact future tasks.
-//!
-//! The current architecture / process hierarchy looks like:
-//! - compsvc (handle requests)
-//! - compsvc_worker (for environment setup)
-//! - authfs (fd translation)
-//! - actual task
+//! compsvc is a service to run compilation tasks in a PVM upon request. It is able to set up
+//! file descriptors backed by authfs (via authfs_service) and pass the file descriptors to the
+//! actual compiler.
-use anyhow::Result;
-use log::error;
-use minijail::{self, Minijail};
-use std::path::PathBuf;
+use anyhow::{bail, Context, Result};
+use std::default::Default;
+use std::fs::read_dir;
+use std::path::{Path, PathBuf};
-use crate::signer::Signer;
-use compos_aidl_interface::aidl::com::android::compos::ICompService::{
- BnCompService, ICompService,
+use crate::artifact_signer::ArtifactSigner;
+use crate::compilation::{odrefresh, OdrefreshContext};
+use crate::compos_key;
+use compos_aidl_interface::aidl::com::android::compos::ICompOsService::{
+ BnCompOsService, CompilationMode::CompilationMode, ICompOsService,
};
-use compos_aidl_interface::aidl::com::android::compos::Metadata::Metadata;
-use compos_aidl_interface::binder::{
- BinderFeatures, Interface, Result as BinderResult, Status, StatusCode, Strong,
-};
+use compos_aidl_interface::binder::{BinderFeatures, Interface, Result as BinderResult, Strong};
+use compos_common::binder::to_binder_result;
+use compos_common::odrefresh::ODREFRESH_PATH;
-const WORKER_BIN: &str = "/apex/com.android.compos/bin/compsvc_worker";
+const AUTHFS_SERVICE_NAME: &str = "authfs_service";
-// TODO: Replace with a valid directory setup in the VM.
-const AUTHFS_MOUNTPOINT: &str = "/data/local/tmp";
-
-/// Constructs a binder object that implements ICompService. task_bin is the path to the binary that will
-/// be run when execute() is called. If debuggable is true then stdout/stderr from the binary will be
-/// available for debugging.
-pub fn new_binder(
- task_bin: String,
- debuggable: bool,
- signer: Option<Box<dyn Signer>>,
-) -> Strong<dyn ICompService> {
- let service = CompService {
- worker_bin: PathBuf::from(WORKER_BIN.to_owned()),
- task_bin,
- debuggable,
- signer,
- };
- BnCompService::new_binder(service, BinderFeatures::default())
+/// Constructs a binder object that implements ICompOsService.
+pub fn new_binder() -> Result<Strong<dyn ICompOsService>> {
+ let service = CompOsService { odrefresh_path: PathBuf::from(ODREFRESH_PATH) };
+ Ok(BnCompOsService::new_binder(service, BinderFeatures::default()))
}
-struct CompService {
- task_bin: String,
- worker_bin: PathBuf,
- debuggable: bool,
- #[allow(dead_code)] // TODO: Make use of this
- signer: Option<Box<dyn Signer>>,
+struct CompOsService {
+ odrefresh_path: PathBuf,
}
-impl CompService {
- fn run_worker_in_jail_and_wait(&self, args: &[String]) -> Result<(), minijail::Error> {
- let mut jail = Minijail::new()?;
+impl Interface for CompOsService {}
- // TODO(b/185175567): New user and uid namespace when supported. Run as nobody.
- // New mount namespace to isolate the FUSE mount.
- jail.namespace_vfs();
+impl ICompOsService for CompOsService {
+ fn odrefresh(
+ &self,
+ compilation_mode: CompilationMode,
+ system_dir_fd: i32,
+ output_dir_fd: i32,
+ staging_dir_fd: i32,
+ target_dir_name: &str,
+ zygote_arch: &str,
+ system_server_compiler_filter: &str,
+ ) -> BinderResult<i8> {
+ let context = to_binder_result(OdrefreshContext::new(
+ compilation_mode,
+ system_dir_fd,
+ output_dir_fd,
+ staging_dir_fd,
+ target_dir_name,
+ zygote_arch,
+ system_server_compiler_filter,
+ ))?;
- let inheritable_fds = if self.debuggable {
- vec![1, 2] // inherit/redirect stdout/stderr for debugging
+ let authfs_service = authfs_aidl_interface::binder::get_interface(AUTHFS_SERVICE_NAME)?;
+ let exit_code = to_binder_result(
+ odrefresh(&self.odrefresh_path, context, authfs_service, |output_dir| {
+ // authfs only shows us the files we created, so it's ok to just sign everything
+ // under the output directory.
+ let mut artifact_signer = ArtifactSigner::new(&output_dir);
+ add_artifacts(&output_dir, &mut artifact_signer)?;
+
+ artifact_signer.write_info_and_signature(&output_dir.join("compos.info"))
+ })
+ .context("odrefresh failed"),
+ )?;
+ Ok(exit_code as i8)
+ }
+
+ fn getPublicKey(&self) -> BinderResult<Vec<u8>> {
+ to_binder_result(compos_key::get_public_key())
+ }
+}
+
+fn add_artifacts(target_dir: &Path, artifact_signer: &mut ArtifactSigner) -> Result<()> {
+ for entry in
+ read_dir(&target_dir).with_context(|| format!("Traversing {}", target_dir.display()))?
+ {
+ let entry = entry?;
+ let file_type = entry.file_type()?;
+ if file_type.is_dir() {
+ add_artifacts(&entry.path(), artifact_signer)?;
+ } else if file_type.is_file() {
+ artifact_signer.add_artifact(&entry.path())?;
} else {
- vec![]
- };
- let _pid = jail.run(&self.worker_bin, &inheritable_fds, &args)?;
- jail.wait()
- }
-
- fn build_worker_args(&self, args: &[String], metadata: &Metadata) -> Vec<String> {
- let mut worker_args = vec![
- WORKER_BIN.to_string(),
- "--authfs-root".to_string(),
- AUTHFS_MOUNTPOINT.to_string(),
- ];
- for annotation in &metadata.input_fd_annotations {
- worker_args.push("--in-fd".to_string());
- worker_args.push(format!("{}:{}", annotation.fd, annotation.file_size));
- }
- for annotation in &metadata.output_fd_annotations {
- worker_args.push("--out-fd".to_string());
- worker_args.push(annotation.fd.to_string());
- }
- if self.debuggable {
- worker_args.push("--debug".to_string());
- }
- worker_args.push("--".to_string());
-
- // Do not accept arbitrary code execution. We want to execute some specific task of this
- // service. Use the associated executable.
- worker_args.push(self.task_bin.clone());
- worker_args.extend_from_slice(&args[1..]);
- worker_args
- }
-}
-
-impl Interface for CompService {}
-
-impl ICompService for CompService {
- fn execute(&self, args: &[String], metadata: &Metadata) -> BinderResult<i8> {
- let worker_args = self.build_worker_args(args, metadata);
-
- match self.run_worker_in_jail_and_wait(&worker_args) {
- Ok(_) => Ok(0), // TODO(b/161471326): Sign the output on succeed.
- Err(minijail::Error::ReturnCode(exit_code)) => {
- error!("Task failed with exit code {}", exit_code);
- Err(Status::from(StatusCode::FAILED_TRANSACTION))
- }
- Err(e) => {
- error!("Unexpected error: {}", e);
- Err(Status::from(StatusCode::UNKNOWN_ERROR))
- }
+ // authfs shouldn't create anything else, but just in case
+ bail!("Unexpected file type in artifacts: {:?}", entry);
}
}
+ Ok(())
}
diff --git a/compos/src/compsvc_main.rs b/compos/src/compsvc_main.rs
index 9f12132..4ecbfe9 100644
--- a/compos/src/compsvc_main.rs
+++ b/compos/src/compsvc_main.rs
@@ -14,75 +14,77 @@
* limitations under the License.
*/
-//! A tool to start a standalone compsvc server, either in the host using Binder or in a VM using
-//! RPC binder over vsock.
-//!
-//! Example:
-//! $ compsvc /system/bin/sleep
+//! A tool to start a standalone compsvc server that serves over RPC binder.
-mod common;
+mod artifact_signer;
+mod compilation;
+mod compos_key;
mod compsvc;
-mod signer;
+mod fsverity;
-use crate::common::{SERVICE_NAME, VSOCK_PORT};
-use anyhow::{bail, Context, Result};
-use binder::unstable_api::AsNative;
-use compos_aidl_interface::binder::{add_service, ProcessState};
-use log::debug;
+use android_system_virtualmachineservice::{
+ aidl::android::system::virtualmachineservice::IVirtualMachineService::{
+ IVirtualMachineService, VM_BINDER_SERVICE_PORT,
+ },
+ binder::Strong,
+};
+use anyhow::{anyhow, bail, Context, Result};
+use binder::{
+ unstable_api::{new_spibinder, AIBinder},
+ FromIBinder,
+};
+use binder_common::rpc_server::run_rpc_server;
+use compos_common::COMPOS_VSOCK_PORT;
+use log::{debug, error};
+use std::panic;
-struct Config {
- task_bin: String,
- rpc_binder: bool,
- debuggable: bool,
+/// The CID representing the host VM
+const VMADDR_CID_HOST: u32 = 2;
+
+fn main() {
+ if let Err(e) = try_main() {
+ error!("failed with {:?}", e);
+ std::process::exit(1);
+ }
}
-fn parse_args() -> Result<Config> {
- #[rustfmt::skip]
- let matches = clap::App::new("compsvc")
- .arg(clap::Arg::with_name("debug")
- .long("debug"))
- .arg(clap::Arg::with_name("task_bin")
- .required(true))
- .arg(clap::Arg::with_name("rpc_binder")
- .long("rpc-binder"))
- .get_matches();
-
- Ok(Config {
- task_bin: matches.value_of("task_bin").unwrap().to_string(),
- rpc_binder: matches.is_present("rpc_binder"),
- debuggable: matches.is_present("debug"),
- })
-}
-
-fn main() -> Result<()> {
+fn try_main() -> Result<()> {
android_logger::init_once(
android_logger::Config::default().with_tag("compsvc").with_min_level(log::Level::Debug),
);
+ // Redirect panic messages to logcat.
+ panic::set_hook(Box::new(|panic_info| {
+ error!("{}", panic_info);
+ }));
- let config = parse_args()?;
- let mut service = compsvc::new_binder(config.task_bin, config.debuggable, None).as_binder();
- if config.rpc_binder {
- debug!("compsvc is starting as a rpc service.");
- // SAFETY: Service ownership is transferring to the server and won't be valid afterward.
- // Plus the binder objects are threadsafe.
- let retval = unsafe {
- binder_rpc_unstable_bindgen::RunRpcServer(
- service.as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder,
- VSOCK_PORT,
- )
- };
- if retval {
- debug!("RPC server has shut down gracefully");
- Ok(())
- } else {
- bail!("Premature termination of RPC server");
+ let service = compsvc::new_binder()?.as_binder();
+ let vm_service = get_vm_service()?;
+
+ debug!("compsvc is starting as a rpc service.");
+
+ let retval = run_rpc_server(service, COMPOS_VSOCK_PORT, || {
+ if let Err(e) = vm_service.notifyPayloadReady() {
+ error!("Unable to notify ready: {}", e);
}
+ });
+ if retval {
+ debug!("RPC server has shut down gracefully");
+ Ok(())
} else {
- ProcessState::start_thread_pool();
- debug!("compsvc is starting as a local service.");
- add_service(SERVICE_NAME, service)
- .with_context(|| format!("Failed to register service {}", SERVICE_NAME))?;
- ProcessState::join_thread_pool();
- bail!("Unexpected exit after join_thread_pool")
+ bail!("Premature termination of RPC server");
}
}
+
+fn get_vm_service() -> Result<Strong<dyn IVirtualMachineService>> {
+ // SAFETY: AIBinder returned by RpcClient has correct reference count, and the ownership
+ // can be safely taken by new_spibinder.
+ let ibinder = unsafe {
+ new_spibinder(binder_rpc_unstable_bindgen::RpcClient(
+ VMADDR_CID_HOST,
+ VM_BINDER_SERVICE_PORT as u32,
+ ) as *mut AIBinder)
+ }
+ .ok_or_else(|| anyhow!("Failed to connect to IVirtualMachineService"))?;
+
+ FromIBinder::try_from(ibinder).context("Connecting to IVirtualMachineService")
+}
diff --git a/compos/src/compsvc_worker.rs b/compos/src/compsvc_worker.rs
deleted file mode 100644
index f33659e..0000000
--- a/compos/src/compsvc_worker.rs
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! This executable works as a child/worker for the main compsvc service. This worker is mainly
-//! responsible for setting up the execution environment, e.g. to create file descriptors for
-//! remote file access via an authfs mount.
-
-mod authfs;
-
-use anyhow::{bail, Result};
-use minijail::Minijail;
-use std::fs::File;
-use std::os::unix::io::AsRawFd;
-use std::path::Path;
-use std::process::exit;
-
-use crate::authfs::{AuthFs, InFdAnnotation, OutFdAnnotation, PseudoRawFd};
-
-fn open_authfs_files_for_mapping(
- authfs: &AuthFs,
- config: &Config,
-) -> Result<Vec<(File, PseudoRawFd)>> {
- let mut fd_mapping = Vec::with_capacity(config.in_fds.len() + config.out_fds.len());
-
- let results: Result<Vec<_>> =
- config.in_fds.iter().map(|conf| Ok((authfs.open_file(conf.fd, false)?, conf.fd))).collect();
- fd_mapping.append(&mut results?);
-
- let results: Result<Vec<_>> =
- config.out_fds.iter().map(|conf| Ok((authfs.open_file(conf.fd, true)?, conf.fd))).collect();
- fd_mapping.append(&mut results?);
-
- Ok(fd_mapping)
-}
-
-fn spawn_jailed_task(config: &Config, fd_mapping: Vec<(File, PseudoRawFd)>) -> Result<Minijail> {
- // TODO(b/185175567): Run in a more restricted sandbox.
- let jail = Minijail::new()?;
- let mut preserve_fds: Vec<_> = fd_mapping.iter().map(|(f, id)| (f.as_raw_fd(), *id)).collect();
- if config.debuggable {
- // inherit/redirect stdout/stderr for debugging
- preserve_fds.push((1, 1));
- preserve_fds.push((2, 2));
- }
- let _pid =
- jail.run_remap(&Path::new(&config.args[0]), preserve_fds.as_slice(), &config.args)?;
- Ok(jail)
-}
-
-struct Config {
- authfs_root: String,
- in_fds: Vec<InFdAnnotation>,
- out_fds: Vec<OutFdAnnotation>,
- args: Vec<String>,
- debuggable: bool,
-}
-
-fn parse_args() -> Result<Config> {
- #[rustfmt::skip]
- let matches = clap::App::new("compsvc_worker")
- .arg(clap::Arg::with_name("authfs-root")
- .long("authfs-root")
- .value_name("DIR")
- .required(true)
- .takes_value(true))
- .arg(clap::Arg::with_name("in-fd")
- .long("in-fd")
- .multiple(true)
- .takes_value(true)
- .requires("authfs-root"))
- .arg(clap::Arg::with_name("out-fd")
- .long("out-fd")
- .multiple(true)
- .takes_value(true)
- .requires("authfs-root"))
- .arg(clap::Arg::with_name("debug")
- .long("debug"))
- .arg(clap::Arg::with_name("args")
- .last(true)
- .required(true)
- .multiple(true))
- .get_matches();
-
- // Safe to unwrap since the arg is required by the clap rule
- let authfs_root = matches.value_of("authfs-root").unwrap().to_string();
-
- let results: Result<Vec<_>> = matches
- .values_of("in-fd")
- .unwrap_or_default()
- .into_iter()
- .map(|arg| {
- if let Some(index) = arg.find(':') {
- let (fd, size) = arg.split_at(index);
- Ok(InFdAnnotation { fd: fd.parse()?, file_size: size[1..].parse()? })
- } else {
- bail!("Invalid argument: {}", arg);
- }
- })
- .collect();
- let in_fds = results?;
-
- let results: Result<Vec<_>> = matches
- .values_of("out-fd")
- .unwrap_or_default()
- .into_iter()
- .map(|arg| Ok(OutFdAnnotation { fd: arg.parse()? }))
- .collect();
- let out_fds = results?;
-
- let args: Vec<_> = matches.values_of("args").unwrap().map(|s| s.to_string()).collect();
- let debuggable = matches.is_present("debug");
-
- Ok(Config { authfs_root, in_fds, out_fds, args, debuggable })
-}
-
-fn main() -> Result<()> {
- let log_level =
- if env!("TARGET_BUILD_VARIANT") == "eng" { log::Level::Trace } else { log::Level::Info };
- android_logger::init_once(
- android_logger::Config::default().with_tag("compsvc_worker").with_min_level(log_level),
- );
-
- let config = parse_args()?;
-
- let authfs = AuthFs::mount_and_wait(
- &config.authfs_root,
- &config.in_fds,
- &config.out_fds,
- config.debuggable,
- )?;
- let fd_mapping = open_authfs_files_for_mapping(&authfs, &config)?;
-
- let jail = spawn_jailed_task(&config, fd_mapping)?;
- let jail_result = jail.wait();
-
- // Be explicit about the lifetime, which should last at least until the task is finished.
- drop(authfs);
-
- match jail_result {
- Ok(_) => Ok(()),
- Err(minijail::Error::ReturnCode(exit_code)) => {
- exit(exit_code as i32);
- }
- Err(e) => {
- bail!("Unexpected minijail error: {}", e);
- }
- }
-}
diff --git a/compos/src/fsverity.rs b/compos/src/fsverity.rs
new file mode 100644
index 0000000..f5df5f7
--- /dev/null
+++ b/compos/src/fsverity.rs
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::{bail, Result};
+use libc::getxattr;
+use std::ffi::CString;
+use std::io;
+use std::os::unix::io::RawFd;
+
+const SHA256_HASH_SIZE: usize = 32;
+
+/// Bytes of SHA256 digest
+pub type Sha256Digest = [u8; SHA256_HASH_SIZE];
+
+/// Returns the fs-verity measurement/digest. Currently only SHA256 is supported.
+pub fn measure(fd: RawFd) -> Result<Sha256Digest> {
+ // TODO(b/196635431): Unfortunately, the FUSE API doesn't allow authfs to implement the standard
+ // fs-verity ioctls. Until the kernel allows, use the alternative xattr that authfs provides.
+ let path = CString::new(format!("/proc/self/fd/{}", fd).as_str()).unwrap();
+ let name = CString::new("authfs.fsverity.digest").unwrap();
+ let mut buf = [0u8; SHA256_HASH_SIZE];
+ // SAFETY: getxattr should not write beyond the given buffer size.
+ let size = unsafe {
+ getxattr(path.as_ptr(), name.as_ptr(), buf.as_mut_ptr() as *mut libc::c_void, buf.len())
+ };
+ if size < 0 {
+ bail!("Failed to getxattr: {}", io::Error::last_os_error());
+ } else if size != SHA256_HASH_SIZE as isize {
+ bail!("Unexpected hash size: {}", size);
+ } else {
+ Ok(buf)
+ }
+}
diff --git a/compos/src/pvm_exec.rs b/compos/src/pvm_exec.rs
deleted file mode 100644
index 03fbf72..0000000
--- a/compos/src/pvm_exec.rs
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! pvm_exec is a proxy/wrapper command to run a command remotely. It does not transport the
-//! program and just pass the command line arguments to compsvc to execute. The most important task
-//! for this program is to run a `fd_server` that serves remote file read/write requests.
-//!
-//! Example:
-//! $ adb shell exec 3</dev/zero 4<>/dev/null pvm_exec --in-fd 3 --out-fd 4 -- sleep 10
-//!
-//! Note the immediate argument right after "--" (e.g. "sleep" in the example above) is not really
-//! used. It is only for ergonomics.
-
-use anyhow::{bail, Context, Result};
-use binder::unstable_api::{new_spibinder, AIBinder};
-use binder::FromIBinder;
-use log::{error, warn};
-use minijail::Minijail;
-use nix::fcntl::{fcntl, FcntlArg::F_GETFD};
-use nix::sys::stat::fstat;
-use std::os::unix::io::RawFd;
-use std::path::Path;
-use std::process::exit;
-
-use compos_aidl_interface::aidl::com::android::compos::{
- ICompService::ICompService, InputFdAnnotation::InputFdAnnotation, Metadata::Metadata,
- OutputFdAnnotation::OutputFdAnnotation,
-};
-use compos_aidl_interface::binder::Strong;
-
-mod common;
-use common::{SERVICE_NAME, VSOCK_PORT};
-
-const FD_SERVER_BIN: &str = "/apex/com.android.virt/bin/fd_server";
-
-fn get_local_service() -> Result<Strong<dyn ICompService>> {
- compos_aidl_interface::binder::get_interface(SERVICE_NAME).context("get local binder")
-}
-
-fn get_rpc_binder(cid: u32) -> Result<Strong<dyn ICompService>> {
- // SAFETY: AIBinder returned by RpcClient has correct reference count, and the ownership can be
- // safely taken by new_spibinder.
- let ibinder = unsafe {
- new_spibinder(binder_rpc_unstable_bindgen::RpcClient(cid, VSOCK_PORT) as *mut AIBinder)
- };
- if let Some(ibinder) = ibinder {
- <dyn ICompService>::try_from(ibinder).context("Cannot connect to RPC service")
- } else {
- bail!("Invalid raw AIBinder")
- }
-}
-
-fn spawn_fd_server(metadata: &Metadata, debuggable: bool) -> Result<Minijail> {
- let mut inheritable_fds = if debuggable {
- vec![1, 2] // inherit/redirect stdout/stderr for debugging
- } else {
- vec![]
- };
-
- let mut args = vec![FD_SERVER_BIN.to_string(), "--rpc-binder".to_string()];
- for metadata in &metadata.input_fd_annotations {
- args.push("--ro-fds".to_string());
- args.push(metadata.fd.to_string());
- inheritable_fds.push(metadata.fd);
- }
- for metadata in &metadata.output_fd_annotations {
- args.push("--rw-fds".to_string());
- args.push(metadata.fd.to_string());
- inheritable_fds.push(metadata.fd);
- }
-
- let jail = Minijail::new()?;
- let _pid = jail.run(Path::new(FD_SERVER_BIN), &inheritable_fds, &args)?;
- Ok(jail)
-}
-
-fn is_fd_valid(fd: RawFd) -> Result<bool> {
- let retval = fcntl(fd, F_GETFD)?;
- Ok(retval >= 0)
-}
-
-fn parse_arg_fd(arg: &str) -> Result<RawFd> {
- let fd = arg.parse::<RawFd>()?;
- if !is_fd_valid(fd)? {
- bail!("Bad FD: {}", fd);
- }
- Ok(fd)
-}
-
-struct Config {
- args: Vec<String>,
- metadata: Metadata,
- cid: Option<u32>,
- debuggable: bool,
-}
-
-fn parse_args() -> Result<Config> {
- #[rustfmt::skip]
- let matches = clap::App::new("pvm_exec")
- .arg(clap::Arg::with_name("in-fd")
- .long("in-fd")
- .takes_value(true)
- .multiple(true)
- .use_delimiter(true))
- .arg(clap::Arg::with_name("out-fd")
- .long("out-fd")
- .takes_value(true)
- .multiple(true)
- .use_delimiter(true))
- .arg(clap::Arg::with_name("cid")
- .takes_value(true)
- .long("cid"))
- .arg(clap::Arg::with_name("debug")
- .long("debug"))
- .arg(clap::Arg::with_name("args")
- .last(true)
- .required(true)
- .multiple(true))
- .get_matches();
-
- let results: Result<Vec<_>> = matches
- .values_of("in-fd")
- .unwrap_or_default()
- .map(|arg| {
- let fd = parse_arg_fd(arg)?;
- let file_size = fstat(fd)?.st_size;
- Ok(InputFdAnnotation { fd, file_size })
- })
- .collect();
- let input_fd_annotations = results?;
-
- let results: Result<Vec<_>> = matches
- .values_of("out-fd")
- .unwrap_or_default()
- .map(|arg| {
- let fd = parse_arg_fd(arg)?;
- Ok(OutputFdAnnotation { fd })
- })
- .collect();
- let output_fd_annotations = results?;
-
- let args: Vec<_> = matches.values_of("args").unwrap().map(|s| s.to_string()).collect();
- let cid =
- if let Some(arg) = matches.value_of("cid") { Some(arg.parse::<u32>()?) } else { None };
- let debuggable = matches.is_present("debug");
-
- Ok(Config {
- args,
- metadata: Metadata { input_fd_annotations, output_fd_annotations },
- cid,
- debuggable,
- })
-}
-
-fn main() -> Result<()> {
- // 1. Parse the command line arguments for collect execution data.
- let Config { args, metadata, cid, debuggable } = parse_args()?;
-
- // 2. Spawn and configure a fd_server to serve remote read/write requests.
- let fd_server_jail = spawn_fd_server(&metadata, debuggable)?;
- let fd_server_lifetime = scopeguard::guard(fd_server_jail, |fd_server_jail| {
- if let Err(e) = fd_server_jail.kill() {
- if !matches!(e, minijail::Error::Killed(_)) {
- warn!("Failed to kill fd_server: {}", e);
- }
- }
- });
-
- // 3. Send the command line args to the remote to execute.
- let service = if let Some(cid) = cid { get_rpc_binder(cid) } else { get_local_service() }?;
- let exit_code = service.execute(&args, &metadata).context("Binder call failed")?;
-
- // Be explicit about the lifetime, which should last at least until the task is finished.
- drop(fd_server_lifetime);
-
- if exit_code > 0 {
- error!("remote execution failed with exit code {}", exit_code);
- exit(exit_code as i32);
- }
- Ok(())
-}
diff --git a/compos/tests/Android.bp b/compos/tests/Android.bp
index 7e00d7b..c178ddd 100644
--- a/compos/tests/Android.bp
+++ b/compos/tests/Android.bp
@@ -13,8 +13,7 @@
static_libs: [
"VirtualizationTestHelper",
],
- test_suites: ["general-tests"],
- data: [
- ":CompOSPayloadApp.signing",
+ test_suites: [
+ "general-tests",
],
}
diff --git a/compos/tests/AndroidTest.xml b/compos/tests/AndroidTest.xml
index 61b6d47..2a84291 100644
--- a/compos/tests/AndroidTest.xml
+++ b/compos/tests/AndroidTest.xml
@@ -18,9 +18,10 @@
<option name="force-root" value="true" />
</target_preparer>
- <!-- virtualizationservice doesn't have access to shell_data_file. Instead of giving it
- a test-only permission, run it without selinux -->
- <target_preparer class="com.android.tradefed.targetprep.DisableSELinuxTargetPreparer"/>
+ <target_preparer class="com.android.tradefed.targetprep.DeviceSetup">
+ <!-- Run in single thread to avoid nondeterminism. -->
+ <option name="set-property" key="dalvik.vm.boot-dex2oat-threads" value="1" />
+ </target_preparer>
<test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
<option name="jar" value="ComposHostTestCases.jar" />
diff --git a/compos/tests/java/android/compos/test/ComposKeyTestCase.java b/compos/tests/java/android/compos/test/ComposKeyTestCase.java
deleted file mode 100644
index 654dc0b..0000000
--- a/compos/tests/java/android/compos/test/ComposKeyTestCase.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.compos.test;
-
-import static com.google.common.truth.Truth.assertThat;
-
-import android.platform.test.annotations.RootPermissionTest;
-import android.virt.test.CommandRunner;
-import android.virt.test.VirtualizationTestCaseBase;
-
-import com.android.compatibility.common.util.PollingCheck;
-import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
-import com.android.tradefed.util.CommandResult;
-import com.android.tradefed.util.CommandStatus;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-
-@RootPermissionTest
-@RunWith(DeviceJUnit4ClassRunner.class)
-public final class ComposKeyTestCase extends VirtualizationTestCaseBase {
-
- /** Wait time for service to be ready on boot */
- private static final int READY_LATENCY_MS = 10 * 1000; // 10 seconds
-
- // Path to compos_key_cmd tool
- private static final String COMPOS_KEY_CMD_BIN = "/apex/com.android.compos/bin/compos_key_cmd";
-
- private String mCid;
-
- @Before
- public void setUp() throws Exception {
- testIfDeviceIsCapable(getDevice());
-
- prepareVirtualizationTestSetup(getDevice());
- }
-
- @After
- public void tearDown() throws Exception {
- if (mCid != null) {
- shutdownMicrodroid(getDevice(), mCid);
- mCid = null;
- }
-
- cleanUpVirtualizationTestSetup(getDevice());
- }
-
- @Test
- public void testKeyService() throws Exception {
- startVm();
- waitForServiceRunning();
-
- CommandRunner android = new CommandRunner(getDevice());
- CommandResult result;
-
- // Generate keys - should succeed
- android.run(
- COMPOS_KEY_CMD_BIN,
- "--cid " + mCid,
- "generate",
- TEST_ROOT + "test_key.blob",
- TEST_ROOT + "test_key.pubkey");
-
- // Verify them - should also succeed, since we just generated them
- android.run(
- COMPOS_KEY_CMD_BIN,
- "--cid " + mCid,
- "verify",
- TEST_ROOT + "test_key.blob",
- TEST_ROOT + "test_key.pubkey");
-
- // Swap public key & blob - should fail to verify
- result =
- android.runForResult(
- COMPOS_KEY_CMD_BIN,
- "--cid " + mCid,
- "verify",
- TEST_ROOT + "test_key.pubkey",
- TEST_ROOT + "test_key.blob");
- assertThat(result.getStatus()).isEqualTo(CommandStatus.FAILED);
-
- // Generate another set of keys - should succeed
- android.run(
- COMPOS_KEY_CMD_BIN,
- "--cid " + mCid,
- "generate",
- TEST_ROOT + "test_key2.blob",
- TEST_ROOT + "test_key2.pubkey");
-
- // They should also verify ok
- android.run(
- COMPOS_KEY_CMD_BIN,
- "--cid " + mCid,
- "verify",
- TEST_ROOT + "test_key2.blob",
- TEST_ROOT + "test_key2.pubkey");
-
- // Mismatched key blob & public key should fail to verify
- result =
- android.runForResult(
- COMPOS_KEY_CMD_BIN,
- "--cid " + mCid,
- "verify",
- TEST_ROOT + "test_key.pubkey",
- TEST_ROOT + "test_key2.blob");
- assertThat(result.getStatus()).isEqualTo(CommandStatus.FAILED);
- }
-
- private void startVm() throws Exception {
- final String apkName = "CompOSPayloadApp.apk";
- final String packageName = "com.android.compos.payload";
- mCid =
- startMicrodroid(
- getDevice(),
- getBuild(),
- apkName,
- packageName,
- "assets/key_service_vm_config.json",
- /* debug */ true);
- adbConnectToMicrodroid(getDevice(), mCid);
- }
-
- private void waitForServiceRunning() {
- try {
- PollingCheck.waitFor(READY_LATENCY_MS, this::isServiceRunning);
- } catch (Exception e) {
- throw new RuntimeException("Service unavailable", e);
- }
- }
-
- private boolean isServiceRunning() {
- return tryRunOnMicrodroid("pidof compos_key_main") != null;
- }
-}
diff --git a/compos/tests/java/android/compos/test/ComposTestCase.java b/compos/tests/java/android/compos/test/ComposTestCase.java
index f280c99..6773eb7 100644
--- a/compos/tests/java/android/compos/test/ComposTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposTestCase.java
@@ -16,102 +16,194 @@
package android.compos.test;
+import static com.android.tradefed.testtype.DeviceJUnit4ClassRunner.TestLogData;
+
import static com.google.common.truth.Truth.assertThat;
import android.platform.test.annotations.RootPermissionTest;
import android.virt.test.CommandRunner;
import android.virt.test.VirtualizationTestCaseBase;
-import com.android.compatibility.common.util.PollingCheck;
+import com.android.tradefed.log.LogUtil.CLog;
import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
import com.android.tradefed.util.CommandResult;
import org.junit.After;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.TestName;
import org.junit.runner.RunWith;
@RootPermissionTest
@RunWith(DeviceJUnit4ClassRunner.class)
public final class ComposTestCase extends VirtualizationTestCaseBase {
- /** Path to odrefresh on Microdroid */
+ // Binaries used in test. (These paths are valid both in host and Microdroid.)
private static final String ODREFRESH_BIN = "/apex/com.android.art/bin/odrefresh";
+ private static final String COMPOSD_CMD_BIN = "/apex/com.android.compos/bin/composd_cmd";
+ private static final String COMPOS_VERIFY_BIN =
+ "/apex/com.android.compos/bin/compos_verify";
+
+ private static final String COMPOS_APEXDATA_DIR = "/data/misc/apexdata/com.android.compos";
+
+ /** Output directory of odrefresh */
+ private static final String TEST_ARTIFACTS_DIR = "test-artifacts";
+
+ private static final String ODREFRESH_OUTPUT_DIR =
+ "/data/misc/apexdata/com.android.art/" + TEST_ARTIFACTS_DIR;
/** Timeout of odrefresh to finish */
private static final int ODREFRESH_TIMEOUT_MS = 10 * 60 * 1000; // 10 minutes
- /** Wait time for compsvc to be ready on boot */
- private static final int COMPSVC_READY_LATENCY_MS = 10 * 1000; // 10 seconds
-
// ExitCode expanded from art/odrefresh/include/odrefresh/odrefresh.h.
private static final int OKAY = 0;
private static final int COMPILATION_SUCCESS = 80;
- private String mCid;
+ // Files that define the "test" instance of CompOS
+ private static final String COMPOS_TEST_ROOT = "/data/misc/apexdata/com.android.compos/test/";
+
+ private static final String SYSTEM_SERVER_COMPILER_FILTER_PROP_NAME =
+ "dalvik.vm.systemservercompilerfilter";
+ private String mBackupSystemServerCompilerFilter;
+
+ @Rule public TestLogData mTestLogs = new TestLogData();
+ @Rule public TestName mTestName = new TestName();
@Before
public void setUp() throws Exception {
testIfDeviceIsCapable(getDevice());
- prepareVirtualizationTestSetup(getDevice());
-
- startComposVm();
+ String value = getDevice().getProperty(SYSTEM_SERVER_COMPILER_FILTER_PROP_NAME);
+ if (value == null) {
+ mBackupSystemServerCompilerFilter = "";
+ } else {
+ mBackupSystemServerCompilerFilter = value;
+ }
}
@After
public void tearDown() throws Exception {
- if (mCid != null) {
- shutdownMicrodroid(getDevice(), mCid);
- mCid = null;
- }
+ killVmAndReconnectAdb();
- cleanUpVirtualizationTestSetup(getDevice());
- }
-
- @Test
- public void testOdrefresh() throws Exception {
- waitForServiceRunning();
+ archiveLogThenDelete(mTestLogs, getDevice(), COMPOS_APEXDATA_DIR + "/vm_console.log",
+ "vm_console.log-" + mTestName.getMethodName());
+ archiveLogThenDelete(mTestLogs, getDevice(), COMPOS_APEXDATA_DIR + "/vm.log",
+ "vm.log-" + mTestName.getMethodName());
CommandRunner android = new CommandRunner(getDevice());
- // Expect the compilation to finish successfully.
- CommandResult result =
- android.runForResultWithTimeout(
- ODREFRESH_TIMEOUT_MS,
- ODREFRESH_BIN,
- "--use-compilation-os=" + mCid,
- "--force-compile");
- assertThat(result.getExitCode()).isEqualTo(COMPILATION_SUCCESS);
+ // Clear up any CompOS instance files we created
+ android.tryRun("rm", "-rf", COMPOS_TEST_ROOT);
- // Expect the output to be valid.
- result = android.runForResultWithTimeout(ODREFRESH_TIMEOUT_MS, ODREFRESH_BIN, "--check");
- assertThat(result.getExitCode()).isEqualTo(OKAY);
- }
+ // And any artifacts generated by odrefresh
+ android.tryRun("rm", "-rf", ODREFRESH_OUTPUT_DIR);
- private void startComposVm() throws Exception {
- final String apkName = "CompOSPayloadApp.apk";
- final String packageName = "com.android.compos.payload";
- mCid =
- startMicrodroid(
- getDevice(),
- getBuild(),
- apkName,
- packageName,
- "assets/vm_config.json",
- /* debug */ true);
- adbConnectToMicrodroid(getDevice(), mCid);
- }
-
- private void waitForServiceRunning() {
- try {
- PollingCheck.waitFor(COMPSVC_READY_LATENCY_MS, this::isServiceRunning);
- } catch (Exception e) {
- throw new RuntimeException("Service unavailable", e);
+ if (mBackupSystemServerCompilerFilter != null) {
+ CLog.d("Restore dalvik.vm.systemservercompilerfilter to "
+ + mBackupSystemServerCompilerFilter);
+ getDevice().setProperty(SYSTEM_SERVER_COMPILER_FILTER_PROP_NAME,
+ mBackupSystemServerCompilerFilter);
}
}
- private boolean isServiceRunning() {
- return tryRunOnMicrodroid("pidof compsvc") != null;
+ @Test
+ public void testOdrefreshSpeed() throws Exception {
+ getDevice().setProperty(SYSTEM_SERVER_COMPILER_FILTER_PROP_NAME, "speed");
+ testOdrefresh();
+ }
+
+ @Test
+ public void testOdrefreshSpeedProfile() throws Exception {
+ getDevice().setProperty(SYSTEM_SERVER_COMPILER_FILTER_PROP_NAME, "speed-profile");
+ testOdrefresh();
+ }
+
+ private void testOdrefresh() throws Exception {
+ CommandRunner android = new CommandRunner(getDevice());
+
+ // Prepare the groundtruth. The compilation on Android should finish successfully.
+ {
+ long start = System.currentTimeMillis();
+ CommandResult result = runOdrefresh(android, "--force-compile");
+ long elapsed = System.currentTimeMillis() - start;
+ assertThat(result.getExitCode()).isEqualTo(COMPILATION_SUCCESS);
+ CLog.i("Local compilation took " + elapsed + "ms");
+ }
+
+ // Save the expected checksum for the output directory.
+ String expectedChecksumSnapshot = checksumDirectoryContentPartial(android,
+ ODREFRESH_OUTPUT_DIR);
+
+ // --check may delete the output.
+ CommandResult result = runOdrefresh(android, "--check");
+ assertThat(result.getExitCode()).isEqualTo(OKAY);
+
+ // Make sure we generate a fresh instance.
+ android.tryRun("rm", "-rf", COMPOS_TEST_ROOT);
+ // TODO: remove once composd starts to clean up the directory.
+ android.tryRun("rm", "-rf", ODREFRESH_OUTPUT_DIR);
+
+ // Expect the compilation in Compilation OS to finish successfully.
+ {
+ long start = System.currentTimeMillis();
+ result =
+ android.runForResultWithTimeout(
+ ODREFRESH_TIMEOUT_MS, COMPOSD_CMD_BIN, "test-compile");
+ long elapsed = System.currentTimeMillis() - start;
+ assertThat(result.getExitCode()).isEqualTo(0);
+ CLog.i("Comp OS compilation took " + elapsed + "ms");
+ }
+ killVmAndReconnectAdb();
+
+ // Save the actual checksum for the output directory.
+ String actualChecksumSnapshot = checksumDirectoryContentPartial(android,
+ ODREFRESH_OUTPUT_DIR);
+
+ // Expect the output of Comp OS to be the same as compiled on Android.
+ assertThat(actualChecksumSnapshot).isEqualTo(expectedChecksumSnapshot);
+
+ // Expect extra files generated by CompOS exist.
+ android.run("test -f " + ODREFRESH_OUTPUT_DIR + "/compos.info");
+ android.run("test -f " + ODREFRESH_OUTPUT_DIR + "/compos.info.signature");
+
+ // Expect the CompOS signature to be valid
+ android.run(COMPOS_VERIFY_BIN + " --debug --instance test");
+ }
+
+ private CommandResult runOdrefresh(CommandRunner android, String command) throws Exception {
+ return android.runForResultWithTimeout(
+ ODREFRESH_TIMEOUT_MS,
+ ODREFRESH_BIN,
+ "--dalvik-cache=" + TEST_ARTIFACTS_DIR,
+ command);
+ }
+
+ private void killVmAndReconnectAdb() throws Exception {
+ CommandRunner android = new CommandRunner(getDevice());
+
+ // When a VM exits, we tend to see adb disconnecting. So we attempt to reconnect
+ // when we kill it to avoid problems. Of course VirtualizationService may exit anyway
+ // (it's an on-demand service and all its clients have gone), taking the VM with it,
+ // which makes this a bit unpredictable.
+ reconnectHostAdb(getDevice());
+ android.tryRun("killall", "crosvm");
+ reconnectHostAdb(getDevice());
+ android.tryRun("stop", "virtualizationservice");
+ reconnectHostAdb(getDevice());
+
+ // Delete stale data
+ android.tryRun("rm", "-rf", "/data/misc/virtualizationservice/*");
+ }
+
+ private String checksumDirectoryContentPartial(CommandRunner runner, String path)
+ throws Exception {
+ // Sort by filename (second column) to make comparison easier. Filter out compos.info and
+ // compos.info.signature since they are only generated by CompOS.
+ // TODO(b/211458160): Remove cache-info.xml once we can plumb timestamp and isFactory of
+ // APEXes to the VM.
+ return runner.run("cd " + path + "; find -type f -exec sha256sum {} \\;"
+ + "| grep -v cache-info.xml | grep -v compos.info"
+ + "| sort -k2");
}
}
diff --git a/compos/verify/Android.bp b/compos/verify/Android.bp
new file mode 100644
index 0000000..d6875d1
--- /dev/null
+++ b/compos/verify/Android.bp
@@ -0,0 +1,23 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_binary {
+ name: "compos_verify",
+ srcs: ["verify.rs"],
+ edition: "2018",
+ rustlibs: [
+ "compos_aidl_interface-rust",
+ "libandroid_logger",
+ "libanyhow",
+ "libbinder_rs",
+ "libclap",
+ "libcompos_common",
+ "libcompos_verify_native_rust",
+ "liblog_rust",
+ ],
+ prefer_rlib: true,
+ apex_available: [
+ "com.android.compos",
+ ],
+}
diff --git a/compos/verify/native/Android.bp b/compos/verify/native/Android.bp
new file mode 100644
index 0000000..969c9f4
--- /dev/null
+++ b/compos/verify/native/Android.bp
@@ -0,0 +1,51 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library {
+ name: "libcompos_verify_native_rust",
+ crate_name: "compos_verify_native",
+ srcs: ["lib.rs"],
+ rustlibs: [
+ "libanyhow",
+ "libcxx",
+ "liblibc",
+ ],
+ static_libs: [
+ "libcompos_verify_native_cpp",
+ "libcompos_key",
+ ],
+ shared_libs: [
+ "libcrypto",
+ ],
+ apex_available: ["com.android.compos"],
+}
+
+cc_library_static {
+ name: "libcompos_verify_native_cpp",
+ srcs: ["verify_native.cpp"],
+ static_libs: ["libcompos_key"],
+ shared_libs: [
+ "libbase",
+ "libcrypto",
+ ],
+ generated_headers: ["compos_verify_native_header"],
+ generated_sources: ["compos_verify_native_code"],
+ apex_available: ["com.android.compos"],
+}
+
+genrule {
+ name: "compos_verify_native_code",
+ tools: ["cxxbridge"],
+ cmd: "$(location cxxbridge) $(in) >> $(out)",
+ srcs: ["lib.rs"],
+ out: ["verify_native_cxx_generated.cc"],
+}
+
+genrule {
+ name: "compos_verify_native_header",
+ tools: ["cxxbridge"],
+ cmd: "$(location cxxbridge) $(in) --header >> $(out)",
+ srcs: ["lib.rs"],
+ out: ["lib.rs.h"],
+}
diff --git a/compos/verify/native/lib.rs b/compos/verify/native/lib.rs
new file mode 100644
index 0000000..51050da
--- /dev/null
+++ b/compos/verify/native/lib.rs
@@ -0,0 +1,31 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Native helper for compos_verify to call boringssl.
+
+pub use native::*;
+
+#[cxx::bridge]
+mod native {
+ unsafe extern "C++" {
+ include!("verify_native.h");
+
+ // SAFETY: The C++ implementation manages its own memory, and does not retain or abuse
+ // the references passed to it.
+
+ /// Verify a PureEd25519 signature with the specified public key on the given data,
+ /// returning whether the signature is valid or not.
+ fn verify(public_key: &[u8], signature: &[u8], data: &[u8]) -> bool;
+ }
+}
diff --git a/compos/verify/native/verify_native.cpp b/compos/verify/native/verify_native.cpp
new file mode 100644
index 0000000..2c43d95
--- /dev/null
+++ b/compos/verify/native/verify_native.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "verify_native.h"
+
+#include <compos_key.h>
+
+using rust::Slice;
+
+bool verify(Slice<const uint8_t> public_key, Slice<const uint8_t> signature,
+ Slice<const uint8_t> data) {
+ compos_key::PublicKey public_key_array;
+ compos_key::Signature signature_array;
+
+ if (public_key.size() != public_key_array.size() ||
+ signature.size() != signature_array.size()) {
+ return false;
+ }
+
+ std::copy(public_key.begin(), public_key.end(), public_key_array.begin());
+ std::copy(signature.begin(), signature.end(), signature_array.begin());
+
+ return compos_key::verify(public_key_array, signature_array, data.data(), data.size());
+}
diff --git a/compos/aidl/com/android/compos/Metadata.aidl b/compos/verify/native/verify_native.h
similarity index 64%
copy from compos/aidl/com/android/compos/Metadata.aidl
copy to compos/verify/native/verify_native.h
index a15214d..5f000b6 100644
--- a/compos/aidl/com/android/compos/Metadata.aidl
+++ b/compos/verify/native/verify_native.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,13 +14,9 @@
* limitations under the License.
*/
-package com.android.compos;
+#pragma once
-import com.android.compos.InputFdAnnotation;
-import com.android.compos.OutputFdAnnotation;
+#include "lib.rs.h"
-/** {@hide} */
-parcelable Metadata {
- InputFdAnnotation[] input_fd_annotations;
- OutputFdAnnotation[] output_fd_annotations;
-}
+bool verify(rust::Slice<const uint8_t> public_key, rust::Slice<const uint8_t> signature,
+ rust::Slice<const uint8_t> data);
diff --git a/compos/verify/verify.rs b/compos/verify/verify.rs
new file mode 100644
index 0000000..7b77c18
--- /dev/null
+++ b/compos/verify/verify.rs
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! A tool to verify a CompOS signature. It starts a CompOS VM as part of this to retrieve the
+//! public key. The tool is intended to be run by odsign during boot.
+
+use android_logger::LogId;
+use anyhow::{bail, Context, Result};
+use compos_aidl_interface::binder::ProcessState;
+use compos_common::compos_client::{VmInstance, VmParameters};
+use compos_common::odrefresh::{
+ CURRENT_ARTIFACTS_SUBDIR, ODREFRESH_OUTPUT_ROOT_DIR, PENDING_ARTIFACTS_SUBDIR,
+ TEST_ARTIFACTS_SUBDIR,
+};
+use compos_common::{
+ COMPOS_DATA_ROOT, CURRENT_INSTANCE_DIR, IDSIG_FILE, IDSIG_MANIFEST_APK_FILE,
+ INSTANCE_IMAGE_FILE, TEST_INSTANCE_DIR,
+};
+use log::error;
+use std::fs::File;
+use std::io::Read;
+use std::panic;
+use std::path::Path;
+
+const MAX_FILE_SIZE_BYTES: u64 = 100 * 1024;
+
+fn main() {
+ android_logger::init_once(
+ android_logger::Config::default()
+ .with_tag("compos_verify")
+ .with_min_level(log::Level::Info)
+ .with_log_id(LogId::System), // Needed to log successfully early in boot
+ );
+
+ // Redirect panic messages to logcat.
+ panic::set_hook(Box::new(|panic_info| {
+ error!("{}", panic_info);
+ }));
+
+ if let Err(e) = try_main() {
+ error!("{:?}", e);
+ std::process::exit(1)
+ }
+}
+
+fn try_main() -> Result<()> {
+ let matches = clap::App::new("compos_verify")
+ .arg(
+ clap::Arg::with_name("instance")
+ .long("instance")
+ .takes_value(true)
+ .required(true)
+ .possible_values(&["current", "pending", "test"]),
+ )
+ .arg(clap::Arg::with_name("debug").long("debug"))
+ .get_matches();
+
+ let debug_mode = matches.is_present("debug");
+ let (instance_dir, artifacts_dir) = match matches.value_of("instance").unwrap() {
+ "current" => (CURRENT_INSTANCE_DIR, CURRENT_ARTIFACTS_SUBDIR),
+ "pending" => (CURRENT_INSTANCE_DIR, PENDING_ARTIFACTS_SUBDIR),
+ "test" => (TEST_INSTANCE_DIR, TEST_ARTIFACTS_SUBDIR),
+ _ => unreachable!("Unexpected instance name"),
+ };
+
+ let instance_dir = Path::new(COMPOS_DATA_ROOT).join(instance_dir);
+ let artifacts_dir = Path::new(ODREFRESH_OUTPUT_ROOT_DIR).join(artifacts_dir);
+
+ if !instance_dir.is_dir() {
+ bail!("{:?} is not a directory", instance_dir);
+ }
+
+ let instance_image = instance_dir.join(INSTANCE_IMAGE_FILE);
+ let idsig = instance_dir.join(IDSIG_FILE);
+ let idsig_manifest_apk = instance_dir.join(IDSIG_MANIFEST_APK_FILE);
+
+ let instance_image = File::open(instance_image).context("Failed to open instance image")?;
+
+ let info = artifacts_dir.join("compos.info");
+ let signature = artifacts_dir.join("compos.info.signature");
+
+ let info = read_small_file(&info).context("Failed to read compos.info")?;
+ let signature = read_small_file(&signature).context("Failed to read compos.info signature")?;
+
+ // We need to start the thread pool to be able to receive Binder callbacks
+ ProcessState::start_thread_pool();
+
+ let virtualization_service = VmInstance::connect_to_virtualization_service()?;
+ let vm_instance = VmInstance::start(
+ &*virtualization_service,
+ instance_image,
+ &idsig,
+ &idsig_manifest_apk,
+ &VmParameters { debug_mode, ..Default::default() },
+ )?;
+ let service = vm_instance.get_service()?;
+
+ let public_key = service.getPublicKey().context("Getting public key")?;
+
+ if !compos_verify_native::verify(&public_key, &signature, &info) {
+ bail!("Signature verification failed");
+ }
+
+ Ok(())
+}
+
+fn read_small_file(file: &Path) -> Result<Vec<u8>> {
+ let mut file = File::open(file)?;
+ if file.metadata()?.len() > MAX_FILE_SIZE_BYTES {
+ bail!("File is too big");
+ }
+ let mut data = Vec::new();
+ file.read_to_end(&mut data)?;
+ Ok(data)
+}
diff --git a/demo/Android.bp b/demo/Android.bp
index 77049de..1342a26 100644
--- a/demo/Android.bp
+++ b/demo/Android.bp
@@ -9,6 +9,7 @@
static_libs: [
"androidx-constraintlayout_constraintlayout",
"androidx.appcompat_appcompat",
+ "com.android.microdroid.testservice-java",
"com.google.android.material_material",
],
libs: [
diff --git a/demo/AndroidManifest.xml b/demo/AndroidManifest.xml
index 7e1a58d..74ec210 100644
--- a/demo/AndroidManifest.xml
+++ b/demo/AndroidManifest.xml
@@ -6,7 +6,8 @@
<application
android:label="MicrodroidDemo"
- android:theme="@style/Theme.MicrodroidDemo">
+ android:theme="@style/Theme.MicrodroidDemo"
+ android:testOnly="true">
<uses-library android:name="android.system.virtualmachine" android:required="true" />
<activity android:name=".MainActivity" android:exported="true">
<intent-filter>
diff --git a/demo/README.md b/demo/README.md
index 113a14e..43d2ebc 100644
--- a/demo/README.md
+++ b/demo/README.md
@@ -9,11 +9,13 @@
## Installing
```
-adb install out/dist/MicrodroidDemoApp.apk
-adb shell mkdir /data/local/tmp/virt
-adb push out/dist/MicrodroidDemoApp.apk.idsig /data/local/tmp/virt/
+adb install -t out/dist/MicrodroidDemoApp.apk
+adb shell pm grant com.android.microdroid.demo android.permission.MANAGE_VIRTUAL_MACHINE
```
+Don't run the app before granting the permission; otherwise you will have to
+uninstall the app and then re-install it.
+
## Running
Run the app by touching the icon on the launcher. Press the `run` button to
diff --git a/demo/java/com/android/microdroid/demo/MainActivity.java b/demo/java/com/android/microdroid/demo/MainActivity.java
index b6c7714..e53f95d 100644
--- a/demo/java/com/android/microdroid/demo/MainActivity.java
+++ b/demo/java/com/android/microdroid/demo/MainActivity.java
@@ -18,12 +18,16 @@
import android.app.Application;
import android.os.Bundle;
+import android.os.IBinder;
import android.os.ParcelFileDescriptor;
+import android.os.RemoteException;
import android.system.virtualmachine.VirtualMachine;
import android.system.virtualmachine.VirtualMachineCallback;
import android.system.virtualmachine.VirtualMachineConfig;
+import android.system.virtualmachine.VirtualMachineConfig.DebugLevel;
import android.system.virtualmachine.VirtualMachineException;
import android.system.virtualmachine.VirtualMachineManager;
+import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.CheckBox;
@@ -37,12 +41,16 @@
import androidx.lifecycle.Observer;
import androidx.lifecycle.ViewModelProvider;
+import com.android.microdroid.testservice.ITestService;
+
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
+import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
/**
* This app is to demonstrate the use of APIs in the android.system.virtualmachine library.
@@ -50,51 +58,20 @@
* the virtual machine to the UI.
*/
public class MainActivity extends AppCompatActivity {
+ private static final String TAG = "MicrodroidDemo";
+
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
- TextView consoleView = (TextView) findViewById(R.id.consoleOutput);
- TextView payloadView = (TextView) findViewById(R.id.payloadOutput);
Button runStopButton = (Button) findViewById(R.id.runStopButton);
- ScrollView scrollView = (ScrollView) findViewById(R.id.scrollConsoleOutput);
+ TextView consoleView = (TextView) findViewById(R.id.consoleOutput);
+ TextView logView = (TextView) findViewById(R.id.logOutput);
+ TextView payloadView = (TextView) findViewById(R.id.payloadOutput);
+ ScrollView scrollConsoleView = (ScrollView) findViewById(R.id.scrollConsoleOutput);
+ ScrollView scrollLogView = (ScrollView) findViewById(R.id.scrollLogOutput);
- // When the console output or payload output is updated, append the new line to the
- // corresponding text view.
VirtualMachineModel model = new ViewModelProvider(this).get(VirtualMachineModel.class);
- model.getConsoleOutput()
- .observeForever(
- new Observer<String>() {
- @Override
- public void onChanged(String line) {
- consoleView.append(line + "\n");
- scrollView.fullScroll(View.FOCUS_DOWN);
- }
- });
- model.getPayloadOutput()
- .observeForever(
- new Observer<String>() {
- @Override
- public void onChanged(String line) {
- payloadView.append(line + "\n");
- }
- });
-
- // When the VM status is updated, change the label of the button
- model.getStatus()
- .observeForever(
- new Observer<VirtualMachine.Status>() {
- @Override
- public void onChanged(VirtualMachine.Status status) {
- if (status == VirtualMachine.Status.RUNNING) {
- runStopButton.setText("Stop");
- consoleView.setText("");
- payloadView.setText("");
- } else {
- runStopButton.setText("Run");
- }
- }
- });
// When the button is clicked, run or stop the VM
runStopButton.setOnClickListener(
@@ -109,14 +86,89 @@
}
}
});
+
+ // When the VM status is updated, change the label of the button
+ model.getStatus()
+ .observeForever(
+ new Observer<VirtualMachine.Status>() {
+ @Override
+ public void onChanged(VirtualMachine.Status status) {
+ if (status == VirtualMachine.Status.RUNNING) {
+ runStopButton.setText("Stop");
+ // Clear the outputs from the previous run
+ consoleView.setText("");
+ logView.setText("");
+ payloadView.setText("");
+ } else {
+ runStopButton.setText("Run");
+ }
+ }
+ });
+
+ // When the console, log, or payload output is updated, append the new line to the
+ // corresponding text view.
+ model.getConsoleOutput()
+ .observeForever(
+ new Observer<String>() {
+ @Override
+ public void onChanged(String line) {
+ consoleView.append(line + "\n");
+ scrollConsoleView.fullScroll(View.FOCUS_DOWN);
+ }
+ });
+ model.getLogOutput()
+ .observeForever(
+ new Observer<String>() {
+ @Override
+ public void onChanged(String line) {
+ logView.append(line + "\n");
+ scrollLogView.fullScroll(View.FOCUS_DOWN);
+ }
+ });
+ model.getPayloadOutput()
+ .observeForever(
+ new Observer<String>() {
+ @Override
+ public void onChanged(String line) {
+ payloadView.append(line + "\n");
+ }
+ });
}
- /** Models a virtual machine and console output from it. */
+ /** Reads data from an input stream and posts it to the output data */
+ static class Reader implements Runnable {
+ private final String mName;
+ private final MutableLiveData<String> mOutput;
+ private final InputStream mStream;
+
+ Reader(String name, MutableLiveData<String> output, InputStream stream) {
+ mName = name;
+ mOutput = output;
+ mStream = stream;
+ }
+
+ @Override
+ public void run() {
+ try {
+ BufferedReader reader = new BufferedReader(new InputStreamReader(mStream));
+ String line;
+ while ((line = reader.readLine()) != null && !Thread.interrupted()) {
+ mOutput.postValue(line);
+ }
+ } catch (IOException e) {
+ Log.e(TAG, "Exception while posting " + mName + " output: " + e.getMessage());
+ }
+ }
+ }
+
+ /** Models a virtual machine and outputs from it. */
public static class VirtualMachineModel extends AndroidViewModel {
private VirtualMachine mVirtualMachine;
private final MutableLiveData<String> mConsoleOutput = new MutableLiveData<>();
+ private final MutableLiveData<String> mLogOutput = new MutableLiveData<>();
private final MutableLiveData<String> mPayloadOutput = new MutableLiveData<>();
private final MutableLiveData<VirtualMachine.Status> mStatus = new MutableLiveData<>();
+ private ExecutorService mExecutorService;
public VirtualMachineModel(Application app) {
super(app);
@@ -127,65 +179,133 @@
public void run(boolean debug) {
// Create a VM and run it.
// TODO(jiyong): remove the call to idsigPath
+ mExecutorService = Executors.newFixedThreadPool(4);
+
+ VirtualMachineCallback callback =
+ new VirtualMachineCallback() {
+ // store reference to ExecutorService to avoid race condition
+ private final ExecutorService mService = mExecutorService;
+
+ @Override
+ public void onPayloadStarted(
+ VirtualMachine vm, ParcelFileDescriptor stream) {
+ if (stream == null) {
+ mPayloadOutput.postValue("(no output available)");
+ return;
+ }
+
+ InputStream input = new FileInputStream(stream.getFileDescriptor());
+ mService.execute(new Reader("payload", mPayloadOutput, input));
+ }
+
+ @Override
+ public void onPayloadReady(VirtualMachine vm) {
+ // This check doesn't 100% prevent race condition or UI hang.
+ // However, it's fine for demo.
+ if (mService.isShutdown()) {
+ return;
+ }
+ mPayloadOutput.postValue("(Payload is ready. Testing VM service...)");
+
+ Future<IBinder> service;
+ try {
+ service = vm.connectToVsockServer(ITestService.SERVICE_PORT);
+ } catch (VirtualMachineException e) {
+ mPayloadOutput.postValue(
+ String.format(
+ "(Exception while connecting VM's binder"
+ + " service: %s)",
+ e.getMessage()));
+ return;
+ }
+
+ mService.execute(() -> testVMService(service));
+ }
+
+ private void testVMService(Future<IBinder> service) {
+ IBinder binder;
+ try {
+ binder = service.get();
+ } catch (Exception e) {
+ if (!Thread.interrupted()) {
+ mPayloadOutput.postValue(
+ String.format(
+ "(VM service connection failed: %s)",
+ e.getMessage()));
+ }
+ return;
+ }
+
+ try {
+ ITestService testService = ITestService.Stub.asInterface(binder);
+ int ret = testService.addInteger(123, 456);
+ mPayloadOutput.postValue(
+ String.format(
+ "(VM payload service: %d + %d = %d)",
+ 123, 456, ret));
+ } catch (RemoteException e) {
+ mPayloadOutput.postValue(
+ String.format(
+ "(Exception while testing VM's binder service:"
+ + " %s)",
+ e.getMessage()));
+ }
+ }
+
+ @Override
+ public void onPayloadFinished(VirtualMachine vm, int exitCode) {
+ // This check doesn't 100% prevent race condition, but is fine for demo.
+ if (!mService.isShutdown()) {
+ mPayloadOutput.postValue(
+ String.format(
+ "(Payload finished. exit code: %d)", exitCode));
+ }
+ }
+
+ @Override
+ public void onError(VirtualMachine vm, int errorCode, String message) {
+ // This check doesn't 100% prevent race condition, but is fine for demo.
+ if (!mService.isShutdown()) {
+ mPayloadOutput.postValue(
+ String.format(
+ "(Error occurred. code: %d, message: %s)",
+ errorCode, message));
+ }
+ }
+
+ @Override
+ public void onDied(VirtualMachine vm, @DeathReason int reason) {
+ mService.shutdownNow();
+ mStatus.postValue(VirtualMachine.Status.STOPPED);
+ }
+ };
+
try {
VirtualMachineConfig.Builder builder =
- new VirtualMachineConfig.Builder(getApplication(), "assets/vm_config.json")
- .idsigPath("/data/local/tmp/virt/MicrodroidDemoApp.apk.idsig")
- .debugMode(debug);
+ new VirtualMachineConfig.Builder(getApplication(), "assets/vm_config.json");
+ if (debug) {
+ builder.debugLevel(DebugLevel.FULL);
+ }
VirtualMachineConfig config = builder.build();
VirtualMachineManager vmm = VirtualMachineManager.getInstance(getApplication());
mVirtualMachine = vmm.getOrCreate("demo_vm", config);
+ try {
+ mVirtualMachine.setConfig(config);
+ } catch (VirtualMachineException e) {
+ mVirtualMachine.delete();
+ mVirtualMachine = vmm.create("demo_vm", config);
+ }
mVirtualMachine.run();
- mVirtualMachine.setCallback(
- new VirtualMachineCallback() {
- @Override
- public void onPayloadStarted(
- VirtualMachine vm, ParcelFileDescriptor out) {
- try {
- BufferedReader reader =
- new BufferedReader(
- new InputStreamReader(
- new FileInputStream(
- out.getFileDescriptor())));
- String line;
- while ((line = reader.readLine()) != null) {
- mPayloadOutput.postValue(line);
- }
- } catch (IOException e) {
- // Consume
- }
- }
-
- @Override
- public void onDied(VirtualMachine vm) {
- mStatus.postValue(VirtualMachine.Status.STOPPED);
- }
- });
+ mVirtualMachine.setCallback(Executors.newSingleThreadExecutor(), callback);
mStatus.postValue(mVirtualMachine.getStatus());
+
+ InputStream console = mVirtualMachine.getConsoleOutputStream();
+ InputStream log = mVirtualMachine.getLogOutputStream();
+ mExecutorService.execute(new Reader("console", mConsoleOutput, console));
+ mExecutorService.execute(new Reader("log", mLogOutput, log));
} catch (VirtualMachineException e) {
throw new RuntimeException(e);
}
-
- // Read console output from the VM in the background
- ExecutorService executorService = Executors.newFixedThreadPool(1);
- executorService.execute(
- new Runnable() {
- @Override
- public void run() {
- try {
- BufferedReader reader =
- new BufferedReader(
- new InputStreamReader(
- mVirtualMachine.getConsoleOutputStream()));
- while (true) {
- String line = reader.readLine();
- mConsoleOutput.postValue(line);
- }
- } catch (IOException | VirtualMachineException e) {
- // Consume
- }
- }
- });
}
/** Stops the running VM */
@@ -196,6 +316,7 @@
// Consume
}
mVirtualMachine = null;
+ mExecutorService.shutdownNow();
mStatus.postValue(VirtualMachine.Status.STOPPED);
}
@@ -204,6 +325,11 @@
return mConsoleOutput;
}
+ /** Returns the log output from the VM */
+ public LiveData<String> getLogOutput() {
+ return mLogOutput;
+ }
+
/** Returns the payload output from the VM */
public LiveData<String> getPayloadOutput() {
return mPayloadOutput;
diff --git a/demo/res/layout/activity_main.xml b/demo/res/layout/activity_main.xml
index e100027..f0e35d6 100644
--- a/demo/res/layout/activity_main.xml
+++ b/demo/res/layout/activity_main.xml
@@ -62,17 +62,50 @@
<ScrollView
android:id="@+id/scrollConsoleOutput"
- android:layout_width="match_parent"
+ android:layout_width="wrap_content"
android:layout_height="0dp"
android:layout_weight="2">
- <TextView
- android:id="@+id/consoleOutput"
+ <HorizontalScrollView
android:layout_width="match_parent"
- android:layout_height="wrap_content"
- android:background="#FFEB3B"
- android:fontFamily="monospace"
- android:textColor="#000000" />
+ android:layout_height="match_parent">
+
+ <TextView
+ android:id="@+id/consoleOutput"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:background="#FFEB3B"
+ android:fontFamily="monospace"
+ android:textSize="10sp"
+ android:textColor="#000000" />
+ </HorizontalScrollView>
+ </ScrollView>
+
+ <TextView
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_marginTop="10dp"
+ android:text="Log output:" />
+
+ <ScrollView
+ android:id="@+id/scrollLogOutput"
+ android:layout_width="wrap_content"
+ android:layout_height="0dp"
+ android:layout_weight="2">
+
+ <HorizontalScrollView
+ android:layout_width="match_parent"
+ android:layout_height="match_parent">
+
+ <TextView
+ android:id="@+id/logOutput"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:background="#FFEB3B"
+ android:fontFamily="monospace"
+ android:textSize="10sp"
+ android:textColor="#000000" />
+ </HorizontalScrollView>
</ScrollView>
</LinearLayout>
diff --git a/docs/getting_started/goldfish.md b/docs/getting_started/goldfish.md
deleted file mode 100644
index 0705982..0000000
--- a/docs/getting_started/goldfish.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Android Emulator (goldfish)
-
-The built-in local emulator is the quickest way how to get started with KVM and Android.
-
-## x86_64
-
-KVM on x86_64 does not provide the same guest protection as arm64 but you will be able to spawn
-virtual machines and use the same APIs to communicate with the guest. The main reason for choosing
-the x86_64 emulator over its arm64 counterpart is performance. With native virtualization it is
-easily 10x faster than arm64 emulation.
-
-For optimal performance make sure to
-[enable nested virtualization](https://www.linux-kvm.org/page/Nested_Guests) on your machine.
-Don't forget to add your user account into the `kvm` group, then re-login for it to take effect.
-``` shell
-$ sudo gpasswd -a $USER kvm
-```
-
-Build Android for the emulator:
-``` shell
-$ . build/envsetup.sh
-$ lunch sdk_phone_x86_64-eng
-$ m -j$(nproc)
-```
-
-Once you have an Android image, invoke `emulator`. The script will automatically find the image you
-just built and run it in QEMU.
-``` shell
-$ emulator -no-window -show-kernel -writable-system -qemu -cpu host
-```
-Explanation of the arguments:
- * `-no-window`: run headless
- * `-show-kernel`: print kernel UART logs to the console (useful for debugging),
- * `-writable-system`: support remounting `system/` as writable, needed for `adb sync`,
- * `-qemu -cpu host`: needed to enable nested virtualization, instructs QEMU to allow Android
- access CPU features of the host kernel
-
-If you get an error saying “x86_64 emulation currently requires hardware acceleration!”, your
-user account is not in the `kvm` group (see above).
-
-You should now see the virtual device when you run:
-``` shell
-$ adb devices
-List of devices attached
-emulator-5554 device
-```
diff --git a/docs/getting_started/index.md b/docs/getting_started/index.md
index 148b8d8..f598034 100644
--- a/docs/getting_started/index.md
+++ b/docs/getting_started/index.md
@@ -2,88 +2,95 @@
## Prepare a device
-First you will need a device that is capable of running virtual machines. On arm64, this means
-a device which boots the kernel in EL2 and the kernel was built with KVM enabled.
+First you will need a device that is capable of running virtual machines. On arm64, this means a
+device which boots the kernel in EL2 and the kernel was built with KVM enabled. Unfortunately at the
+moment, we don't have an arm64 device in AOSP which does that. Instead, use cuttlefish which
+provides the same functionalities except that the virtual machines are not protected from the host
+(i.e. Android). This however should be enough for functional testing.
-Here are instructions for select devices:
+We support the following device:
- * [yukawa: Khadas VIM3L](yukawa.md) (arm64)
- * [goldfish: Android Emulator](goldfish.md) (x86_64)
+* aosp_cf_x86_64_phone (Cuttlefish a.k.a. Cloud Android)
+
+Building Cuttlefish
+
+```shell
+source build/envsetup.sh
+lunch aosp_cf_x86_64_phone-userdebug
+m
+```
+
+Run Cuttlefish locally by
+
+```shell
+acloud create --local-instance --local-image
+```
+
+## Running demo app
+
+The instructions are [here](../../demo/README.md).
## Running tests
-Virtualization source code and relevant tests are located in
-[packages/modules/Virtualization](https://android.googlesource.com/platform/packages/modules/Virtualization)
-of the AOSP repository.
-
-### Device-side tests
-
-The tests spawn guest VMs and test different aspects of the architecture.
-
-You can build and run them with:
+There are various tests that spawn guest VMs and check different aspects of the architecture. They
+can all be run via `atest`.
```shell
-atest VirtualizationTestCases
+atest VirtualizationTestCases.64
+atest MicrodroidHostTestCases
+atest MicrodroidTestApp
```
If you run into problems, inspect the logs produced by `atest`. Their location is printed at the
end. The `host_log_*.zip` file should contain the output of individual commands as well as VM logs.
-## CrosVM
+## Spawning your own VMs with custom kernel
-[CrosVM](https://android.googlesource.com/platform/external/crosvm/) is a Rust-based Virtual Machine
-Monitor (VMM) originally built for ChromeOS and ported to Android.
-
-It is not installed in regular Android builds (yet!), but it's installed in the VIM3L (yukawa)
-build, as part of the `com.android.virt` APEX.
-
-### Spawning your own VMs
-
-You can spawn your own VMs by passing a JSON config file to the VirtualizationService via the `vm` tool on a
-rooted KVM-enabled device. If your device is attached over ADB, you can run:
+You can spawn your own VMs by passing a JSON config file to the VirtualizationService via the `vm`
+tool on a rooted KVM-enabled device. If your device is attached over ADB, you can run:
```shell
-$ cat > vm_config.json
+cat > vm_config.json
{
"kernel": "/data/local/tmp/kernel",
"initrd": "/data/local/tmp/ramdisk",
"params": "rdinit=/bin/init"
}
-$ adb root
-$ adb push <kernel> /data/local/tmp/kernel
-$ adb push <ramdisk> /data/local/tmp/ramdisk
-$ adb push vm_config.json /data/local/tmp/vm_config.json
-$ adb shell "start virtualizationservice"
-$ adb shell "/apex/com.android.virt/bin/vm run /data/local/tmp/vm_config.json"
+adb root
+adb push <kernel> /data/local/tmp/kernel
+adb push <ramdisk> /data/local/tmp/ramdisk
+adb push vm_config.json /data/local/tmp/vm_config.json
+adb shell "start virtualizationservice"
+adb shell "/apex/com.android.virt/bin/vm run /data/local/tmp/vm_config.json"
```
The `vm` command also has other subcommands for debugging; run `/apex/com.android.virt/bin/vm help`
for details.
-### Building and updating CrosVM and VirtualizationService
+## Spawning your own VMs with Microdroid
-You can update CrosVM and the VirtualizationService by updating the `com.android.virt` APEX. If your
-device already has `com.android.virt` (e.g. VIM3L):
+[Microdroid](../../microdroid/README.md) is a lightweight version of Android that is intended to run
+on a pVM. You can manually run the demo app on top of Microdroid as follows:
```shell
-$ TARGET_BUILD_APPS="com.android.virt" m
-$ adb install $ANDROID_PRODUCT_OUT/system/apex/com.android.virt.apex
-$ adb reboot
+TARGET_BUILD_APPS=MicrodroidDemoApp m apps_only dist
+adb shell mkdir -p /data/local/tmp/virt
+adb push out/dist/MicrodroidDemoApp.apk /data/local/tmp/virt/
+adb shell /apex/com.android.virt/bin/vm run-app \
+ --debug full \
+ /data/local/tmp/virt/MicrodroidDemoApp.apk \
+ /data/local/tmp/virt/MicrodroidDemoApp.apk.idsig \
+ /data/local/tmp/virt/instance.img assets/vm_config.json
```
-If it doesn't have the APEX yet, you first need to place it manually to the
-system partition.
+## Building and updating CrosVM and VirtualizationService
+
+You can update CrosVM and the VirtualizationService by updating the `com.android.virt` APEX instead
+of rebuilding the entire image.
```shell
-$ adb root
-$ adb disable-verity
-$ adb reboot
-$ adb wait-for-device root
-$ adb remount
-$ m com.android.virt
-$ adb sync
-$ adb reboot
+banchan com.android.virt aosp_arm64 // or aosp_x86_64 if the device is cuttlefish
+UNBUNDLED_BUILD_SDKS_FROM_SOURCE=true m apps_only dist
+adb install out/dist/com.android.virt.apex
+adb reboot
```
-
-Once the APEX is in `/system/apex`, you can use `adb install` to update it
-further.
diff --git a/docs/getting_started/yukawa.md b/docs/getting_started/yukawa.md
deleted file mode 100644
index 8ff569a..0000000
--- a/docs/getting_started/yukawa.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Khadas VIM3L (yukawa)
-
-The [Khadas VIM3L](https://www.khadas.com/vim3l) is an extremely hackable development board with an
-Amlogic Armv8.2 SoC and complete upstream support in U-boot, Linux and even
-[AOSP](https://android.googlesource.com/device/amlogic/yukawa/+/refs/heads/master).
-That makes it a compelling target for testing virtualization.
-
-The [prebuilt kernel](https://android.googlesource.com/device/amlogic/yukawa-kernel/+/refs/heads/master)
-in AOSP is currently not GKI, but it is close and kept up to date.
-
-Note that the `yukawa` target has SELinux policy set to `permissive`.
-
-Resources:
- * [AOSP instructions](https://android.googlesource.com/device/amlogic/yukawa/+/refs/heads/master/sei610/README)
- for flashing a bootloader with `fastboot` support
- * [Manufaturer's wiki](https://docs.khadas.com/vim3/index.html) for things like setting up UART
- and entering recovery mode
- * [go/vim3l](https://goto.google.com/vim3l) is a more detailed document but only accessible to
- Google employees
-
-Build Android for the board:
-``` shell
-$ . build/envsetup.sh
-$ lunch yukawa-userdebug
-$ export TARGET_VIM3L=true
-$ export TARGET_KERNEL_USE=5.10
-$ m
-```
-
-Flash your device and reboot.
diff --git a/javalib/AndroidManifest.xml b/javalib/AndroidManifest.xml
index 21857f8..2a0b903 100644
--- a/javalib/AndroidManifest.xml
+++ b/javalib/AndroidManifest.xml
@@ -18,7 +18,7 @@
package="com.android.virtualmachine.res">
<permission android:name="android.permission.MANAGE_VIRTUAL_MACHINE"
- android:protectionLevel="normal" />
+ android:protectionLevel="signature|development" />
<permission android:name="android.permission.DEBUG_VIRTUAL_MACHINE"
android:protectionLevel="signature" />
diff --git a/javalib/jni/Android.bp b/javalib/jni/Android.bp
new file mode 100644
index 0000000..2939db5
--- /dev/null
+++ b/javalib/jni/Android.bp
@@ -0,0 +1,16 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+cc_library_shared {
+ name: "libvirtualmachine_jni",
+ srcs: ["android_system_virtualmachine_VirtualMachine.cpp"],
+ apex_available: ["com.android.virt"],
+ shared_libs: [
+ "android.system.virtualizationservice-ndk",
+ "libbinder_ndk",
+ "libbinder_rpc_unstable",
+ "liblog",
+ "libnativehelper",
+ ],
+}
diff --git a/javalib/jni/android_system_virtualmachine_VirtualMachine.cpp b/javalib/jni/android_system_virtualmachine_VirtualMachine.cpp
new file mode 100644
index 0000000..7234dad
--- /dev/null
+++ b/javalib/jni/android_system_virtualmachine_VirtualMachine.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "VirtualMachine"
+
+#include <tuple>
+
+#include <log/log.h>
+
+#include <aidl/android/system/virtualizationservice/IVirtualMachine.h>
+#include <android/binder_auto_utils.h>
+#include <android/binder_ibinder_jni.h>
+#include <binder_rpc_unstable.hpp>
+
+#include <jni.h>
+
+JNIEXPORT jobject JNICALL android_system_virtualmachine_VirtualMachine_connectToVsockServer(
+ JNIEnv* env, [[maybe_unused]] jclass clazz, jobject vmBinder, jint port) {
+ using aidl::android::system::virtualizationservice::IVirtualMachine;
+ using ndk::ScopedFileDescriptor;
+ using ndk::SpAIBinder;
+
+ auto vm = IVirtualMachine::fromBinder(SpAIBinder{AIBinder_fromJavaBinder(env, vmBinder)});
+
+ std::tuple args{env, vm.get(), port};
+ using Args = decltype(args);
+
+ auto requestFunc = [](void* param) {
+ auto [env, vm, port] = *static_cast<Args*>(param);
+
+ ScopedFileDescriptor fd;
+ if (auto status = vm->connectVsock(port, &fd); !status.isOk()) {
+ env->ThrowNew(env->FindClass("android/system/virtualmachine/VirtualMachineException"),
+ ("Failed to connect vsock: " + status.getDescription()).c_str());
+ return -1;
+ }
+
+        // Take ownership of the raw fd: grab it, then clear the ScopedFileDescriptor so its
+        // destructor does not close the fd we are about to return.
+ int ret = fd.get();
+ *fd.getR() = -1;
+
+ return ret;
+ };
+
+ return AIBinder_toJavaBinder(env, RpcPreconnectedClient(requestFunc, &args));
+}
+
+JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* /*reserved*/) {
+ JNIEnv* env;
+ if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
+ ALOGE("%s: Failed to get the environment", __FUNCTION__);
+ return JNI_ERR;
+ }
+
+ jclass c = env->FindClass("android/system/virtualmachine/VirtualMachine");
+ if (c == nullptr) {
+ ALOGE("%s: Failed to find class android.system.virtualmachine.VirtualMachine",
+ __FUNCTION__);
+ return JNI_ERR;
+ }
+
+ // Register your class' native methods.
+ static const JNINativeMethod methods[] = {
+ {"nativeConnectToVsockServer", "(Landroid/os/IBinder;I)Landroid/os/IBinder;",
+ reinterpret_cast<void*>(
+ android_system_virtualmachine_VirtualMachine_connectToVsockServer)},
+ };
+ int rc = env->RegisterNatives(c, methods, sizeof(methods) / sizeof(JNINativeMethod));
+ if (rc != JNI_OK) {
+ ALOGE("%s: Failed to register natives", __FUNCTION__);
+ return rc;
+ }
+
+ return JNI_VERSION_1_6;
+}
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachine.java b/javalib/src/android/system/virtualmachine/VirtualMachine.java
index c46bb2b..ed2c2a1 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachine.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachine.java
@@ -16,11 +16,14 @@
package android.system.virtualmachine;
+import static android.os.ParcelFileDescriptor.MODE_READ_ONLY;
import static android.os.ParcelFileDescriptor.MODE_READ_WRITE;
+import android.annotation.CallbackExecutor;
import android.annotation.NonNull;
import android.annotation.Nullable;
import android.content.Context;
+import android.os.Binder;
import android.os.IBinder;
import android.os.ParcelFileDescriptor;
import android.os.RemoteException;
@@ -28,7 +31,12 @@
import android.system.virtualizationservice.IVirtualMachine;
import android.system.virtualizationservice.IVirtualMachineCallback;
import android.system.virtualizationservice.IVirtualizationService;
+import android.system.virtualizationservice.PartitionType;
import android.system.virtualizationservice.VirtualMachineAppConfig;
+import android.system.virtualizationservice.VirtualMachineState;
+import android.util.JsonReader;
+
+import com.android.internal.annotations.GuardedBy;
import java.io.File;
import java.io.FileInputStream;
@@ -36,9 +44,19 @@
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.io.InputStreamReader;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Optional;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Consumer;
+import java.util.zip.ZipFile;
/**
* A handle to the virtual machine. The virtual machine is local to the app which created the
@@ -56,6 +74,12 @@
/** Name of the instance image file for a VM. (Not implemented) */
private static final String INSTANCE_IMAGE_FILE = "instance.img";
+ /** Name of the idsig file for a VM */
+ private static final String IDSIG_FILE = "idsig";
+
+ /** Name of the idsig files for extra APKs. */
+ private static final String EXTRA_IDSIG_FILE_PREFIX = "extra_idsig_";
+
/** Name of the virtualization service. */
private static final String SERVICE_NAME = "android.system.virtualizationservice";
@@ -72,6 +96,9 @@
DELETED,
}
+ /** Lock for internal synchronization. */
+ private final Object mLock = new Object();
+
/** The package which owns this VM. */
private final @NonNull String mPackageName;
@@ -86,6 +113,25 @@
/** Path to the instance image file for this VM. */
private final @NonNull File mInstanceFilePath;
+ /** Path to the idsig file for this VM. */
+ private final @NonNull File mIdsigFilePath;
+
+ private static class ExtraApkSpec {
+ public final File apk;
+ public final File idsig;
+
+ ExtraApkSpec(File apk, File idsig) {
+ this.apk = apk;
+ this.idsig = idsig;
+ }
+ }
+
+ /**
+ * List of extra apks. Apks are specified by the vm config, and corresponding idsigs are to be
+ * generated.
+ */
+ private final @NonNull List<ExtraApkSpec> mExtraApks;
+
/** Size of the instance image. 10 MB. */
private static final long INSTANCE_FILE_SIZE = 10 * 1024 * 1024;
@@ -96,21 +142,38 @@
private @Nullable IVirtualMachine mVirtualMachine;
/** The registered callback */
+ @GuardedBy("mLock")
private @Nullable VirtualMachineCallback mCallback;
+ /** The executor on which the callback will be executed */
+ @GuardedBy("mLock")
+ private @Nullable Executor mCallbackExecutor;
+
private @Nullable ParcelFileDescriptor mConsoleReader;
private @Nullable ParcelFileDescriptor mConsoleWriter;
+ private @Nullable ParcelFileDescriptor mLogReader;
+ private @Nullable ParcelFileDescriptor mLogWriter;
+
+ private final ExecutorService mExecutorService = Executors.newCachedThreadPool();
+
+ static {
+ System.loadLibrary("virtualmachine_jni");
+ }
+
private VirtualMachine(
- @NonNull Context context, @NonNull String name, @NonNull VirtualMachineConfig config) {
+ @NonNull Context context, @NonNull String name, @NonNull VirtualMachineConfig config)
+ throws VirtualMachineException {
mPackageName = context.getPackageName();
mName = name;
mConfig = config;
+ mConfigFilePath = getConfigFilePath(context, name);
final File vmRoot = new File(context.getFilesDir(), VM_DIR);
final File thisVmDir = new File(vmRoot, mName);
- mConfigFilePath = new File(thisVmDir, CONFIG_FILE);
mInstanceFilePath = new File(thisVmDir, INSTANCE_IMAGE_FILE);
+ mIdsigFilePath = new File(thisVmDir, IDSIG_FILE);
+ mExtraApks = setupExtraApks(context, config, thisVmDir);
}
/**
@@ -157,7 +220,8 @@
try {
service.initializeWritablePartition(
ParcelFileDescriptor.open(vm.mInstanceFilePath, MODE_READ_WRITE),
- INSTANCE_FILE_SIZE);
+ INSTANCE_FILE_SIZE,
+ PartitionType.ANDROID_VM_INSTANCE);
} catch (FileNotFoundException e) {
throw new VirtualMachineException("instance image missing", e);
} catch (RemoteException e) {
@@ -168,13 +232,12 @@
}
/** Loads a virtual machine that is already created before. */
- /* package */ static @NonNull VirtualMachine load(
+ /* package */ static @Nullable VirtualMachine load(
@NonNull Context context, @NonNull String name) throws VirtualMachineException {
- VirtualMachine vm = new VirtualMachine(context, name, /* config */ null);
-
- try (FileInputStream input = new FileInputStream(vm.mConfigFilePath)) {
- VirtualMachineConfig config = VirtualMachineConfig.from(input);
- vm.mConfig = config;
+ File configFilePath = getConfigFilePath(context, name);
+ VirtualMachineConfig config;
+ try (FileInputStream input = new FileInputStream(configFilePath)) {
+ config = VirtualMachineConfig.from(input);
} catch (FileNotFoundException e) {
// The VM doesn't exist.
return null;
@@ -182,6 +245,8 @@
throw new VirtualMachineException(e);
}
+ VirtualMachine vm = new VirtualMachine(context, name, config);
+
// If config file exists, but the instance image file doesn't, it means that the VM is
// corrupted. That's different from the case that the VM doesn't exist. Throw an exception
// instead of returning null.
@@ -214,8 +279,18 @@
/** Returns the current status of this virtual machine. */
public @NonNull Status getStatus() throws VirtualMachineException {
try {
- if (mVirtualMachine != null && mVirtualMachine.isRunning()) {
- return Status.RUNNING;
+ if (mVirtualMachine != null) {
+ switch (mVirtualMachine.getState()) {
+ case VirtualMachineState.NOT_STARTED:
+ return Status.STOPPED;
+ case VirtualMachineState.STARTING:
+ case VirtualMachineState.STARTED:
+ case VirtualMachineState.READY:
+ case VirtualMachineState.FINISHED:
+ return Status.RUNNING;
+ case VirtualMachineState.DEAD:
+ return Status.STOPPED;
+ }
}
} catch (RemoteException e) {
throw new VirtualMachineException(e);
@@ -230,13 +305,40 @@
* Registers the callback object to get events from the virtual machine. If a callback was
* already registered, it is replaced with the new one.
*/
- public void setCallback(@Nullable VirtualMachineCallback callback) {
- mCallback = callback;
+ public void setCallback(
+ @NonNull @CallbackExecutor Executor executor,
+ @NonNull VirtualMachineCallback callback) {
+ synchronized (mLock) {
+ mCallback = callback;
+ mCallbackExecutor = executor;
+ }
}
- /** Returns the currently registered callback. */
- public @Nullable VirtualMachineCallback getCallback() {
- return mCallback;
+ /** Clears the currently registered callback. */
+ public void clearCallback() {
+ synchronized (mLock) {
+ mCallback = null;
+ mCallbackExecutor = null;
+ }
+ }
+
+ /** Executes a callback on the callback executor. */
+ private void executeCallback(Consumer<VirtualMachineCallback> fn) {
+ final VirtualMachineCallback callback;
+ final Executor executor;
+ synchronized (mLock) {
+ callback = mCallback;
+ executor = mCallbackExecutor;
+ }
+ if (callback == null || executor == null) {
+ return;
+ }
+ final long restoreToken = Binder.clearCallingIdentity();
+ try {
+ executor.execute(() -> fn.accept(callback));
+ } finally {
+ Binder.restoreCallingIdentity(restoreToken);
+ }
}
/**
@@ -248,6 +350,17 @@
if (getStatus() != Status.STOPPED) {
throw new VirtualMachineException(this + " is not in stopped state");
}
+
+ try {
+ mIdsigFilePath.createNewFile();
+ for (ExtraApkSpec extraApk : mExtraApks) {
+ extraApk.idsig.createNewFile();
+ }
+ } catch (IOException e) {
+ // If the file already exists, exception is not thrown.
+ throw new VirtualMachineException("failed to create idsig file", e);
+ }
+
IVirtualizationService service =
IVirtualizationService.Stub.asInterface(
ServiceManager.waitForService(SERVICE_NAME));
@@ -259,46 +372,82 @@
mConsoleWriter = pipe[1];
}
+ if (mLogReader == null && mLogWriter == null) {
+ ParcelFileDescriptor[] pipe = ParcelFileDescriptor.createPipe();
+ mLogReader = pipe[0];
+ mLogWriter = pipe[1];
+ }
+
VirtualMachineAppConfig appConfig = getConfig().toParcel();
+
+ // Fill the idsig file by hashing the apk
+ service.createOrUpdateIdsigFile(
+ appConfig.apk, ParcelFileDescriptor.open(mIdsigFilePath, MODE_READ_WRITE));
+
+ for (ExtraApkSpec extraApk : mExtraApks) {
+ service.createOrUpdateIdsigFile(
+ ParcelFileDescriptor.open(extraApk.apk, MODE_READ_ONLY),
+ ParcelFileDescriptor.open(extraApk.idsig, MODE_READ_WRITE));
+ }
+
+ // Re-open idsig file in read-only mode
+ appConfig.idsig = ParcelFileDescriptor.open(mIdsigFilePath, MODE_READ_ONLY);
appConfig.instanceImage = ParcelFileDescriptor.open(mInstanceFilePath, MODE_READ_WRITE);
+ List<ParcelFileDescriptor> extraIdsigs = new ArrayList<>();
+ for (ExtraApkSpec extraApk : mExtraApks) {
+ extraIdsigs.add(ParcelFileDescriptor.open(extraApk.idsig, MODE_READ_ONLY));
+ }
+ appConfig.extraIdsigs = extraIdsigs;
android.system.virtualizationservice.VirtualMachineConfig vmConfigParcel =
android.system.virtualizationservice.VirtualMachineConfig.appConfig(appConfig);
- mVirtualMachine = service.startVm(vmConfigParcel, mConsoleWriter);
+ // The VM should only be observed to die once
+ AtomicBoolean onDiedCalled = new AtomicBoolean(false);
+
+ IBinder.DeathRecipient deathRecipient = new IBinder.DeathRecipient() {
+ @Override
+ public void binderDied() {
+ if (onDiedCalled.compareAndSet(false, true)) {
+ executeCallback((cb) -> cb.onDied(VirtualMachine.this,
+ VirtualMachineCallback.DEATH_REASON_VIRTUALIZATIONSERVICE_DIED));
+ }
+ }
+ };
+
+ mVirtualMachine = service.createVm(vmConfigParcel, mConsoleWriter, mLogWriter);
mVirtualMachine.registerCallback(
new IVirtualMachineCallback.Stub() {
@Override
public void onPayloadStarted(int cid, ParcelFileDescriptor stream) {
- final VirtualMachineCallback cb = mCallback;
- if (cb == null) {
- return;
- }
- cb.onPayloadStarted(VirtualMachine.this, stream);
+ executeCallback(
+ (cb) -> cb.onPayloadStarted(VirtualMachine.this, stream));
}
-
@Override
- public void onDied(int cid) {
- final VirtualMachineCallback cb = mCallback;
- if (cb == null) {
- return;
- }
- cb.onDied(VirtualMachine.this);
+ public void onPayloadReady(int cid) {
+ executeCallback((cb) -> cb.onPayloadReady(VirtualMachine.this));
}
- });
- service.asBinder()
- .linkToDeath(
- new IBinder.DeathRecipient() {
- @Override
- public void binderDied() {
- final VirtualMachineCallback cb = mCallback;
- if (cb != null) {
- cb.onDied(VirtualMachine.this);
- }
- }
- },
- 0);
-
+ @Override
+ public void onPayloadFinished(int cid, int exitCode) {
+ executeCallback(
+ (cb) -> cb.onPayloadFinished(VirtualMachine.this, exitCode));
+ }
+ @Override
+ public void onError(int cid, int errorCode, String message) {
+ executeCallback(
+ (cb) -> cb.onError(VirtualMachine.this, errorCode, message));
+ }
+ @Override
+ public void onDied(int cid, int reason) {
+ service.asBinder().unlinkToDeath(deathRecipient, 0);
+ if (onDiedCalled.compareAndSet(false, true)) {
+ executeCallback((cb) -> cb.onDied(VirtualMachine.this, reason));
+ }
+ }
+ }
+ );
+ service.asBinder().linkToDeath(deathRecipient, 0);
+ mVirtualMachine.start();
} catch (IOException e) {
throw new VirtualMachineException(e);
} catch (RemoteException e) {
@@ -314,6 +463,14 @@
return new FileInputStream(mConsoleReader.getFileDescriptor());
}
+ /** Returns the stream object representing the log output from the virtual machine. */
+ public @NonNull InputStream getLogOutputStream() throws VirtualMachineException {
+ if (mLogReader == null) {
+ throw new VirtualMachineException("Log output not available");
+ }
+ return new FileInputStream(mLogReader.getFileDescriptor());
+ }
+
/**
* Stops this virtual machine. Stopping a virtual machine is like pulling the plug on a real
* computer; the machine halts immediately. Software running on the virtual machine is not
@@ -336,8 +493,12 @@
throw new VirtualMachineException("Virtual machine is not stopped");
}
final File vmRootDir = mConfigFilePath.getParentFile();
+ for (ExtraApkSpec extraApks : mExtraApks) {
+ extraApks.idsig.delete();
+ }
mConfigFilePath.delete();
mInstanceFilePath.delete();
+ mIdsigFilePath.delete();
vmRootDir.delete();
}
@@ -387,6 +548,25 @@
return oldConfig;
}
+ private static native IBinder nativeConnectToVsockServer(IBinder vmBinder, int port);
+
+ /**
+ * Connects to a VM's RPC server via vsock, and returns a root IBinder object. Guest VMs are
+ * expected to set up vsock servers in their payload. After the host app receives onPayloadReady
+ * callback, the host app can use this method to establish an RPC session to the guest VMs.
+ *
+ * <p>If the connection succeeds, the root IBinder object will be returned via {@link
+ * VirtualMachineCallback.onVsockServerReady()}. If the connection fails, {@link
+ * VirtualMachineCallback.onVsockServerConnectionFailed()} will be called.
+ */
+ public Future<IBinder> connectToVsockServer(int port) throws VirtualMachineException {
+ if (getStatus() != Status.RUNNING) {
+ throw new VirtualMachineException("VM is not running");
+ }
+ return mExecutorService.submit(
+ () -> nativeConnectToVsockServer(mVirtualMachine.asBinder(), port));
+ }
+
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
@@ -397,4 +577,76 @@
sb.append(")");
return sb.toString();
}
+
+ private static List<String> parseExtraApkListFromPayloadConfig(JsonReader reader)
+ throws VirtualMachineException {
+ /**
+ * JSON schema from packages/modules/Virtualization/microdroid/payload/config/src/lib.rs:
+ *
+ * <p>{ "extra_apks": [ { "path": "/system/app/foo.apk", }, ... ], ... }
+ */
+ try {
+ List<String> apks = new ArrayList<>();
+
+ reader.beginObject();
+ while (reader.hasNext()) {
+ if (reader.nextName().equals("extra_apks")) {
+ reader.beginArray();
+ while (reader.hasNext()) {
+ reader.beginObject();
+ String name = reader.nextName();
+ if (name.equals("path")) {
+ apks.add(reader.nextString());
+ } else {
+ reader.skipValue();
+ }
+ reader.endObject();
+ }
+ reader.endArray();
+ } else {
+ reader.skipValue();
+ }
+ }
+ reader.endObject();
+ return apks;
+ } catch (IOException e) {
+ throw new VirtualMachineException(e);
+ }
+ }
+
+ /**
+ * Reads the payload config inside the application, parses extra APK information, and then
+ * creates corresponding idsig file paths.
+ */
+ private static List<ExtraApkSpec> setupExtraApks(
+ @NonNull Context context, @NonNull VirtualMachineConfig config, @NonNull File vmDir)
+ throws VirtualMachineException {
+ try {
+ ZipFile zipFile = new ZipFile(context.getPackageCodePath());
+ String payloadPath = config.getPayloadConfigPath();
+ InputStream inputStream =
+ zipFile.getInputStream(zipFile.getEntry(config.getPayloadConfigPath()));
+ List<String> apkList =
+ parseExtraApkListFromPayloadConfig(
+ new JsonReader(new InputStreamReader(inputStream)));
+
+ List<ExtraApkSpec> extraApks = new ArrayList<>();
+ for (int i = 0; i < apkList.size(); ++i) {
+ extraApks.add(
+ new ExtraApkSpec(
+ new File(apkList.get(i)),
+ new File(vmDir, EXTRA_IDSIG_FILE_PREFIX + i)));
+ }
+
+ return extraApks;
+ } catch (IOException e) {
+ throw new VirtualMachineException("Couldn't parse extra apks from the vm config", e);
+ }
+ }
+
+ private static File getConfigFilePath(@NonNull Context context, @NonNull String name) {
+ final File vmRoot = new File(context.getFilesDir(), VM_DIR);
+ final File thisVmDir = new File(vmRoot, name);
+ return new File(thisVmDir, CONFIG_FILE);
+ }
}
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineCallback.java b/javalib/src/android/system/virtualmachine/VirtualMachineCallback.java
index 07af4a1..a49c9be 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineCallback.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineCallback.java
@@ -16,9 +16,14 @@
package android.system.virtualmachine;
+import android.annotation.IntDef;
import android.annotation.NonNull;
+import android.annotation.Nullable;
import android.os.ParcelFileDescriptor;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
/**
* Callback interface to get notified with the events from the virtual machine. The methods are
* executed on a binder thread. Implementations can make blocking calls in the methods.
@@ -26,10 +31,84 @@
* @hide
*/
public interface VirtualMachineCallback {
+ /** @hide */
+ @Retention(RetentionPolicy.SOURCE)
+ @IntDef({
+ ERROR_UNKNOWN,
+ ERROR_PAYLOAD_VERIFICATION_FAILED,
+ ERROR_PAYLOAD_CHANGED,
+ ERROR_PAYLOAD_INVALID_CONFIG
+ })
+ @interface ErrorCode {}
+
+ /** Error code for all other errors not listed below. */
+ int ERROR_UNKNOWN = 0;
+
+ /**
+ * Error code indicating that the payload can't be verified due to various reasons (e.g invalid
+ * merkle tree, invalid formats, etc).
+ */
+ int ERROR_PAYLOAD_VERIFICATION_FAILED = 1;
+
+ /** Error code indicating that the payload is verified, but has changed since the last boot. */
+ int ERROR_PAYLOAD_CHANGED = 2;
+
+ /** Error code indicating that the payload config is invalid. */
+ int ERROR_PAYLOAD_INVALID_CONFIG = 3;
+
+ /** @hide */
+ @Retention(RetentionPolicy.SOURCE)
+ @IntDef({
+ DEATH_REASON_VIRTUALIZATIONSERVICE_DIED,
+ DEATH_REASON_INFRASTRUCTURE_ERROR,
+ DEATH_REASON_KILLED,
+ DEATH_REASON_UNKNOWN,
+ DEATH_REASON_SHUTDOWN,
+ DEATH_REASON_ERROR,
+ DEATH_REASON_REBOOT,
+ DEATH_REASON_CRASH
+ })
+ @interface DeathReason {}
+
+ /**
+ * virtualizationservice itself died, taking the VM down with it. This is a negative number to
+ * avoid conflicting with the other death reasons which match the ones in the AIDL interface.
+ */
+ int DEATH_REASON_VIRTUALIZATIONSERVICE_DIED = -1;
+
+ /** There was an error waiting for the VM. */
+ int DEATH_REASON_INFRASTRUCTURE_ERROR = 0;
+
+ /** The VM was killed. */
+ int DEATH_REASON_KILLED = 1;
+
+ /** The VM died for an unknown reason. */
+ int DEATH_REASON_UNKNOWN = 2;
+
+ /** The VM requested to shut down. */
+ int DEATH_REASON_SHUTDOWN = 3;
+
+ /** crosvm had an error starting the VM. */
+ int DEATH_REASON_ERROR = 4;
+
+ /** The VM requested to reboot, possibly as the result of a kernel panic. */
+ int DEATH_REASON_REBOOT = 5;
+
+ /** The VM or crosvm crashed. */
+ int DEATH_REASON_CRASH = 6;
/** Called when the payload starts in the VM. */
- void onPayloadStarted(@NonNull VirtualMachine vm, @NonNull ParcelFileDescriptor stdout);
+ void onPayloadStarted(@NonNull VirtualMachine vm, @Nullable ParcelFileDescriptor stream);
+
+ /** Called when the payload in the VM is ready to serve. */
+ void onPayloadReady(@NonNull VirtualMachine vm);
+
+ /** Called when the payload has finished in the VM. */
+ void onPayloadFinished(@NonNull VirtualMachine vm, int exitCode);
+
+ /** Called when an error occurs in the VM. */
+ void onError(@NonNull VirtualMachine vm, @ErrorCode int errorCode, @NonNull String message);
/** Called when the VM died. */
- void onDied(@NonNull VirtualMachine vm);
+ void onDied(@NonNull VirtualMachine vm, @DeathReason int reason);
}
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
index 21e1a46..3a2d581 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
@@ -24,6 +24,7 @@
import android.content.pm.Signature; // This actually is certificate!
import android.os.ParcelFileDescriptor;
import android.os.PersistableBundle;
+import android.sysprop.HypervisorProperties;
import android.system.virtualizationservice.VirtualMachineAppConfig;
import java.io.File;
@@ -34,6 +35,7 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import java.util.regex.Pattern;
/**
* Represents a configuration of a virtual machine. A configuration consists of hardware
@@ -48,15 +50,61 @@
private static final String KEY_VERSION = "version";
private static final String KEY_CERTS = "certs";
private static final String KEY_APKPATH = "apkPath";
- private static final String KEY_IDSIGPATH = "idsigPath";
private static final String KEY_PAYLOADCONFIGPATH = "payloadConfigPath";
- private static final String KEY_DEBUGMODE = "debugMode";
+ private static final String KEY_DEBUGLEVEL = "debugLevel";
+ private static final String KEY_PROTECTED_VM = "protectedVm";
+ private static final String KEY_MEMORY_MIB = "memoryMib";
+ private static final String KEY_NUM_CPUS = "numCpus";
+ private static final String KEY_CPU_AFFINITY = "cpuAffinity";
- // Paths to the APK and its idsig file of this application.
+ // Paths to the APK file of this application.
private final @NonNull String mApkPath;
private final @NonNull Signature[] mCerts;
- private final @NonNull String mIdsigPath;
- private final boolean mDebugMode;
+
+ /** A debug level defines the set of debug features that the VM can be configured to. */
+ public enum DebugLevel {
+ /**
+ * Not debuggable at all. No log is exported from the VM. Debugger can't be attached to the
+ * app process running in the VM. This is the default level.
+ */
+ NONE,
+
+ /**
+ * Only the app is debuggable. Log from the app is exported from the VM. Debugger can be
+ * attached to the app process. Rest of the VM is not debuggable.
+ */
+ APP_ONLY,
+
+ /**
+ * Fully debuggable. All logs (both logcat and kernel message) are exported. All processes
+ * running in the VM can be attached to the debugger. Rooting is possible.
+ */
+ FULL,
+ }
+
+ private final DebugLevel mDebugLevel;
+
+ /**
+ * Whether to run the VM in protected mode, so the host can't access its memory.
+ */
+ private final boolean mProtectedVm;
+
+ /**
+ * The amount of RAM to give the VM, in MiB. If this is 0 or negative the default will be used.
+ */
+ private final int mMemoryMib;
+
+ /**
+ * Number of vCPUs in the VM. Defaults to 1 when not specified.
+ */
+ private final int mNumCpus;
+
+ /**
+ * Comma-separated list of CPUs or CPU ranges to run vCPUs on (e.g. 0,1-3,5), or
+ * colon-separated list of assignments of vCPU to host CPU assignments (e.g. 0=0:1=1:2=2).
+ * Default is no mask which means a vCPU can run on any host CPU.
+ */
+ private final String mCpuAffinity;
/**
* Path within the APK to the payload config file that defines software aspects of this config.
@@ -68,14 +116,20 @@
private VirtualMachineConfig(
@NonNull String apkPath,
@NonNull Signature[] certs,
- @NonNull String idsigPath,
@NonNull String payloadConfigPath,
- boolean debugMode) {
+ DebugLevel debugLevel,
+ boolean protectedVm,
+ int memoryMib,
+ int numCpus,
+ String cpuAffinity) {
mApkPath = apkPath;
mCerts = certs;
- mIdsigPath = idsigPath;
mPayloadConfigPath = payloadConfigPath;
- mDebugMode = debugMode;
+ mDebugLevel = debugLevel;
+ mProtectedVm = protectedVm;
+ mMemoryMib = memoryMib;
+ mNumCpus = numCpus;
+ mCpuAffinity = cpuAffinity;
}
/** Loads a config from a stream, for example a file. */
@@ -99,16 +153,17 @@
certList.add(new Signature(s));
}
Signature[] certs = certList.toArray(new Signature[0]);
- final String idsigPath = b.getString(KEY_IDSIGPATH);
- if (idsigPath == null) {
- throw new VirtualMachineException("No idsigPath");
- }
final String payloadConfigPath = b.getString(KEY_PAYLOADCONFIGPATH);
if (payloadConfigPath == null) {
throw new VirtualMachineException("No payloadConfigPath");
}
- final boolean debugMode = b.getBoolean(KEY_DEBUGMODE);
- return new VirtualMachineConfig(apkPath, certs, idsigPath, payloadConfigPath, debugMode);
+ final DebugLevel debugLevel = DebugLevel.values()[b.getInt(KEY_DEBUGLEVEL)];
+ final boolean protectedVm = b.getBoolean(KEY_PROTECTED_VM);
+ final int memoryMib = b.getInt(KEY_MEMORY_MIB);
+ final int numCpus = b.getInt(KEY_NUM_CPUS);
+ final String cpuAffinity = b.getString(KEY_CPU_AFFINITY);
+ return new VirtualMachineConfig(apkPath, certs, payloadConfigPath, debugLevel, protectedVm,
+ memoryMib, numCpus, cpuAffinity);
}
/** Persists this config to a stream, for example a file. */
@@ -122,9 +177,13 @@
}
String[] certs = certList.toArray(new String[0]);
b.putStringArray(KEY_CERTS, certs);
- b.putString(KEY_IDSIGPATH, mIdsigPath);
b.putString(KEY_PAYLOADCONFIGPATH, mPayloadConfigPath);
- b.putBoolean(KEY_DEBUGMODE, mDebugMode);
+ b.putInt(KEY_DEBUGLEVEL, mDebugLevel.ordinal());
+ b.putBoolean(KEY_PROTECTED_VM, mProtectedVm);
+ b.putInt(KEY_NUM_CPUS, mNumCpus);
+ if (mMemoryMib > 0) {
+ b.putInt(KEY_MEMORY_MIB, mMemoryMib);
+ }
b.writeToStream(output);
}
@@ -144,7 +203,11 @@
if (!Arrays.equals(this.mCerts, other.mCerts)) {
return false;
}
- if (this.mDebugMode != other.mDebugMode) {
+ if (this.mDebugLevel != other.mDebugLevel) {
+ // TODO(jiyong): should we treat APP_ONLY and FULL the same?
+ return false;
+ }
+ if (this.mProtectedVm != other.mProtectedVm) {
return false;
}
return true;
@@ -159,9 +222,22 @@
/* package */ VirtualMachineAppConfig toParcel() throws FileNotFoundException {
VirtualMachineAppConfig parcel = new VirtualMachineAppConfig();
parcel.apk = ParcelFileDescriptor.open(new File(mApkPath), MODE_READ_ONLY);
- parcel.idsig = ParcelFileDescriptor.open(new File(mIdsigPath), MODE_READ_ONLY);
parcel.configPath = mPayloadConfigPath;
- parcel.debug = mDebugMode;
+ switch (mDebugLevel) {
+ case NONE:
+ parcel.debugLevel = VirtualMachineAppConfig.DebugLevel.NONE;
+ break;
+ case APP_ONLY:
+ parcel.debugLevel = VirtualMachineAppConfig.DebugLevel.APP_ONLY;
+ break;
+ case FULL:
+ parcel.debugLevel = VirtualMachineAppConfig.DebugLevel.FULL;
+ break;
+ }
+ parcel.protectedVm = mProtectedVm;
+ parcel.memoryMib = mMemoryMib;
+ parcel.numCpus = mNumCpus;
+ parcel.cpuAffinity = mCpuAffinity;
return parcel;
}
@@ -169,28 +245,59 @@
public static class Builder {
private Context mContext;
private String mPayloadConfigPath;
- private boolean mDebugMode;
- private String mIdsigPath; // TODO(jiyong): remove this
- // TODO(jiyong): add more items like # of cpu, size of ram, debuggability, etc.
+ private DebugLevel mDebugLevel;
+ private boolean mProtectedVm;
+ private int mMemoryMib;
+ private int mNumCpus;
+ private String mCpuAffinity;
/** Creates a builder for the given context (APK), and the payload config file in APK. */
public Builder(@NonNull Context context, @NonNull String payloadConfigPath) {
mContext = context;
mPayloadConfigPath = payloadConfigPath;
- mDebugMode = false;
+ mDebugLevel = DebugLevel.NONE;
+ mProtectedVm = false;
+ mNumCpus = 1;
+ mCpuAffinity = null;
}
- /** Enables or disables the debug mode */
- public Builder debugMode(boolean enableOrDisable) {
- mDebugMode = enableOrDisable;
+ /** Sets the debug level */
+ public Builder debugLevel(DebugLevel debugLevel) {
+ mDebugLevel = debugLevel;
return this;
}
- // TODO(jiyong): remove this. Apps shouldn't need to set the path to the idsig file. It
- // should be automatically found or created on demand.
- /** Set the path to the idsig file for the current application. */
- public Builder idsigPath(@NonNull String idsigPath) {
- mIdsigPath = idsigPath;
+ /** Sets whether to protect the VM memory from the host. Defaults to false. */
+ public Builder protectedVm(boolean protectedVm) {
+ mProtectedVm = protectedVm;
+ return this;
+ }
+
+ /**
+ * Sets the amount of RAM to give the VM. If this is zero or negative then the default will
+ * be used.
+ */
+ public Builder memoryMib(int memoryMib) {
+ mMemoryMib = memoryMib;
+ return this;
+ }
+
+ /**
+ * Sets the number of vCPUs in the VM. Defaults to 1.
+ */
+ public Builder numCpus(int num) {
+ mNumCpus = num;
+ return this;
+ }
+
+ /**
+ * Sets on which host CPUs the vCPUs can run. The format is a comma-separated list of CPUs
+ * or CPU ranges to run vCPUs on. e.g. "0,1-3,5" to choose host CPUs 0, 1, 2, 3, and 5.
+ * Or this can be a colon-separated list of assignments of vCPU to host CPU assignments.
+ * e.g. "0=0:1=1:2=2" to map vCPU 0 to host CPU 0, and so on.
+ */
+ public Builder cpuAffinity(String affinity) {
+ mCpuAffinity = affinity;
return this;
}
@@ -211,8 +318,32 @@
throw new RuntimeException(e);
}
+ final int availableCpus = Runtime.getRuntime().availableProcessors();
+ if (mNumCpus < 1 || mNumCpus > availableCpus) {
+ throw new IllegalArgumentException("Number of vCPUs (" + mNumCpus + ") is out of "
+ + "range [1, " + availableCpus + "]");
+ }
+
+ if (mCpuAffinity != null
+ && !Pattern.matches("[\\d]+(-[\\d]+)?(,[\\d]+(-[\\d]+)?)*", mCpuAffinity)
+ && !Pattern.matches("[\\d]+=[\\d]+(:[\\d]+=[\\d]+)*", mCpuAffinity)) {
+ throw new IllegalArgumentException("CPU affinity [" + mCpuAffinity + "]"
+ + " is invalid");
+ }
+
+ if (mProtectedVm
+ && !HypervisorProperties.hypervisor_protected_vm_supported().orElse(false)) {
+ throw new UnsupportedOperationException(
+ "Protected VMs are not supported on this device.");
+ }
+ if (!mProtectedVm && !HypervisorProperties.hypervisor_vm_supported().orElse(false)) {
+ throw new UnsupportedOperationException(
+ "Unprotected VMs are not supported on this device.");
+ }
+
return new VirtualMachineConfig(
- apkPath, certs, mIdsigPath, mPayloadConfigPath, mDebugMode);
+ apkPath, certs, mPayloadConfigPath, mDebugLevel, mProtectedVm, mMemoryMib,
+ mNumCpus, mCpuAffinity);
}
}
}
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineManager.java b/javalib/src/android/system/virtualmachine/VirtualMachineManager.java
index 3654886..51fa51f 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineManager.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineManager.java
@@ -17,6 +17,7 @@
package android.system.virtualmachine;
import android.annotation.NonNull;
+import android.annotation.Nullable;
import android.content.Context;
import java.lang.ref.WeakReference;
@@ -72,7 +73,7 @@
* Returns an existing {@link VirtualMachine} with the given name. Returns null if there is no
* such virtual machine.
*/
- public @NonNull VirtualMachine get(@NonNull String name) throws VirtualMachineException {
+ public @Nullable VirtualMachine get(@NonNull String name) throws VirtualMachineException {
return VirtualMachine.load(mContext, name);
}
diff --git a/libs/apkverify/Android.bp b/libs/apkverify/Android.bp
new file mode 100644
index 0000000..df1cac6
--- /dev/null
+++ b/libs/apkverify/Android.bp
@@ -0,0 +1,45 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+ name: "libapkverify.defaults",
+ crate_name: "apkverify",
+ srcs: ["src/lib.rs"],
+ prefer_rlib: true,
+ edition: "2018",
+ rustlibs: [
+ "libanyhow",
+ "libbyteorder",
+ "libbytes",
+ "liblog_rust",
+ "libring",
+ "libx509_parser",
+ "libzip",
+ ],
+}
+
+rust_library {
+ name: "libapkverify",
+ defaults: ["libapkverify.defaults"],
+}
+
+rust_test {
+ name: "libapkverify.test",
+ defaults: ["libapkverify.defaults"],
+ test_suites: ["general-tests"],
+}
+
+rust_test {
+ name: "libapkverify.integration_test",
+ crate_name: "apkverify_test",
+ srcs: ["tests/*_test.rs"],
+ prefer_rlib: true,
+ edition: "2018",
+ test_suites: ["general-tests"],
+ rustlibs: [
+ "libapkverify",
+ "libzip",
+ ],
+ data: ["tests/data/*"],
+}
diff --git a/libs/apkverify/TEST_MAPPING b/libs/apkverify/TEST_MAPPING
new file mode 100644
index 0000000..9248716
--- /dev/null
+++ b/libs/apkverify/TEST_MAPPING
@@ -0,0 +1,10 @@
+{
+ "presubmit" : [
+ {
+ "name" : "libapkverify.test"
+ },
+ {
+ "name" : "libapkverify.integration_test"
+ }
+ ]
+}
diff --git a/libs/apkverify/src/bytes_ext.rs b/libs/apkverify/src/bytes_ext.rs
new file mode 100644
index 0000000..22a3085
--- /dev/null
+++ b/libs/apkverify/src/bytes_ext.rs
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Provides extension methods Bytes::read<T>(), which calls back ReadFromBytes::read_from_byte()
+
+use anyhow::{bail, Result};
+use bytes::{Buf, Bytes};
+use std::ops::Deref;
+
+#[derive(Clone, Debug)]
+pub struct LengthPrefixed<T> {
+ inner: T,
+}
+
+impl<T> Deref for LengthPrefixed<T> {
+ type Target = T;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+pub trait BytesExt {
+ fn read<T: ReadFromBytes>(&mut self) -> Result<T>;
+}
+
+impl BytesExt for Bytes {
+ fn read<T: ReadFromBytes>(&mut self) -> Result<T> {
+ T::read_from_bytes(self)
+ }
+}
+
+pub trait ReadFromBytes {
+ fn read_from_bytes(buf: &mut Bytes) -> Result<Self>
+ where
+ Self: Sized;
+}
+
+impl ReadFromBytes for u32 {
+ fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+ Ok(buf.get_u32_le())
+ }
+}
+
+impl<T: ReadFromBytes> ReadFromBytes for Vec<T> {
+ fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+ let mut result = vec![];
+ while buf.has_remaining() {
+ result.push(buf.read()?);
+ }
+ Ok(result)
+ }
+}
+
+impl<T: ReadFromBytes> ReadFromBytes for LengthPrefixed<T> {
+ fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+ let mut inner = read_length_prefixed_slice(buf)?;
+ let inner = inner.read()?;
+ Ok(LengthPrefixed { inner })
+ }
+}
+
+impl ReadFromBytes for Bytes {
+ fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+ Ok(buf.slice(..))
+ }
+}
+
+fn read_length_prefixed_slice(buf: &mut Bytes) -> Result<Bytes> {
+ if buf.remaining() < 4 {
+ bail!(
+ "Remaining buffer too short to contain length of length-prefixed field. Remaining: {}",
+ buf.remaining()
+ );
+ }
+ let len = buf.get_u32_le() as usize;
+ if len > buf.remaining() {
+ bail!(
+ "length-prefixed field longer than remaining buffer. Field length: {}, remaining: {}",
+ len,
+ buf.remaining()
+ );
+ }
+ Ok(buf.split_to(len))
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use bytes::{BufMut, BytesMut};
+
+ #[test]
+ fn test_read_length_prefixed_slice() {
+ let data = b"hello world";
+ let mut b = BytesMut::new();
+ b.put_u32_le(data.len() as u32);
+ b.put_slice(data);
+ let mut slice = b.freeze();
+ let res = read_length_prefixed_slice(&mut slice);
+ assert!(res.is_ok());
+ assert_eq!(data, res.ok().unwrap().as_ref());
+ }
+}
diff --git a/libs/apkverify/src/lib.rs b/libs/apkverify/src/lib.rs
new file mode 100644
index 0000000..71ea857
--- /dev/null
+++ b/libs/apkverify/src/lib.rs
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Verifies APK/APEX signing with v2/v3 scheme
+
+mod bytes_ext;
+mod sigutil;
+#[allow(dead_code)]
+pub mod testing;
+mod v3;
+mod ziputil;
+
+use anyhow::Result;
+use std::path::Path;
+
+/// Verifies APK/APEX signing with v2/v3 scheme. On success, the public key (in DER format) is
+/// returned.
+pub fn verify<P: AsRef<Path>>(path: P) -> Result<Box<[u8]>> {
+ // TODO(jooyung) fallback to v2 when v3 not found
+ v3::verify(path)
+}
+
+/// Gets the public key (in DER format) that was used to sign the given APK/APEX file
+pub fn get_public_key_der<P: AsRef<Path>>(path: P) -> Result<Box<[u8]>> {
+ v3::get_public_key_der(path)
+}
diff --git a/libs/apkverify/src/sigutil.rs b/libs/apkverify/src/sigutil.rs
new file mode 100644
index 0000000..23dd91e
--- /dev/null
+++ b/libs/apkverify/src/sigutil.rs
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Utilities for Signature Verification
+
+use anyhow::{anyhow, bail, Result};
+use byteorder::{LittleEndian, ReadBytesExt};
+use bytes::{Buf, BufMut, Bytes, BytesMut};
+use ring::digest;
+use std::cmp::min;
+use std::io::{Cursor, Read, Seek, SeekFrom, Take};
+
+use crate::ziputil::{set_central_directory_offset, zip_sections};
+
+const APK_SIG_BLOCK_MIN_SIZE: u32 = 32;
+const APK_SIG_BLOCK_MAGIC: u128 = 0x3234206b636f6c4220676953204b5041;
+
+// TODO(jooyung): introduce type
+pub const SIGNATURE_RSA_PSS_WITH_SHA256: u32 = 0x0101;
+pub const SIGNATURE_RSA_PSS_WITH_SHA512: u32 = 0x0102;
+pub const SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256: u32 = 0x0103;
+pub const SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512: u32 = 0x0104;
+pub const SIGNATURE_ECDSA_WITH_SHA256: u32 = 0x0201;
+pub const SIGNATURE_ECDSA_WITH_SHA512: u32 = 0x0202;
+pub const SIGNATURE_DSA_WITH_SHA256: u32 = 0x0301;
+pub const SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256: u32 = 0x0421;
+pub const SIGNATURE_VERITY_ECDSA_WITH_SHA256: u32 = 0x0423;
+pub const SIGNATURE_VERITY_DSA_WITH_SHA256: u32 = 0x0425;
+
+// TODO(jooyung): introduce type
+const CONTENT_DIGEST_CHUNKED_SHA256: u32 = 1;
+const CONTENT_DIGEST_CHUNKED_SHA512: u32 = 2;
+const CONTENT_DIGEST_VERITY_CHUNKED_SHA256: u32 = 3;
+#[allow(unused)]
+const CONTENT_DIGEST_SHA256: u32 = 4;
+
+const CHUNK_SIZE_BYTES: u64 = 1024 * 1024;
+
+pub struct ApkSections<R> {
+ inner: R,
+ signing_block_offset: u32,
+ signing_block_size: u32,
+ central_directory_offset: u32,
+ central_directory_size: u32,
+ eocd_offset: u32,
+ eocd_size: u32,
+}
+
+impl<R: Read + Seek> ApkSections<R> {
+ pub fn new(reader: R) -> Result<ApkSections<R>> {
+ let (mut reader, zip_sections) = zip_sections(reader)?;
+ let (signing_block_offset, signing_block_size) =
+ find_signing_block(&mut reader, zip_sections.central_directory_offset)?;
+ Ok(ApkSections {
+ inner: reader,
+ signing_block_offset,
+ signing_block_size,
+ central_directory_offset: zip_sections.central_directory_offset,
+ central_directory_size: zip_sections.central_directory_size,
+ eocd_offset: zip_sections.eocd_offset,
+ eocd_size: zip_sections.eocd_size,
+ })
+ }
+
+ /// Returns the APK Signature Scheme block contained in the provided file for the given ID
+ /// and the additional information relevant for verifying the block against the file.
+ pub fn find_signature(&mut self, block_id: u32) -> Result<Bytes> {
+ let signing_block = self.bytes(self.signing_block_offset, self.signing_block_size)?;
+ // TODO(jooyung): propagate NotFound error so that verification can fallback to V2
+ find_signature_scheme_block(Bytes::from(signing_block), block_id)
+ }
+
+ /// Computes digest with "signature algorithm" over APK contents, central directory, and EOCD.
+ /// 1. The digest of each chunk is computed over the concatenation of byte 0xa5, the chunk’s
+ /// length in bytes (little-endian uint32), and the chunk’s contents.
+ /// 2. The top-level digest is computed over the concatenation of byte 0x5a, the number of
+ /// chunks (little-endian uint32), and the concatenation of digests of the chunks in the
+ /// order the chunks appear in the APK.
+ /// (see https://source.android.com/security/apksigning/v2#integrity-protected-contents)
+ pub fn compute_digest(&mut self, signature_algorithm_id: u32) -> Result<Vec<u8>> {
+ let digester = Digester::new(signature_algorithm_id)?;
+
+ let mut digests_of_chunks = BytesMut::new();
+ let mut chunk_count = 0u32;
+ let mut chunk = vec![0u8; CHUNK_SIZE_BYTES as usize];
+ for data in &[
+ ApkSections::zip_entries,
+ ApkSections::central_directory,
+ ApkSections::eocd_for_verification,
+ ] {
+ let mut data = data(self)?;
+ while data.limit() > 0 {
+ let chunk_size = min(CHUNK_SIZE_BYTES, data.limit());
+ let slice = &mut chunk[..(chunk_size as usize)];
+ data.read_exact(slice)?;
+ digests_of_chunks.put_slice(
+ digester.digest(slice, CHUNK_HEADER_MID, chunk_size as u32).as_ref(),
+ );
+ chunk_count += 1;
+ }
+ }
+ Ok(digester.digest(&digests_of_chunks, CHUNK_HEADER_TOP, chunk_count).as_ref().into())
+ }
+
+ fn zip_entries(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
+ scoped_read(&mut self.inner, 0, self.signing_block_offset as u64)
+ }
+
+ fn central_directory(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
+ scoped_read(
+ &mut self.inner,
+ self.central_directory_offset as u64,
+ self.central_directory_size as u64,
+ )
+ }
+
+ fn eocd_for_verification(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
+ let mut eocd = self.bytes(self.eocd_offset, self.eocd_size)?;
+ // Protection of section 4 (ZIP End of Central Directory) is complicated by the section
+ // containing the offset of ZIP Central Directory. The offset changes when the size of the
+ // APK Signing Block changes, for instance, when a new signature is added. Thus, when
+ // computing digest over the ZIP End of Central Directory, the field containing the offset
+ // of ZIP Central Directory must be treated as containing the offset of the APK Signing
+ // Block.
+ set_central_directory_offset(&mut eocd, self.signing_block_offset)?;
+ Ok(Read::take(Box::new(Cursor::new(eocd)), self.eocd_size as u64))
+ }
+
+ fn bytes(&mut self, offset: u32, size: u32) -> Result<Vec<u8>> {
+ self.inner.seek(SeekFrom::Start(offset as u64))?;
+ let mut buf = vec![0u8; size as usize];
+ self.inner.read_exact(&mut buf)?;
+ Ok(buf)
+ }
+}
+
+fn scoped_read<'a, R: Read + Seek>(
+ src: &'a mut R,
+ offset: u64,
+ size: u64,
+) -> Result<Take<Box<dyn Read + 'a>>> {
+ src.seek(SeekFrom::Start(offset))?;
+ Ok(Read::take(Box::new(src), size))
+}
+
+struct Digester {
+ algorithm: &'static digest::Algorithm,
+}
+
+const CHUNK_HEADER_TOP: &[u8] = &[0x5a];
+const CHUNK_HEADER_MID: &[u8] = &[0xa5];
+
+impl Digester {
+ fn new(signature_algorithm_id: u32) -> Result<Digester> {
+ let digest_algorithm_id = to_content_digest_algorithm(signature_algorithm_id)?;
+ let algorithm = match digest_algorithm_id {
+ CONTENT_DIGEST_CHUNKED_SHA256 => &digest::SHA256,
+ CONTENT_DIGEST_CHUNKED_SHA512 => &digest::SHA512,
+ // TODO(jooyung): implement
+ CONTENT_DIGEST_VERITY_CHUNKED_SHA256 => {
+ bail!("TODO(b/190343842): CONTENT_DIGEST_VERITY_CHUNKED_SHA256: not implemented")
+ }
+ _ => bail!("Unknown digest algorithm: {}", digest_algorithm_id),
+ };
+ Ok(Digester { algorithm })
+ }
+
+ // v2/v3 digests are computed after prepending "header" byte and "size" info.
+ fn digest(&self, data: &[u8], header: &[u8], size: u32) -> digest::Digest {
+ let mut ctx = digest::Context::new(self.algorithm);
+ ctx.update(header);
+ ctx.update(&size.to_le_bytes());
+ ctx.update(data);
+ ctx.finish()
+ }
+}
+
+fn find_signing_block<T: Read + Seek>(
+ reader: &mut T,
+ central_directory_offset: u32,
+) -> Result<(u32, u32)> {
+ // FORMAT:
+ // OFFSET DATA TYPE DESCRIPTION
+ // * @+0 bytes uint64: size in bytes (excluding this field)
+ // * @+8 bytes payload
+ // * @-24 bytes uint64: size in bytes (same as the one above)
+ // * @-16 bytes uint128: magic
+ if central_directory_offset < APK_SIG_BLOCK_MIN_SIZE {
+ bail!(
+ "APK too small for APK Signing Block. ZIP Central Directory offset: {}",
+ central_directory_offset
+ );
+ }
+ reader.seek(SeekFrom::Start((central_directory_offset - 24) as u64))?;
+ let size_in_footer = reader.read_u64::<LittleEndian>()? as u32;
+ if reader.read_u128::<LittleEndian>()? != APK_SIG_BLOCK_MAGIC {
+ bail!("No APK Signing Block before ZIP Central Directory")
+ }
+ let total_size = size_in_footer + 8;
+ let signing_block_offset = central_directory_offset
+ .checked_sub(total_size)
+ .ok_or_else(|| anyhow!("APK Signing Block size out of range: {}", size_in_footer))?;
+ reader.seek(SeekFrom::Start(signing_block_offset as u64))?;
+ let size_in_header = reader.read_u64::<LittleEndian>()? as u32;
+ if size_in_header != size_in_footer {
+ bail!(
+ "APK Signing Block sizes in header and footer do not match: {} vs {}",
+ size_in_header,
+ size_in_footer
+ );
+ }
+ Ok((signing_block_offset, total_size))
+}
+
+fn find_signature_scheme_block(buf: Bytes, block_id: u32) -> Result<Bytes> {
+ // FORMAT:
+ // OFFSET DATA TYPE DESCRIPTION
+ // * @+0 bytes uint64: size in bytes (excluding this field)
+ // * @+8 bytes pairs
+ // * @-24 bytes uint64: size in bytes (same as the one above)
+ // * @-16 bytes uint128: magic
+ let mut pairs = buf.slice(8..(buf.len() - 24));
+ let mut entry_count = 0;
+ while pairs.has_remaining() {
+ entry_count += 1;
+ if pairs.remaining() < 8 {
+ bail!("Insufficient data to read size of APK Signing Block entry #{}", entry_count);
+ }
+ let length = pairs.get_u64_le();
+ let mut pair = pairs.split_to(length as usize);
+ let id = pair.get_u32_le();
+ if id == block_id {
+ return Ok(pair);
+ }
+ }
+ // TODO(jooyung): return NotFound error
+ bail!("No APK Signature Scheme block in APK Signing Block with ID: {}", block_id)
+}
+
+pub fn is_supported_signature_algorithm(algorithm_id: u32) -> bool {
+ matches!(
+ algorithm_id,
+ SIGNATURE_RSA_PSS_WITH_SHA256
+ | SIGNATURE_RSA_PSS_WITH_SHA512
+ | SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256
+ | SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512
+ | SIGNATURE_ECDSA_WITH_SHA256
+ | SIGNATURE_ECDSA_WITH_SHA512
+ | SIGNATURE_DSA_WITH_SHA256
+ | SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256
+ | SIGNATURE_VERITY_ECDSA_WITH_SHA256
+ | SIGNATURE_VERITY_DSA_WITH_SHA256
+ )
+}
+
+fn to_content_digest_algorithm(algorithm_id: u32) -> Result<u32> {
+ match algorithm_id {
+ SIGNATURE_RSA_PSS_WITH_SHA256
+ | SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256
+ | SIGNATURE_ECDSA_WITH_SHA256
+ | SIGNATURE_DSA_WITH_SHA256 => Ok(CONTENT_DIGEST_CHUNKED_SHA256),
+ SIGNATURE_RSA_PSS_WITH_SHA512
+ | SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512
+ | SIGNATURE_ECDSA_WITH_SHA512 => Ok(CONTENT_DIGEST_CHUNKED_SHA512),
+ SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256
+ | SIGNATURE_VERITY_ECDSA_WITH_SHA256
+ | SIGNATURE_VERITY_DSA_WITH_SHA256 => Ok(CONTENT_DIGEST_VERITY_CHUNKED_SHA256),
+ _ => bail!("Unknown signature algorithm: {}", algorithm_id),
+ }
+}
+
+pub fn rank_signature_algorithm(algo: u32) -> Result<u32> {
+ rank_content_digest_algorithm(to_content_digest_algorithm(algo)?)
+}
+
+fn rank_content_digest_algorithm(id: u32) -> Result<u32> {
+ match id {
+ CONTENT_DIGEST_CHUNKED_SHA256 => Ok(0),
+ CONTENT_DIGEST_VERITY_CHUNKED_SHA256 => Ok(1),
+ CONTENT_DIGEST_CHUNKED_SHA512 => Ok(2),
+ _ => bail!("Unknown digest algorithm: {}", id),
+ }
+}
diff --git a/compos/src/signer.rs b/libs/apkverify/src/testing.rs
similarity index 71%
rename from compos/src/signer.rs
rename to libs/apkverify/src/testing.rs
index 9ff1477..301a9c3 100644
--- a/compos/src/signer.rs
+++ b/libs/apkverify/src/testing.rs
@@ -14,10 +14,9 @@
* limitations under the License.
*/
-use anyhow::Result;
+//! A collection of utilities for testing
-/// Provides the ability to cryptographically sign messages.
-pub trait Signer: Send + Sync {
- /// Sign the supplied data. The result is a raw signature over the input data.
- fn sign(&self, data: &[u8]) -> Result<Vec<u8>>;
+/// Asserts that `haystack` contains `needle`.
+pub fn assert_contains(haystack: &str, needle: &str) {
+ assert!(haystack.contains(needle), "{} is not found in {}", needle, haystack);
}
diff --git a/libs/apkverify/src/v3.rs b/libs/apkverify/src/v3.rs
new file mode 100644
index 0000000..797911b
--- /dev/null
+++ b/libs/apkverify/src/v3.rs
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Verifies APK Signature Scheme V3
+
+// TODO(jooyung) remove this
+#![allow(dead_code)]
+
+use anyhow::{anyhow, bail, Context, Result};
+use bytes::Bytes;
+use ring::signature::{
+ UnparsedPublicKey, VerificationAlgorithm, ECDSA_P256_SHA256_ASN1, RSA_PKCS1_2048_8192_SHA256,
+ RSA_PKCS1_2048_8192_SHA512, RSA_PSS_2048_8192_SHA256, RSA_PSS_2048_8192_SHA512,
+};
+use std::fs::File;
+use std::io::{Read, Seek};
+use std::ops::Range;
+use std::path::Path;
+use x509_parser::{parse_x509_certificate, prelude::FromDer, x509::SubjectPublicKeyInfo};
+
+use crate::bytes_ext::{BytesExt, LengthPrefixed, ReadFromBytes};
+use crate::sigutil::*;
+
+pub const APK_SIGNATURE_SCHEME_V3_BLOCK_ID: u32 = 0xf05368c0;
+
+// TODO(jooyung): get "ro.build.version.sdk"
+const SDK_INT: u32 = 31;
+
+/// Data model for Signature Scheme V3
+/// https://source.android.com/security/apksigning/v3#verification
+
+type Signers = LengthPrefixed<Vec<LengthPrefixed<Signer>>>;
+
+struct Signer {
+ signed_data: LengthPrefixed<Bytes>, // not verified yet
+ min_sdk: u32,
+ max_sdk: u32,
+ signatures: LengthPrefixed<Vec<LengthPrefixed<Signature>>>,
+ public_key: LengthPrefixed<Bytes>,
+}
+
+impl Signer {
+ fn sdk_range(&self) -> Range<u32> {
+ self.min_sdk..self.max_sdk
+ }
+}
+
+struct SignedData {
+ digests: LengthPrefixed<Vec<LengthPrefixed<Digest>>>,
+ certificates: LengthPrefixed<Vec<LengthPrefixed<X509Certificate>>>,
+ min_sdk: u32,
+ max_sdk: u32,
+ additional_attributes: LengthPrefixed<Vec<LengthPrefixed<AdditionalAttributes>>>,
+}
+
+impl SignedData {
+ fn sdk_range(&self) -> Range<u32> {
+ self.min_sdk..self.max_sdk
+ }
+}
+
+#[derive(Debug)]
+struct Signature {
+ signature_algorithm_id: u32,
+ signature: LengthPrefixed<Bytes>,
+}
+
+struct Digest {
+ signature_algorithm_id: u32,
+ digest: LengthPrefixed<Bytes>,
+}
+
+type X509Certificate = Bytes;
+type AdditionalAttributes = Bytes;
+
+/// Verifies APK Signature Scheme v3 signatures of the provided APK and returns the public key
+/// associated with the signer.
+pub fn verify<P: AsRef<Path>>(path: P) -> Result<Box<[u8]>> {
+ let f = File::open(path.as_ref())?;
+ let mut sections = ApkSections::new(f)?;
+ find_signer_and_then(&mut sections, |(signer, sections)| signer.verify(sections))
+}
+
+/// Finds the supported signer and execute a function on it.
+fn find_signer_and_then<R, U, F>(sections: &mut ApkSections<R>, f: F) -> Result<U>
+where
+ R: Read + Seek,
+ F: FnOnce((&Signer, &mut ApkSections<R>)) -> Result<U>,
+{
+ let mut block = sections.find_signature(APK_SIGNATURE_SCHEME_V3_BLOCK_ID)?;
+ // parse v3 scheme block
+ let signers = block.read::<Signers>()?;
+
+ // find supported by platform
+ let supported = signers.iter().filter(|s| s.sdk_range().contains(&SDK_INT)).collect::<Vec<_>>();
+
+ // there should be exactly one
+ if supported.len() != 1 {
+ bail!(
+ "APK Signature Scheme V3 only supports one signer: {} signers found.",
+ supported.len()
+ )
+ }
+
+ // Call the supplied function
+ f((supported[0], sections))
+}
+
+/// Gets the public key (in DER format) that was used to sign the given APK/APEX file
+pub fn get_public_key_der<P: AsRef<Path>>(path: P) -> Result<Box<[u8]>> {
+ let f = File::open(path.as_ref())?;
+ let mut sections = ApkSections::new(f)?;
+ find_signer_and_then(&mut sections, |(signer, _)| {
+ Ok(signer.public_key.to_vec().into_boxed_slice())
+ })
+}
+
+impl Signer {
+ fn verify<R: Read + Seek>(&self, sections: &mut ApkSections<R>) -> Result<Box<[u8]>> {
+ // 1. Choose the strongest supported signature algorithm ID from signatures. The strength
+ // ordering is up to each implementation/platform version.
+ let strongest: &Signature = self
+ .signatures
+ .iter()
+ .filter(|sig| is_supported_signature_algorithm(sig.signature_algorithm_id))
+ .max_by_key(|sig| rank_signature_algorithm(sig.signature_algorithm_id).unwrap())
+ .ok_or_else(|| anyhow!("No supported signatures found"))?;
+
+ // 2. Verify the corresponding signature from signatures against signed data using public key.
+ // (It is now safe to parse signed data.)
+ let (_, key_info) = SubjectPublicKeyInfo::from_der(self.public_key.as_ref())?;
+ verify_signed_data(&self.signed_data, strongest, &key_info)?;
+
+ // It is now safe to parse signed data.
+ let signed_data: SignedData = self.signed_data.slice(..).read()?;
+
+ // 3. Verify the min and max SDK versions in the signed data match those specified for the
+ // signer.
+ if self.sdk_range() != signed_data.sdk_range() {
+ bail!("SDK versions mismatch between signed and unsigned in v3 signer block.");
+ }
+
+ // 4. Verify that the ordered list of signature algorithm IDs in digests and signatures is
+ // identical. (This is to prevent signature stripping/addition.)
+ if !self
+ .signatures
+ .iter()
+ .map(|sig| sig.signature_algorithm_id)
+ .eq(signed_data.digests.iter().map(|dig| dig.signature_algorithm_id))
+ {
+ bail!("Signature algorithms don't match between digests and signatures records");
+ }
+
+ // 5. Compute the digest of APK contents using the same digest algorithm as the digest
+ // algorithm used by the signature algorithm.
+ let digest = signed_data
+ .digests
+ .iter()
+ .find(|&dig| dig.signature_algorithm_id == strongest.signature_algorithm_id)
+ .unwrap(); // ok to unwrap since we check if two lists are the same above
+ let computed = sections.compute_digest(digest.signature_algorithm_id)?;
+
+ // 6. Verify that the computed digest is identical to the corresponding digest from digests.
+ if computed != digest.digest.as_ref() {
+ bail!(
+ "Digest mismatch: computed={:?} vs expected={:?}",
+ to_hex_string(&computed),
+ to_hex_string(&digest.digest),
+ );
+ }
+
+ // 7. Verify that SubjectPublicKeyInfo of the first certificate of certificates is identical
+ // to public key.
+ let cert = signed_data.certificates.first().context("No certificates listed")?;
+ let (_, cert) = parse_x509_certificate(cert.as_ref())?;
+ if cert.tbs_certificate.subject_pki != key_info {
+ bail!("Public key mismatch between certificate and signature record");
+ }
+
+ // TODO(jooyung) 8. If the proof-of-rotation attribute exists for the signer verify that the struct is valid and this signer is the last certificate in the list.
+ Ok(self.public_key.to_vec().into_boxed_slice())
+ }
+}
+
+fn verify_signed_data(
+ data: &Bytes,
+ signature: &Signature,
+ key_info: &SubjectPublicKeyInfo,
+) -> Result<()> {
+ let verification_alg: &dyn VerificationAlgorithm = match signature.signature_algorithm_id {
+ SIGNATURE_RSA_PSS_WITH_SHA256 => &RSA_PSS_2048_8192_SHA256,
+ SIGNATURE_RSA_PSS_WITH_SHA512 => &RSA_PSS_2048_8192_SHA512,
+ SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256 | SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256 => {
+ &RSA_PKCS1_2048_8192_SHA256
+ }
+ SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512 => &RSA_PKCS1_2048_8192_SHA512,
+ SIGNATURE_ECDSA_WITH_SHA256 | SIGNATURE_VERITY_ECDSA_WITH_SHA256 => &ECDSA_P256_SHA256_ASN1,
+ // TODO(b/190343842) not implemented signature algorithm
+ SIGNATURE_ECDSA_WITH_SHA512
+ | SIGNATURE_DSA_WITH_SHA256
+ | SIGNATURE_VERITY_DSA_WITH_SHA256 => {
+ bail!(
+ "TODO(b/190343842) not implemented signature algorithm: {:#x}",
+ signature.signature_algorithm_id
+ );
+ }
+ _ => bail!("Unsupported signature algorithm: {:#x}", signature.signature_algorithm_id),
+ };
+ let key = UnparsedPublicKey::new(verification_alg, &key_info.subject_public_key);
+ key.verify(data.as_ref(), signature.signature.as_ref())?;
+ Ok(())
+}
+
+// ReadFromBytes implementations
+// TODO(jooyung): add derive macro: #[derive(ReadFromBytes)]
+
+impl ReadFromBytes for Signer {
+ fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+ Ok(Self {
+ signed_data: buf.read()?,
+ min_sdk: buf.read()?,
+ max_sdk: buf.read()?,
+ signatures: buf.read()?,
+ public_key: buf.read()?,
+ })
+ }
+}
+
+impl ReadFromBytes for SignedData {
+ fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+ Ok(Self {
+ digests: buf.read()?,
+ certificates: buf.read()?,
+ min_sdk: buf.read()?,
+ max_sdk: buf.read()?,
+ additional_attributes: buf.read()?,
+ })
+ }
+}
+
+impl ReadFromBytes for Signature {
+ fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+ Ok(Signature { signature_algorithm_id: buf.read()?, signature: buf.read()? })
+ }
+}
+
+impl ReadFromBytes for Digest {
+ fn read_from_bytes(buf: &mut Bytes) -> Result<Self> {
+ Ok(Self { signature_algorithm_id: buf.read()?, digest: buf.read()? })
+ }
+}
+
+#[inline]
+fn to_hex_string(buf: &[u8]) -> String {
+ buf.iter().map(|b| format!("{:02X}", b)).collect()
+}
diff --git a/libs/apkverify/src/ziputil.rs b/libs/apkverify/src/ziputil.rs
new file mode 100644
index 0000000..f18a38a
--- /dev/null
+++ b/libs/apkverify/src/ziputil.rs
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Utilities for zip handling
+
+use anyhow::{bail, Result};
+use bytes::{Buf, BufMut};
+use std::io::{Read, Seek, SeekFrom};
+use zip::ZipArchive;
+
+const EOCD_MIN_SIZE: usize = 22;
+const EOCD_CENTRAL_DIRECTORY_SIZE_FIELD_OFFSET: usize = 12;
+const EOCD_CENTRAL_DIRECTORY_OFFSET_FIELD_OFFSET: usize = 16;
+const EOCD_MAGIC: u32 = 0x06054b50;
+const ZIP64_MARK: u32 = 0xffffffff;
+
+#[derive(Debug, PartialEq)]
+pub struct ZipSections {
+ pub central_directory_offset: u32,
+ pub central_directory_size: u32,
+ pub eocd_offset: u32,
+ pub eocd_size: u32,
+}
+
+/// Discover the layout of a zip file.
+pub fn zip_sections<R: Read + Seek>(mut reader: R) -> Result<(R, ZipSections)> {
+ // open a zip to parse EOCD
+ let archive = ZipArchive::new(reader)?;
+ let eocd_size = archive.comment().len() + EOCD_MIN_SIZE;
+ if archive.offset() != 0 {
+ bail!("Invalid ZIP: offset should be 0, but {}.", archive.offset());
+ }
+ // retrieve reader back
+ reader = archive.into_inner();
+ // the current position should point to the EOCD offset
+ let eocd_offset = reader.seek(SeekFrom::Current(0))? as u32;
+ let mut eocd = vec![0u8; eocd_size as usize];
+ reader.read_exact(&mut eocd)?;
+ if (&eocd[0..]).get_u32_le() != EOCD_MAGIC {
+ bail!("Invalid ZIP: ZipArchive::new() should point EOCD after reading.");
+ }
+ let (central_directory_size, central_directory_offset) = get_central_directory(&eocd)?;
+ if central_directory_offset == ZIP64_MARK || central_directory_size == ZIP64_MARK {
+ bail!("Unsupported ZIP: ZIP64 is not supported.");
+ }
+ if central_directory_offset + central_directory_size != eocd_offset {
+ bail!("Invalid ZIP: EOCD should follow CD with no extra data or overlap.");
+ }
+
+ Ok((
+ reader,
+ ZipSections {
+ central_directory_offset,
+ central_directory_size,
+ eocd_offset,
+ eocd_size: eocd_size as u32,
+ },
+ ))
+}
+
+fn get_central_directory(buf: &[u8]) -> Result<(u32, u32)> {
+ if buf.len() < EOCD_MIN_SIZE {
+ bail!("Invalid EOCD size: {}", buf.len());
+ }
+ let mut buf = &buf[EOCD_CENTRAL_DIRECTORY_SIZE_FIELD_OFFSET..];
+ let size = buf.get_u32_le();
+ let offset = buf.get_u32_le();
+ Ok((size, offset))
+}
+
+/// Update EOCD's central_directory_offset field.
+pub fn set_central_directory_offset(buf: &mut [u8], value: u32) -> Result<()> {
+ if buf.len() < EOCD_MIN_SIZE {
+ bail!("Invalid EOCD size: {}", buf.len());
+ }
+ (&mut buf[EOCD_CENTRAL_DIRECTORY_OFFSET_FIELD_OFFSET..]).put_u32_le(value);
+ Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::testing::assert_contains;
+ use std::io::{Cursor, Write};
+ use zip::{write::FileOptions, ZipWriter};
+
+ fn create_test_zip() -> Cursor<Vec<u8>> {
+ let mut writer = ZipWriter::new(Cursor::new(Vec::new()));
+ writer.start_file("testfile", FileOptions::default()).unwrap();
+ writer.write_all(b"testcontent").unwrap();
+ writer.finish().unwrap()
+ }
+
+ #[test]
+ fn test_zip_sections() {
+ let (cursor, sections) = zip_sections(create_test_zip()).unwrap();
+ assert_eq!(sections.eocd_offset, (cursor.get_ref().len() - EOCD_MIN_SIZE) as u32);
+ }
+
+ #[test]
+ fn test_reject_if_extra_data_between_cd_and_eocd() {
+ // prepare normal zip
+ let buf = create_test_zip().into_inner();
+
+ // insert garbage between CD and EOCD.
+ // by the way, to mock zip-rs, use CD as garbage. This is implementation detail of zip-rs,
+ // which reads CD at (eocd_offset - cd_size) instead of at cd_offset from EOCD.
+ let (pre_eocd, eocd) = buf.split_at(buf.len() - EOCD_MIN_SIZE);
+ let (_, cd_offset) = get_central_directory(eocd).unwrap();
+ let cd = &pre_eocd[cd_offset as usize..];
+
+ // ZipArchive::new() succeeds, but we should reject
+ let res = zip_sections(Cursor::new([pre_eocd, cd, eocd].concat()));
+ assert!(res.is_err());
+ assert_contains(&res.err().unwrap().to_string(), "Invalid ZIP: offset should be 0");
+ }
+}
diff --git a/libs/apkverify/tests/apkverify_test.rs b/libs/apkverify/tests/apkverify_test.rs
new file mode 100644
index 0000000..ade4468
--- /dev/null
+++ b/libs/apkverify/tests/apkverify_test.rs
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use apkverify::{testing::assert_contains, verify};
+use std::matches;
+
+#[test]
+fn test_verify_v3() {
+ assert!(verify("tests/data/test.apex").is_ok());
+}
+
+#[test]
+fn test_verify_v3_digest_mismatch() {
+ let res = verify("tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk");
+ assert!(res.is_err());
+ assert_contains(&res.unwrap_err().to_string(), "Digest mismatch");
+}
+
+#[test]
+fn test_verify_v3_cert_and_public_key_mismatch() {
+ let res = verify("tests/data/v3-only-cert-and-public-key-mismatch.apk");
+ assert!(res.is_err());
+ assert_contains(&res.unwrap_err().to_string(), "Public key mismatch");
+}
+
+#[test]
+fn test_verify_truncated_cd() {
+ use zip::result::ZipError;
+ let res = verify("tests/data/v2-only-truncated-cd.apk");
+ // TODO(jooyung): consider making a helper for err assertion
+ assert!(matches!(
+ res.unwrap_err().root_cause().downcast_ref::<ZipError>().unwrap(),
+ ZipError::InvalidArchive(_),
+ ));
+}
diff --git a/libs/apkverify/tests/data/README.md b/libs/apkverify/tests/data/README.md
new file mode 100644
index 0000000..7556921
--- /dev/null
+++ b/libs/apkverify/tests/data/README.md
@@ -0,0 +1,14 @@
+test.apex is copied from ADBD apex built in AOSP.
+
+```sh
+$ apksigner verify -v test.apex
+Verifies
+Verified using v1 scheme (JAR signing): false
+Verified using v2 scheme (APK Signature Scheme v2): false
+Verified using v3 scheme (APK Signature Scheme v3): true
+Verified using v4 scheme (APK Signature Scheme v4): false
+Verified for SourceStamp: false
+Number of signers: 1
+```
+
+APK files are copied from tools/apksig/src/test/resources/com/android/apksig/.
diff --git a/libs/apkverify/tests/data/test.apex b/libs/apkverify/tests/data/test.apex
new file mode 100644
index 0000000..0e6a576
--- /dev/null
+++ b/libs/apkverify/tests/data/test.apex
Binary files differ
diff --git a/libs/apkverify/tests/data/v2-only-truncated-cd.apk b/libs/apkverify/tests/data/v2-only-truncated-cd.apk
new file mode 100644
index 0000000..d2e3e8d
--- /dev/null
+++ b/libs/apkverify/tests/data/v2-only-truncated-cd.apk
Binary files differ
diff --git a/libs/apkverify/tests/data/v3-only-cert-and-public-key-mismatch.apk b/libs/apkverify/tests/data/v3-only-cert-and-public-key-mismatch.apk
new file mode 100644
index 0000000..2291e7e
--- /dev/null
+++ b/libs/apkverify/tests/data/v3-only-cert-and-public-key-mismatch.apk
Binary files differ
diff --git a/libs/apkverify/tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk b/libs/apkverify/tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk
new file mode 100644
index 0000000..2800929
--- /dev/null
+++ b/libs/apkverify/tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk
Binary files differ
diff --git a/libs/avb_bindgen/Android.bp b/libs/avb_bindgen/Android.bp
new file mode 100644
index 0000000..1035498
--- /dev/null
+++ b/libs/avb_bindgen/Android.bp
@@ -0,0 +1,31 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_bindgen {
+ name: "libavb_bindgen",
+ wrapper_src: "bindgen/avb.h",
+ crate_name: "avb_bindgen",
+ source_stem: "bindings",
+ bindgen_flags: [
+ "--size_t-is-usize",
+ "--allowlist-function=.*",
+ ],
+ static_libs: [
+ "libavb",
+ ],
+ shared_libs: [
+ "libcrypto",
+ ],
+ cflags: ["-DBORINGSSL_NO_CXX"],
+}
+
+rust_test {
+ name: "libavb_bindgen_test",
+ srcs: [":libavb_bindgen"],
+ crate_name: "avb_bindgen_test",
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ clippy_lints: "none",
+ lints: "none",
+}
diff --git a/compos/aidl/com/android/compos/Metadata.aidl b/libs/avb_bindgen/bindgen/avb.h
similarity index 64%
copy from compos/aidl/com/android/compos/Metadata.aidl
copy to libs/avb_bindgen/bindgen/avb.h
index a15214d..b3d5385 100644
--- a/compos/aidl/com/android/compos/Metadata.aidl
+++ b/libs/avb_bindgen/bindgen/avb.h
@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,6 @@
* limitations under the License.
*/
-package com.android.compos;
+#pragma once
-import com.android.compos.InputFdAnnotation;
-import com.android.compos.OutputFdAnnotation;
-
-/** {@hide} */
-parcelable Metadata {
- InputFdAnnotation[] input_fd_annotations;
- OutputFdAnnotation[] output_fd_annotations;
-}
+#include <libavb/libavb.h>
diff --git a/libs/binder_common/Android.bp b/libs/binder_common/Android.bp
new file mode 100644
index 0000000..209955d
--- /dev/null
+++ b/libs/binder_common/Android.bp
@@ -0,0 +1,19 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library {
+ name: "libbinder_common",
+ crate_name: "binder_common",
+ srcs: ["lib.rs"],
+ edition: "2018",
+ rustlibs: [
+ "libbinder_rs",
+ "libbinder_rpc_unstable_bindgen",
+ "liblazy_static",
+ ],
+ apex_available: [
+ "com.android.compos",
+ "com.android.virt",
+ ],
+}
diff --git a/libs/binder_common/lazy_service.rs b/libs/binder_common/lazy_service.rs
new file mode 100644
index 0000000..9d605b6
--- /dev/null
+++ b/libs/binder_common/lazy_service.rs
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Rust API for lazy (aka dynamic) AIDL services.
+//! See https://source.android.com/devices/architecture/aidl/dynamic-aidl.
+
+use binder::force_lazy_services_persist;
+use lazy_static::lazy_static;
+use std::sync::Mutex;
+
+// TODO(b/200924402): Move this class to libbinder_rs once the infrastructure needed exists.
+
+/// An RAII object to ensure a server of lazy services is not killed. During the lifetime of any of
+/// these objects the service manager will not not kill the current process even if none of its
+/// lazy services are in use.
+#[must_use]
+#[derive(Debug)]
+pub struct LazyServiceGuard {
+ // Prevent construction outside this module.
+ _private: (),
+}
+
+lazy_static! {
+ // Count of how many LazyServiceGuard objects are in existence.
+ static ref GUARD_COUNT: Mutex<u64> = Mutex::new(0);
+}
+
+impl LazyServiceGuard {
+ /// Create a new LazyServiceGuard to prevent the service manager prematurely killing this
+ /// process.
+ pub fn new() -> Self {
+ let mut count = GUARD_COUNT.lock().unwrap();
+ *count += 1;
+ if *count == 1 {
+ // It's important that we make this call with the mutex held, to make sure
+ // that multiple calls (e.g. if the count goes 1 -> 0 -> 1) are correctly
+ // sequenced. (That also means we can't just use an AtomicU64.)
+ force_lazy_services_persist(true);
+ }
+ Self { _private: () }
+ }
+}
+
+impl Drop for LazyServiceGuard {
+ fn drop(&mut self) {
+ let mut count = GUARD_COUNT.lock().unwrap();
+ *count -= 1;
+ if *count == 0 {
+ force_lazy_services_persist(false);
+ }
+ }
+}
+
+impl Clone for LazyServiceGuard {
+ fn clone(&self) -> Self {
+ Self::new()
+ }
+}
+
+impl Default for LazyServiceGuard {
+ fn default() -> Self {
+ Self::new()
+ }
+}
diff --git a/libs/binder_common/lib.rs b/libs/binder_common/lib.rs
new file mode 100644
index 0000000..fd81da5
--- /dev/null
+++ b/libs/binder_common/lib.rs
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Common items useful for binder clients and/or servers.
+
+pub mod lazy_service;
+pub mod rpc_client;
+pub mod rpc_server;
+
+use binder::{ExceptionCode, Status};
+use std::ffi::CString;
+
+/// Constructs a new Binder error `Status` with the given `ExceptionCode` and message.
+pub fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
+ match exception {
+ ExceptionCode::SERVICE_SPECIFIC => new_binder_service_specific_error(-1, message),
+ _ => Status::new_exception(exception, to_cstring(message).as_deref()),
+ }
+}
+
+/// Constructs a Binder `Status` representing a service-specific exception with the given code and
+/// message.
+pub fn new_binder_service_specific_error<T: AsRef<str>>(code: i32, message: T) -> Status {
+ Status::new_service_specific_error(code, to_cstring(message).as_deref())
+}
+
+fn to_cstring<T: AsRef<str>>(message: T) -> Option<CString> {
+ CString::new(message.as_ref()).ok()
+}
diff --git a/libs/binder_common/rpc_client.rs b/libs/binder_common/rpc_client.rs
new file mode 100644
index 0000000..1aabe84
--- /dev/null
+++ b/libs/binder_common/rpc_client.rs
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Helpers for implementing an RPC Binder client.
+
+use binder::unstable_api::{new_spibinder, AIBinder};
+use binder::{StatusCode, Strong};
+
+/// Connects to a binder RPC server.
+pub fn connect_rpc_binder<T: binder::FromIBinder + ?Sized>(
+ cid: u32,
+ port: u32,
+) -> Result<Strong<T>, StatusCode> {
+ // SAFETY: AIBinder returned by RpcClient has correct reference count, and the ownership can be
+ // safely taken by new_spibinder.
+ let ibinder = unsafe {
+ new_spibinder(binder_rpc_unstable_bindgen::RpcClient(cid, port) as *mut AIBinder)
+ };
+ if let Some(ibinder) = ibinder {
+ <T>::try_from(ibinder)
+ } else {
+ Err(StatusCode::BAD_VALUE)
+ }
+}
diff --git a/libs/binder_common/rpc_server.rs b/libs/binder_common/rpc_server.rs
new file mode 100644
index 0000000..5c9d2a0
--- /dev/null
+++ b/libs/binder_common/rpc_server.rs
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Helpers for implementing an RPC Binder server.
+
+use binder::unstable_api::AsNative;
+use binder::SpIBinder;
+use std::os::raw;
+
+/// Run a binder RPC server, serving the supplied binder service implementation on the given vsock
+/// port.
+/// If and when the server is ready for connections (it is listening on the port) on_ready
+/// is called to allow appropriate action to be taken - e.g. to notify clients they
+/// may now attempt to connect.
+/// The current thread is joined to the binder thread pool to handle incoming messages.
+/// Returns true if the server has shutdown normally, false if it failed in some way.
+pub fn run_rpc_server<F>(service: SpIBinder, port: u32, on_ready: F) -> bool
+where
+ F: FnOnce(),
+{
+ let mut ready_notifier = ReadyNotifier(Some(on_ready));
+ ready_notifier.run_server(service, port)
+}
+
+struct ReadyNotifier<F>(Option<F>)
+where
+ F: FnOnce();
+
+impl<F> ReadyNotifier<F>
+where
+ F: FnOnce(),
+{
+ fn run_server(&mut self, mut service: SpIBinder, port: u32) -> bool {
+ let service = service.as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder;
+ let param = self.as_void_ptr();
+
+ // SAFETY: Service ownership is transferring to the server and won't be valid afterward.
+ // Plus the binder objects are threadsafe.
+ // RunRpcServerCallback does not retain a reference to ready_callback, and only ever
+ // calls it with the param we provide during the lifetime of self.
+ unsafe {
+ binder_rpc_unstable_bindgen::RunRpcServerCallback(
+ service,
+ port,
+ Some(Self::ready_callback),
+ param,
+ )
+ }
+ }
+
+ fn as_void_ptr(&mut self) -> *mut raw::c_void {
+ self as *mut _ as *mut raw::c_void
+ }
+
+ unsafe extern "C" fn ready_callback(param: *mut raw::c_void) {
+ // SAFETY: This is only ever called by RunRpcServerCallback, within the lifetime of the
+ // ReadyNotifier, with param taking the value returned by as_void_ptr (so a properly aligned
+ // non-null pointer to an initialized instance).
+ let ready_notifier = param as *mut Self;
+ ready_notifier.as_mut().unwrap().notify()
+ }
+
+ fn notify(&mut self) {
+ if let Some(on_ready) = self.0.take() {
+ on_ready();
+ }
+ }
+}
diff --git a/libs/idsig/Android.bp b/libs/idsig/Android.bp
new file mode 100644
index 0000000..3f70a64
--- /dev/null
+++ b/libs/idsig/Android.bp
@@ -0,0 +1,34 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+ name: "libidsig.defaults",
+ crate_name: "idsig",
+ srcs: ["src/lib.rs"],
+ edition: "2018",
+ prefer_rlib: true,
+ rustlibs: [
+ "libanyhow",
+ "libbyteorder",
+ "libring",
+ "libnum_traits",
+ ],
+ proc_macros: ["libnum_derive"],
+}
+
+rust_library {
+ name: "libidsig",
+ defaults: ["libidsig.defaults"],
+ apex_available: ["com.android.virt"],
+}
+
+rust_test {
+ name: "libidsig.test",
+ defaults: ["libidsig.defaults"],
+ test_suites: ["general-tests"],
+ compile_multilib: "first",
+ data: [
+ "testdata/input.*",
+ ],
+}
diff --git a/libs/idsig/Cargo.toml b/libs/idsig/Cargo.toml
new file mode 100644
index 0000000..49ae18d
--- /dev/null
+++ b/libs/idsig/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "idsig"
+version = "0.1.0"
+authors = ["Jiyong Park <jiyong@google.com>"]
+edition = "2018"
+
+[dependencies]
+anyhow = "1.0"
+byteorder = "1.1"
+ring = "0.16"
+num-derive = "0.3"
+num-traits = "0.2"
diff --git a/libs/idsig/src/apksigv4.rs b/libs/idsig/src/apksigv4.rs
new file mode 100644
index 0000000..a5578d8
--- /dev/null
+++ b/libs/idsig/src/apksigv4.rs
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use anyhow::{anyhow, bail, Context, Result};
+use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
+use num_derive::{FromPrimitive, ToPrimitive};
+use num_traits::{FromPrimitive, ToPrimitive};
+use std::io::{copy, Cursor, Read, Seek, SeekFrom, Write};
+
+use crate::hashtree::*;
+
+// `apksigv4` module provides routines to decode and encode the idsig file as defined in [APK
+// signature scheme v4] (https://source.android.com/security/apksigning/v4).
+
+/// `V4Signature` provides access to the various fields in an idsig file.
+#[derive(Default)]
+pub struct V4Signature<R: Read + Seek> {
+ /// Version of the header. Should be 2.
+ pub version: Version,
+ /// Provides access to the information about how the APK is hashed.
+ pub hashing_info: HashingInfo,
+ /// Provides access to the information that can be used to verify this file
+ pub signing_info: SigningInfo,
+ /// Total size of the merkle tree
+ pub merkle_tree_size: u32,
+ /// Offset of the merkle tree in the idsig file
+ pub merkle_tree_offset: u64,
+
+ // Provides access to the underlying data
+ data: R,
+}
+
+/// `HashingInfo` provides information about how the APK is hashed.
+#[derive(Default)]
+pub struct HashingInfo {
+ /// Hash algorithm used when creating the merkle tree for the APK.
+ pub hash_algorithm: HashAlgorithm,
+ /// The log size of a block used when creating the merkle tree. 12 if 4k block was used.
+ pub log2_blocksize: u8,
+ /// The salt used when creating the merkle tree. 32 bytes max.
+ pub salt: Box<[u8]>,
+ /// The root hash of the merkle tree created.
+ pub raw_root_hash: Box<[u8]>,
+}
+
+/// `SigningInfo` provides information that can be used to verify the idsig file.
+#[derive(Default)]
+pub struct SigningInfo {
+ /// Digest of the APK that this idsig file is for.
+ pub apk_digest: Box<[u8]>,
+ /// Certificate of the signer that signed this idsig file. ASN.1 DER form.
+ pub x509_certificate: Box<[u8]>,
+    /// Free-form binary data.
+ pub additional_data: Box<[u8]>,
+ /// Public key of the signer in ASN.1 DER form. This must match the `x509_certificate` field.
+ pub public_key: Box<[u8]>,
+ /// Signature algorithm used to sign this file.
+ pub signature_algorithm_id: SignatureAlgorithmId,
+ /// The signature of this file.
+ pub signature: Box<[u8]>,
+}
+
+/// Version of the idsig file format
+#[derive(Debug, PartialEq, FromPrimitive, ToPrimitive)]
+#[repr(u32)]
+pub enum Version {
+ /// Version 2, the only supported version.
+ V2 = 2,
+}
+
+impl Version {
+ fn from(val: u32) -> Result<Version> {
+ Self::from_u32(val).ok_or_else(|| anyhow!("{} is an unsupported version", val))
+ }
+}
+
+impl Default for Version {
+ fn default() -> Self {
+ Version::V2
+ }
+}
+
+/// Hash algorithm that can be used for idsig file.
+#[derive(Debug, PartialEq, FromPrimitive, ToPrimitive)]
+#[repr(u32)]
+pub enum HashAlgorithm {
+ /// SHA2-256
+ SHA256 = 1,
+}
+
+impl HashAlgorithm {
+ fn from(val: u32) -> Result<HashAlgorithm> {
+ Self::from_u32(val).ok_or_else(|| anyhow!("{} is an unsupported hash algorithm", val))
+ }
+}
+
+impl Default for HashAlgorithm {
+ fn default() -> Self {
+ HashAlgorithm::SHA256
+ }
+}
+
+/// Signature algorithm that can be used for idsig file
+#[derive(Debug, PartialEq, FromPrimitive, ToPrimitive)]
+#[allow(non_camel_case_types)]
+#[repr(u32)]
+pub enum SignatureAlgorithmId {
+ /// RSASSA-PSS with SHA2-256 digest, SHA2-256 MGF1, 32 bytes of salt, trailer: 0xbc
+ RSASSA_PSS_SHA2_256 = 0x0101,
+ /// RSASSA-PSS with SHA2-512 digest, SHA2-512 MGF1, 64 bytes of salt, trailer: 0xbc
+ RSASSA_PSS_SHA2_512 = 0x0102,
+ /// RSASSA-PKCS1-v1_5 with SHA2-256 digest.
+ RSASSA_PKCS1_SHA2_256 = 0x0103,
+ /// RSASSA-PKCS1-v1_5 with SHA2-512 digest.
+ RSASSA_PKCS1_SHA2_512 = 0x0104,
+ /// ECDSA with SHA2-256 digest.
+ ECDSA_SHA2_256 = 0x0201,
+ /// ECDSA with SHA2-512 digest.
+ ECDSA_SHA2_512 = 0x0202,
+ /// DSA with SHA2-256 digest
+ DSA_SHA2_256 = 0x0301,
+}
+
+impl SignatureAlgorithmId {
+ fn from(val: u32) -> Result<SignatureAlgorithmId> {
+ Self::from_u32(val)
+ .with_context(|| format!("{:#06x} is an unsupported signature algorithm", val))
+ }
+}
+
+impl Default for SignatureAlgorithmId {
+ fn default() -> Self {
+ SignatureAlgorithmId::DSA_SHA2_256
+ }
+}
+
+impl<R: Read + Seek> V4Signature<R> {
+ /// Consumes a stream for an idsig file into a `V4Signature` struct.
+ pub fn from(mut r: R) -> Result<V4Signature<R>> {
+ Ok(V4Signature {
+ version: Version::from(r.read_u32::<LittleEndian>()?)?,
+ hashing_info: HashingInfo::from(&mut r)?,
+ signing_info: SigningInfo::from(&mut r)?,
+ merkle_tree_size: r.read_u32::<LittleEndian>()?,
+ merkle_tree_offset: r.stream_position()?,
+ data: r,
+ })
+ }
+
+ /// Read a stream for an APK file and creates a corresponding `V4Signature` struct that digests
+ /// the APK file. Note that the signing is not done.
+ pub fn create(
+ mut apk: &mut R,
+ block_size: usize,
+ salt: &[u8],
+ algorithm: HashAlgorithm,
+ ) -> Result<V4Signature<Cursor<Vec<u8>>>> {
+ // Determine the size of the apk
+ let start = apk.stream_position()?;
+ let size = apk.seek(SeekFrom::End(0))? as usize;
+ apk.seek(SeekFrom::Start(start))?;
+
+ // Create hash tree (and root hash)
+ let algorithm = match algorithm {
+ HashAlgorithm::SHA256 => &ring::digest::SHA256,
+ };
+ let hash_tree = HashTree::from(&mut apk, size, salt, block_size, algorithm)?;
+
+ let mut ret = V4Signature {
+ version: Version::default(),
+ hashing_info: HashingInfo::default(),
+ signing_info: SigningInfo::default(),
+ merkle_tree_size: hash_tree.tree.len() as u32,
+ merkle_tree_offset: 0, // merkle tree starts from the beginning of `data`
+ data: Cursor::new(hash_tree.tree),
+ };
+ ret.hashing_info.raw_root_hash = hash_tree.root_hash.into_boxed_slice();
+ ret.hashing_info.log2_blocksize = log2(block_size);
+
+ // TODO(jiyong): fill the signing_info struct by reading the APK file. The information,
+ // especially `apk_digest` is needed to check if `V4Signature` is outdated, in which case
+ // it needs to be created from the updated APK.
+
+ Ok(ret)
+ }
+
+ /// Writes the data into a writer
+ pub fn write_into<W: Write + Seek>(&mut self, mut w: &mut W) -> Result<()> {
+ // Writes the header part
+ w.write_u32::<LittleEndian>(self.version.to_u32().unwrap())?;
+ self.hashing_info.write_into(&mut w)?;
+ self.signing_info.write_into(&mut w)?;
+ w.write_u32::<LittleEndian>(self.merkle_tree_size)?;
+
+ // Writes the merkle tree
+ self.data.seek(SeekFrom::Start(self.merkle_tree_offset))?;
+ let copied_size = copy(&mut self.data, &mut w)?;
+ if copied_size != self.merkle_tree_size as u64 {
+ bail!(
+ "merkle tree is {} bytes, but only {} bytes are written.",
+ self.merkle_tree_size,
+ copied_size
+ );
+ }
+ Ok(())
+ }
+
+ /// Returns the bytes that represents the merkle tree
+ pub fn merkle_tree(&mut self) -> Result<Vec<u8>> {
+ self.data.seek(SeekFrom::Start(self.merkle_tree_offset))?;
+ let mut out = Vec::new();
+ self.data.read_to_end(&mut out)?;
+ Ok(out)
+ }
+}
+
+impl HashingInfo {
+ fn from(mut r: &mut dyn Read) -> Result<HashingInfo> {
+        // Size of the entire hashing_info struct. We don't need this because each variable-sized
+        // field in the struct is also length-encoded.
+ r.read_u32::<LittleEndian>()?;
+ Ok(HashingInfo {
+ hash_algorithm: HashAlgorithm::from(r.read_u32::<LittleEndian>()?)?,
+ log2_blocksize: r.read_u8()?,
+ salt: read_sized_array(&mut r)?,
+ raw_root_hash: read_sized_array(&mut r)?,
+ })
+ }
+
+ fn write_into<W: Write + Seek>(&self, mut w: &mut W) -> Result<()> {
+ let start = w.stream_position()?;
+ // Size of the entire hashing_info struct. Since we don't know the size yet, fill the place
+ // with 0. The exact size will then be written below.
+ w.write_u32::<LittleEndian>(0)?;
+
+ w.write_u32::<LittleEndian>(self.hash_algorithm.to_u32().unwrap())?;
+ w.write_u8(self.log2_blocksize)?;
+ write_sized_array(&mut w, &self.salt)?;
+ write_sized_array(&mut w, &self.raw_root_hash)?;
+
+ // Determine the size of hashing_info, and write it in front of the struct where the value
+ // was initialized to zero.
+ let end = w.stream_position()?;
+ let size = end - start - std::mem::size_of::<u32>() as u64;
+ w.seek(SeekFrom::Start(start))?;
+ w.write_u32::<LittleEndian>(size as u32)?;
+ w.seek(SeekFrom::Start(end))?;
+ Ok(())
+ }
+}
+
+impl SigningInfo {
+ fn from(mut r: &mut dyn Read) -> Result<SigningInfo> {
+        // Size of the entire signing_info struct. We don't need this because each variable-sized
+        // field in the struct is also length-encoded.
+ r.read_u32::<LittleEndian>()?;
+ Ok(SigningInfo {
+ apk_digest: read_sized_array(&mut r)?,
+ x509_certificate: read_sized_array(&mut r)?,
+ additional_data: read_sized_array(&mut r)?,
+ public_key: read_sized_array(&mut r)?,
+ signature_algorithm_id: SignatureAlgorithmId::from(r.read_u32::<LittleEndian>()?)?,
+ signature: read_sized_array(&mut r)?,
+ })
+ }
+
+ fn write_into<W: Write + Seek>(&self, mut w: &mut W) -> Result<()> {
+ let start = w.stream_position()?;
+ // Size of the entire signing_info struct. Since we don't know the size yet, fill the place
+ // with 0. The exact size will then be written below.
+ w.write_u32::<LittleEndian>(0)?;
+
+ write_sized_array(&mut w, &self.apk_digest)?;
+ write_sized_array(&mut w, &self.x509_certificate)?;
+ write_sized_array(&mut w, &self.additional_data)?;
+ write_sized_array(&mut w, &self.public_key)?;
+ w.write_u32::<LittleEndian>(self.signature_algorithm_id.to_u32().unwrap())?;
+ write_sized_array(&mut w, &self.signature)?;
+
+ // Determine the size of signing_info, and write it in front of the struct where the value
+ // was initialized to zero.
+ let end = w.stream_position()?;
+ let size = end - start - std::mem::size_of::<u32>() as u64;
+ w.seek(SeekFrom::Start(start))?;
+ w.write_u32::<LittleEndian>(size as u32)?;
+ w.seek(SeekFrom::Start(end))?;
+ Ok(())
+ }
+}
+
+fn read_sized_array(r: &mut dyn Read) -> Result<Box<[u8]>> {
+ let size = r.read_u32::<LittleEndian>()?;
+ let mut data = vec![0; size as usize];
+ r.read_exact(&mut data)?;
+ Ok(data.into_boxed_slice())
+}
+
+fn write_sized_array(w: &mut dyn Write, data: &[u8]) -> Result<()> {
+ w.write_u32::<LittleEndian>(data.len() as u32)?;
+ Ok(w.write_all(data)?)
+}
+
+fn log2(n: usize) -> u8 {
+ let num_bits = std::mem::size_of::<usize>() * 8;
+ (num_bits as u32 - n.leading_zeros() - 1) as u8
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::io::Cursor;
+
+ fn hexstring_from(s: &[u8]) -> String {
+ s.iter().map(|byte| format!("{:02x}", byte)).reduce(|i, j| i + &j).unwrap_or_default()
+ }
+
+ #[test]
+ fn parse_idsig_file() {
+ let idsig = Cursor::new(include_bytes!("../testdata/test.apk.idsig"));
+ let parsed = V4Signature::from(idsig).unwrap();
+
+ assert_eq!(Version::V2, parsed.version);
+
+ let hi = parsed.hashing_info;
+ assert_eq!(HashAlgorithm::SHA256, hi.hash_algorithm);
+ assert_eq!(12, hi.log2_blocksize);
+ assert_eq!("", hexstring_from(hi.salt.as_ref()));
+ assert_eq!(
+ "ce1194fdb3cb2537daf0ac8cdf4926754adcbce5abeece7945fe25d204a0df6a",
+ hexstring_from(hi.raw_root_hash.as_ref())
+ );
+
+ let si = parsed.signing_info;
+ assert_eq!(
+ "b5225523a813fb84ed599dd649698c080bcfed4fb19ddb00283a662a2683bc15",
+ hexstring_from(si.apk_digest.as_ref())
+ );
+ assert_eq!("", hexstring_from(si.additional_data.as_ref()));
+ assert_eq!(
+ "303d021c77304d0f4732a90372bbfce095223e4ba82427ceb381f69bc6762d78021d008b99924\
+ a8585c38d7f654835eb219ae9e176b44e86dcb23153e3d9d6",
+ hexstring_from(si.signature.as_ref())
+ );
+ assert_eq!(SignatureAlgorithmId::DSA_SHA2_256, si.signature_algorithm_id);
+
+ assert_eq!(36864, parsed.merkle_tree_size);
+ assert_eq!(2251, parsed.merkle_tree_offset);
+ }
+
+    /// Parse an idsig file into V4Signature and write it. The written data must be the same as
+ /// the input file.
+ #[test]
+ fn parse_and_compose() {
+ let input = Cursor::new(include_bytes!("../testdata/test.apk.idsig"));
+ let mut parsed = V4Signature::from(input.clone()).unwrap();
+
+ let mut output = Cursor::new(Vec::new());
+ parsed.write_into(&mut output).unwrap();
+
+ assert_eq!(input.get_ref().as_ref(), output.get_ref().as_slice());
+ }
+
+ /// Create V4Signature by hashing an APK. Merkle tree and the root hash should be the same
+ /// as those in the idsig file created by the signapk tool.
+ #[test]
+ fn digest_from_apk() {
+ let mut input = Cursor::new(include_bytes!("../testdata/test.apk"));
+ let mut created =
+ V4Signature::create(&mut input, 4096, &[], HashAlgorithm::SHA256).unwrap();
+
+ let golden = Cursor::new(include_bytes!("../testdata/test.apk.idsig"));
+ let mut golden = V4Signature::from(golden).unwrap();
+
+ // Compare the root hash
+ assert_eq!(
+ created.hashing_info.raw_root_hash.as_ref(),
+ golden.hashing_info.raw_root_hash.as_ref()
+ );
+
+ // Compare the merkle tree
+ assert_eq!(
+ created.merkle_tree().unwrap().as_slice(),
+ golden.merkle_tree().unwrap().as_slice()
+ );
+ }
+}
diff --git a/libs/idsig/src/hashtree.rs b/libs/idsig/src/hashtree.rs
new file mode 100644
index 0000000..63f83ea
--- /dev/null
+++ b/libs/idsig/src/hashtree.rs
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+pub use ring::digest::{Algorithm, Digest};
+
+use ring::digest;
+use std::io::{Cursor, Read, Result, Write};
+
+/// `HashTree` is a merkle tree (and its root hash) that is compatible with fs-verity.
+pub struct HashTree {
+ /// Binary presentation of the merkle tree
+ pub tree: Vec<u8>,
+ /// Root hash
+ pub root_hash: Vec<u8>,
+}
+
+impl HashTree {
+ /// Creates merkle tree from `input`, using the given `salt` and hashing `algorithm`. `input`
+ /// is divided into `block_size` chunks.
+ pub fn from<R: Read>(
+ input: &mut R,
+ input_size: usize,
+ salt: &[u8],
+ block_size: usize,
+ algorithm: &'static Algorithm,
+ ) -> Result<Self> {
+ let salt = zero_pad_salt(salt, algorithm);
+ let tree = generate_hash_tree(input, input_size, &salt, block_size, algorithm)?;
+
+        // The root hash is computed from the first block of the hash tree, or directly from the
+        // input data when no tree was generated (i.e. the input is smaller than one block).
+ let root_hash = if tree.is_empty() {
+ let mut data = Vec::new();
+ input.read_to_end(&mut data)?;
+ hash_one_block(&data, &salt, block_size, algorithm).as_ref().to_vec()
+ } else {
+ let first_block = &tree[0..block_size];
+ hash_one_block(first_block, &salt, block_size, algorithm).as_ref().to_vec()
+ };
+ Ok(HashTree { tree, root_hash })
+ }
+}
+
+/// Calculate hash tree for the blocks in `input`.
+///
+/// This function implements: https://www.kernel.org/doc/html/latest/filesystems/fsverity.html#merkle-tree
+///
+/// The file contents are divided into blocks, where the block size is configurable but is usually
+/// 4096 bytes. The end of the last block is zero-padded if needed. Each block is then hashed,
+/// producing the first level of hashes. Then, the hashes in this first level are grouped into
+/// blocksize-byte blocks (zero-padding the ends as needed) and these blocks are hashed,
+/// producing the second level of hashes. This proceeds up the tree until only a single block
+/// remains.
+pub fn generate_hash_tree<R: Read>(
+ input: &mut R,
+ input_size: usize,
+ salt: &[u8],
+ block_size: usize,
+ algorithm: &'static Algorithm,
+) -> Result<Vec<u8>> {
+ let digest_size = algorithm.output_len;
+ let levels = calc_hash_levels(input_size, block_size, digest_size);
+ let tree_size = levels.iter().map(|r| r.len()).sum();
+
+ // The contiguous memory that holds the entire merkle tree
+ let mut hash_tree = vec![0; tree_size];
+
+ for (n, cur) in levels.iter().enumerate() {
+ if n == 0 {
+ // Level 0: the (zero-padded) input stream is hashed into level 0
+ let pad_size = round_to_multiple(input_size, block_size) - input_size;
+ let mut input = input.chain(Cursor::new(vec![0; pad_size]));
+ let mut level0 = Cursor::new(&mut hash_tree[cur.start..cur.end]);
+
+ let mut a_block = vec![0; block_size];
+ let mut num_blocks = (input_size + block_size - 1) / block_size;
+ while num_blocks > 0 {
+ input.read_exact(&mut a_block)?;
+ let h = hash_one_block(&a_block, salt, block_size, algorithm);
+ level0.write_all(h.as_ref()).unwrap();
+ num_blocks -= 1;
+ }
+ } else {
+ // Intermediate levels: level n - 1 is hashed into level n
+ // Both levels belong to the same `hash_tree`. In order to have a mutable slice for
+ // level n while having a slice for level n - 1, take the mutable slice for both levels
+ // and split it.
+ let prev = &levels[n - 1];
+ let cur_and_prev = &mut hash_tree[cur.start..prev.end];
+ let (cur, prev) = cur_and_prev.split_at_mut(prev.start - cur.start);
+ let mut cur = Cursor::new(cur);
+ prev.chunks(block_size).for_each(|data| {
+ let h = hash_one_block(data, salt, block_size, algorithm);
+ cur.write_all(h.as_ref()).unwrap();
+ });
+ }
+ }
+ Ok(hash_tree)
+}
+
+/// Hash one block of input using the given hash algorithm and the salt. Input might be smaller
+/// than a block, in which case zero is padded.
+fn hash_one_block(
+ input: &[u8],
+ salt: &[u8],
+ block_size: usize,
+ algorithm: &'static Algorithm,
+) -> Digest {
+ let mut ctx = digest::Context::new(algorithm);
+ ctx.update(salt);
+ ctx.update(input);
+ let pad_size = block_size - input.len();
+ ctx.update(&vec![0; pad_size]);
+ ctx.finish()
+}
+
+type Range = std::ops::Range<usize>;
+
+/// Calculate the ranges of hash for each level
+fn calc_hash_levels(input_size: usize, block_size: usize, digest_size: usize) -> Vec<Range> {
+ // The input is split into multiple blocks and each block is hashed, which becomes the input
+ // for the next level. Size of a single hash is `digest_size`.
+ let mut level_sizes = Vec::new();
+ loop {
+ // Input for this level is from either the last level (if exists), or the input parameter.
+ let input_size = *level_sizes.last().unwrap_or(&input_size);
+ if input_size <= block_size {
+ break;
+ }
+ let num_blocks = (input_size + block_size - 1) / block_size;
+ let hashes_size = round_to_multiple(num_blocks * digest_size, block_size);
+ level_sizes.push(hashes_size);
+ }
+
+ // The hash tree is stored upside down. The top level is at offset 0. The second level comes
+ // next, and so on. Level 0 is located at the end.
+ //
+ // Given level_sizes [10, 3, 1], the offsets for each label are ...
+ //
+ // Level 2 is at offset 0
+ // Level 1 is at offset 1 (because Level 2 is of size 1)
+ // Level 0 is at offset 4 (because Level 1 is of size 3)
+ //
+ // This is done by scanning the sizes in reverse order
+ let mut ranges = level_sizes
+ .iter()
+ .rev()
+ .scan(0, |prev_end, size| {
+ let range = *prev_end..*prev_end + size;
+ *prev_end = range.end;
+ Some(range)
+ })
+ .collect::<Vec<_>>();
+ ranges.reverse(); // reverse again so that index N is for level N
+ ranges
+}
+
+/// Round `n` up to the nearest multiple of `unit`
+fn round_to_multiple(n: usize, unit: usize) -> usize {
+ (n + unit - 1) & !(unit - 1)
+}
+
+/// Pad zero to salt if necessary.
+///
+/// According to https://www.kernel.org/doc/html/latest/filesystems/fsverity.html:
+///
+/// If a salt was specified, then it’s zero-padded to the closest multiple of the input size of the
+/// hash algorithm’s compression function, e.g. 64 bytes for SHA-256 or 128 bytes for SHA-512. The
+/// padded salt is prepended to every data or Merkle tree block that is hashed.
+fn zero_pad_salt(salt: &[u8], algorithm: &Algorithm) -> Vec<u8> {
+ if salt.is_empty() {
+ salt.to_vec()
+ } else {
+ let padded_len = round_to_multiple(salt.len(), algorithm.block_len);
+ let mut salt = salt.to_vec();
+ salt.resize(padded_len, 0);
+ salt
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use ring::digest;
+ use std::fs::{self, File};
+
+ #[test]
+ fn compare_with_golden_output() -> Result<()> {
+ // The golden outputs are generated by using the `fsverity` utility.
+ let sizes = ["512", "4K", "1M", "10000000", "272629760"];
+ for size in sizes.iter() {
+ let input_name = format!("testdata/input.{}", size);
+ let mut input = File::open(&input_name)?;
+ let golden_hash_tree = fs::read(format!("testdata/input.{}.hash", size))?;
+ let golden_descriptor = fs::read(format!("testdata/input.{}.descriptor", size))?;
+ let golden_root_hash = &golden_descriptor[16..16 + 32];
+
+ let size = std::fs::metadata(&input_name)?.len() as usize;
+ let salt = vec![1, 2, 3, 4, 5, 6];
+ let ht = HashTree::from(&mut input, size, &salt, 4096, &digest::SHA256)?;
+
+ assert_eq!(golden_hash_tree.as_slice(), ht.tree.as_slice());
+ assert_eq!(golden_root_hash, ht.root_hash.as_slice());
+ }
+ Ok(())
+ }
+}
diff --git a/compos/aidl/com/android/compos/Metadata.aidl b/libs/idsig/src/lib.rs
similarity index 70%
rename from compos/aidl/com/android/compos/Metadata.aidl
rename to libs/idsig/src/lib.rs
index a15214d..7937d71 100644
--- a/compos/aidl/com/android/compos/Metadata.aidl
+++ b/libs/idsig/src/lib.rs
@@ -14,13 +14,11 @@
* limitations under the License.
*/
-package com.android.compos;
+//! `idsig` provides routines for creating the idsig file that is defined for the APK signature
+//! scheme v4 and for parsing the file.
-import com.android.compos.InputFdAnnotation;
-import com.android.compos.OutputFdAnnotation;
+mod apksigv4;
+mod hashtree;
-/** {@hide} */
-parcelable Metadata {
- InputFdAnnotation[] input_fd_annotations;
- OutputFdAnnotation[] output_fd_annotations;
-}
+pub use crate::apksigv4::*;
+pub use crate::hashtree::*;
diff --git a/libs/idsig/testdata/create.sh b/libs/idsig/testdata/create.sh
new file mode 100755
index 0000000..1a15d2b
--- /dev/null
+++ b/libs/idsig/testdata/create.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+sizes="512 4K 1M 10000000 272629760"
+for size in $sizes; do
+ echo $size
+ dd if=/dev/random of=input.$size bs=$size count=1
+ fsverity digest input.$size \
+ --hash-alg=sha256 \
+ --salt=010203040506 \
+ --block-size=4096 \
+ --out-merkle-tree input.$size.hash \
+ --out-descriptor input.$size.descriptor
+done
diff --git a/libs/idsig/testdata/input.10000000 b/libs/idsig/testdata/input.10000000
new file mode 100644
index 0000000..6bc5a4b
--- /dev/null
+++ b/libs/idsig/testdata/input.10000000
Binary files differ
diff --git a/libs/idsig/testdata/input.10000000.descriptor b/libs/idsig/testdata/input.10000000.descriptor
new file mode 100644
index 0000000..dc0d096
--- /dev/null
+++ b/libs/idsig/testdata/input.10000000.descriptor
Binary files differ
diff --git a/libs/idsig/testdata/input.10000000.hash b/libs/idsig/testdata/input.10000000.hash
new file mode 100644
index 0000000..354c5c2
--- /dev/null
+++ b/libs/idsig/testdata/input.10000000.hash
Binary files differ
diff --git a/libs/idsig/testdata/input.1M b/libs/idsig/testdata/input.1M
new file mode 100644
index 0000000..7040ec3
--- /dev/null
+++ b/libs/idsig/testdata/input.1M
Binary files differ
diff --git a/libs/idsig/testdata/input.1M.descriptor b/libs/idsig/testdata/input.1M.descriptor
new file mode 100644
index 0000000..f11753d
--- /dev/null
+++ b/libs/idsig/testdata/input.1M.descriptor
Binary files differ
diff --git a/libs/idsig/testdata/input.1M.hash b/libs/idsig/testdata/input.1M.hash
new file mode 100644
index 0000000..689790c
--- /dev/null
+++ b/libs/idsig/testdata/input.1M.hash
Binary files differ
diff --git a/libs/idsig/testdata/input.272629760 b/libs/idsig/testdata/input.272629760
new file mode 100644
index 0000000..5bb6753
--- /dev/null
+++ b/libs/idsig/testdata/input.272629760
Binary files differ
diff --git a/libs/idsig/testdata/input.272629760.descriptor b/libs/idsig/testdata/input.272629760.descriptor
new file mode 100644
index 0000000..70e0744
--- /dev/null
+++ b/libs/idsig/testdata/input.272629760.descriptor
Binary files differ
diff --git a/libs/idsig/testdata/input.272629760.hash b/libs/idsig/testdata/input.272629760.hash
new file mode 100644
index 0000000..f2c68cc
--- /dev/null
+++ b/libs/idsig/testdata/input.272629760.hash
Binary files differ
diff --git a/libs/idsig/testdata/input.4K b/libs/idsig/testdata/input.4K
new file mode 100644
index 0000000..99db32a
--- /dev/null
+++ b/libs/idsig/testdata/input.4K
Binary files differ
diff --git a/libs/idsig/testdata/input.4K.descriptor b/libs/idsig/testdata/input.4K.descriptor
new file mode 100644
index 0000000..b120e2f
--- /dev/null
+++ b/libs/idsig/testdata/input.4K.descriptor
Binary files differ
diff --git a/libs/idsig/testdata/input.4K.hash b/libs/idsig/testdata/input.4K.hash
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/libs/idsig/testdata/input.4K.hash
diff --git a/libs/idsig/testdata/input.512 b/libs/idsig/testdata/input.512
new file mode 100644
index 0000000..a57797f
--- /dev/null
+++ b/libs/idsig/testdata/input.512
Binary files differ
diff --git a/libs/idsig/testdata/input.512.descriptor b/libs/idsig/testdata/input.512.descriptor
new file mode 100644
index 0000000..805019b
--- /dev/null
+++ b/libs/idsig/testdata/input.512.descriptor
Binary files differ
diff --git a/libs/idsig/testdata/input.512.hash b/libs/idsig/testdata/input.512.hash
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/libs/idsig/testdata/input.512.hash
diff --git a/libs/idsig/testdata/test.apk b/libs/idsig/testdata/test.apk
new file mode 100644
index 0000000..cbee532
--- /dev/null
+++ b/libs/idsig/testdata/test.apk
Binary files differ
diff --git a/libs/idsig/testdata/test.apk.idsig b/libs/idsig/testdata/test.apk.idsig
new file mode 100644
index 0000000..8c112de
--- /dev/null
+++ b/libs/idsig/testdata/test.apk.idsig
Binary files differ
diff --git a/libs/statslog_virtualization/Android.bp b/libs/statslog_virtualization/Android.bp
new file mode 100644
index 0000000..51a51a3
--- /dev/null
+++ b/libs/statslog_virtualization/Android.bp
@@ -0,0 +1,71 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Autogenerate the class (and respective headers) with logging methods and constants
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+genrule {
+ name: "statslog_virtualization_header.rs",
+ tools: ["stats-log-api-gen"],
+ cmd: "$(location stats-log-api-gen) --module virtualizationservice --minApiLevel 34 --rustHeader $(genDir)/statslog_virtualization_header.rs --rustHeaderCrate statslog_virtualization_rust_header",
+ out: [
+ "statslog_virtualization_header.rs",
+ ],
+}
+
+rust_library {
+ name: "libstatslog_virtualization_rust_header",
+ crate_name: "statslog_virtualization_rust_header",
+ srcs: [
+ "statslog_header_wrapper.rs",
+ ":statslog_virtualization_header.rs",
+ ],
+ rustlibs: [
+ "libstatspull_bindgen",
+ "libthiserror",
+ ],
+ apex_available: [
+ "com.android.virt",
+ ],
+
+}
+
+genrule {
+ name: "statslog_virtualization.rs",
+ tools: ["stats-log-api-gen"],
+ cmd: "$(location stats-log-api-gen) --module virtualizationservice --minApiLevel 34 --rustHeaderCrate statslog_virtualization_rust_header --rust $(genDir)/statslog_virtualization.rs",
+ out: [
+ "statslog_virtualization.rs",
+ ],
+}
+
+rust_library {
+ name: "libstatslog_virtualization_rust",
+ crate_name: "statslog_virtualization_rust",
+ srcs: [
+ "statslog_wrapper.rs",
+ ":statslog_virtualization.rs",
+ ],
+ rustlibs: [
+ "libstatslog_virtualization_rust_header",
+ "libstatspull_bindgen",
+ ],
+ apex_available: [
+ "com.android.virt",
+ ],
+
+}
diff --git a/libs/statslog_virtualization/statslog_header_wrapper.rs b/libs/statslog_virtualization/statslog_header_wrapper.rs
new file mode 100644
index 0000000..39ff51f
--- /dev/null
+++ b/libs/statslog_virtualization/statslog_header_wrapper.rs
@@ -0,0 +1,4 @@
+#![allow(clippy::too_many_arguments)]
+#![allow(missing_docs)]
+
+include!(concat!(env!("OUT_DIR"), "/statslog_virtualization_header.rs"));
diff --git a/libs/statslog_virtualization/statslog_wrapper.rs b/libs/statslog_virtualization/statslog_wrapper.rs
new file mode 100644
index 0000000..4d1a0fa
--- /dev/null
+++ b/libs/statslog_virtualization/statslog_wrapper.rs
@@ -0,0 +1,5 @@
+#![allow(clippy::too_many_arguments)]
+#![allow(missing_docs)]
+#![allow(unused)]
+
+include!(concat!(env!("OUT_DIR"), "/statslog_virtualization.rs"));
diff --git a/vmconfig/Android.bp b/libs/vmconfig/Android.bp
similarity index 94%
rename from vmconfig/Android.bp
rename to libs/vmconfig/Android.bp
index 321eba0..1aee1ce 100644
--- a/vmconfig/Android.bp
+++ b/libs/vmconfig/Android.bp
@@ -10,8 +10,9 @@
rustlibs: [
"android.system.virtualizationservice-rust",
"libanyhow",
- "libserde_json",
+ "libsemver",
"libserde",
+ "libserde_json",
],
apex_available: [
"com.android.virt",
diff --git a/vmconfig/src/lib.rs b/libs/vmconfig/src/lib.rs
similarity index 90%
rename from vmconfig/src/lib.rs
rename to libs/vmconfig/src/lib.rs
index 4a5b3b1..607b347 100644
--- a/vmconfig/src/lib.rs
+++ b/libs/vmconfig/src/lib.rs
@@ -22,6 +22,7 @@
};
use anyhow::{bail, Context, Error, Result};
+use semver::VersionReq;
use serde::{Deserialize, Serialize};
use std::convert::TryInto;
use std::fs::{File, OpenOptions};
@@ -51,6 +52,9 @@
/// The amount of RAM to give the VM, in MiB.
#[serde(default)]
pub memory_mib: Option<NonZeroU32>,
+ /// Version or range of versions of the virtual platform that this config is compatible with.
+ /// The format follows SemVer (https://semver.org).
+ pub platform_version: VersionReq,
}
impl VmConfig {
@@ -93,8 +97,10 @@
params: self.params.clone(),
bootloader: maybe_open_parcel_file(&self.bootloader, false)?,
disks: self.disks.iter().map(DiskImage::to_parcelable).collect::<Result<_, Error>>()?,
- protected_vm: self.protected,
- memory_mib,
+ protectedVm: self.protected,
+ memoryMib: memory_mib,
+ platformVersion: self.platform_version.to_string(),
+ ..Default::default()
})
}
}
@@ -131,8 +137,7 @@
/// A label for the partition.
pub label: String,
/// The filename of the partition image.
- #[serde(default)]
- pub paths: Vec<PathBuf>,
+ pub path: PathBuf,
/// Whether the partition should be writable.
#[serde(default)]
pub writable: bool,
@@ -140,15 +145,11 @@
impl Partition {
fn to_parcelable(&self) -> Result<AidlPartition> {
- if self.paths.is_empty() {
- bail!("Partition {} contains no paths", &self.label);
- }
- let images = self
- .paths
- .iter()
- .map(|path| open_parcel_file(&path, self.writable))
- .collect::<Result<Vec<_>, _>>()?;
- Ok(AidlPartition { images, writable: self.writable, label: self.label.to_owned() })
+ Ok(AidlPartition {
+ image: Some(open_parcel_file(&self.path, self.writable)?),
+ writable: self.writable,
+ label: self.label.to_owned(),
+ })
}
}
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index a5b2898..0ca7036 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -35,28 +35,30 @@
target: "/system/etc",
name: "etc",
},
+ {
+ target: "/system/bin",
+ name: "bin",
+ },
]
android_system_image {
name: "microdroid",
use_avb: true,
- avb_private_key: ":avb_testkey_rsa4096",
+ avb_private_key: ":microdroid_sign_key",
avb_algorithm: "SHA256_RSA4096",
partition_name: "system",
deps: [
"init_second_stage",
"microdroid_build_prop",
"microdroid_init_rc",
+ "microdroid_ueventd_rc",
"microdroid_launcher",
- "microdroid_manager",
- "ueventd.rc",
"libbinder",
"libbinder_ndk",
"libstdc++",
"logcat",
"logd",
- "run-as",
"secilc",
// "com.android.adbd" requires these,
@@ -69,30 +71,33 @@
"apexd",
"debuggerd",
- "keystore2",
+ "diced.microdroid",
"linker",
"linkerconfig",
- "servicemanager",
+ "servicemanager.microdroid",
"tombstoned",
"cgroups.json",
"public.libraries.android.txt",
- // TODO(b/185767624): remove hidl after full keymint support
- "hwservicemanager",
-
"microdroid_plat_sepolicy_and_mapping.sha256",
"microdroid_file_contexts",
- "microdroid_hwservice_contexts",
"microdroid_property_contexts",
"microdroid_service_contexts",
- "microdroid_keystore2_key_contexts",
"microdroid_compatibility_matrix",
"microdroid_manifest",
+
+ // TODO(b/195425111) these four should be added automatically
+ "android.hardware.security.secureclock-V1-ndk",
+ "android.hardware.security.sharedsecret-V1-ndk",
+ "libcrypto",
+ "liblzma",
] + microdroid_shell_and_utilities,
multilib: {
common: {
deps: [
+ // non-updatable & mandatory apexes
"com.android.runtime",
+
"microdroid_plat_sepolicy.cil",
"microdroid_plat_mapping_file",
],
@@ -101,6 +106,8 @@
deps: [
"apkdmverity",
"authfs",
+ "authfs_service",
+ "microdroid_manager",
"zipfuse",
// TODO(b/184872979): Needed by authfs. Remove once the Rust API is created.
@@ -123,6 +130,13 @@
installable: false, // avoid collision with system partition's init.rc
}
+prebuilt_etc {
+ name: "microdroid_ueventd_rc",
+ filename: "ueventd.rc",
+ src: "ueventd.rc",
+ installable: false, // avoid collision with system partition's ueventd.rc
+}
+
prebuilt_root {
name: "microdroid_build_prop",
filename: "build.prop",
@@ -154,11 +168,11 @@
android_filesystem {
name: "microdroid_vendor",
+ partition_name: "vendor",
use_avb: true,
deps: [
- "android.hardware.security.keymint-service.microdroid",
+ "android.hardware.security.dice-service.microdroid",
"microdroid_fstab",
- "microdroid_precompiled_sepolicy",
"microdroid_precompiled_sepolicy.plat_sepolicy_and_mapping.sha256",
"microdroid_vendor_manifest",
"microdroid_vendor_compatibility_matrix",
@@ -169,10 +183,11 @@
"microdroid_vendor_sepolicy.cil",
"microdroid_plat_pub_versioned.cil",
"microdroid_plat_sepolicy_vers.txt",
+ "microdroid_precompiled_sepolicy",
],
},
},
- avb_private_key: ":avb_testkey_rsa4096",
+ avb_private_key: ":microdroid_sign_key",
avb_algorithm: "SHA256_RSA4096",
file_contexts: ":microdroid_vendor_file_contexts.gen",
}
@@ -183,24 +198,24 @@
size: "auto",
default_group: [
{
- name: "system",
+ name: "system_a",
filesystem: ":microdroid",
},
{
- name: "vendor",
+ name: "vendor_a",
filesystem: ":microdroid_vendor",
},
],
}
-microdroid_boot_cmdline = "panic=-1 " +
- "bootconfig " +
- // TODO(b/181936135) make the ratelimiting conditional; ratelimiting on prod build
- "printk.devkmsg=on "
+microdroid_boot_cmdline = [
+ "panic=-1",
+ "bootconfig",
+ "ioremap_guard",
+]
bootimg {
name: "microdroid_boot-5.10",
- ramdisk_module: "microdroid_ramdisk-5.10",
// We don't have kernel for arm and x86. But Soong demands one when it builds for
// arm or x86 target. Satisfy that by providing an empty file as the kernel.
kernel_prebuilt: "empty_kernel",
@@ -211,14 +226,33 @@
},
x86_64: {
kernel_prebuilt: ":kernel_prebuilts-5.10-x86_64",
- cmdline: microdroid_boot_cmdline + "pci=noacpi ",
+ cmdline: microdroid_boot_cmdline + [
+ // console=none is to work around the x86 specific u-boot behavior which when
+ // console= option is not found in the kernel commandline console=ttyS0 is
+ // automatically added. By adding console=none, we can prevent u-boot from doing
+ // that. Note that console is set to hvc0 by bootconfig if the VM is configured as
+ // debuggable.
+ "console=none",
+ "acpi=noirq",
+ ],
},
},
+
dtb_prebuilt: "dummy_dtb.img",
header_version: "4",
partition_name: "boot",
use_avb: true,
- avb_private_key: ":avb_testkey_rsa4096",
+ avb_private_key: ":microdroid_sign_key",
+}
+
+bootimg {
+ name: "microdroid_init_boot",
+ ramdisk_module: "microdroid_ramdisk-5.10",
+ kernel_prebuilt: "empty_kernel",
+ header_version: "4",
+ partition_name: "init_boot",
+ use_avb: true,
+ avb_private_key: ":microdroid_sign_key",
}
android_filesystem {
@@ -255,21 +289,27 @@
},
partition_name: "vendor_boot",
use_avb: true,
- avb_private_key: ":avb_testkey_rsa4096",
+ avb_private_key: ":microdroid_sign_key",
+}
+
+prebuilt_kernel_modules {
+ name: "microdroid_kernel_modules",
+ arch: {
+ arm64: {
+ srcs: [":virt_device_prebuilts_kernel_modules_microdroid-5.10-arm64"],
+ },
+ x86_64: {
+ srcs: [":virt_device_prebuilts_kernel_modules_microdroid-5.10-x86_64"],
+ },
+ },
+ kernel_version: "5.10",
}
android_filesystem {
name: "microdroid_vendor_ramdisk-5.10",
- arch: {
- arm64: {
- deps: ["virt_device_prebuilts_kernel_modules-5.10-arm64"],
- },
- x86_64: {
- deps: ["virt_device_prebuilts_kernel_modules-5.10-x86_64"],
- },
- },
deps: [
"microdroid_fstab",
+ "microdroid_kernel_modules",
],
base_dir: "first_stage_ramdisk",
type: "compressed_cpio",
@@ -305,10 +345,94 @@
cmd: "cat $(in) > $(out)",
}
+vbmeta {
+ name: "microdroid_vbmeta_bootconfig",
+ partition_name: "vbmeta",
+ private_key: ":microdroid_sign_key",
+ chained_partitions: [
+ {
+ name: "bootconfig",
+ private_key: ":microdroid_sign_key",
+ },
+ {
+ name: "uboot_env",
+ private_key: ":microdroid_sign_key",
+ },
+ ],
+}
+
+// See external/avb/avbtool.py
+// MAX_VBMETA_SIZE=64KB, MAX_FOOTER_SIZE=4KB
+avb_hash_footer_kb = "68"
+
prebuilt_etc {
- name: "microdroid_bootconfig_debug",
- src: "bootconfig.debug",
- filename: "microdroid_bootconfig.debug",
+ name: "microdroid_bootconfig_normal",
+ src: ":microdroid_bootconfig_normal_gen",
+ filename: "microdroid_bootconfig.normal",
+}
+
+prebuilt_etc {
+ name: "microdroid_bootconfig_app_debuggable",
+ src: ":microdroid_bootconfig_app_debuggable_gen",
+ filename: "microdroid_bootconfig.app_debuggable",
+}
+
+prebuilt_etc {
+ name: "microdroid_bootconfig_full_debuggable",
+ src: ":microdroid_bootconfig_full_debuggable_gen",
+ filename: "microdroid_bootconfig.full_debuggable",
+}
+
+// TODO(jiyong): make a new module type that does the avb signing
+genrule {
+ name: "microdroid_bootconfig_normal_gen",
+ tools: ["avbtool"],
+ srcs: [
+ "bootconfig.normal",
+ ":microdroid_sign_key",
+ ],
+ out: ["microdroid_bootconfig.normal"],
+ cmd: "cp $(location bootconfig.normal) $(out) && " +
+ "$(location avbtool) add_hash_footer " +
+ "--algorithm SHA256_RSA4096 " +
+ "--partition_name bootconfig " +
+ "--key $(location :microdroid_sign_key) " +
+ "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+ "--image $(out)",
+}
+
+genrule {
+ name: "microdroid_bootconfig_app_debuggable_gen",
+ tools: ["avbtool"],
+ srcs: [
+ "bootconfig.app_debuggable",
+ ":microdroid_sign_key",
+ ],
+ out: ["microdroid_bootconfig.app_debuggable"],
+ cmd: "cp $(location bootconfig.app_debuggable) $(out) && " +
+ "$(location avbtool) add_hash_footer " +
+ "--algorithm SHA256_RSA4096 " +
+ "--partition_name bootconfig " +
+ "--key $(location :microdroid_sign_key) " +
+ "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+ "--image $(out)",
+}
+
+genrule {
+ name: "microdroid_bootconfig_full_debuggable_gen",
+ tools: ["avbtool"],
+ srcs: [
+ "bootconfig.full_debuggable",
+ ":microdroid_sign_key",
+ ],
+ out: ["microdroid_bootconfig.full_debuggable"],
+ cmd: "cp $(location bootconfig.full_debuggable) $(out) && " +
+ "$(location avbtool) add_hash_footer " +
+ "--algorithm SHA256_RSA4096 " +
+ "--partition_name bootconfig " +
+ "--key $(location :microdroid_sign_key) " +
+ "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+ "--image $(out)",
}
prebuilt_etc {
@@ -326,22 +450,18 @@
// For unknown reason, the signed bootloader doesn't work on x86_64. Until the problem
// is fixed, let's use the unsigned bootloader for the architecture.
// TODO(b/185115783): remove this
- src: ":cuttlefish_crosvm_bootloader",
+ src: ":microdroid_bootloader_pubkey_replaced",
},
},
filename: "microdroid_bootloader",
}
-// See external/avb/avbtool.py
-// MAX_VBMETA_SIZE=64KB, MAX_FOOTER_SIZE=4KB
-avb_hash_footer_kb = "68"
-
genrule {
name: "microdroid_bootloader_gen",
tools: ["avbtool"],
srcs: [
- ":cuttlefish_crosvm_bootloader",
- ":avb_testkey_rsa4096",
+ ":microdroid_bootloader_pubkey_replaced",
+ ":microdroid_sign_key",
],
out: ["bootloader-signed"],
// 1. Copy the input to the output becaise avbtool modifies --image in
@@ -350,72 +470,101 @@
// bootloader file whose size is 1. It can't pass avbtool.
// 3. Add the hash footer. The partition size is set to (image size + 68KB)
// rounded up to 4KB boundary.
- cmd: "cp $(location :cuttlefish_crosvm_bootloader) $(out) && " +
+ cmd: "cp $(location :microdroid_bootloader_pubkey_replaced) $(out) && " +
"if [ $$(stat --format=%s $(out)) -gt 4096 ]; then " +
"$(location avbtool) add_hash_footer " +
"--algorithm SHA256_RSA4096 " +
"--partition_name bootloader " +
- "--key $(location :avb_testkey_rsa4096) " +
+ "--key $(location :microdroid_sign_key) " +
"--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
"--image $(out)" +
"; fi",
}
+// Replace avbpubkey of prebuilt bootloader with the avbpubkey of the signing key
+genrule {
+ name: "microdroid_bootloader_pubkey_replaced",
+ tools: ["replace_bytes"],
+ srcs: [
+ ":microdroid_crosvm_bootloader", // input (bootloader)
+ ":microdroid_crosvm_bootloader.avbpubkey", // old bytes (old pubkey)
+ ":microdroid_bootloader_avbpubkey_gen", // new bytes (new pubkey)
+ ],
+ out: ["bootloader-pubkey-replaced"],
+ // 1. Copy the input to the output (replace_bytes modifies the file in-place)
+ // 2. Check if the file is big enough. For arm and x86 we have fake
+ // bootloader file whose size is 1. (replace_bytes fails if key not found)
+ // 3. Replace embedded pubkey with new one.
+ cmd: "cp $(location :microdroid_crosvm_bootloader) $(out) && " +
+ "if [ $$(stat --format=%s $(out)) -gt 4096 ]; then " +
+ "$(location replace_bytes) $(out) " +
+ "$(location :microdroid_crosvm_bootloader.avbpubkey) " +
+ "$(location :microdroid_bootloader_avbpubkey_gen)" +
+ "; fi",
+}
+
+// Apex keeps a copy of avbpubkey embedded in bootloader so that embedded avbpubkey can be replaced
+// while re-signing bootloader.
+prebuilt_etc {
+ name: "microdroid_bootloader.avbpubkey",
+ src: ":microdroid_bootloader_avbpubkey_gen",
+}
+
+// Generate avbpubkey from the signing key
+genrule {
+ name: "microdroid_bootloader_avbpubkey_gen",
+ tools: ["avbtool"],
+ srcs: [":microdroid_sign_key"],
+ out: ["bootloader.pubkey"],
+ cmd: "$(location avbtool) extract_public_key " +
+ "--key $(location :microdroid_sign_key) " +
+ "--output $(out)",
+}
+
prebuilt_etc {
name: "microdroid_uboot_env",
src: ":microdroid_uboot_env_gen",
- arch: {
- x86_64: {
- src: ":microdroid_uboot_env_gen_x86_64",
- },
- },
filename: "uboot_env.img",
}
genrule {
name: "microdroid_uboot_env_gen",
- tools: ["mkenvimage_host"],
- srcs: ["uboot-env.txt"],
+ tools: [
+ "mkenvimage_host",
+ "avbtool",
+ ],
+ srcs: [
+ "uboot-env.txt",
+ ":microdroid_sign_key",
+ ],
out: ["output.img"],
- cmd: "$(location mkenvimage_host) -s 4096 -o $(out) $(in)",
+ cmd: "$(location mkenvimage_host) -s 4096 -o $(out) $(location uboot-env.txt) && " +
+ "$(location avbtool) add_hash_footer " +
+ "--algorithm SHA256_RSA4096 " +
+ "--partition_name uboot_env " +
+ "--key $(location :microdroid_sign_key) " +
+ "--partition_size $$(( " + avb_hash_footer_kb + " * 1024 + ( $$(stat --format=%s $(out)) + 4096 - 1 ) / 4096 * 4096 )) " +
+ "--image $(out)",
}
-genrule {
- name: "microdroid_uboot_env_gen_x86_64",
- tools: ["mkenvimage_host"],
- srcs: ["uboot-env-x86_64.txt"],
- out: ["output.img"],
- cmd: "$(location mkenvimage_host) -s 4096 -o $(out) $(in)",
+// Note that keys can be different for filesystem images even though we're using the same key
+// for microdroid. However, the key signing VBmeta should match with the pubkey embedded in
+// bootloader.
+filegroup {
+ name: "microdroid_sign_key",
+ srcs: [":avb_testkey_rsa4096"],
}
vbmeta {
name: "microdroid_vbmeta",
partition_name: "vbmeta",
- private_key: ":avb_testkey_rsa4096",
+ private_key: ":microdroid_sign_key",
partitions: [
"microdroid_vendor",
"microdroid_vendor_boot-5.10",
- ],
- chained_partitions: [
- {
- name: "vbmeta_system",
- rollback_index_location: 1,
- private_key: ":avb_testkey_rsa4096",
- },
- {
- name: "boot",
- rollback_index_location: 2,
- private_key: ":avb_testkey_rsa4096",
- },
- ],
-}
-
-vbmeta {
- name: "microdroid_vbmeta_system",
- partition_name: "vbmeta_system",
- private_key: ":avb_testkey_rsa4096",
- partitions: [
"microdroid",
+ "microdroid_boot-5.10",
+ "microdroid_init_boot",
],
}
diff --git a/microdroid/README.md b/microdroid/README.md
index 0578921..a652139 100644
--- a/microdroid/README.md
+++ b/microdroid/README.md
@@ -7,7 +7,7 @@
## Prerequisites
-Any 64-bit target (either x86\_64 or arm64) is supported. 32-bit target is not
+Any 64-bit target (either x86_64 or arm64) is supported. 32-bit target is not
supported. Note that we currently don't support user builds; only userdebug
builds are supported.
@@ -21,11 +21,8 @@
Build the target after adding the line, and flash it. This step needs to be done
only once for the target.
-If you are using `yukawa` (VIM3L) or `aosp_cf_x86_64_phone` (Cuttlefish), adding
-above line is not necessary as it's already done.
-
-Instructions for building and flashing Android for `yukawa` can be found
-[here](../docs/getting_started/yukawa.md).
+If you are using `aosp_oriole` (Pixel 6) or `aosp_cf_x86_64_phone` (Cuttlefish),
+adding above line is not necessary as it's already done.
## Building and installing microdroid
@@ -34,12 +31,12 @@
```sh
banchan com.android.virt aosp_arm64
-m apps_only dist
+UNBUNDLED_BUILD_SDKS_FROM_SOURCE=true m apps_only dist
adb install out/dist/com.android.virt.apex
adb reboot
```
-If your target is x86\_64 (e.g. `aosp_cf_x86_64_phone`), replace `aosp_arm64`
+If your target is x86_64 (e.g. `aosp_cf_x86_64_phone`), replace `aosp_arm64`
with `aosp_x86_64`.
## Building an app
@@ -69,7 +66,7 @@
```json
{
- "os": {"name": "microdroid"},
+ "os": { "name": "microdroid" },
"task": {
"type": "microdroid_launcher",
"command": "MyMicrodroidApp.so"
@@ -78,7 +75,7 @@
```
The value of `task.command` should match with the name of the shared library
-defined above. If your app rquires APEXes to be imported, you can declare the
+defined above. If your app requires APEXes to be imported, you can declare the
list in `apexes` key like following.
```json
@@ -106,27 +103,15 @@
// directory.
```
-Finally, you build and sign the APK.
+Finally, you build the APK.
```sh
TARGET_BUILD_APPS=MyApp m apps_only dist
-m apksigner
-apksigner sign --ks path_to_keystore out/dist/MyApp.apk
```
-`path_to_keystore` should be replaced with the actual path to the keystore,
-which can be created as follows:
-
-```sh
-keytool -keystore my_keystore -genkey -alias my_key
-```
-
-Make sure that `.apk.idsig` file is also generated in the same directory as the
-signed APK.
-
## Running the app on microdroid
-First of all, install the signed APK to the target device.
+First of all, install the APK to the target device.
```sh
adb install out/dist/MyApp.apk
@@ -134,6 +119,7 @@
`ALL_CAP`s below are placeholders. They need to be replaced with correct
values:
+
* `VM_CONFIG_FILE`: the name of the VM config file that you embedded in the APK.
(e.g. `vm_config.json`)
* `PACKAGE_NAME_OF_YOUR_APP`: package name of your app (e.g. `com.acme.app`).
@@ -144,14 +130,6 @@
```
It shall report a cryptic path similar to `/data/app/~~OgZq==/com.acme.app-HudMahQ==/base.apk`.
-Push idsig of the APK to the device.
-
-```sh
-TEST_ROOT=/data/local/tmp/virt
-adb shell mkdir $TEST_ROOT
-adb push out/dist/MyApp.apk.idsig $TEST_ROOT/
-```
-
Execute the following commands to launch a VM. The VM will boot to microdroid
and then automatically execute your app (the shared library
`MyMicrodroidApp.so`).
@@ -174,10 +152,10 @@
Stopping the VM can be done as follows:
```sh
-adb shell /apex/com.android.virt/bin/vm stop CID
+adb shell /apex/com.android.virt/bin/vm stop $CID
```
-, where `CID` is the reported CID value. This works only when the `vm` was
+, where `$CID` is the reported CID value. This works only when the `vm` was
invoked with the `--daemonize` flag. If the flag was not used, press Ctrl+C on
the console where the `vm run-app` command was invoked.
@@ -190,10 +168,10 @@
adb connect localhost:8000
```
-`CID` should be the CID that `vm` reported upon execution of the `vm run`
-command in the above. You can also check it with `adb shell
-"/apex/com.android.virt/bin/vm list"`. `5555` must be
-the value. `8000` however can be any port in the development machine.
+`$CID` should be the CID that `vm` reported upon execution of the `vm run`
+command in the above. You can also check it with
+`adb shell "/apex/com.android.virt/bin/vm list"`. `5555` must be the value.
+`8000` however can be any port on the development machine.
Done. Now you can log into microdroid. Have fun!
diff --git a/microdroid/bootconfig.app_debuggable b/microdroid/bootconfig.app_debuggable
new file mode 100644
index 0000000..0d85186
--- /dev/null
+++ b/microdroid/bootconfig.app_debuggable
@@ -0,0 +1,19 @@
+# The app is debuggable.
+androidboot.microdroid.app_debuggable=1
+
+# TODO(b/203369076) This should be 0 to disable adb rooting. For now, we can't do that because
+# if this is set to 0, adbd enforces the host authentication but we don't put the adb
+# public key (which represents the owner) in the VM yet.
+androidboot.microdroid.debuggable=0
+
+# Console output is not redirected to the host-side.
+# TODO(b/219743539) This doesn't successfully disable the console
+kernel.printk.devkmsg=off
+kernel.console=null
+
+# ADB is supported but rooting is prohibited.
+androidboot.adb.enabled=1
+
+# logd is enabled
+# TODO(b/200914564) Filter only the log from the app
+androidboot.logd.enabled=1
diff --git a/microdroid/bootconfig.debug b/microdroid/bootconfig.debug
deleted file mode 100644
index d83ecb9..0000000
--- a/microdroid/bootconfig.debug
+++ /dev/null
@@ -1 +0,0 @@
-androidboot.selinux = permissive
diff --git a/microdroid/bootconfig.full_debuggable b/microdroid/bootconfig.full_debuggable
new file mode 100644
index 0000000..0bdd810
--- /dev/null
+++ b/microdroid/bootconfig.full_debuggable
@@ -0,0 +1,17 @@
+# The app is debuggable as full_debuggable is a superset of app_debuggable.
+androidboot.microdroid.app_debuggable=1
+
+# ro.debuggable is set.
+androidboot.microdroid.debuggable=1
+
+# Kernel message is exported.
+kernel.printk.devkmsg=on
+kernel.console=hvc0
+
+# ADB is supported and rooting is possible. Note that
+# ro.adb.secure is still 0 (see build.prop) which means that adbd is started
+# unrooted by default. To root, developer should explicitly execute `adb root`.
+androidboot.adb.enabled=1
+
+# logd is enabled
+androidboot.logd.enabled=1
diff --git a/microdroid/bootconfig.normal b/microdroid/bootconfig.normal
new file mode 100644
index 0000000..ea83287
--- /dev/null
+++ b/microdroid/bootconfig.normal
@@ -0,0 +1,17 @@
+# The app is not debuggable.
+androidboot.microdroid.app_debuggable=0
+
+# ro.debuggable is off
+androidboot.microdroid.debuggable=0
+
+# Console output is not redirected to the host-side.
+# TODO(b/219743539) This doesn't successfully disable the console
+kernel.printk.devkmsg=off
+# TODO(b/219743539) Setting this to null makes everything slow
+kernel.console=hvc0
+
+# ADB is not enabled.
+androidboot.adb.enabled=0
+
+# logd is not enabled
+androidboot.logd.enabled=0
diff --git a/microdroid/bootconfig.x86_64 b/microdroid/bootconfig.x86_64
index 75e4a80..6076889 100644
--- a/microdroid/bootconfig.x86_64
+++ b/microdroid/bootconfig.x86_64
@@ -1 +1 @@
-androidboot.boot_devices = pci0000:00/0000:00:01.0,pci0000:00/0000:00:02.0,pci0000:00/0000:00:03.0
+androidboot.boot_devices = pci0000:00/0000:00:04.0,pci0000:00/0000:00:05.0,pci0000:00/0000:00:06.0
diff --git a/microdroid/build.prop b/microdroid/build.prop
index eaca63d..8cbabff 100644
--- a/microdroid/build.prop
+++ b/microdroid/build.prop
@@ -1,9 +1,13 @@
# build.prop for microdroid
ro.apex.updatable=true
-ro.debuggable=1
ro.adb.secure=0
service.adb.listen_addrs=vsock:5555
# TODO(b/189164487): support build related properties
-ro.build.version.release=11
-ro.build.version.security_patch=2021-07-05
+ro.build.version.codename=Tiramisu
+ro.build.version.release=12
+ro.build.version.sdk=32
+ro.build.version.security_patch=2021-12-05
+
+# Payload metadata partition
+apexd.payload_metadata.path=/dev/block/by-name/payload-metadata
diff --git a/microdroid/dice/Android.bp b/microdroid/dice/Android.bp
new file mode 100644
index 0000000..8026581
--- /dev/null
+++ b/microdroid/dice/Android.bp
@@ -0,0 +1,29 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_binary {
+ name: "android.hardware.security.dice-service.microdroid",
+ srcs: ["service.rs"],
+ relative_install_path: "hw",
+ vendor: true,
+ prefer_rlib: true,
+ rustlibs: [
+ "android.hardware.security.dice-V1-rust",
+ "libandroid_logger",
+ "libanyhow",
+ "libbinder_rs",
+ "libbyteorder",
+ "libdiced_open_dice_cbor",
+ "libdiced_sample_inputs",
+ "libdiced_vendor",
+ "liblibc",
+ "liblog_rust",
+ "libserde",
+ ],
+ init_rc: ["android.hardware.security.dice-service.microdroid.rc"],
+ vintf_fragments: [
+ "android.hardware.security.dice-service.microdroid.xml",
+ ],
+ bootstrap: true,
+}
diff --git a/microdroid/dice/android.hardware.security.dice-service.microdroid.rc b/microdroid/dice/android.hardware.security.dice-service.microdroid.rc
new file mode 100644
index 0000000..7d9d441
--- /dev/null
+++ b/microdroid/dice/android.hardware.security.dice-service.microdroid.rc
@@ -0,0 +1,3 @@
+service vendor.dice-microdroid /vendor/bin/hw/android.hardware.security.dice-service.microdroid
+ user diced
+ group diced
diff --git a/microdroid/dice/android.hardware.security.dice-service.microdroid.xml b/microdroid/dice/android.hardware.security.dice-service.microdroid.xml
new file mode 100644
index 0000000..cf6c482
--- /dev/null
+++ b/microdroid/dice/android.hardware.security.dice-service.microdroid.xml
@@ -0,0 +1,6 @@
+<manifest version="1.0" type="device">
+ <hal format="aidl">
+ <name>android.hardware.security.dice</name>
+ <fqname>IDiceDevice/default</fqname>
+ </hal>
+</manifest>
diff --git a/microdroid/dice/service.rs b/microdroid/dice/service.rs
new file mode 100644
index 0000000..8199c7c
--- /dev/null
+++ b/microdroid/dice/service.rs
@@ -0,0 +1,225 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Main entry point for the microdroid IDiceDevice HAL implementation.
+
+use anyhow::{bail, Error, Result};
+use byteorder::{NativeEndian, ReadBytesExt};
+use diced::{
+ dice,
+ hal_node::{DiceArtifacts, DiceDevice, ResidentHal, UpdatableDiceArtifacts},
+};
+use libc::{c_void, mmap, munmap, MAP_FAILED, MAP_PRIVATE, PROT_READ};
+use serde::{Deserialize, Serialize};
+use std::fs;
+use std::os::unix::io::AsRawFd;
+use std::panic;
+use std::path::{Path, PathBuf};
+use std::ptr::null_mut;
+use std::slice;
+use std::sync::Arc;
+
+const AVF_STRICT_BOOT: &str = "/sys/firmware/devicetree/base/chosen/avf,strict-boot";
+const DICE_HAL_SERVICE_NAME: &str = "android.hardware.security.dice.IDiceDevice/default";
+
+/// Artifacts that are mapped into the process address space from the driver.
+struct MappedDriverArtifacts<'a> {
+ mmap_addr: *mut c_void,
+ mmap_size: usize,
+ cdi_attest: &'a [u8; dice::CDI_SIZE],
+ cdi_seal: &'a [u8; dice::CDI_SIZE],
+ bcc: &'a [u8],
+}
+
+impl MappedDriverArtifacts<'_> {
+ fn new(driver_path: &Path) -> Result<Self> {
+ let mut file = fs::File::open(driver_path)
+ .map_err(|error| Error::new(error).context("Opening driver"))?;
+ let mmap_size =
+ file.read_u64::<NativeEndian>()
+ .map_err(|error| Error::new(error).context("Reading driver"))? as usize;
+ // It's safe to map the driver as the service will only create a single
+ // mapping per process.
+ let mmap_addr = unsafe {
+ let fd = file.as_raw_fd();
+ mmap(null_mut(), mmap_size, PROT_READ, MAP_PRIVATE, fd, 0)
+ };
+ if mmap_addr == MAP_FAILED {
+ bail!("Failed to mmap {:?}", driver_path);
+ }
+ // The slice is created for the region of memory that was just
+ // successfully mapped into the process address space so it will be
+ // accessible and not referenced from anywhere else.
+ let mmap_buf =
+ unsafe { slice::from_raw_parts((mmap_addr as *const u8).as_ref().unwrap(), mmap_size) };
+ // Very inflexible parsing / validation of the BccHandover data. Assumes deterministically
+ // encoded CBOR.
+ //
+ // BccHandover = {
+ // 1 : bstr .size 32, ; CDI_Attest
+ // 2 : bstr .size 32, ; CDI_Seal
+ // 3 : Bcc, ; Certificate chain
+ // }
+ if mmap_buf[0..4] != [0xa3, 0x01, 0x58, 0x20]
+ || mmap_buf[36..39] != [0x02, 0x58, 0x20]
+ || mmap_buf[71] != 0x03
+ {
+ bail!("BccHandover format mismatch");
+ }
+ Ok(Self {
+ mmap_addr,
+ mmap_size,
+ cdi_attest: mmap_buf[4..36].try_into().unwrap(),
+ cdi_seal: mmap_buf[39..71].try_into().unwrap(),
+ bcc: &mmap_buf[72..],
+ })
+ }
+}
+
+impl Drop for MappedDriverArtifacts<'_> {
+ fn drop(&mut self) {
+ // All references to the mapped region have the same lifetime as self.
+ // Since self is being dropped, so are all the references to the mapped
+ // region meaning it's safe to unmap.
+ let ret = unsafe { munmap(self.mmap_addr, self.mmap_size) };
+ if ret != 0 {
+ log::warn!("Failed to munmap ({})", ret);
+ }
+ }
+}
+
+impl DiceArtifacts for MappedDriverArtifacts<'_> {
+ fn cdi_attest(&self) -> &[u8; dice::CDI_SIZE] {
+ self.cdi_attest
+ }
+ fn cdi_seal(&self) -> &[u8; dice::CDI_SIZE] {
+ self.cdi_seal
+ }
+ fn bcc(&self) -> Vec<u8> {
+ // The BCC only contains public information so it's fine to copy.
+ self.bcc.to_vec()
+ }
+}
+
+/// Artifacts that are kept in the process address space after the artifacts
+/// from the driver have been consumed.
+#[derive(Clone, Serialize, Deserialize)]
+struct RawArtifacts {
+ cdi_attest: [u8; dice::CDI_SIZE],
+ cdi_seal: [u8; dice::CDI_SIZE],
+ bcc: Vec<u8>,
+}
+
+impl DiceArtifacts for RawArtifacts {
+ fn cdi_attest(&self) -> &[u8; dice::CDI_SIZE] {
+ &self.cdi_attest
+ }
+ fn cdi_seal(&self) -> &[u8; dice::CDI_SIZE] {
+ &self.cdi_seal
+ }
+ fn bcc(&self) -> Vec<u8> {
+ // The BCC only contains public information so it's fine to copy.
+ self.bcc.clone()
+ }
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+enum DriverArtifactManager {
+ Invalid,
+ Driver(PathBuf),
+ Updated(RawArtifacts),
+}
+
+impl DriverArtifactManager {
+ fn new(driver_path: &Path) -> Self {
+ if driver_path.exists() {
+ log::info!("Using DICE values from driver");
+ Self::Driver(driver_path.to_path_buf())
+ } else if Path::new(AVF_STRICT_BOOT).exists() {
+ log::error!("Strict boot requires DICE value from driver but none were found");
+ Self::Invalid
+ } else {
+ log::warn!("Using sample DICE values");
+ let (cdi_attest, cdi_seal, bcc) = diced_sample_inputs::make_sample_bcc_and_cdis()
+ .expect("Failed to create sample dice artifacts.");
+ Self::Updated(RawArtifacts {
+ cdi_attest: cdi_attest[..].try_into().unwrap(),
+ cdi_seal: cdi_seal[..].try_into().unwrap(),
+ bcc,
+ })
+ }
+ }
+}
+
+impl UpdatableDiceArtifacts for DriverArtifactManager {
+ fn with_artifacts<F, T>(&self, f: F) -> Result<T>
+ where
+ F: FnOnce(&dyn DiceArtifacts) -> Result<T>,
+ {
+ match self {
+ Self::Invalid => bail!("No DICE artifacts available."),
+ Self::Driver(driver_path) => f(&MappedDriverArtifacts::new(driver_path.as_path())?),
+ Self::Updated(raw_artifacts) => f(raw_artifacts),
+ }
+ }
+ fn update(self, new_artifacts: &impl DiceArtifacts) -> Result<Self> {
+ if let Self::Invalid = self {
+ bail!("Cannot update invalid DICE artifacts.");
+ }
+ if let Self::Driver(driver_path) = self {
+ // Writing to the device wipes the artifacts. The string is ignored
+ // by the driver but included for documentation.
+ fs::write(driver_path, "wipe")
+ .map_err(|error| Error::new(error).context("Wiping driver"))?;
+ }
+ Ok(Self::Updated(RawArtifacts {
+ cdi_attest: *new_artifacts.cdi_attest(),
+ cdi_seal: *new_artifacts.cdi_seal(),
+ bcc: new_artifacts.bcc(),
+ }))
+ }
+}
+
+fn main() {
+ android_logger::init_once(
+ android_logger::Config::default()
+ .with_tag("android.hardware.security.dice")
+ .with_min_level(log::Level::Debug),
+ );
+ // Redirect panic messages to logcat.
+ panic::set_hook(Box::new(|panic_info| {
+ log::error!("{}", panic_info);
+ }));
+
+ // Saying hi.
+ log::info!("android.hardware.security.dice is starting.");
+
+ let hal_impl = Arc::new(
+ unsafe {
+ // Safety: ResidentHal cannot be used in multi threaded processes.
+ // This service does not start a thread pool. The main thread is the only thread
+ // joining the thread pool, thereby keeping the process single threaded.
+ ResidentHal::new(DriverArtifactManager::new(Path::new("/dev/open-dice0")))
+ }
+ .expect("Failed to create ResidentHal implementation."),
+ );
+
+ let hal = DiceDevice::new_as_binder(hal_impl).expect("Failed to construct hal service.");
+
+ binder::add_service(DICE_HAL_SERVICE_NAME, hal.as_binder())
+ .expect("Failed to register IDiceDevice Service");
+
+ log::info!("Joining thread pool now.");
+ binder::ProcessState::join_thread_pool();
+}
diff --git a/microdroid/fstab.microdroid b/microdroid/fstab.microdroid
index 129718e..25d82cc 100644
--- a/microdroid/fstab.microdroid
+++ b/microdroid/fstab.microdroid
@@ -1,2 +1,2 @@
-system /system ext4 noatime,ro,errors=panic wait,first_stage_mount,logical
-vendor /vendor ext4 noatime,ro,errors=panic wait,first_stage_mount,logical
+system /system ext4 noatime,ro,errors=panic wait,slotselect,avb=vbmeta,first_stage_mount,logical
+vendor /vendor ext4 noatime,ro,errors=panic wait,slotselect,avb=vbmeta,first_stage_mount,logical
diff --git a/microdroid/init.rc b/microdroid/init.rc
index 157e534..f6d5092 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -15,25 +15,8 @@
# set RLIMIT_NICE to allow priorities from 19 to -20
setrlimit nice 40 40
- # in microdroid, we don't use "bootstrap" mount namespace
- # because APEXes are passed from host and are available
- # from the start. We don't need to wait till /data is ready.
- enter_default_mount_ns
-
start ueventd
- # TODO(b/190343842) verify apexes/apk before mounting them.
-
- # Exec apexd in the VM mode to avoid unnecessary overhead of normal mode.
- # (e.g. session management)
- exec - root system -- /system/bin/apexd --vm
-
- perform_apex_config
-
- exec - root system -- /system/bin/apkdmverity /dev/block/by-name/microdroid-apk /dev/block/by-name/microdroid-apk-idsig microdroid-apk
- mkdir /mnt/apk 0755 system system
- start zipfuse
-
on init
# Mount binderfs
mkdir /dev/binderfs
@@ -74,21 +57,42 @@
chmod 0664 /dev/cpuset/background/tasks
chmod 0664 /dev/cpuset/system-background/tasks
+ start servicemanager
+
+ start vendor.dice-microdroid
+ start diced
+
+ mkdir /mnt/apk 0755 system system
+ mkdir /mnt/extra-apk 0755 root root
+ # Microdroid_manager starts apkdmverity/zipfuse/apexd
+ start microdroid_manager
+
+ # restorecon so microdroid_manager can create subdirectories
+ restorecon /mnt/extra-apk
+
+ # Wait for apexd to finish activating APEXes before starting more processes.
+ wait_for_prop apexd.status activated
+ perform_apex_config
+
+ # Notify to microdroid_manager that perform_apex_config is done.
+ # Microdroid_manager shouldn't execute payload before this, because app
+ # payloads are not designed to run with bootstrap bionic
+ setprop apex_config.done true
+
+ setprop ro.debuggable ${ro.boot.microdroid.debuggable:-0}
+
+on init && property:ro.boot.microdroid.debuggable=1
+ # Mount tracefs (with GID=AID_READTRACEFS)
+ mount tracefs tracefs /sys/kernel/tracing gid=3012
+
+on init && property:ro.boot.logd.enabled=1
# Start logd before any other services run to ensure we capture all of their logs.
start logd
- start servicemanager
-
- # TODO(b/185767624): remove hidl after full keymint support
- start hwservicemanager
-
+on init && property:ro.boot.adb.enabled=1
start adbd
- # TODO(b/186396070) microdroid_manager starts zipfuse if necessary
- # TODO(b/186396070) move this before apexd for DICE derivation
- start microdroid_manager
-
-on load_persist_props_action
+on load_persist_props_action && property:ro.boot.logd.enabled=1
start logd
start logd-reinit
@@ -123,17 +127,9 @@
# The bind+remount combination allows this to work in containers.
mount rootfs rootfs / remount bind ro nodev
- start keystore2
-
-on late-fs
- start vendor.keymint-microdroid
-
# TODO(b/185767624): change the hard-coded size?
mount tmpfs tmpfs /data noatime nosuid nodev rw size=128M
-on post-fs-data
- mark_post_data
-
# We chown/chmod /data again so because mount is run as root + defaults
chown system system /data
chmod 0771 /data
@@ -141,6 +137,24 @@
# We restorecon /data in case the userdata partition has been reset.
restorecon /data
+ # set up misc directory structure first so that we can end early boot
+ # and start apexd
+ mkdir /data/misc 01771 system misc
+ # work around b/183668221
+ restorecon /data/misc
+
+ mkdir /data/misc/authfs 0700 root root
+ start authfs_service
+
+on late-fs && property:ro.debuggable=1
+ # Ensure that tracefs has the correct permissions.
+ # This does not work correctly if it is called in post-fs.
+ chmod 0755 /sys/kernel/tracing
+ chmod 0755 /sys/kernel/debug/tracing
+
+on post-fs-data
+ mark_post_data
+
mkdir /data/vendor 0771 root root
mkdir /data/vendor_ce 0771 root root
mkdir /data/vendor_de 0771 root root
@@ -154,24 +168,17 @@
start tombstoned
- # set up keystore directory structure first so that we can end early boot
- # and start apexd
- mkdir /data/misc 01771 system misc
- mkdir /data/misc/keystore 0700 keystore keystore
- # work around b/183668221
- restorecon /data/misc /data/misc/keystore
-
- # Boot level 30
- # odsign signing keys have MAX_BOOT_LEVEL=30
- # This is currently the earliest boot level, but we start at 30
- # to leave room for earlier levels.
- setprop keystore.boot_level 30
-
# For security reasons, /data/local/tmp should always be empty.
# Do not place files or directories in /data/local/tmp
mkdir /data/local 0751 root root
mkdir /data/local/tmp 0771 shell shell
+service apexd-vm /system/bin/apexd --vm
+ user root
+ group system
+ oneshot
+ disabled
+
service ueventd /system/bin/ueventd
class core
critical
@@ -187,11 +194,13 @@
seclabel u:r:shell:s0
setenv HOSTNAME console
+service seriallogging /system/bin/logcat -b all -v threadtime -f /dev/hvc2 *:V
+ disabled
+ user logd
+ group root logd
+
on fs
write /dev/event-log-tags "# content owned by logd
"
chown logd logd /dev/event-log-tags
chmod 0644 /dev/event-log-tags
-
-on property:sys.boot_completed=1
- start logd-auditctl
diff --git a/microdroid/keymint/Android.bp b/microdroid/keymint/Android.bp
deleted file mode 100644
index 6d651b9..0000000
--- a/microdroid/keymint/Android.bp
+++ /dev/null
@@ -1,39 +0,0 @@
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-cc_binary {
- name: "android.hardware.security.keymint-service.microdroid",
- relative_install_path: "hw",
- init_rc: ["android.hardware.security.keymint-service.microdroid.rc"],
- vintf_fragments: [
- "android.hardware.security.keymint-service.microdroid.xml",
- ],
- vendor: true,
- cflags: [
- "-Wall",
- "-Wextra",
- ],
- shared_libs: [
- "android.hardware.security.keymint-V1-ndk_platform",
- "lib_android_keymaster_keymint_utils",
- "libbase",
- "libbinder_ndk",
- "libcppbor_external",
- "libcrypto",
- "libkeymaster_portable",
- "libkeymint",
- "liblog",
- "libpuresoftkeymasterdevice",
- "libsoft_attestation_cert",
- "libutils",
- ],
- local_include_dirs: [
- "include",
- ],
- srcs: [
- "MicrodroidKeyMintDevice.cpp",
- "MicrodroidKeymasterContext.cpp",
- "service.cpp",
- ],
-}
diff --git a/microdroid/keymint/MicrodroidKeyMintDevice.cpp b/microdroid/keymint/MicrodroidKeyMintDevice.cpp
deleted file mode 100644
index c2f01f2..0000000
--- a/microdroid/keymint/MicrodroidKeyMintDevice.cpp
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "android.hardware.security.keymint-impl"
-#include "MicrodroidKeyMintDevice.h"
-
-#include <AndroidKeyMintOperation.h>
-#include <KeyMintUtils.h>
-#include <aidl/android/hardware/security/keymint/ErrorCode.h>
-#include <android-base/logging.h>
-#include <keymaster/android_keymaster.h>
-#include <keymaster/contexts/pure_soft_keymaster_context.h>
-#include <keymaster/keymaster_configuration.h>
-
-#include "MicrodroidKeyMintDevice.h"
-#include "MicrodroidKeymasterContext.h"
-
-namespace aidl::android::hardware::security::keymint {
-
-using namespace keymaster; // NOLINT(google-build-using-namespace)
-
-using km_utils::authToken2AidlVec;
-using km_utils::kmBlob2vector;
-using km_utils::kmError2ScopedAStatus;
-using km_utils::kmParam2Aidl;
-using km_utils::KmParamSet;
-using km_utils::kmParamSet2Aidl;
-using km_utils::legacy_enum_conversion;
-using secureclock::TimeStampToken;
-
-namespace {
-
-vector<KeyCharacteristics> convertKeyCharacteristics(const AuthorizationSet& requestParams,
- const AuthorizationSet& sw_enforced,
- const AuthorizationSet& hw_enforced,
- bool include_keystore_enforced = true) {
- KeyCharacteristics keyMintEnforced{SecurityLevel::SOFTWARE, {}};
- KeyCharacteristics keystoreEnforced{SecurityLevel::KEYSTORE, {}};
- CHECK(hw_enforced.empty()) << "Hardware-enforced list is non-empty for pure SW KeyMint";
-
- // This is a pure software implementation, so all tags are in sw_enforced.
- // We need to walk through the SW-enforced list and figure out which tags to
- // return in the software list and which in the keystore list.
-
- for (auto& entry : sw_enforced) {
- switch (entry.tag) {
- /* Invalid and unused */
- case KM_TAG_ECIES_SINGLE_HASH_MODE:
- case KM_TAG_INVALID:
- case KM_TAG_KDF:
- case KM_TAG_ROLLBACK_RESISTANCE:
- CHECK(false) << "We shouldn't see tag " << entry.tag;
- break;
-
- /* Unimplemented */
- case KM_TAG_ALLOW_WHILE_ON_BODY:
- case KM_TAG_BOOTLOADER_ONLY:
- case KM_TAG_EARLY_BOOT_ONLY:
- case KM_TAG_ROLLBACK_RESISTANT:
- case KM_TAG_STORAGE_KEY:
- case KM_TAG_TRUSTED_CONFIRMATION_REQUIRED:
- case KM_TAG_TRUSTED_USER_PRESENCE_REQUIRED:
- break;
-
- /* Keystore-enforced if not locally generated. */
- case KM_TAG_CREATION_DATETIME:
- // A KeyMaster implementation is required to add this tag to generated/imported
- // keys. A KeyMint implementation is not required to create this tag, only to echo
- // it back if it was included in the key generation/import request.
- if (requestParams.Contains(KM_TAG_CREATION_DATETIME)) {
- keystoreEnforced.authorizations.push_back(kmParam2Aidl(entry));
- }
- break;
-
- /* Disallowed in KeyCharacteristics */
- case KM_TAG_APPLICATION_DATA:
- case KM_TAG_ATTESTATION_APPLICATION_ID:
- break;
-
- /* Not key characteristics */
- case KM_TAG_ASSOCIATED_DATA:
- case KM_TAG_ATTESTATION_CHALLENGE:
- case KM_TAG_ATTESTATION_ID_BRAND:
- case KM_TAG_ATTESTATION_ID_DEVICE:
- case KM_TAG_ATTESTATION_ID_IMEI:
- case KM_TAG_ATTESTATION_ID_MANUFACTURER:
- case KM_TAG_ATTESTATION_ID_MEID:
- case KM_TAG_ATTESTATION_ID_MODEL:
- case KM_TAG_ATTESTATION_ID_PRODUCT:
- case KM_TAG_ATTESTATION_ID_SERIAL:
- case KM_TAG_AUTH_TOKEN:
- case KM_TAG_CERTIFICATE_SERIAL:
- case KM_TAG_CERTIFICATE_SUBJECT:
- case KM_TAG_CERTIFICATE_NOT_AFTER:
- case KM_TAG_CERTIFICATE_NOT_BEFORE:
- case KM_TAG_CONFIRMATION_TOKEN:
- case KM_TAG_DEVICE_UNIQUE_ATTESTATION:
- case KM_TAG_IDENTITY_CREDENTIAL_KEY:
- case KM_TAG_MAC_LENGTH:
- case KM_TAG_NONCE:
- case KM_TAG_RESET_SINCE_ID_ROTATION:
- case KM_TAG_ROOT_OF_TRUST:
- case KM_TAG_UNIQUE_ID:
- break;
-
- /* KeyMint-enforced */
- case KM_TAG_ALGORITHM:
- case KM_TAG_APPLICATION_ID:
- case KM_TAG_AUTH_TIMEOUT:
- case KM_TAG_BLOB_USAGE_REQUIREMENTS:
- case KM_TAG_BLOCK_MODE:
- case KM_TAG_BOOT_PATCHLEVEL:
- case KM_TAG_CALLER_NONCE:
- case KM_TAG_DIGEST:
- case KM_TAG_EC_CURVE:
- case KM_TAG_EXPORTABLE:
- case KM_TAG_INCLUDE_UNIQUE_ID:
- case KM_TAG_KEY_SIZE:
- case KM_TAG_MAX_USES_PER_BOOT:
- case KM_TAG_MIN_MAC_LENGTH:
- case KM_TAG_MIN_SECONDS_BETWEEN_OPS:
- case KM_TAG_NO_AUTH_REQUIRED:
- case KM_TAG_ORIGIN:
- case KM_TAG_OS_PATCHLEVEL:
- case KM_TAG_OS_VERSION:
- case KM_TAG_PADDING:
- case KM_TAG_PURPOSE:
- case KM_TAG_RSA_OAEP_MGF_DIGEST:
- case KM_TAG_RSA_PUBLIC_EXPONENT:
- case KM_TAG_UNLOCKED_DEVICE_REQUIRED:
- case KM_TAG_USER_AUTH_TYPE:
- case KM_TAG_USER_SECURE_ID:
- case KM_TAG_VENDOR_PATCHLEVEL:
- keyMintEnforced.authorizations.push_back(kmParam2Aidl(entry));
- break;
-
- /* Keystore-enforced */
- case KM_TAG_ACTIVE_DATETIME:
- case KM_TAG_ALL_APPLICATIONS:
- case KM_TAG_ALL_USERS:
- case KM_TAG_MAX_BOOT_LEVEL:
- case KM_TAG_ORIGINATION_EXPIRE_DATETIME:
- case KM_TAG_USAGE_EXPIRE_DATETIME:
- case KM_TAG_USER_ID:
- case KM_TAG_USAGE_COUNT_LIMIT:
- keystoreEnforced.authorizations.push_back(kmParam2Aidl(entry));
- break;
- }
- }
-
- vector<KeyCharacteristics> retval;
- retval.reserve(2);
- if (!keyMintEnforced.authorizations.empty()) retval.push_back(std::move(keyMintEnforced));
- if (include_keystore_enforced && !keystoreEnforced.authorizations.empty()) {
- retval.push_back(std::move(keystoreEnforced));
- }
-
- return retval;
-}
-
-Certificate convertCertificate(const keymaster_blob_t& cert) {
- return {std::vector<uint8_t>(cert.data, cert.data + cert.data_length)};
-}
-
-vector<Certificate> convertCertificateChain(const CertificateChain& chain) {
- vector<Certificate> retval;
- retval.reserve(chain.entry_count);
- std::transform(chain.begin(), chain.end(), std::back_inserter(retval), convertCertificate);
- return retval;
-}
-
-void addClientAndAppData(const std::vector<uint8_t>& appId, const std::vector<uint8_t>& appData,
- ::keymaster::AuthorizationSet* params) {
- params->Clear();
- if (appId.size()) {
- params->push_back(::keymaster::TAG_APPLICATION_ID, appId.data(), appId.size());
- }
- if (appData.size()) {
- params->push_back(::keymaster::TAG_APPLICATION_DATA, appData.data(), appData.size());
- }
-}
-
-} // namespace
-
-constexpr size_t kOperationTableSize = 16;
-
-MicrodroidKeyMintDevice::MicrodroidKeyMintDevice(::keymaster::KeymasterKeyBlob& rootKey)
- : impl_(new ::keymaster::AndroidKeymaster(
- [&]() -> auto {
- auto context = new MicrodroidKeymasterContext(KmVersion::KEYMINT_1, rootKey);
- context->SetSystemVersion(::keymaster::GetOsVersion(),
- ::keymaster::GetOsPatchlevel());
- return context;
- }(),
- kOperationTableSize)) {}
-
-MicrodroidKeyMintDevice::~MicrodroidKeyMintDevice() {}
-
-ScopedAStatus MicrodroidKeyMintDevice::getHardwareInfo(KeyMintHardwareInfo* info) {
- info->versionNumber = 1;
- info->securityLevel = SecurityLevel::SOFTWARE;
- info->keyMintName = "MicrodroidKeyMintDevice";
- info->keyMintAuthorName = "Google";
- info->timestampTokenRequired = false;
- return ScopedAStatus::ok();
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::addRngEntropy(const vector<uint8_t>& data) {
- if (data.size() == 0) {
- return ScopedAStatus::ok();
- }
-
- AddEntropyRequest request(impl_->message_version());
- request.random_data.Reinitialize(data.data(), data.size());
-
- AddEntropyResponse response(impl_->message_version());
- impl_->AddRngEntropy(request, &response);
-
- return kmError2ScopedAStatus(response.error);
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::generateKey(const vector<KeyParameter>& keyParams,
- const optional<AttestationKey>& attestationKey,
- KeyCreationResult* creationResult) {
- GenerateKeyRequest request(impl_->message_version());
- request.key_description.Reinitialize(KmParamSet(keyParams));
- if (attestationKey) {
- request.attestation_signing_key_blob =
- KeymasterKeyBlob(attestationKey->keyBlob.data(), attestationKey->keyBlob.size());
- request.attest_key_params.Reinitialize(KmParamSet(attestationKey->attestKeyParams));
- request.issuer_subject = KeymasterBlob(attestationKey->issuerSubjectName.data(),
- attestationKey->issuerSubjectName.size());
- }
-
- GenerateKeyResponse response(impl_->message_version());
- impl_->GenerateKey(request, &response);
-
- if (response.error != KM_ERROR_OK) {
- // Note a key difference between this current aidl and previous hal, is
- // that hal returns void where as aidl returns the error status. If
- // aidl returns error, then aidl will not return any change you may make
- // to the out parameters. This is quite different from hal where all
- // output variable can be modified due to hal returning void.
- //
- // So the caller need to be aware not to expect aidl functions to clear
- // the output variables for you in case of error. If you left some
- // wrong data set in the out parameters, they will stay there.
- return kmError2ScopedAStatus(response.error);
- }
-
- creationResult->keyBlob = kmBlob2vector(response.key_blob);
- creationResult->keyCharacteristics =
- convertKeyCharacteristics(request.key_description, response.unenforced,
- response.enforced);
- creationResult->certificateChain = convertCertificateChain(response.certificate_chain);
- return ScopedAStatus::ok();
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::importKey(const vector<KeyParameter>& keyParams,
- KeyFormat keyFormat,
- const vector<uint8_t>& keyData,
- const optional<AttestationKey>& attestationKey,
- KeyCreationResult* creationResult) {
- ImportKeyRequest request(impl_->message_version());
- request.key_description.Reinitialize(KmParamSet(keyParams));
- request.key_format = legacy_enum_conversion(keyFormat);
- request.key_data = KeymasterKeyBlob(keyData.data(), keyData.size());
- if (attestationKey) {
- request.attestation_signing_key_blob =
- KeymasterKeyBlob(attestationKey->keyBlob.data(), attestationKey->keyBlob.size());
- request.attest_key_params.Reinitialize(KmParamSet(attestationKey->attestKeyParams));
- request.issuer_subject = KeymasterBlob(attestationKey->issuerSubjectName.data(),
- attestationKey->issuerSubjectName.size());
- }
-
- ImportKeyResponse response(impl_->message_version());
- impl_->ImportKey(request, &response);
-
- if (response.error != KM_ERROR_OK) {
- return kmError2ScopedAStatus(response.error);
- }
-
- creationResult->keyBlob = kmBlob2vector(response.key_blob);
- creationResult->keyCharacteristics =
- convertKeyCharacteristics(request.key_description, response.unenforced,
- response.enforced);
- creationResult->certificateChain = convertCertificateChain(response.certificate_chain);
-
- return ScopedAStatus::ok();
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::importWrappedKey(
- const vector<uint8_t>& wrappedKeyData, const vector<uint8_t>& wrappingKeyBlob,
- const vector<uint8_t>& maskingKey, const vector<KeyParameter>& unwrappingParams,
- int64_t passwordSid, int64_t biometricSid, KeyCreationResult* creationResult) {
- ImportWrappedKeyRequest request(impl_->message_version());
- request.SetWrappedMaterial(wrappedKeyData.data(), wrappedKeyData.size());
- request.SetWrappingMaterial(wrappingKeyBlob.data(), wrappingKeyBlob.size());
- request.SetMaskingKeyMaterial(maskingKey.data(), maskingKey.size());
- request.additional_params.Reinitialize(KmParamSet(unwrappingParams));
- request.password_sid = static_cast<uint64_t>(passwordSid);
- request.biometric_sid = static_cast<uint64_t>(biometricSid);
-
- ImportWrappedKeyResponse response(impl_->message_version());
- impl_->ImportWrappedKey(request, &response);
-
- if (response.error != KM_ERROR_OK) {
- return kmError2ScopedAStatus(response.error);
- }
-
- creationResult->keyBlob = kmBlob2vector(response.key_blob);
- creationResult->keyCharacteristics =
- convertKeyCharacteristics(request.additional_params, response.unenforced,
- response.enforced);
- creationResult->certificateChain = convertCertificateChain(response.certificate_chain);
-
- return ScopedAStatus::ok();
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::upgradeKey(const vector<uint8_t>& keyBlobToUpgrade,
- const vector<KeyParameter>& upgradeParams,
- vector<uint8_t>* keyBlob) {
- UpgradeKeyRequest request(impl_->message_version());
- request.SetKeyMaterial(keyBlobToUpgrade.data(), keyBlobToUpgrade.size());
- request.upgrade_params.Reinitialize(KmParamSet(upgradeParams));
-
- UpgradeKeyResponse response(impl_->message_version());
- impl_->UpgradeKey(request, &response);
-
- if (response.error != KM_ERROR_OK) {
- return kmError2ScopedAStatus(response.error);
- }
-
- *keyBlob = kmBlob2vector(response.upgraded_key);
- return ScopedAStatus::ok();
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::deleteKey(const vector<uint8_t>&) {
- // There's nothing to be done to delete software key blobs.
- return kmError2ScopedAStatus(KM_ERROR_OK);
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::deleteAllKeys() {
- // There's nothing to be done to delete software key blobs.
- return kmError2ScopedAStatus(KM_ERROR_OK);
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::destroyAttestationIds() {
- return kmError2ScopedAStatus(KM_ERROR_UNIMPLEMENTED);
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::begin(KeyPurpose purpose, const vector<uint8_t>& keyBlob,
- const vector<KeyParameter>& params,
- const optional<HardwareAuthToken>& authToken,
- BeginResult* result) {
- BeginOperationRequest request(impl_->message_version());
- request.purpose = legacy_enum_conversion(purpose);
- request.SetKeyMaterial(keyBlob.data(), keyBlob.size());
- request.additional_params.Reinitialize(KmParamSet(params));
-
- vector<uint8_t> vector_token = authToken2AidlVec(authToken);
- request.additional_params.push_back(TAG_AUTH_TOKEN,
- reinterpret_cast<uint8_t*>(vector_token.data()),
- vector_token.size());
-
- BeginOperationResponse response(impl_->message_version());
- impl_->BeginOperation(request, &response);
-
- if (response.error != KM_ERROR_OK) {
- return kmError2ScopedAStatus(response.error);
- }
-
- result->params = kmParamSet2Aidl(response.output_params);
- result->challenge = response.op_handle;
- result->operation =
- ndk::SharedRefBase::make<AndroidKeyMintOperation>(impl_, response.op_handle);
- return ScopedAStatus::ok();
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::deviceLocked(
- bool, const std::optional<secureclock::TimeStampToken>&) {
- // Microdroid doesn't yet have a concept of a locked device.
- return kmError2ScopedAStatus(KM_ERROR_OK);
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::earlyBootEnded() {
- return kmError2ScopedAStatus(KM_ERROR_UNIMPLEMENTED);
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::convertStorageKeyToEphemeral(
- const std::vector<uint8_t>& /* storageKeyBlob */,
- std::vector<uint8_t>* /* ephemeralKeyBlob */) {
- return kmError2ScopedAStatus(KM_ERROR_UNIMPLEMENTED);
-}
-
-ScopedAStatus MicrodroidKeyMintDevice::getKeyCharacteristics(
- const std::vector<uint8_t>& keyBlob, const std::vector<uint8_t>& appId,
- const std::vector<uint8_t>& appData, std::vector<KeyCharacteristics>* keyCharacteristics) {
- GetKeyCharacteristicsRequest request(impl_->message_version());
- request.SetKeyMaterial(keyBlob.data(), keyBlob.size());
- addClientAndAppData(appId, appData, &request.additional_params);
-
- GetKeyCharacteristicsResponse response(impl_->message_version());
- impl_->GetKeyCharacteristics(request, &response);
-
- if (response.error != KM_ERROR_OK) {
- return kmError2ScopedAStatus(response.error);
- }
-
- AuthorizationSet emptySet;
- *keyCharacteristics =
- convertKeyCharacteristics(emptySet, response.unenforced, response.enforced,
- /* include_keystore_enforced = */ false);
-
- return ScopedAStatus::ok();
-}
-
-} // namespace aidl::android::hardware::security::keymint
diff --git a/microdroid/keymint/MicrodroidKeymasterContext.cpp b/microdroid/keymint/MicrodroidKeymasterContext.cpp
deleted file mode 100644
index b5440f3..0000000
--- a/microdroid/keymint/MicrodroidKeymasterContext.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "MicrodroidKeymasterContext.h"
-
-#include <android-base/logging.h>
-#include <keymaster/key.h>
-#include <keymaster/key_blob_utils/auth_encrypted_key_blob.h>
-#include <keymaster/key_blob_utils/software_keyblobs.h>
-
-using namespace ::keymaster;
-
-// This value is used for the ROOT_OF_TRUST tag which is only used in
-// attestation records which aren't supported in this implementation so a
-// constant doesn't cause any hard. MicroDroid SoftWare root-of-trust.
-static uint8_t SWROT[] = {'M', 'D', 'S', 'W'};
-static const KeymasterBlob microdroidSoftwareRootOfTrust(SWROT);
-
-keymaster_error_t MicrodroidKeymasterContext::CreateKeyBlob(const AuthorizationSet& key_description,
- keymaster_key_origin_t origin,
- const KeymasterKeyBlob& key_material,
- KeymasterKeyBlob* blob,
- AuthorizationSet* hw_enforced,
- AuthorizationSet* sw_enforced) const {
- keymaster_error_t error;
-
- if (key_description.GetTagValue(TAG_ROLLBACK_RESISTANCE)) {
- return KM_ERROR_ROLLBACK_RESISTANCE_UNAVAILABLE;
- }
-
- error = SetKeyBlobAuthorizations(key_description, origin, os_version_, os_patchlevel_,
- hw_enforced, sw_enforced);
- if (error != KM_ERROR_OK) return error;
-
- AuthorizationSet hidden;
- error = BuildHiddenAuthorizations(key_description, &hidden, microdroidSoftwareRootOfTrust);
- if (error != KM_ERROR_OK) return error;
-
- CHECK(hw_enforced->empty());
-
- // Note that the authorizations included in the blob are not encrypted. This
- // doesn't pose a problem for the current applications but may be a
- // candidate for hardening.
- auto encrypted_key = EncryptKey(key_material, AES_GCM_WITH_SW_ENFORCED, *hw_enforced,
- *sw_enforced, hidden, root_key_, random_, &error);
- if (error != KM_ERROR_OK) return error;
-
- *blob = SerializeAuthEncryptedBlob(encrypted_key, *hw_enforced, *sw_enforced, &error);
- return error;
-}
-
-keymaster_error_t MicrodroidKeymasterContext::ParseKeyBlob(
- const KeymasterKeyBlob& blob, const AuthorizationSet& additional_params,
- UniquePtr<Key>* key) const {
- keymaster_error_t error;
-
- AuthorizationSet hidden;
- error = BuildHiddenAuthorizations(additional_params, &hidden, microdroidSoftwareRootOfTrust);
- if (error != KM_ERROR_OK) return error;
-
- auto deserialized_key = DeserializeAuthEncryptedBlob(blob, &error);
- if (error != KM_ERROR_OK) return error;
-
- keymaster_algorithm_t algorithm;
- if (!deserialized_key.sw_enforced.GetTagValue(TAG_ALGORITHM, &algorithm)) {
- return KM_ERROR_INVALID_ARGUMENT;
- }
-
- auto key_material = DecryptKey(deserialized_key, hidden, root_key_, &error);
- if (error != KM_ERROR_OK) return error;
-
- auto factory = GetKeyFactory(algorithm);
- return factory->LoadKey(move(key_material), additional_params,
- move(deserialized_key.hw_enforced), move(deserialized_key.sw_enforced),
- key);
-}
-
-static bool UpgradeIntegerTag(keymaster_tag_t tag, uint32_t value, AuthorizationSet* set) {
- int index = set->find(tag);
- if (index == -1) {
- keymaster_key_param_t param;
- param.tag = tag;
- param.integer = value;
- set->push_back(param);
- return true;
- }
-
- if (set->params[index].integer > value) return false;
-
- if (set->params[index].integer != value) {
- set->params[index].integer = value;
- }
- return true;
-}
-
-keymaster_error_t MicrodroidKeymasterContext::UpgradeKeyBlob(const KeymasterKeyBlob& key_to_upgrade,
- const AuthorizationSet& upgrade_params,
- KeymasterKeyBlob* upgraded_key) const {
- UniquePtr<Key> key;
- keymaster_error_t error = ParseKeyBlob(key_to_upgrade, upgrade_params, &key);
- if (error != KM_ERROR_OK) return error;
-
- if (os_version_ == 0) {
- // We need to allow "upgrading" OS version to zero, to support upgrading from proper
- // numbered releases to unnumbered development and preview releases.
-
- int key_os_version_pos = key->sw_enforced().find(TAG_OS_VERSION);
- if (key_os_version_pos != -1) {
- uint32_t key_os_version = key->sw_enforced()[key_os_version_pos].integer;
- if (key_os_version != 0) {
- key->sw_enforced()[key_os_version_pos].integer = os_version_;
- }
- }
- }
-
- if (!UpgradeIntegerTag(TAG_OS_VERSION, os_version_, &key->sw_enforced()) ||
- !UpgradeIntegerTag(TAG_OS_PATCHLEVEL, os_patchlevel_, &key->sw_enforced()))
- // One of the version fields would have been a downgrade. Not allowed.
- return KM_ERROR_INVALID_ARGUMENT;
-
- AuthorizationSet hidden;
- error = BuildHiddenAuthorizations(upgrade_params, &hidden, microdroidSoftwareRootOfTrust);
- if (error != KM_ERROR_OK) return error;
-
- auto encrypted_key =
- EncryptKey(key->key_material(), AES_GCM_WITH_SW_ENFORCED, key->hw_enforced(),
- key->sw_enforced(), hidden, root_key_, random_, &error);
- if (error != KM_ERROR_OK) return error;
-
- *upgraded_key = SerializeAuthEncryptedBlob(encrypted_key, key->hw_enforced(),
- key->sw_enforced(), &error);
- return error;
-}
diff --git a/microdroid/keymint/android.hardware.security.keymint-service.microdroid.rc b/microdroid/keymint/android.hardware.security.keymint-service.microdroid.rc
deleted file mode 100644
index d6851bd..0000000
--- a/microdroid/keymint/android.hardware.security.keymint-service.microdroid.rc
+++ /dev/null
@@ -1,3 +0,0 @@
-service vendor.keymint-microdroid /vendor/bin/hw/android.hardware.security.keymint-service.microdroid
- class early_hal
- user nobody
diff --git a/microdroid/keymint/android.hardware.security.keymint-service.microdroid.xml b/microdroid/keymint/android.hardware.security.keymint-service.microdroid.xml
deleted file mode 100644
index 73d15a8..0000000
--- a/microdroid/keymint/android.hardware.security.keymint-service.microdroid.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<manifest version="1.0" type="device">
- <hal format="aidl">
- <name>android.hardware.security.keymint</name>
- <fqname>IKeyMintDevice/default</fqname>
- </hal>
-</manifest>
diff --git a/microdroid/keymint/include/MicrodroidKeyMintDevice.h b/microdroid/keymint/include/MicrodroidKeyMintDevice.h
deleted file mode 100644
index dec7baa..0000000
--- a/microdroid/keymint/include/MicrodroidKeyMintDevice.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <aidl/android/hardware/security/keymint/BnKeyMintDevice.h>
-#include <aidl/android/hardware/security/keymint/BnKeyMintOperation.h>
-#include <aidl/android/hardware/security/keymint/HardwareAuthToken.h>
-#include <keymaster/android_keymaster_utils.h>
-
-namespace keymaster {
-class AndroidKeymaster;
-}
-
-namespace aidl::android::hardware::security::keymint {
-using ::ndk::ScopedAStatus;
-using std::optional;
-using std::shared_ptr;
-using std::vector;
-
-using secureclock::TimeStampToken;
-
-class MicrodroidKeyMintDevice : public BnKeyMintDevice {
-public:
- explicit MicrodroidKeyMintDevice(::keymaster::KeymasterKeyBlob& rootKey);
- virtual ~MicrodroidKeyMintDevice();
-
- ScopedAStatus getHardwareInfo(KeyMintHardwareInfo* info) override;
-
- ScopedAStatus addRngEntropy(const vector<uint8_t>& data) override;
-
- ScopedAStatus generateKey(const vector<KeyParameter>& keyParams,
- const optional<AttestationKey>& attestationKey,
- KeyCreationResult* creationResult) override;
-
- ScopedAStatus importKey(const vector<KeyParameter>& keyParams, KeyFormat keyFormat,
- const vector<uint8_t>& keyData,
- const optional<AttestationKey>& attestationKey,
- KeyCreationResult* creationResult) override;
-
- ScopedAStatus importWrappedKey(const vector<uint8_t>& wrappedKeyData,
- const vector<uint8_t>& wrappingKeyBlob,
- const vector<uint8_t>& maskingKey,
- const vector<KeyParameter>& unwrappingParams,
- int64_t passwordSid, int64_t biometricSid,
- KeyCreationResult* creationResult) override;
-
- ScopedAStatus upgradeKey(const vector<uint8_t>& keyBlobToUpgrade,
- const vector<KeyParameter>& upgradeParams,
- vector<uint8_t>* keyBlob) override;
-
- ScopedAStatus deleteKey(const vector<uint8_t>& keyBlob) override;
- ScopedAStatus deleteAllKeys() override;
- ScopedAStatus destroyAttestationIds() override;
-
- ScopedAStatus begin(KeyPurpose purpose, const vector<uint8_t>& keyBlob,
- const vector<KeyParameter>& params,
- const optional<HardwareAuthToken>& authToken, BeginResult* result) override;
-
- ScopedAStatus deviceLocked(bool passwordOnly,
- const optional<TimeStampToken>& timestampToken) override;
- ScopedAStatus earlyBootEnded() override;
-
- ScopedAStatus convertStorageKeyToEphemeral(const std::vector<uint8_t>& storageKeyBlob,
- std::vector<uint8_t>* ephemeralKeyBlob) override;
-
- ScopedAStatus getKeyCharacteristics(
- const std::vector<uint8_t>& keyBlob, const std::vector<uint8_t>& appId,
- const std::vector<uint8_t>& appData,
- std::vector<KeyCharacteristics>* keyCharacteristics) override;
-
- shared_ptr<::keymaster::AndroidKeymaster>& getKeymasterImpl() { return impl_; }
-
-protected:
- std::shared_ptr<::keymaster::AndroidKeymaster> impl_;
-};
-
-} // namespace aidl::android::hardware::security::keymint
diff --git a/microdroid/keymint/include/MicrodroidKeymasterContext.h b/microdroid/keymint/include/MicrodroidKeymasterContext.h
deleted file mode 100644
index 636d240..0000000
--- a/microdroid/keymint/include/MicrodroidKeymasterContext.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <keymaster/contexts/pure_soft_keymaster_context.h>
-#include <keymaster/km_openssl/software_random_source.h>
-
-class MicrodroidKeymasterContext : public ::keymaster::PureSoftKeymasterContext {
-public:
- explicit MicrodroidKeymasterContext(::keymaster::KmVersion version,
- ::keymaster::KeymasterKeyBlob& root_key)
- : PureSoftKeymasterContext(version, KM_SECURITY_LEVEL_SOFTWARE), root_key_(root_key) {}
-
- keymaster_error_t CreateKeyBlob(const ::keymaster::AuthorizationSet& auths,
- keymaster_key_origin_t origin,
- const ::keymaster::KeymasterKeyBlob& key_material,
- ::keymaster::KeymasterKeyBlob* blob,
- ::keymaster::AuthorizationSet* hw_enforced,
- ::keymaster::AuthorizationSet* sw_enforced) const override;
-
- keymaster_error_t ParseKeyBlob(const ::keymaster::KeymasterKeyBlob& blob,
- const ::keymaster::AuthorizationSet& additional_params,
- ::keymaster::UniquePtr<::keymaster::Key>* key) const override;
-
- keymaster_error_t UpgradeKeyBlob(const ::keymaster::KeymasterKeyBlob& key_to_upgrade,
- const ::keymaster::AuthorizationSet& upgrade_params,
- ::keymaster::KeymasterKeyBlob* upgraded_key) const override;
-
-private:
- ::keymaster::SoftwareRandomSource random_;
- ::keymaster::KeymasterKeyBlob root_key_;
-};
diff --git a/microdroid/keymint/service.cpp b/microdroid/keymint/service.cpp
deleted file mode 100644
index 5fc0bd2..0000000
--- a/microdroid/keymint/service.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright 2021, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "android.hardware.security.keymint-service"
-
-#include <AndroidKeyMintDevice.h>
-#include <android-base/logging.h>
-#include <android-base/properties.h>
-#include <android-base/result.h>
-#include <android/binder_manager.h>
-#include <android/binder_process.h>
-#include <keymaster/android_keymaster_utils.h>
-#include <keymaster/mem.h>
-#include <keymaster/soft_keymaster_logger.h>
-#include <openssl/digest.h>
-#include <openssl/hkdf.h>
-#include <openssl/is_boringssl.h>
-#include <openssl/sha.h>
-
-#include "MicrodroidKeyMintDevice.h"
-
-using aidl::android::hardware::security::keymint::MicrodroidKeyMintDevice;
-using aidl::android::hardware::security::keymint::SecurityLevel;
-
-using android::base::Error;
-using android::base::GetProperty;
-using android::base::Result;
-
-using keymaster::KeymasterBlob;
-using keymaster::KeymasterKeyBlob;
-using keymaster::memset_s;
-
-namespace {
-
-template <typename T, class... Args>
-std::shared_ptr<T> addService(Args&&... args) {
- std::shared_ptr<T> ser = ndk::SharedRefBase::make<T>(std::forward<Args>(args)...);
- auto instanceName = std::string(T::descriptor) + "/default";
- LOG(INFO) << "adding keymint service instance: " << instanceName;
- binder_status_t status =
- AServiceManager_addService(ser->asBinder().get(), instanceName.c_str());
- CHECK(status == STATUS_OK);
- return ser;
-}
-
-Result<KeymasterKeyBlob> getRootKey() {
- const std::string prop = "ro.vmsecret.keymint";
- const std::chrono::seconds timeout(15);
- while (!android::base::WaitForPropertyCreation(prop, timeout)) {
- LOG(WARNING) << "waited " << timeout.count() << "seconds for " << prop
- << ", still waiting...";
- }
-
- // In a small effort to avoid spreading the secret around too widely in
- // memory, move the secert into a buffer that will wipe itself and clear
- // the original string.
- std::string secretProp = GetProperty(prop, "");
- KeymasterBlob secret(reinterpret_cast<const uint8_t*>(secretProp.data()), secretProp.size());
- memset_s(secretProp.data(), 0, secretProp.size());
- if (secret.size() < 64u) return Error() << "secret is too small";
-
- // Derive the root key from the secret to avoid getting locked into using
- // the secret directly.
- KeymasterKeyBlob rootKey(SHA512_DIGEST_LENGTH);
- const uint8_t kRootKeyIkm[] = "keymint_root_key";
- const uint8_t* kNoSalt = nullptr;
- const size_t kNoSaltLen = 0;
- if (!HKDF(rootKey.writable_data(), rootKey.size(), EVP_sha512(), (uint8_t*)secret.begin(),
- secret.size(), kNoSalt, kNoSaltLen, kRootKeyIkm, sizeof(kRootKeyIkm))) {
- return Error() << "Failed to derive a key";
- }
- if (rootKey.size() < 64u) return Error() << "root key is too small";
-
- LOG(INFO) << "root key obtained";
- return rootKey;
-}
-
-} // namespace
-
-int main() {
- auto rootKey = getRootKey();
- if (!rootKey.ok()) {
- LOG(FATAL) << "Failed to get root key: " << rootKey.error();
- }
-
- // Zero threads seems like a useless pool, but below we'll join this thread
- // to it, increasing the pool size to 1.
- ABinderProcess_setThreadPoolMaxThreadCount(0);
-
- // Add Keymint Service
- std::shared_ptr<MicrodroidKeyMintDevice> keyMint =
- ndk::SharedRefBase::make<MicrodroidKeyMintDevice>(*rootKey);
- auto instanceName = std::string(MicrodroidKeyMintDevice::descriptor) + "/default";
- LOG(INFO) << "adding keymint service instance: " << instanceName;
- binder_status_t status =
- AServiceManager_addService(keyMint->asBinder().get(), instanceName.c_str());
- CHECK(status == STATUS_OK);
-
- ABinderProcess_joinThreadPool();
- return EXIT_FAILURE; // should not reach
-}
diff --git a/microdroid/microdroid.json b/microdroid/microdroid.json
index da82289..aff0b7b 100644
--- a/microdroid/microdroid.json
+++ b/microdroid/microdroid.json
@@ -5,23 +5,23 @@
"partitions": [
{
"label": "boot_a",
- "paths": ["/apex/com.android.virt/etc/fs/microdroid_boot-5.10.img"]
+ "path": "/apex/com.android.virt/etc/fs/microdroid_boot-5.10.img"
+ },
+ {
+ "label": "init_boot_a",
+ "path": "/apex/com.android.virt/etc/fs/microdroid_init_boot.img"
},
{
"label": "vendor_boot_a",
- "paths": ["/apex/com.android.virt/etc/fs/microdroid_vendor_boot-5.10.img"]
+ "path": "/apex/com.android.virt/etc/fs/microdroid_vendor_boot-5.10.img"
},
{
"label": "vbmeta_a",
- "paths": ["/apex/com.android.virt/etc/fs/microdroid_vbmeta.img"]
- },
- {
- "label": "vbmeta_system_a",
- "paths": ["/apex/com.android.virt/etc/fs/microdroid_vbmeta_system.img"]
+ "path": "/apex/com.android.virt/etc/fs/microdroid_vbmeta.img"
},
{
"label": "super",
- "paths": ["/apex/com.android.virt/etc/fs/microdroid_super.img"]
+ "path": "/apex/com.android.virt/etc/fs/microdroid_super.img"
}
],
"writable": false
@@ -30,12 +30,13 @@
"partitions": [
{
"label": "uboot_env",
- "paths": ["/apex/com.android.virt/etc/uboot_env.img"],
+ "path": "/apex/com.android.virt/etc/uboot_env.img",
"writable": false
}
],
"writable": true
}
],
- "memory_mib": 2048
+ "memory_mib": 256,
+ "platform_version": "~1.0"
}
diff --git a/microdroid/microdroid_compatibility_matrix.xml b/microdroid/microdroid_compatibility_matrix.xml
index dbc12a8..a345e30 100644
--- a/microdroid/microdroid_compatibility_matrix.xml
+++ b/microdroid/microdroid_compatibility_matrix.xml
@@ -1,10 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<compatibility-matrix version="1.0" type="framework">
<hal format="aidl" optional="true">
- <name>android.hardware.security.keymint</name>
+ <name>android.hardware.security.dice</name>
<version>1</version>
<interface>
- <name>IKeyMintDevice</name>
+ <name>IDiceDevice</name>
<instance>default</instance>
</interface>
</hal>
diff --git a/microdroid/microdroid_manifest.xml b/microdroid/microdroid_manifest.xml
index 28a374f..b84ba8f 100644
--- a/microdroid/microdroid_manifest.xml
+++ b/microdroid/microdroid_manifest.xml
@@ -1,24 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<manifest version="1.0" type="framework">
- <!--TODO(b/185767624): remove hidl after full keymint support-->
- <hal format="hidl">
- <name>android.hidl.manager</name>
- <transport>hwbinder</transport>
- <version>1.2</version>
- <interface>
- <name>IServiceManager</name>
- <instance>default</instance>
- </interface>
- <fqname>@1.2::IServiceManager/default</fqname>
- </hal>
- <hal format="hidl">
- <name>android.hidl.token</name>
- <transport>hwbinder</transport>
- <version>1.0</version>
- <interface>
- <name>ITokenManager</name>
- <instance>default</instance>
- </interface>
- <fqname>@1.0::ITokenManager/default</fqname>
- </hal>
+ <!-- empty -->
</manifest>
diff --git a/microdroid/microdroid_vendor_compatibility_matrix.xml b/microdroid/microdroid_vendor_compatibility_matrix.xml
index efa1c98..44735d8 100644
--- a/microdroid/microdroid_vendor_compatibility_matrix.xml
+++ b/microdroid/microdroid_vendor_compatibility_matrix.xml
@@ -1,27 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<compatibility-matrix version="1.0" type="device">
- <hal format="aidl">
- <name>android.system.keystore2</name>
- <interface>
- <name>IKeystoreService</name>
- <instance>default</instance>
- </interface>
- </hal>
- <!--TODO(b/185767624): remove hidl after full keymint support-->
- <hal format="hidl" optional="true">
- <name>android.hidl.manager</name>
- <version>1.0</version>
- <interface>
- <name>IServiceManager</name>
- <instance>default</instance>
- </interface>
- </hal>
- <hal format="hidl" optional="true">
- <name>android.hidl.token</name>
- <version>1.0</version>
- <interface>
- <name>ITokenManager</name>
- <instance>default</instance>
- </interface>
- </hal>
+ <!-- empty -->
</compatibility-matrix>
diff --git a/microdroid/payload/Android.bp b/microdroid/payload/Android.bp
index c7bc415..f77c037 100644
--- a/microdroid/payload/Android.bp
+++ b/microdroid/payload/Android.bp
@@ -25,19 +25,6 @@
defaults: ["microdroid_metadata_default"],
}
-cc_library_static {
- name: "lib_microdroid_metadata_proto_lite",
- recovery_available: true,
- proto: {
- export_proto_headers: true,
- type: "lite",
- },
- defaults: ["microdroid_metadata_default"],
- apex_available: [
- "com.android.virt",
- ],
-}
-
rust_protobuf {
name: "libmicrodroid_metadata_proto_rust",
crate_name: "microdroid_metadata",
@@ -49,30 +36,26 @@
],
}
-cc_binary {
+cc_binary_host {
name: "mk_payload",
srcs: [
"mk_payload.cc",
],
- shared_libs: [
+ static_libs: [
+ "lib_microdroid_metadata_proto",
"libbase",
+ "libcdisk_spec",
"libcuttlefish_fs",
"libcuttlefish_utils",
- "liblog",
- "libz",
- ],
- static_libs: [
- "lib_microdroid_metadata_proto_lite",
- "libcdisk_spec",
"libext2_uuid",
"libimage_aggregator",
"libjsoncpp",
+ "liblog",
+ "libprotobuf-cpp-full",
"libprotobuf-cpp-lite",
"libsparse",
"libxml2",
+ "libz",
],
- generated_sources: ["apex-info-list"],
- apex_available: [
- "com.android.virt",
- ],
+ static_executable: true,
}
diff --git a/microdroid/payload/README.md b/microdroid/payload/README.md
index 35502c1..c2f624a 100644
--- a/microdroid/payload/README.md
+++ b/microdroid/payload/README.md
@@ -3,6 +3,9 @@
Payload disk is a composite disk image referencing host APEXes and an APK so that microdroid
mounts/activates APK/APEXes and executes a binary within the APK.
+Payload disk is created by [VirtualizationService](../../virtualizationservice) Service when
+starting a VM.
+
## Partitions
Payload disk has 1 + N(number of APEX/APK payloads) partitions.
@@ -14,7 +17,7 @@
* partition 1: Metadata partition
* partition 2 ~ n: APEX payloads
-* partition n + 1: APK payload
+* partition n+1, n+2: APK payload and its idsig
It's subject to change in the future, though.
@@ -25,61 +28,44 @@
The partition is a protobuf message prefixed with the size of the message.
-| offset | size | description |
-|--------|------|----------------------------------------------------------------|
-| 0 | 4 | Header. unsigned int32: body length(L) in big endian |
-| 4 | L | Body. A protobuf message. [schema](metadata.proto) |
+| offset | size | description |
+| ------ | ---- | ---------------------------------------------------- |
+| 0 | 4 | Header. unsigned int32: body length(L) in big endian |
+| 4 | L | Body. A protobuf message. [schema](metadata.proto) |
### Payload partitions
Each payload partition presents APEX or APK passed from the host.
-At the end of each payload partition the size of the original payload file (APEX or APK) is stored
-in 4-byte big endian.
+The size of a payload partition must be a multiple of 4096 bytes.
-For example, the following code shows how to get the original size of host apex file
-when the apex is read in microdroid as /dev/block/vdc2,
+# `mk_payload`
- int fd = open("/dev/block/vdc2", O_RDONLY | O_BINARY | O_CLOEXEC);
- uint32_t size;
- lseek(fd, -sizeof(size), SEEK_END);
- read(fd, &size, sizeof(size));
- size = betoh32(size);
-
-## How to Create
-
-### `mk_payload`
-
-`mk_payload` creates a payload composite disk image as described in a JSON which is intentionlly
-similar to the schema of VM payload config.
+`mk_payload` is a small utility to create a payload disk image. It is used by ARCVM.
```
$ cat payload_config.json
{
- "system_apexes": [
- "com.android.adbd",
- ],
"apexes": [
{
"name": "com.my.hello",
- "path": "hello.apex"
+ "path": "hello.apex",
}
],
"apk": {
"name": "com.my.world",
- "path": "/path/to/world.apk"
+ "path": "/path/to/world.apk",
+ "idsigPath": "/path/to/world.apk.idsig",
}
}
-$ adb push payload_config.json hello.apex /data/local/tmp/
-$ adb shell 'cd /data/local/tmp; /apex/com.android.virt/bin/mk_payload payload_config.json payload.img
-$ adb shell ls /data/local/tmp/*.img
+$ m mk_payload
+$ mk_payload payload_config.json payload.img
+$ ls
payload.img
payload-footer.img
payload-header.img
payload-metadata.img
-payload.img.0 # fillers
-payload.img.1
+payload-filler-0.img
+payload-filler-1.img
...
```
-
-In the future, [VirtualizationService](../../virtualizationservice) will handle this.
diff --git a/microdroid/payload/config/src/lib.rs b/microdroid/payload/config/src/lib.rs
index 6dc127b..67e8feb 100644
--- a/microdroid/payload/config/src/lib.rs
+++ b/microdroid/payload/config/src/lib.rs
@@ -30,6 +30,14 @@
/// APEXes to activate in a VM
#[serde(default)]
pub apexes: Vec<ApexConfig>,
+
+ /// Extra APKs to be passed to a VM
+ #[serde(default)]
+ pub extra_apks: Vec<ApkConfig>,
+
+ /// Tells VirtualizationService to use staged APEXes if possible
+ #[serde(default)]
+ pub prefer_staged: bool,
}
/// OS config
@@ -87,3 +95,10 @@
/// The name of APEX
pub name: String,
}
+
+/// APK config
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ApkConfig {
+ /// The path of APK
+ pub path: String,
+}
diff --git a/microdroid/payload/metadata.proto b/microdroid/payload/metadata.proto
index 0fa0650..2e92f55 100644
--- a/microdroid/payload/metadata.proto
+++ b/microdroid/payload/metadata.proto
@@ -31,18 +31,22 @@
message ApexPayload {
// Required.
- // The apex name.
string name = 1;
-
string partition_name = 2;
// Optional.
- // When specified, the public key used to sign the apex should match with it.
- string publicKey = 3;
+ // When specified, apex payload should be verified with the public key and root digest.
+ bytes public_key = 3;
+ bytes root_digest = 4;
- // Optional.
- // When specified, the root digest of the apex should match with it.
- string rootDigest = 4;
+ // Required.
+ // The timestamp in seconds when the APEX was last updated. This should match the value in
+ // apex-info-list.xml.
+ uint64 last_update_seconds = 5;
+
+ // Required.
+ // Whether the APEX is a factory version or not.
+ bool is_factory = 6;
}
message ApkPayload {
diff --git a/microdroid/payload/mk_payload.cc b/microdroid/payload/mk_payload.cc
index b27683c..6e3f526 100644
--- a/microdroid/payload/mk_payload.cc
+++ b/microdroid/payload/mk_payload.cc
@@ -26,7 +26,6 @@
#include <android-base/file.h>
#include <android-base/result.h>
-#include <com_android_apex.h>
#include <image_aggregator.h>
#include <json/json.h>
@@ -42,9 +41,6 @@
using android::microdroid::Metadata;
using android::microdroid::WriteMetadata;
-using com::android::apex::ApexInfoList;
-using com::android::apex::readApexInfoList;
-
using cuttlefish::AlignToPartitionSize;
using cuttlefish::CreateCompositeDisk;
using cuttlefish::kLinuxFilesystem;
@@ -58,9 +54,9 @@
return static_cast<uint32_t>(st.st_size);
}
-std::string ToAbsolute(const std::string& path, const std::string& dirname) {
+std::string RelativeTo(const std::string& path, const std::string& dirname) {
bool is_absolute = !path.empty() && path[0] == '/';
- if (is_absolute) {
+ if (is_absolute || dirname == ".") {
return path;
} else {
return dirname + "/" + path;
@@ -81,25 +77,20 @@
std::string name; // the apex name
std::string path; // the path to the apex file
// absolute or relative to the config file
- std::optional<std::string> public_key;
- std::optional<std::string> root_digest;
};
struct ApkConfig {
std::string name;
std::string path;
- // TODO(jooyung) make this required?
- std::optional<std::string> idsig_path;
+ std::string idsig_path;
};
struct Config {
std::string dirname; // config file's direname to resolve relative paths in the config
- // TODO(b/185956069) remove this when VirtualizationService can provide apex paths
- std::vector<std::string> system_apexes;
-
std::vector<ApexConfig> apexes;
std::optional<ApkConfig> apk;
+ // This is a path in the guest side
std::optional<std::string> payload_config_path;
};
@@ -137,8 +128,6 @@
Result<void> ParseJson(const Json::Value& value, ApexConfig& apex_config) {
DO(ParseJson(value["name"], apex_config.name));
DO(ParseJson(value["path"], apex_config.path));
- DO(ParseJson(value["publicKey"], apex_config.public_key));
- DO(ParseJson(value["rootDigest"], apex_config.root_digest));
return {};
}
@@ -150,7 +139,6 @@
}
Result<void> ParseJson(const Json::Value& value, Config& config) {
- DO(ParseJson(value["system_apexes"], config.system_apexes));
DO(ParseJson(value["apexes"], config.apexes));
DO(ParseJson(value["apk"], config.apk));
DO(ParseJson(value["payload_config_path"], config.payload_config_path));
@@ -163,7 +151,7 @@
Json::Value root;
Json::String errs;
if (!parseFromStream(builder, in, &root, &errs)) {
- return Error() << "bad config: " << errs;
+ return Error() << errs;
}
Config config;
@@ -174,63 +162,23 @@
#undef DO
-Result<void> LoadSystemApexes(Config& config) {
- static const char* kApexInfoListFile = "/apex/apex-info-list.xml";
- std::optional<ApexInfoList> apex_info_list = readApexInfoList(kApexInfoListFile);
- if (!apex_info_list.has_value()) {
- return Error() << "Failed to read " << kApexInfoListFile;
- }
- auto get_apex_path = [&](const std::string& apex_name) -> std::optional<std::string> {
- for (const auto& apex_info : apex_info_list->getApexInfo()) {
- if (apex_info.getIsActive() && apex_info.getModuleName() == apex_name) {
- return apex_info.getModulePath();
- }
- }
- return std::nullopt;
- };
- for (const auto& apex_name : config.system_apexes) {
- const auto& apex_path = get_apex_path(apex_name);
- if (!apex_path.has_value()) {
- return Error() << "Can't find the system apex: " << apex_name;
- }
- config.apexes.push_back(ApexConfig{
- .name = apex_name,
- .path = *apex_path,
- .public_key = std::nullopt,
- .root_digest = std::nullopt,
- });
- }
- return {};
-}
-
Result<void> MakeMetadata(const Config& config, const std::string& filename) {
Metadata metadata;
metadata.set_version(1);
+ int apex_index = 0;
for (const auto& apex_config : config.apexes) {
auto* apex = metadata.add_apexes();
-
- // name
apex->set_name(apex_config.name);
-
- // publicKey
- if (apex_config.public_key.has_value()) {
- apex->set_publickey(apex_config.public_key.value());
- }
-
- // rootDigest
- if (apex_config.root_digest.has_value()) {
- apex->set_rootdigest(apex_config.root_digest.value());
- }
+ apex->set_partition_name("microdroid-apex-" + std::to_string(apex_index++));
+ apex->set_is_factory(true);
}
if (config.apk.has_value()) {
auto* apk = metadata.mutable_apk();
apk->set_name(config.apk->name);
apk->set_payload_partition_name("microdroid-apk");
- if (config.apk->idsig_path.has_value()) {
- apk->set_idsig_partition_name("microdroid-apk-idsig");
- }
+ apk->set_idsig_partition_name("microdroid-apk-idsig");
}
if (config.payload_config_path.has_value()) {
@@ -241,34 +189,8 @@
return WriteMetadata(metadata, out);
}
-// fill (zeros + original file's size) with aligning BLOCK_SIZE(4096) boundary
-// return true when the filler is generated.
-Result<bool> SizeFiller(const std::string& file_path, const std::string& filler_path) {
- auto file_size = GetFileSize(file_path);
- if (!file_size.ok()) {
- return file_size.error();
- }
- auto disk_size = AlignToPartitionSize(*file_size + sizeof(uint32_t));
-
- unique_fd fd(TEMP_FAILURE_RETRY(open(filler_path.c_str(), O_CREAT | O_WRONLY | O_TRUNC, 0600)));
- if (fd.get() == -1) {
- return ErrnoError() << "open(" << filler_path << ") failed.";
- }
- uint32_t size = htobe32(static_cast<uint32_t>(*file_size));
- if (ftruncate(fd.get(), disk_size - *file_size) == -1) {
- return ErrnoError() << "ftruncate(" << filler_path << ") failed.";
- }
- if (lseek(fd.get(), -sizeof(size), SEEK_END) == -1) {
- return ErrnoError() << "lseek(" << filler_path << ") failed.";
- }
- if (write(fd.get(), &size, sizeof(size)) <= 0) {
- return ErrnoError() << "write(" << filler_path << ") failed.";
- }
- return true;
-}
-
// fill zeros to align |file_path|'s size to BLOCK_SIZE(4096) boundary.
-// return true when the filler is generated.
+// return true when the filler is needed.
Result<bool> ZeroFiller(const std::string& file_path, const std::string& filler_path) {
auto file_size = GetFileSize(file_path);
if (!file_size.ok()) {
@@ -288,32 +210,17 @@
return true;
}
-// Do not generate any fillers
-// Note that CreateCompositeDisk() handles gaps between partitions.
-Result<bool> NoFiller(const std::string& file_path, const std::string& filler_path) {
- (void)file_path;
- (void)filler_path;
- return false;
-}
-
Result<void> MakePayload(const Config& config, const std::string& metadata_file,
const std::string& output_file) {
std::vector<MultipleImagePartition> partitions;
- // put metadata at the first partition
- partitions.push_back(MultipleImagePartition{
- .label = "payload-metadata",
- .image_file_paths = {metadata_file},
- .type = kLinuxFilesystem,
- .read_only = true,
- });
-
int filler_count = 0;
- auto add_partition = [&](auto partition_name, auto file_path, auto filler) -> Result<void> {
+ auto add_partition = [&](auto partition_name, auto file_path) -> Result<void> {
std::vector<std::string> image_files{file_path};
- std::string filler_path = output_file + "." + std::to_string(filler_count++);
- if (auto ret = filler(file_path, filler_path); !ret.ok()) {
+ std::string filler_path =
+ AppendFileName(output_file, "-filler-" + std::to_string(filler_count++));
+ if (auto ret = ZeroFiller(file_path, filler_path); !ret.ok()) {
return ret.error();
} else if (*ret) {
image_files.push_back(filler_path);
@@ -327,27 +234,31 @@
return {};
};
- // put apexes at the subsequent partitions with "size" filler
+ // put metadata at the first partition
+ partitions.push_back(MultipleImagePartition{
+ .label = "payload-metadata",
+ .image_file_paths = {metadata_file},
+ .type = kLinuxFilesystem,
+ .read_only = true,
+ });
+ // put apexes at the subsequent partitions
for (size_t i = 0; i < config.apexes.size(); i++) {
const auto& apex_config = config.apexes[i];
- std::string apex_path = ToAbsolute(apex_config.path, config.dirname);
- if (auto ret = add_partition("microdroid-apex-" + std::to_string(i), apex_path, SizeFiller);
+ std::string apex_path = RelativeTo(apex_config.path, config.dirname);
+ if (auto ret = add_partition("microdroid-apex-" + std::to_string(i), apex_path);
!ret.ok()) {
return ret.error();
}
}
- // put apk with "zero" filler.
- // TODO(jooyung): partition name("microdroid-apk") is TBD
+ // put apk and its idsig
if (config.apk.has_value()) {
- std::string apk_path = ToAbsolute(config.apk->path, config.dirname);
- if (auto ret = add_partition("microdroid-apk", apk_path, ZeroFiller); !ret.ok()) {
+ std::string apk_path = RelativeTo(config.apk->path, config.dirname);
+ if (auto ret = add_partition("microdroid-apk", apk_path); !ret.ok()) {
return ret.error();
}
- if (config.apk->idsig_path.has_value()) {
- std::string idsig_path = ToAbsolute(config.apk->idsig_path.value(), config.dirname);
- if (auto ret = add_partition("microdroid-apk-idsig", idsig_path, NoFiller); !ret.ok()) {
- return ret.error();
- }
+ std::string idsig_path = RelativeTo(config.apk->idsig_path, config.dirname);
+ if (auto ret = add_partition("microdroid-apk-idsig", idsig_path); !ret.ok()) {
+ return ret.error();
}
}
@@ -358,29 +269,34 @@
}
int main(int argc, char** argv) {
- if (argc != 3) {
- std::cerr << "Usage: " << argv[0] << " <config> <output>\n";
+ if (argc < 3 || argc > 4) {
+ std::cerr << "Usage: " << argv[0] << " [--metadata-only] <config> <output>\n";
return 1;
}
+ int arg_index = 1;
+ bool metadata_only = false;
+ if (strcmp(argv[arg_index], "--metadata-only") == 0) {
+ metadata_only = true;
+ arg_index++;
+ }
- auto config = LoadConfig(argv[1]);
+ auto config = LoadConfig(argv[arg_index++]);
if (!config.ok()) {
- std::cerr << config.error() << '\n';
+ std::cerr << "bad config: " << config.error() << '\n';
return 1;
}
- if (const auto res = LoadSystemApexes(*config); !res.ok()) {
- std::cerr << res.error() << '\n';
- return 1;
- }
-
- const std::string output_file(argv[2]);
- const std::string metadata_file = AppendFileName(output_file, "-metadata");
+ const std::string output_file(argv[arg_index++]);
+ const std::string metadata_file =
+ metadata_only ? output_file : AppendFileName(output_file, "-metadata");
if (const auto res = MakeMetadata(*config, metadata_file); !res.ok()) {
std::cerr << res.error() << '\n';
return 1;
}
+ if (metadata_only) {
+ return 0;
+ }
if (const auto res = MakePayload(*config, metadata_file, output_file); !res.ok()) {
std::cerr << res.error() << '\n';
return 1;
diff --git a/microdroid/uboot-env-x86_64.txt b/microdroid/uboot-env-x86_64.txt
deleted file mode 100644
index 1abafa6..0000000
--- a/microdroid/uboot-env-x86_64.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# Static u-boot environment variables for microdroid. See b/180481192
-
-# Boot the device following the Android boot procedure
-# `0` is the disk number of os_composite.img
-# `a` and `_a` are the slot index for A/B
-bootcmd=avb init virtio 0 && avb verify _a && boot_android virtio 0 a
-
-bootdelay=0
-
-# U-Boot in x86_64 by defaults loads kernel at 0x20000000 (512MB), which is
-# out of the physical memory when the VM is launched with the default memory
-# size of 256MB. To avoid that, explicitly set the kernel load addresss using
-# loadaddr variable.
-loadaddr=0x02000000
-fdtaddr=0x40000000
diff --git a/microdroid/uboot-env.txt b/microdroid/uboot-env.txt
index 585702e..e5f8b79 100644
--- a/microdroid/uboot-env.txt
+++ b/microdroid/uboot-env.txt
@@ -1,9 +1 @@
-# Static u-boot environment variables for microdroid. See b/180481192
-
-# Boot the device following the Android boot procedure
-# `0` is the disk number of os_composite.img
-# `a` and `_a` are the slot index for A/B
-bootcmd=avb init virtio 0 && avb verify _a && boot_android virtio 0 a
-
-bootdelay=0
-fdtaddr=0x80000000
+# Empty environment for bootloader debugging
diff --git a/microdroid/ueventd.rc b/microdroid/ueventd.rc
new file mode 100644
index 0000000..340a1f7
--- /dev/null
+++ b/microdroid/ueventd.rc
@@ -0,0 +1,31 @@
+uevent_socket_rcvbuf_size 16M
+
+subsystem dma_heap
+ devname uevent_devpath
+ dirname /dev/dma_heap
+
+/dev/null 0666 root root
+/dev/zero 0666 root root
+/dev/full 0666 root root
+/dev/ptmx 0666 root root
+/dev/tty 0666 root root
+/dev/random 0666 root root
+/dev/urandom 0666 root root
+/dev/ashmem* 0666 root root
+/dev/binder 0666 root root
+/dev/hwbinder 0666 root root
+/dev/vndbinder 0666 root root
+
+/dev/pmsg0 0222 root log
+/dev/dma_heap/system 0444 system system
+/dev/dma_heap/system-uncached 0444 system system
+/dev/dma_heap/system-secure 0444 system system
+
+# these should not be world writable
+/dev/rtc0 0640 system system
+/dev/tty0 0660 root system
+
+# Virtual console for logcat
+/dev/hvc2 0660 logd logd
+
+/dev/open-dice0 0660 diced diced
diff --git a/microdroid_manager/Android.bp b/microdroid_manager/Android.bp
index dabcf31..e4827aa 100644
--- a/microdroid_manager/Android.bp
+++ b/microdroid_manager/Android.bp
@@ -9,30 +9,60 @@
edition: "2018",
prefer_rlib: true,
rustlibs: [
+ "android.hardware.security.dice-V1-rust",
+ "android.security.dice-rust",
+ "android.system.virtualizationservice-rust",
+ "android.system.virtualmachineservice-rust",
"libanyhow",
+ "libapkverify",
+ "libavb_bindgen",
+ "libbinder_rpc_unstable_bindgen",
+ "libbinder_rs",
+ "libbyteorder",
+ "libdiced_utils",
+ "libglob",
+ "libidsig",
+ "libitertools",
"libkernlog",
"liblibc",
"liblog_rust",
"libmicrodroid_metadata",
"libmicrodroid_payload_config",
+ "libnix",
+ "libonce_cell",
"libprotobuf",
+ "libring",
+ "librustutils",
"libserde",
+ "libserde_cbor",
"libserde_json",
- "libsystem_properties-rust",
+ "libthiserror",
+ "libuuid",
"libvsock",
+ "librand",
+ "libzip",
+ ],
+ shared_libs: [
+ "libbinder_rpc_unstable",
],
init_rc: ["microdroid_manager.rc"],
+ multilib: {
+ lib32: {
+ enabled: false,
+ },
+ },
}
rust_binary {
name: "microdroid_manager",
defaults: ["microdroid_manager_defaults"],
+ bootstrap: true,
}
rust_test {
name: "microdroid_manager_test",
defaults: ["microdroid_manager_defaults"],
- test_suites: ["device-tests"],
+ test_suites: ["general-tests"],
rustlibs: [
"libtempfile",
],
@@ -41,4 +71,5 @@
enabled: false,
},
},
+ data: ["tests/data/*"],
}
diff --git a/microdroid_manager/src/instance.rs b/microdroid_manager/src/instance.rs
new file mode 100644
index 0000000..267a0e3
--- /dev/null
+++ b/microdroid_manager/src/instance.rs
@@ -0,0 +1,342 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provides routines to read/write on the instance disk.
+//!
+//! Instance disk is a disk where the identity of a VM instance is recorded. The identity usually
+//! includes certificates of the VM payload that is trusted, but not limited to it. Instance disk
+//! is empty when a VM is first booted. The identity data is filled in during the first boot, and
+//! then encrypted and signed. Subsequent boots decrypt and authenticate the data and use the
+//! identity data to further verify the payload (e.g. against the certificate).
+//!
+//! Instance disk consists of a disk header and one or more partitions each of which consists of a
+//! header and payload. Each header (both the disk header and a partition header) is 512 bytes
+//! long. Payload is just next to the header and its size can be arbitrary. Headers are located at
+//! 512 bytes boundaries. So, when the size of a payload is not multiple of 512, there exists a gap
+//! between the end of the payload and the start of the next partition (if there is any).
+//!
+//! Each partition is identified by a UUID. A partition is created for a program loader that
+//! participates in the boot chain of the VM. Each program loader is expected to locate the
+//! partition that corresponds to the loader using the UUID that is assigned to the loader.
+//!
+//! The payload of a partition is encrypted/signed by a key that is unique to the loader and to the
+//! VM as well. Failing to decrypt/authenticate a partition by a loader stops the boot process.
+
+use crate::ioutil;
+
+use android_security_dice::aidl::android::security::dice::IDiceNode::IDiceNode;
+use anyhow::{anyhow, bail, Context, Result};
+use binder::wait_for_interface;
+use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
+use ring::aead::{Aad, Algorithm, LessSafeKey, Nonce, UnboundKey, AES_256_GCM};
+use ring::hkdf::{Salt, HKDF_SHA256};
+use serde::{Deserialize, Serialize};
+use std::fs::{File, OpenOptions};
+use std::io::{Read, Seek, SeekFrom, Write};
+use uuid::Uuid;
+
+/// Path to the instance disk inside the VM
+const INSTANCE_IMAGE_PATH: &str = "/dev/block/by-name/vm-instance";
+
+/// Magic string in the instance disk header
+const DISK_HEADER_MAGIC: &str = "Android-VM-instance";
+
+/// Version of the instance disk format
+const DISK_HEADER_VERSION: u16 = 1;
+
+/// Size of the headers in the instance disk
+const DISK_HEADER_SIZE: u64 = 512;
+const PARTITION_HEADER_SIZE: u64 = 512;
+
+/// UUID of the partition that microdroid manager uses
+const MICRODROID_PARTITION_UUID: &str = "cf9afe9a-0662-11ec-a329-c32663a09d75";
+
+/// Encryption algorithm used to cipher payload
+static ENCRYPT_ALG: &Algorithm = &AES_256_GCM;
+
+/// Handle to the instance disk
+pub struct InstanceDisk {
+ file: File,
+}
+
+/// Information from a partition header
+struct PartitionHeader {
+ uuid: Uuid,
+ payload_size: u64, // in bytes
+}
+
+/// Offset of a partition in the instance disk
+type PartitionOffset = u64;
+
+impl InstanceDisk {
+ /// Creates handle to instance disk
+ pub fn new() -> Result<Self> {
+ let mut file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .open(INSTANCE_IMAGE_PATH)
+ .with_context(|| format!("Failed to open {}", INSTANCE_IMAGE_PATH))?;
+
+ // Check if this file is a valid instance disk by examining the header (the first block)
+ let mut magic = [0; DISK_HEADER_MAGIC.len()];
+ file.read_exact(&mut magic)?;
+ if magic != DISK_HEADER_MAGIC.as_bytes() {
+ bail!("invalid magic: {:?}", magic);
+ }
+
+ let version = file.read_u16::<LittleEndian>()?;
+ if version == 0 {
+ bail!("invalid version: {}", version);
+ }
+ if version > DISK_HEADER_VERSION {
+ bail!("unsupported version: {}", version);
+ }
+
+ Ok(Self { file })
+ }
+
+ /// Reads the identity data that was written by microdroid manager. The returned data is
+ /// plaintext, although it is stored encrypted. In case when the partition for microdroid
+ /// manager doesn't exist, which can happen if it's the first boot, `Ok(None)` is returned.
+ pub fn read_microdroid_data(&mut self) -> Result<Option<MicrodroidData>> {
+ let (header, offset) = self.locate_microdroid_header()?;
+ if header.is_none() {
+ return Ok(None);
+ }
+ let header = header.unwrap();
+ let payload_offset = offset + PARTITION_HEADER_SIZE;
+ self.file.seek(SeekFrom::Start(payload_offset))?;
+
+ // Read the 12-bytes nonce (unencrypted)
+ let mut nonce = [0; 12];
+ self.file.read_exact(&mut nonce)?;
+ let nonce = Nonce::assume_unique_for_key(nonce);
+
+ // Read the encrypted payload
+ let payload_size = header.payload_size - 12; // we already have read the nonce
+ let mut data = vec![0; payload_size as usize];
+ self.file.read_exact(&mut data)?;
+
+ // Read the header as well because it's part of the signed data (though not encrypted).
+ let mut header = [0; PARTITION_HEADER_SIZE as usize];
+ self.file.seek(SeekFrom::Start(offset))?;
+ self.file.read_exact(&mut header)?;
+
+ // Decrypt and authenticate the data (along with the header). The data is decrypted in
+ // place. `open_in_place` returns slice to the decrypted part in the buffer.
+ let plaintext_len = get_key()?.open_in_place(nonce, Aad::from(&header), &mut data)?.len();
+ // Truncate to remove the tag
+ data.truncate(plaintext_len);
+
+ let microdroid_data = serde_cbor::from_slice(data.as_slice())?;
+ Ok(Some(microdroid_data))
+ }
+
+ /// Writes identity data to the partition for microdroid manager. The partition is appended
+ /// if it doesn't exist. The data is stored encrypted.
+ pub fn write_microdroid_data(&mut self, microdroid_data: &MicrodroidData) -> Result<()> {
+ let (header, offset) = self.locate_microdroid_header()?;
+
+ let mut data = serde_cbor::to_vec(microdroid_data)?;
+
+ // By encrypting and signing the data, tag will be appended. The tag also becomes part of
+ // the encrypted payload which will be written. In addition, a 12-bytes nonce will be
+ // prepended (non-encrypted).
+ let payload_size = (data.len() + ENCRYPT_ALG.tag_len() + 12) as u64;
+
+ // If the partition exists, make sure we don't change the partition size. If not (i.e.
+ // partition is not found), write the header at the empty place.
+ if let Some(header) = header {
+ if header.payload_size != payload_size {
+ bail!("Can't change payload size from {} to {}", header.payload_size, payload_size);
+ }
+ } else {
+ let uuid = Uuid::parse_str(MICRODROID_PARTITION_UUID)?;
+ self.write_header_at(offset, &uuid, payload_size)?;
+ }
+
+ // Read the header as it is used as additionally authenticated data (AAD).
+ let mut header = [0; PARTITION_HEADER_SIZE as usize];
+ self.file.seek(SeekFrom::Start(offset))?;
+ self.file.read_exact(&mut header)?;
+
+ // Generate a nonce randomly and record it on the disk first.
+ let nonce = Nonce::assume_unique_for_key(rand::random::<[u8; 12]>());
+ self.file.seek(SeekFrom::Start(offset + PARTITION_HEADER_SIZE))?;
+ self.file.write_all(nonce.as_ref())?;
+
+ // Then encrypt and sign the data. The non-encrypted input data is copied to a vector
+ // because it is encrypted in place, and also the tag is appended.
+ get_key()?.seal_in_place_append_tag(nonce, Aad::from(&header), &mut data)?;
+
+ // Persist the encrypted payload data
+ self.file.write_all(&data)?;
+ ioutil::blkflsbuf(&mut self.file)?;
+
+ Ok(())
+ }
+
+ /// Read header at `header_offset` and parse it into a `PartitionHeader`.
+ fn read_header_at(&mut self, header_offset: u64) -> Result<PartitionHeader> {
+ assert!(
+ header_offset % PARTITION_HEADER_SIZE == 0,
+ "header offset {} is not aligned to 512 bytes",
+ header_offset
+ );
+
+ let mut uuid = [0; 16];
+ self.file.seek(SeekFrom::Start(header_offset))?;
+ self.file.read_exact(&mut uuid)?;
+ let uuid = Uuid::from_bytes(uuid);
+ let payload_size = self.file.read_u64::<LittleEndian>()?;
+
+ Ok(PartitionHeader { uuid, payload_size })
+ }
+
+ /// Write header at `header_offset`
+ fn write_header_at(
+ &mut self,
+ header_offset: u64,
+ uuid: &Uuid,
+ payload_size: u64,
+ ) -> Result<()> {
+ self.file.seek(SeekFrom::Start(header_offset))?;
+ self.file.write_all(uuid.as_bytes())?;
+ self.file.write_u64::<LittleEndian>(payload_size)?;
+ Ok(())
+ }
+
+ /// Locate the header of the partition for microdroid manager. A pair of `PartitionHeader` and
+ /// the offset of the partition in the disk is returned. If the partition is not found,
+ /// `PartitionHeader` is `None` and the offset points to the empty partition that can be used
+ /// for the partition.
+ fn locate_microdroid_header(&mut self) -> Result<(Option<PartitionHeader>, PartitionOffset)> {
+ let microdroid_uuid = Uuid::parse_str(MICRODROID_PARTITION_UUID)?;
+
+ // the first partition header is located just after the disk header
+ let mut header_offset = DISK_HEADER_SIZE;
+ loop {
+ let header = self.read_header_at(header_offset)?;
+ if header.uuid == microdroid_uuid {
+ // found a matching header
+ return Ok((Some(header), header_offset));
+ } else if header.uuid == Uuid::nil() {
+ // found an empty space
+ return Ok((None, header_offset));
+ }
+ // Move to the next partition. Be careful about overflow.
+ let payload_size = round_to_multiple(header.payload_size, PARTITION_HEADER_SIZE)?;
+ let part_size = payload_size
+ .checked_add(PARTITION_HEADER_SIZE)
+ .ok_or_else(|| anyhow!("partition too large"))?;
+ header_offset = header_offset
+ .checked_add(part_size)
+ .ok_or_else(|| anyhow!("next partition at invalid offset"))?;
+ }
+ }
+}
+
+/// Round `n` up to the nearest multiple of `unit`
+fn round_to_multiple(n: u64, unit: u64) -> Result<u64> {
+ assert!((unit & (unit - 1)) == 0, "{} is not power of two", unit);
+ let ret = (n + unit - 1) & !(unit - 1);
+ if ret < n {
+ bail!("overflow")
+ }
+ Ok(ret)
+}
+
+struct ZeroOnDropKey(LessSafeKey);
+
+impl Drop for ZeroOnDropKey {
+ fn drop(&mut self) {
+ // Zeroize the key by overwriting it with a key constructed from zeros of same length
+ // This works because the raw key bytes are allocated inside the struct, not on the heap
+ let zero = [0; 32];
+ let zero_key = LessSafeKey::new(UnboundKey::new(ENCRYPT_ALG, &zero).unwrap());
+ unsafe {
+ ::std::ptr::write_volatile::<LessSafeKey>(&mut self.0, zero_key);
+ }
+ }
+}
+
+impl std::ops::Deref for ZeroOnDropKey {
+ type Target = LessSafeKey;
+ fn deref(&self) -> &LessSafeKey {
+ &self.0
+ }
+}
+
+/// Returns the key that is used to encrypt the microdroid manager partition. It is derived from
+/// the sealing CDI of the previous stage, which is Android Boot Loader (ABL).
+fn get_key() -> Result<ZeroOnDropKey> {
+ // Sealing CDI from the previous stage.
+ let diced = wait_for_interface::<dyn IDiceNode>("android.security.dice.IDiceNode")
+ .context("IDiceNode service not found")?;
+ let bcc_handover = diced.derive(&[]).context("Failed to get BccHandover")?;
+
+ // Derive a key from the Sealing CDI
+ // Step 1 is extraction: https://datatracker.ietf.org/doc/html/rfc5869#section-2.2 where a
+ // pseudo random key (PRK) is extracted from the Input Keying Material (IKM, which is secret) and
+ // optional salt.
+ let salt = Salt::new(HKDF_SHA256, &[]); // use 0 as salt
+ let prk = salt.extract(&bcc_handover.cdiSeal); // Sealing CDI as IKM
+
+ // Step 2 is expansion: https://datatracker.ietf.org/doc/html/rfc5869#section-2.3 where the PRK
+ // (optionally with the `info` which gives contextual information) is expanded into the output
+ // keying material (OKM). Note that the process fails only when the size of OKM is longer than
+ // 255 * SHA256_HASH_SIZE (32), which isn't the case here.
+ let info = [b"microdroid_manager_key".as_ref()];
+ let okm = prk.expand(&info, HKDF_SHA256).unwrap(); // doesn't fail as explained above
+ let mut key = [0; 32];
+ okm.fill(&mut key).unwrap(); // doesn't fail as explained above
+
+ // The term LessSafe might be misleading here. LessSafe here just means that the API can
+ // possibly accept same nonces for different messages. However, since we encrypt/decrypt only a
+ // single message (the microdroid_manager partition payload) with a randomly generated nonce,
+ // this is safe enough.
+ let ret = ZeroOnDropKey(LessSafeKey::new(UnboundKey::new(ENCRYPT_ALG, &key).unwrap()));
+
+ // Don't forget to zeroize the raw key array as well
+ unsafe {
+ ::std::ptr::write_volatile::<[u8; 32]>(&mut key, [0; 32]);
+ }
+
+ Ok(ret)
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
+pub struct MicrodroidData {
+ pub salt: Vec<u8>, // Should be [u8; 64] but that isn't serializable.
+ pub apk_data: ApkData,
+ pub extra_apks_data: Vec<ApkData>,
+ pub apex_data: Vec<ApexData>,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
+pub struct ApkData {
+ pub root_hash: Box<RootHash>,
+ pub pubkey: Box<[u8]>,
+}
+
+pub type RootHash = [u8];
+
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
+pub struct ApexData {
+ pub name: String,
+ pub public_key: Vec<u8>,
+ pub root_digest: Vec<u8>,
+ pub last_update_seconds: u64,
+ pub is_factory: bool,
+}
diff --git a/microdroid_manager/src/ioutil.rs b/microdroid_manager/src/ioutil.rs
index e8732ad..8ac3712 100644
--- a/microdroid_manager/src/ioutil.rs
+++ b/microdroid_manager/src/ioutil.rs
@@ -14,8 +14,13 @@
//! IO utilities
+use anyhow::{anyhow, bail, Result};
+use log::debug;
+use std::fmt::Debug;
use std::fs::File;
use std::io;
+use std::os::unix::fs::FileTypeExt;
+use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::thread;
use std::time::{Duration, Instant};
@@ -23,17 +28,18 @@
const SLEEP_DURATION: Duration = Duration::from_millis(5);
/// waits for a file with a timeout and returns it
-pub fn wait_for_file<P: AsRef<Path>>(path: P, timeout: Duration) -> io::Result<File> {
+pub fn wait_for_file<P: AsRef<Path> + Debug>(path: P, timeout: Duration) -> Result<File> {
+ debug!("waiting for {:?}...", path);
let begin = Instant::now();
loop {
match File::open(&path) {
Ok(file) => return Ok(file),
Err(error) => {
if error.kind() != io::ErrorKind::NotFound {
- return Err(error);
+ return Err(anyhow!(error));
}
if begin.elapsed() > timeout {
- return Err(io::Error::from(io::ErrorKind::NotFound));
+ return Err(anyhow!(io::Error::from(io::ErrorKind::NotFound)));
}
thread::sleep(SLEEP_DURATION);
}
@@ -41,13 +47,27 @@
}
}
+// From include/uapi/linux/fs.h
+const BLK: u8 = 0x12;
+const BLKFLSBUF: u8 = 97;
+nix::ioctl_none!(_blkflsbuf, BLK, BLKFLSBUF);
+
+pub fn blkflsbuf(f: &mut File) -> Result<()> {
+ if !f.metadata()?.file_type().is_block_device() {
+ bail!("{:?} is not a block device", f.as_raw_fd());
+ }
+ // SAFETY: The file is kept open until the end of this function.
+ unsafe { _blkflsbuf(f.as_raw_fd()) }?;
+ Ok(())
+}
+
#[cfg(test)]
mod tests {
use super::*;
use std::io::{Read, Write};
#[test]
- fn test_wait_for_file() -> io::Result<()> {
+ fn test_wait_for_file() -> Result<()> {
let test_dir = tempfile::TempDir::new().unwrap();
let test_file = test_dir.path().join("test.txt");
thread::spawn(move || -> io::Result<()> {
@@ -69,6 +89,9 @@
let test_file = test_dir.path().join("test.txt");
let file = wait_for_file(&test_file, Duration::from_secs(1));
assert!(file.is_err());
- assert_eq!(io::ErrorKind::NotFound, file.unwrap_err().kind());
+ assert_eq!(
+ io::ErrorKind::NotFound,
+ file.unwrap_err().root_cause().downcast_ref::<io::Error>().unwrap().kind()
+ );
}
}
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index 9efa68a..9e159d2 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -14,48 +14,506 @@
//! Microdroid Manager
+mod instance;
mod ioutil;
-mod metadata;
+mod payload;
-use anyhow::{anyhow, bail, Result};
-use log::{error, info, warn};
+use crate::instance::{ApkData, InstanceDisk, MicrodroidData, RootHash};
+use android_hardware_security_dice::aidl::android::hardware::security::dice::{
+ Config::Config, InputValues::InputValues, Mode::Mode,
+};
+use android_security_dice::aidl::android::security::dice::IDiceMaintenance::IDiceMaintenance;
+use anyhow::{anyhow, bail, ensure, Context, Error, Result};
+use apkverify::{get_public_key_der, verify};
+use binder::unstable_api::{new_spibinder, AIBinder};
+use binder::{wait_for_interface, FromIBinder, Strong};
+use diced_utils::cbor::encode_header;
+use glob::glob;
+use idsig::V4Signature;
+use itertools::sorted;
+use log::{error, info};
+use microdroid_metadata::{write_metadata, Metadata};
use microdroid_payload_config::{Task, TaskType, VmPayloadConfig};
-use std::fs::{self, File};
+use payload::{get_apex_data_from_payload, load_metadata, to_metadata};
+use rand::Fill;
+use ring::digest;
+use rustutils::system_properties;
+use rustutils::system_properties::PropertyWatcher;
+use std::convert::TryInto;
+use std::fs::{self, create_dir, File, OpenOptions};
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::path::Path;
-use std::process::{Command, Stdio};
+use std::process::{Child, Command, Stdio};
use std::str;
-use std::time::Duration;
-use system_properties::PropertyWatcher;
+use std::time::{Duration, SystemTime};
use vsock::VsockStream;
-const WAIT_TIMEOUT: Duration = Duration::from_secs(10);
+use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::IVirtualMachineService::{
+ ERROR_PAYLOAD_CHANGED, ERROR_PAYLOAD_VERIFICATION_FAILED, ERROR_PAYLOAD_INVALID_CONFIG, ERROR_UNKNOWN, VM_BINDER_SERVICE_PORT, VM_STREAM_SERVICE_PORT, IVirtualMachineService,
+};
-fn main() -> Result<()> {
- kernlog::init()?;
+const WAIT_TIMEOUT: Duration = Duration::from_secs(10);
+const MAIN_APK_PATH: &str = "/dev/block/by-name/microdroid-apk";
+const MAIN_APK_IDSIG_PATH: &str = "/dev/block/by-name/microdroid-apk-idsig";
+const MAIN_APK_DEVICE_NAME: &str = "microdroid-apk";
+const EXTRA_APK_PATH_PATTERN: &str = "/dev/block/by-name/extra-apk-*";
+const EXTRA_IDSIG_PATH_PATTERN: &str = "/dev/block/by-name/extra-idsig-*";
+const DM_MOUNTED_APK_PATH: &str = "/dev/block/mapper/microdroid-apk";
+const APKDMVERITY_BIN: &str = "/system/bin/apkdmverity";
+const ZIPFUSE_BIN: &str = "/system/bin/zipfuse";
+const AVF_STRICT_BOOT: &str = "/sys/firmware/devicetree/base/chosen/avf,strict-boot";
+const AVF_NEW_INSTANCE: &str = "/sys/firmware/devicetree/base/chosen/avf,new-instance";
+
+/// The CID representing the host VM
+const VMADDR_CID_HOST: u32 = 2;
+
+const APEX_CONFIG_DONE_PROP: &str = "apex_config.done";
+const LOGD_ENABLED_PROP: &str = "ro.boot.logd.enabled";
+const APP_DEBUGGABLE_PROP: &str = "ro.boot.microdroid.app_debuggable";
+
+#[derive(thiserror::Error, Debug)]
+enum MicrodroidError {
+ #[error("Payload has changed: {0}")]
+ PayloadChanged(String),
+ #[error("Payload verification has failed: {0}")]
+ PayloadVerificationFailed(String),
+ #[error("Payload config is invalid: {0}")]
+ InvalidConfig(String),
+}
+
+fn translate_error(err: &Error) -> (i32, String) {
+ if let Some(e) = err.downcast_ref::<MicrodroidError>() {
+ match e {
+ MicrodroidError::PayloadChanged(msg) => (ERROR_PAYLOAD_CHANGED, msg.to_string()),
+ MicrodroidError::PayloadVerificationFailed(msg) => {
+ (ERROR_PAYLOAD_VERIFICATION_FAILED, msg.to_string())
+ }
+ MicrodroidError::InvalidConfig(msg) => (ERROR_PAYLOAD_INVALID_CONFIG, msg.to_string()),
+ }
+ } else {
+ (ERROR_UNKNOWN, err.to_string())
+ }
+}
+
+fn get_vms_rpc_binder() -> Result<Strong<dyn IVirtualMachineService>> {
+ // SAFETY: AIBinder returned by RpcClient has correct reference count, and the ownership can be
+ // safely taken by new_spibinder.
+ let ibinder = unsafe {
+ new_spibinder(binder_rpc_unstable_bindgen::RpcClient(
+ VMADDR_CID_HOST,
+ VM_BINDER_SERVICE_PORT as u32,
+ ) as *mut AIBinder)
+ };
+ if let Some(ibinder) = ibinder {
+ <dyn IVirtualMachineService>::try_from(ibinder).context("Cannot connect to RPC service")
+ } else {
+ bail!("Invalid raw AIBinder")
+ }
+}
+
+fn main() {
+ if let Err(e) = try_main() {
+ error!("Failed with {:?}. Shutting down...", e);
+ if let Err(e) = system_properties::write("sys.powerctl", "shutdown") {
+ error!("failed to shutdown {:?}", e);
+ }
+ std::process::exit(1);
+ }
+}
+
+fn try_main() -> Result<()> {
+ let _ = kernlog::init();
info!("started.");
- let metadata = metadata::load()?;
- if !metadata.payload_config_path.is_empty() {
- let config = load_config(Path::new(&metadata.payload_config_path))?;
-
- let fake_secret = "This is a placeholder for a value that is derived from the images that are loaded in the VM.";
- if let Err(err) = system_properties::write("ro.vmsecret.keymint", fake_secret) {
- warn!("failed to set ro.vmsecret.keymint: {}", err);
+ let service = get_vms_rpc_binder().context("cannot connect to VirtualMachineService")?;
+ match try_run_payload(&service) {
+ Ok(code) => {
+ info!("notifying payload finished");
+ service.notifyPayloadFinished(code)?;
+ if code == 0 {
+ info!("task successfully finished");
+ } else {
+ error!("task exited with exit code: {}", code);
+ }
+ Ok(())
}
+ Err(err) => {
+ error!("task terminated: {:?}", err);
+ let (error_code, message) = translate_error(&err);
+ service.notifyError(error_code, &message)?;
+ Err(err)
+ }
+ }
+}
- // TODO(jooyung): wait until sys.boot_completed?
- if let Some(main_task) = &config.task {
- exec_task(main_task).map_err(|e| {
- error!("failed to execute task: {}", e);
- e
- })?;
+fn dice_derivation(verified_data: MicrodroidData, payload_config_path: &str) -> Result<()> {
+ // Calculate compound digests of code and authorities
+ let mut code_hash_ctx = digest::Context::new(&digest::SHA512);
+ let mut authority_hash_ctx = digest::Context::new(&digest::SHA512);
+ code_hash_ctx.update(verified_data.apk_data.root_hash.as_ref());
+ authority_hash_ctx.update(verified_data.apk_data.pubkey.as_ref());
+ for extra_apk in verified_data.extra_apks_data {
+ code_hash_ctx.update(extra_apk.root_hash.as_ref());
+ authority_hash_ctx.update(extra_apk.pubkey.as_ref());
+ }
+ for apex in verified_data.apex_data {
+ code_hash_ctx.update(apex.root_digest.as_ref());
+ authority_hash_ctx.update(apex.public_key.as_ref());
+ }
+ let code_hash = code_hash_ctx.finish().as_ref().try_into().unwrap();
+ let authority_hash = authority_hash_ctx.finish().as_ref().try_into().unwrap();
+
+ // {
+ // -70002: "Microdroid payload",
+ // -71000: payload_config_path
+ // }
+ let mut config_desc = vec![
+ 0xa2, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72, 0x6f,
+ 0x69, 0x64, 0x20, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x3a, 0x00, 0x01, 0x15, 0x57,
+ ];
+ let config_path_bytes = payload_config_path.as_bytes();
+ encode_header(3, config_path_bytes.len().try_into().unwrap(), &mut config_desc)?;
+ config_desc.extend_from_slice(config_path_bytes);
+
+ // Check app debuggability, conservatively assuming it is debuggable
+ let app_debuggable = system_properties::read_bool(APP_DEBUGGABLE_PROP, true)?;
+
+ // Send the details to diced
+ let diced =
+ wait_for_interface::<dyn IDiceMaintenance>("android.security.dice.IDiceMaintenance")
+ .context("IDiceMaintenance service not found")?;
+ diced
+ .demoteSelf(&[InputValues {
+ codeHash: code_hash,
+ config: Config { desc: config_desc },
+ authorityHash: authority_hash,
+ authorityDescriptor: None,
+ mode: if app_debuggable { Mode::DEBUG } else { Mode::NORMAL },
+ hidden: verified_data.salt.try_into().unwrap(),
+ }])
+ .context("IDiceMaintenance::demoteSelf failed")?;
+ Ok(())
+}
+
+fn is_strict_boot() -> bool {
+ Path::new(AVF_STRICT_BOOT).exists()
+}
+
+fn is_new_instance() -> bool {
+ Path::new(AVF_NEW_INSTANCE).exists()
+}
+
+fn try_run_payload(service: &Strong<dyn IVirtualMachineService>) -> Result<i32> {
+ let metadata = load_metadata().context("Failed to load payload metadata")?;
+
+ let mut instance = InstanceDisk::new().context("Failed to load instance.img")?;
+ let saved_data = instance.read_microdroid_data().context("Failed to read identity data")?;
+
+ if is_strict_boot() {
+ // Provisioning must happen on the first boot and never again.
+ if is_new_instance() {
+ ensure!(
+ saved_data.is_none(),
+ MicrodroidError::InvalidConfig("Found instance data on first boot.".to_string())
+ );
+ } else {
+ ensure!(
+ saved_data.is_some(),
+ MicrodroidError::InvalidConfig("Instance data not found.".to_string())
+ );
+ };
+ }
+
+ // Verify the payload before using it.
+ let verified_data =
+ verify_payload(&metadata, saved_data.as_ref()).context("Payload verification failed")?;
+ if let Some(saved_data) = saved_data {
+ ensure!(
+ saved_data == verified_data,
+ MicrodroidError::PayloadChanged(String::from(
+ "Detected an update of the payload which isn't supported yet."
+ ))
+ );
+ info!("Saved data is verified.");
+ } else {
+ info!("Saving verified data.");
+ instance.write_microdroid_data(&verified_data).context("Failed to write identity data")?;
+ }
+
+ // Before reading a file from the APK, start zipfuse
+ run_zipfuse(
+ "fscontext=u:object_r:zipfusefs:s0,context=u:object_r:system_file:s0",
+ Path::new("/dev/block/mapper/microdroid-apk"),
+ Path::new("/mnt/apk"),
+ )
+ .context("Failed to run zipfuse")?;
+
+ ensure!(
+ !metadata.payload_config_path.is_empty(),
+ MicrodroidError::InvalidConfig("No payload_config_path in metadata".to_string())
+ );
+
+ let config = load_config(Path::new(&metadata.payload_config_path))?;
+ if config.extra_apks.len() != verified_data.extra_apks_data.len() {
+ return Err(anyhow!(
+ "config expects {} extra apks, but found only {}",
+ config.extra_apks.len(),
+ verified_data.extra_apks_data.len()
+ ));
+ }
+ mount_extra_apks(&config)?;
+
+ info!("DICE derivation for payload");
+ dice_derivation(verified_data, &metadata.payload_config_path)?;
+
+ // Wait until apex config is done. (e.g. linker configuration for apexes)
+ // TODO(jooyung): wait until sys.boot_completed?
+ wait_for_apex_config_done()?;
+
+ ensure!(
+ config.task.is_some(),
+ MicrodroidError::InvalidConfig("No task in VM config".to_string())
+ );
+ exec_task(&config.task.unwrap(), service)
+}
+
+struct ApkDmverityArgument<'a> {
+ apk: &'a str,
+ idsig: &'a str,
+ name: &'a str,
+ saved_root_hash: Option<&'a RootHash>,
+}
+
+fn run_apkdmverity(args: &[ApkDmverityArgument]) -> Result<Child> {
+ let mut cmd = Command::new(APKDMVERITY_BIN);
+
+ cmd.stdin(Stdio::null()).stdout(Stdio::null()).stderr(Stdio::null());
+
+ for argument in args {
+ cmd.arg("--apk").arg(argument.apk).arg(argument.idsig).arg(argument.name);
+ if let Some(root_hash) = argument.saved_root_hash {
+ cmd.arg(&to_hex_string(root_hash));
+ } else {
+ cmd.arg("none");
}
}
+ cmd.spawn().context("Spawn apkdmverity")
+}
+
+fn run_zipfuse(option: &str, zip_path: &Path, mount_dir: &Path) -> Result<Child> {
+ Command::new(ZIPFUSE_BIN)
+ .arg("-o")
+ .arg(option)
+ .arg(zip_path)
+ .arg(mount_dir)
+ .stdin(Stdio::null())
+ .stdout(Stdio::null())
+ .stderr(Stdio::null())
+ .spawn()
+ .context("Spawn zipfuse")
+}
+
+// Verify payload before executing it. For APK payload, Full verification (which is slow) is done
+// when the root_hash values from the idsig file and the instance disk are different. This function
+// returns the verified root hash (for APK payload) and pubkeys (for APEX payloads) that can be
+// saved to the instance disk.
+fn verify_payload(
+ metadata: &Metadata,
+ saved_data: Option<&MicrodroidData>,
+) -> Result<MicrodroidData> {
+ let start_time = SystemTime::now();
+
+ // Verify main APK
+ let root_hash = saved_data.map(|d| &d.apk_data.root_hash);
+ let root_hash_from_idsig = get_apk_root_hash_from_idsig(MAIN_APK_IDSIG_PATH)?;
+ let root_hash_trustful = root_hash == Some(&root_hash_from_idsig);
+
+ // If root_hash can be trusted, pass it to apkdmverity so that it uses the passed root_hash
+ // instead of the value read from the idsig file.
+ let main_apk_argument = {
+ ApkDmverityArgument {
+ apk: MAIN_APK_PATH,
+ idsig: MAIN_APK_IDSIG_PATH,
+ name: MAIN_APK_DEVICE_NAME,
+ saved_root_hash: if root_hash_trustful {
+ Some(root_hash_from_idsig.as_ref())
+ } else {
+ None
+ },
+ }
+ };
+ let mut apkdmverity_arguments = vec![main_apk_argument];
+
+ // Verify extra APKs
+ // For now, we can't read the payload config, so glob APKs and idsigs.
+ // Later, we'll see if it matches with the payload config.
+
+ // sort globbed paths to match apks (extra-apk-{idx}) and idsigs (extra-idsig-{idx})
+ // e.g. "extra-apk-0" corresponds to "extra-idsig-0"
+ let extra_apks =
+ sorted(glob(EXTRA_APK_PATH_PATTERN)?.collect::<Result<Vec<_>, _>>()?).collect::<Vec<_>>();
+ let extra_idsigs =
+ sorted(glob(EXTRA_IDSIG_PATH_PATTERN)?.collect::<Result<Vec<_>, _>>()?).collect::<Vec<_>>();
+ if extra_apks.len() != extra_idsigs.len() {
+ return Err(anyhow!(
+ "Extra apks/idsigs mismatch: {} apks but {} idsigs",
+ extra_apks.len(),
+ extra_idsigs.len()
+ ));
+ }
+ let extra_apks_count = extra_apks.len();
+
+ let (extra_apk_names, extra_root_hashes_from_idsig): (Vec<_>, Vec<_>) = extra_idsigs
+ .iter()
+ .enumerate()
+ .map(|(i, extra_idsig)| {
+ (
+ format!("extra-apk-{}", i),
+ get_apk_root_hash_from_idsig(extra_idsig.to_str().unwrap())
+ .expect("Can't find root hash from extra idsig"),
+ )
+ })
+ .unzip();
+
+ let saved_extra_root_hashes: Vec<_> = saved_data
+ .map(|d| d.extra_apks_data.iter().map(|apk_data| &apk_data.root_hash).collect())
+ .unwrap_or_else(Vec::new);
+ let extra_root_hashes_trustful: Vec<_> = extra_root_hashes_from_idsig
+ .iter()
+ .enumerate()
+ .map(|(i, root_hash_from_idsig)| {
+ saved_extra_root_hashes.get(i).copied() == Some(root_hash_from_idsig)
+ })
+ .collect();
+
+ for i in 0..extra_apks_count {
+ apkdmverity_arguments.push({
+ ApkDmverityArgument {
+ apk: extra_apks[i].to_str().unwrap(),
+ idsig: extra_idsigs[i].to_str().unwrap(),
+ name: &extra_apk_names[i],
+ saved_root_hash: if extra_root_hashes_trustful[i] {
+ Some(&extra_root_hashes_from_idsig[i])
+ } else {
+ None
+ },
+ }
+ });
+ }
+
+ // Start apkdmverity and wait for the dm-verity block
+ let mut apkdmverity_child = run_apkdmverity(&apkdmverity_arguments)?;
+
+ // While waiting for apkdmverity to mount APK, gather public keys and root digests from
+ // APEX payload.
+ let apex_data_from_payload = get_apex_data_from_payload(metadata)?;
+ if let Some(saved_data) = saved_data.map(|d| &d.apex_data) {
+ // We don't support APEX updates. (assuming that update will change root digest)
+ ensure!(
+ saved_data == &apex_data_from_payload,
+ MicrodroidError::PayloadChanged(String::from("APEXes have changed."))
+ );
+ let apex_metadata = to_metadata(&apex_data_from_payload);
+ // Pass metadata(with public keys and root digests) to apexd so that it uses the passed
+ // metadata instead of the default one (/dev/block/by-name/payload-metadata)
+ OpenOptions::new()
+ .create_new(true)
+ .write(true)
+ .open("/apex/vm-payload-metadata")
+ .context("Failed to open /apex/vm-payload-metadata")
+ .and_then(|f| write_metadata(&apex_metadata, f))?;
+ }
+ // Start apexd to activate APEXes
+ system_properties::write("ctl.start", "apexd-vm")?;
+
+ // TODO(inseob): add timeout
+ apkdmverity_child.wait()?;
+
+ // Do the full verification if the root_hash is un-trustful. This requires the full scanning of
+ // the APK file and therefore can be very slow if the APK is large. Note that this step is
+ // taken only when the root_hash is un-trustful which can be either when this is the first boot
+ // of the VM or APK was updated in the host.
+ // TODO(jooyung): consider multithreading to make this faster
+ let main_apk_pubkey = get_public_key_from_apk(DM_MOUNTED_APK_PATH, root_hash_trustful)?;
+ let extra_apks_data = extra_root_hashes_from_idsig
+ .into_iter()
+ .enumerate()
+ .map(|(i, extra_root_hash)| {
+ let mount_path = format!("/dev/block/mapper/{}", &extra_apk_names[i]);
+ let apk_pubkey = get_public_key_from_apk(&mount_path, extra_root_hashes_trustful[i])?;
+ Ok(ApkData { root_hash: extra_root_hash, pubkey: apk_pubkey })
+ })
+ .collect::<Result<Vec<_>>>()?;
+
+ info!("payload verification successful. took {:#?}", start_time.elapsed().unwrap());
+
+ // Use the salt from a verified instance, or generate a salt for a new instance.
+ let salt = if let Some(saved_data) = saved_data {
+ saved_data.salt.clone()
+ } else {
+ let mut salt = vec![0u8; 64];
+ salt.as_mut_slice().try_fill(&mut rand::thread_rng())?;
+ salt
+ };
+
+ // At this point, we can ensure that the root_hash from the idsig file is trusted, either by
+ // fully verifying the APK or by comparing it with the saved root_hash.
+ Ok(MicrodroidData {
+ salt,
+ apk_data: ApkData { root_hash: root_hash_from_idsig, pubkey: main_apk_pubkey },
+ extra_apks_data,
+ apex_data: apex_data_from_payload,
+ })
+}
+
+fn mount_extra_apks(config: &VmPayloadConfig) -> Result<()> {
+ // For now, only the number of apks is important, as the mount point and dm-verity name is fixed
+ for i in 0..config.extra_apks.len() {
+ let mount_dir = format!("/mnt/extra-apk/{}", i);
+ create_dir(Path::new(&mount_dir)).context("Failed to create mount dir for extra apks")?;
+
+ // don't wait, just detach
+ run_zipfuse(
+ "fscontext=u:object_r:zipfusefs:s0,context=u:object_r:extra_apk_file:s0",
+ Path::new(&format!("/dev/block/mapper/extra-apk-{}", i)),
+ Path::new(&mount_dir),
+ )
+ .context("Failed to zipfuse extra apks")?;
+ }
+
Ok(())
}
+// Waits until linker config is generated
+fn wait_for_apex_config_done() -> Result<()> {
+ let mut prop = PropertyWatcher::new(APEX_CONFIG_DONE_PROP)?;
+ loop {
+ prop.wait()?;
+ if system_properties::read_bool(APEX_CONFIG_DONE_PROP, false)? {
+ break;
+ }
+ }
+ Ok(())
+}
+
+fn get_apk_root_hash_from_idsig(path: &str) -> Result<Box<RootHash>> {
+ let mut idsig = File::open(path)?;
+ let idsig = V4Signature::from(&mut idsig)?;
+ Ok(idsig.hashing_info.raw_root_hash)
+}
+
+fn get_public_key_from_apk(apk: &str, root_hash_trustful: bool) -> Result<Box<[u8]>> {
+ if !root_hash_trustful {
+ verify(apk).context(MicrodroidError::PayloadVerificationFailed(format!(
+ "failed to verify {}",
+ apk
+ )))
+ } else {
+ get_public_key_der(apk)
+ }
+}
+
fn load_config(path: &Path) -> Result<VmPayloadConfig> {
info!("loading config from {:?}...", path);
let file = ioutil::wait_for_file(path, WAIT_TIMEOUT)?;
@@ -64,41 +522,27 @@
/// Executes the given task. Stdout of the task is piped into the vsock stream to the
/// virtualizationservice in the host side.
-fn exec_task(task: &Task) -> Result<()> {
- const VMADDR_CID_HOST: u32 = 2;
- const PORT_VIRT_SVC: u32 = 3000;
- let stdout = match VsockStream::connect_with_cid_port(VMADDR_CID_HOST, PORT_VIRT_SVC) {
- Ok(stream) => {
- // SAFETY: the ownership of the underlying file descriptor is transferred from stream
- // to the file object, and then into the Command object. When the command is finished,
- // the file descriptor is closed.
- let f = unsafe { File::from_raw_fd(stream.into_raw_fd()) };
- Stdio::from(f)
- }
- Err(e) => {
- error!("failed to connect to virtualization service: {}", e);
- // Don't fail hard here. Even if we failed to connect to the virtualizationservice,
- // we keep executing the task. This can happen if the owner of the VM doesn't register
- // callback to accept the stream. Use /dev/null as the stdout so that the task can
- // make progress without waiting for someone to consume the output.
- Stdio::null()
- }
- };
+fn exec_task(task: &Task, service: &Strong<dyn IVirtualMachineService>) -> Result<i32> {
info!("executing main task {:?}...", task);
- // TODO(jiyong): consider piping the stream into stdio (and probably stderr) as well.
- let mut child = build_command(task)?.stdout(stdout).spawn()?;
- match child.wait()?.code() {
- Some(0) => {
- info!("task successfully finished");
- Ok(())
- }
- Some(code) => bail!("task exited with exit code: {}", code),
- None => bail!("task terminated by signal"),
+ let mut command = build_command(task)?;
+
+ info!("notifying payload started");
+ service.notifyPayloadStarted()?;
+
+ // Start logging if enabled
+ // TODO(b/200914564) set filterspec if debug_level is app_only
+ if system_properties::read_bool(LOGD_ENABLED_PROP, false)? {
+ system_properties::write("ctl.start", "seriallogging")?;
}
+
+ let exit_status = command.spawn()?.wait()?;
+ exit_status.code().ok_or_else(|| anyhow!("Failed to get exit_code from the payload."))
}
fn build_command(task: &Task) -> Result<Command> {
- Ok(match task.type_ {
+ const VMADDR_CID_HOST: u32 = 2;
+
+ let mut command = match task.type_ {
TaskType::Executable => {
let mut command = Command::new(&task.command);
command.args(&task.args);
@@ -109,7 +553,30 @@
command.arg(find_library_path(&task.command)?).args(&task.args);
command
}
- })
+ };
+
+ match VsockStream::connect_with_cid_port(VMADDR_CID_HOST, VM_STREAM_SERVICE_PORT as u32) {
+ Ok(stream) => {
+ // SAFETY: the ownership of the underlying file descriptor is transferred from stream
+ // to the file object, and then into the Command object. When the command is finished,
+ // the file descriptor is closed.
+ let file = unsafe { File::from_raw_fd(stream.into_raw_fd()) };
+ command
+ .stdin(Stdio::from(file.try_clone()?))
+ .stdout(Stdio::from(file.try_clone()?))
+ .stderr(Stdio::from(file));
+ }
+ Err(e) => {
+ error!("failed to connect to virtualization service: {}", e);
+ // Don't fail hard here. Even if we failed to connect to the virtualizationservice,
+ // we keep executing the task. This can happen if the owner of the VM doesn't register
+ // callback to accept the stream. Use /dev/null as the stream so that the task can
+ // make progress without waiting for someone to consume the output.
+ command.stdin(Stdio::null()).stdout(Stdio::null()).stderr(Stdio::null());
+ }
+ }
+
+ Ok(command)
}
fn find_library_path(name: &str) -> Result<String> {
@@ -125,3 +592,7 @@
Ok(path)
}
+
+fn to_hex_string(buf: &[u8]) -> String {
+ buf.iter().map(|b| format!("{:02X}", b)).collect()
+}
diff --git a/microdroid_manager/src/metadata.rs b/microdroid_manager/src/metadata.rs
deleted file mode 100644
index 86a9e3e..0000000
--- a/microdroid_manager/src/metadata.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Payload metadata from /dev/block/by-name/payload-metadata
-
-use anyhow::Result;
-use log::info;
-use microdroid_metadata::{read_metadata, Metadata};
-use std::fs::File;
-
-const PAYLOAD_METADATA_PATH: &str = "/dev/block/by-name/payload-metadata";
-
-/// loads payload metadata from /dev/block/by-name/paylaod-metadata
-pub fn load() -> Result<Metadata> {
- info!("loading payload metadata...");
- read_metadata(File::open(PAYLOAD_METADATA_PATH)?)
-}
diff --git a/microdroid_manager/src/payload.rs b/microdroid_manager/src/payload.rs
new file mode 100644
index 0000000..48535f3
--- /dev/null
+++ b/microdroid_manager/src/payload.rs
@@ -0,0 +1,73 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Routines for handling payload
+
+mod apex;
+
+use crate::instance::ApexData;
+use crate::ioutil::wait_for_file;
+use anyhow::Result;
+use apex::verify;
+use log::info;
+use microdroid_metadata::{read_metadata, ApexPayload, Metadata};
+use std::time::Duration;
+
+const PAYLOAD_METADATA_PATH: &str = "/dev/block/by-name/payload-metadata";
+const WAIT_TIMEOUT: Duration = Duration::from_secs(10);
+
+/// Loads payload metadata from /dev/block/by-name/payload-metadata
+pub fn load_metadata() -> Result<Metadata> {
+ info!("loading payload metadata...");
+ let file = wait_for_file(PAYLOAD_METADATA_PATH, WAIT_TIMEOUT)?;
+ read_metadata(file)
+}
+
+/// Loads (name, public_key, root_digest) from payload APEXes
+pub fn get_apex_data_from_payload(metadata: &Metadata) -> Result<Vec<ApexData>> {
+ metadata
+ .apexes
+ .iter()
+ .map(|apex| {
+ let name = apex.name.clone();
+ let apex_path = format!("/dev/block/by-name/{}", apex.partition_name);
+ let result = verify(&apex_path)?;
+ Ok(ApexData {
+ name,
+ public_key: result.public_key,
+ root_digest: result.root_digest,
+ last_update_seconds: apex.last_update_seconds,
+ is_factory: apex.is_factory,
+ })
+ })
+ .collect()
+}
+
+/// Convert vector of ApexData into Metadata
+pub fn to_metadata(apex_data: &[ApexData]) -> Metadata {
+ Metadata {
+ apexes: apex_data
+ .iter()
+ .map(|data| ApexPayload {
+ name: data.name.clone(),
+ public_key: data.public_key.clone(),
+ root_digest: data.root_digest.clone(),
+ last_update_seconds: data.last_update_seconds,
+ is_factory: data.is_factory,
+ ..Default::default()
+ })
+ .collect(),
+ ..Default::default()
+ }
+}
diff --git a/microdroid_manager/src/payload/apex.rs b/microdroid_manager/src/payload/apex.rs
new file mode 100644
index 0000000..24c4f05
--- /dev/null
+++ b/microdroid_manager/src/payload/apex.rs
@@ -0,0 +1,225 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Routines for handling APEX payload
+
+use anyhow::{anyhow, ensure, Result};
+use avb_bindgen::*;
+use std::ffi::{c_void, CStr};
+use std::fs::File;
+use std::io::{Read, Seek, SeekFrom};
+use std::mem::{size_of, zeroed};
+use std::ops::Deref;
+use std::ptr::null_mut;
+use std::slice::{from_raw_parts, from_raw_parts_mut};
+use zip::ZipArchive;
+
+const APEX_PUBKEY_ENTRY: &str = "apex_pubkey";
+const APEX_PAYLOAD_ENTRY: &str = "apex_payload.img";
+
+/// Verification result holds public key and root digest of apex_payload.img
+pub struct ApexVerificationResult {
+ pub public_key: Vec<u8>,
+ pub root_digest: Vec<u8>,
+}
+
+/// Verify APEX payload by AVB verification and return public key and root digest
+pub fn verify(path: &str) -> Result<ApexVerificationResult> {
+ let apex_file = File::open(path)?;
+ let (public_key, image_offset, image_size) = get_public_key_and_image_info(&apex_file)?;
+ let root_digest = verify_vbmeta(apex_file, image_offset, image_size, &public_key)?;
+ Ok(ApexVerificationResult { public_key, root_digest })
+}
+
+fn get_public_key_and_image_info(apex_file: &File) -> Result<(Vec<u8>, u64, u64)> {
+ let mut z = ZipArchive::new(apex_file)?;
+
+ let mut public_key = Vec::new();
+ z.by_name(APEX_PUBKEY_ENTRY)?.read_to_end(&mut public_key)?;
+
+ let (image_offset, image_size) =
+ z.by_name(APEX_PAYLOAD_ENTRY).map(|f| (f.data_start(), f.size()))?;
+
+ Ok((public_key, image_offset, image_size))
+}
+
+// Manual addition of a missing enum
+#[allow(non_camel_case_types, dead_code)]
+#[repr(u8)]
+enum AvbDescriptorTag {
+ AVB_DESCRIPTOR_TAG_PROPERTY = 0,
+ AVB_DESCRIPTOR_TAG_HASHTREE,
+ AVB_DESCRIPTOR_TAG_HASH,
+ AVB_DESCRIPTOR_TAG_KERNEL_CMDLINE,
+ AVB_DESCRIPTOR_TAG_CHAIN_PARTITION,
+}
+
+const FOOTER_SIZE: usize = size_of::<AvbFooter>();
+const HASHTREE_DESCRIPTOR_SIZE: usize = size_of::<AvbHashtreeDescriptor>();
+
+/// Verify VBmeta image and return root digest
+fn verify_vbmeta<R: Read + Seek>(
+ image: R,
+ offset: u64,
+ size: u64,
+ public_key: &[u8],
+) -> Result<Vec<u8>> {
+ let vbmeta = VbMeta::from(image, offset, size)?;
+ vbmeta.verify(public_key)?;
+ for &descriptor in vbmeta.descriptors()?.iter() {
+ if let Ok(hashtree_descriptor) = HashtreeDescriptor::from(descriptor) {
+ return hashtree_descriptor.root_digest();
+ }
+ }
+ Err(anyhow!("HashtreeDescriptor is not found."))
+}
+
+struct VbMeta {
+ data: Vec<u8>,
+}
+
+impl VbMeta {
+ // Read a VbMeta data from a given image
+ fn from<R: Read + Seek>(mut image: R, offset: u64, size: u64) -> Result<VbMeta> {
+ // Get AvbFooter first
+ image.seek(SeekFrom::Start(offset + size - FOOTER_SIZE as u64))?;
+ // SAFETY: AvbFooter is a "repr(C,packed)" struct from bindgen
+ let mut footer: AvbFooter = unsafe { zeroed() };
+ // SAFETY: safe to read because of seek(-FOOTER_SIZE) above
+ unsafe {
+ let footer_slice = from_raw_parts_mut(&mut footer as *mut _ as *mut u8, FOOTER_SIZE);
+ image.read_exact(footer_slice)?;
+ ensure!(avb_footer_validate_and_byteswap(&footer, &mut footer));
+ }
+ // Get VbMeta block
+ image.seek(SeekFrom::Start(offset + footer.vbmeta_offset))?;
+ let vbmeta_size = footer.vbmeta_size as usize;
+ let mut data = vec![0u8; vbmeta_size];
+ image.read_exact(&mut data)?;
+ Ok(VbMeta { data })
+ }
+ // Verify VbMeta image. Its enclosed public key should match with a given public key.
+ fn verify(&self, outer_public_key: &[u8]) -> Result<()> {
+ // SAFETY: self.data points to a valid VBMeta data and avb_vbmeta_image_verify should work fine
+ // with it
+ let public_key = unsafe {
+ let mut pk_ptr: *const u8 = null_mut();
+ let mut pk_len: usize = 0;
+ let res = avb_vbmeta_image_verify(
+ self.data.as_ptr(),
+ self.data.len(),
+ &mut pk_ptr,
+ &mut pk_len,
+ );
+ ensure!(
+ res == AvbVBMetaVerifyResult_AVB_VBMETA_VERIFY_RESULT_OK,
+ CStr::from_ptr(avb_vbmeta_verify_result_to_string(res))
+ .to_string_lossy()
+ .into_owned()
+ );
+ from_raw_parts(pk_ptr, pk_len)
+ };
+
+ ensure!(public_key == outer_public_key, "Public key mismatch with a given one.");
+ Ok(())
+ }
+ // Return a slice of AvbDescriptor pointers
+ fn descriptors(&self) -> Result<Descriptors> {
+ let mut num: usize = 0;
+ // SAFETY: ptr will be freed by Descriptor.
+ Ok(unsafe {
+ let ptr = avb_descriptor_get_all(self.data.as_ptr(), self.data.len(), &mut num);
+ ensure!(!ptr.is_null(), "VbMeta has no descriptors.");
+ let all = from_raw_parts(ptr, num);
+ Descriptors { ptr, all }
+ })
+ }
+}
+
+struct HashtreeDescriptor {
+ ptr: *const u8,
+ inner: AvbHashtreeDescriptor,
+}
+
+impl HashtreeDescriptor {
+ fn from(descriptor: *const AvbDescriptor) -> Result<HashtreeDescriptor> {
+ // SAFETY: AvbDescriptor is a "repr(C,packed)" struct from bindgen
+ let mut desc: AvbDescriptor = unsafe { zeroed() };
+ // SAFETY: both points to valid AvbDescriptor pointers
+ unsafe {
+ ensure!(avb_descriptor_validate_and_byteswap(descriptor, &mut desc));
+ }
+ ensure!({ desc.tag } == AvbDescriptorTag::AVB_DESCRIPTOR_TAG_HASHTREE as u64);
+ // SAFETY: AvbHashtreeDescriptor is a "repr(C, packed)" struct from bindgen
+ let mut hashtree_descriptor: AvbHashtreeDescriptor = unsafe { zeroed() };
+ // SAFETY: With tag == AVB_DESCRIPTOR_TAG_HASHTREE, descriptor should point to
+ // a AvbHashtreeDescriptor.
+ unsafe {
+ ensure!(avb_hashtree_descriptor_validate_and_byteswap(
+ descriptor as *const AvbHashtreeDescriptor,
+ &mut hashtree_descriptor,
+ ));
+ }
+ Ok(Self { ptr: descriptor as *const u8, inner: hashtree_descriptor })
+ }
+ fn root_digest(&self) -> Result<Vec<u8>> {
+ // SAFETY: digest_ptr should point to a valid buffer of root_digest_len
+ let root_digest = unsafe {
+ let digest_ptr = self.ptr.offset(
+ HASHTREE_DESCRIPTOR_SIZE as isize
+ + self.inner.partition_name_len as isize
+ + self.inner.salt_len as isize,
+ );
+ from_raw_parts(digest_ptr, self.inner.root_digest_len as usize)
+ };
+ Ok(root_digest.to_owned())
+ }
+}
+
+// Wraps pointer to a heap-allocated array of AvbDescriptor pointers
+struct Descriptors<'a> {
+ ptr: *mut *const AvbDescriptor,
+ all: &'a [*const AvbDescriptor],
+}
+
+// Wrapped pointer should be freed with avb_free.
+impl Drop for Descriptors<'_> {
+ fn drop(&mut self) {
+ // SAFETY: ptr is allocated by avb_descriptor_get_all
+ unsafe { avb_free(self.ptr as *mut c_void) }
+ }
+}
+
+impl<'a> Deref for Descriptors<'a> {
+ type Target = &'a [*const AvbDescriptor];
+ fn deref(&self) -> &Self::Target {
+ &self.all
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ fn to_hex_string(buf: &[u8]) -> String {
+ buf.iter().map(|b| format!("{:02x}", b)).collect()
+ }
+ #[test]
+ fn test_open_apex() {
+ let res = verify("tests/data/test.apex").unwrap();
+ assert_eq!(
+ to_hex_string(&res.root_digest),
+ "fe11ab17da0a3a738b54bdc3a13f6139cbdf91ec32f001f8d4bbbf8938e04e39"
+ );
+ }
+}
diff --git a/microdroid_manager/tests/data/README.md b/microdroid_manager/tests/data/README.md
new file mode 100644
index 0000000..82ebec6
--- /dev/null
+++ b/microdroid_manager/tests/data/README.md
@@ -0,0 +1,3 @@
+# Test data
+
+- test.apex: copied from system/apexshim/prebuilts/x86/com.android.apex.cts.shim.v1.apex
\ No newline at end of file
diff --git a/microdroid_manager/tests/data/test.apex b/microdroid_manager/tests/data/test.apex
new file mode 100644
index 0000000..fd79365
--- /dev/null
+++ b/microdroid_manager/tests/data/test.apex
Binary files differ
diff --git a/pvmfw/pvmfw.img b/pvmfw/pvmfw.img
new file mode 100644
index 0000000..d7e64c0
--- /dev/null
+++ b/pvmfw/pvmfw.img
Binary files differ
diff --git a/tests/Android.bp b/tests/Android.bp
index 8cfefcc..2c36a62 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -19,11 +19,14 @@
kernel_version = "5.10"
kernel_stem = "kernel_prebuilts-" + kernel_version
-kernel_modules_stem = "virt_device_prebuilts_kernel_modules-" + kernel_version
cc_test {
- name: "VirtualizationTestCases",
- test_suites: ["device-tests"],
+ // ".64" suffix is to work around cts-unit-test which is demanding that all
+ // executables in CTS should have both 32 and 64 ABIs.
+ name: "VirtualizationTestCases.64",
+ test_suites: [
+ "general-tests",
+ ],
srcs: [
"common.cc",
"vsock_test.cc",
@@ -38,6 +41,7 @@
// The existence of the library in the system partition is not guaranteed.
// Let's have our own copy of it.
"android.system.virtualizationservice-cpp",
+ "PlatformProperties",
],
shared_libs: [
"libbase",
@@ -79,17 +83,18 @@
android_filesystem {
name: "virt_test_initramfs",
- arch: {
- arm64: {
- deps: [kernel_modules_stem + "-arm64"],
- },
- x86_64: {
- deps: [kernel_modules_stem + "-x86_64"],
- },
- },
deps: [
+ "microdroid_kernel_modules",
"virt_test_guest_init",
"virt_test_vsock_guest",
],
type: "cpio",
}
+
+genrule {
+ name: "test-payload-metadata",
+ tools: ["mk_payload"],
+ cmd: "$(location mk_payload) --metadata-only $(in) $(out)",
+ srcs: ["test-payload-metadata-config.json"],
+ out: ["test-payload-metadata.img"],
+}
diff --git a/tests/AndroidTest.xml b/tests/AndroidTest.xml
index 5e7faf9..68e9c1b 100644
--- a/tests/AndroidTest.xml
+++ b/tests/AndroidTest.xml
@@ -15,11 +15,16 @@
-->
<configuration description="Config for Virtualization tests">
+ <option name="test-suite-tag" value="cts" />
+ <option name="config-descriptor:metadata" key="component" value="security" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_instant_app" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_multi_abi" />
+ <option name="config-descriptor:metadata" key="parameter" value="secondary_user" />
<!-- Push test binaries to the device. -->
<target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
<option name="cleanup" value="true" />
<option name="abort-on-push-failure" value="true" />
- <option name="push-file" key="VirtualizationTestCases" value="/data/local/tmp/virt-test/VirtualizationTestCases" />
+ <option name="push-file" key="VirtualizationTestCases.64" value="/data/local/tmp/virt-test/VirtualizationTestCases.64" />
<option name="push-file" key="virt_test_kernel" value="/data/local/tmp/virt-test/kernel" />
<option name="push-file" key="virt_test_initramfs.img" value="/data/local/tmp/virt-test/initramfs" />
</target_preparer>
@@ -30,7 +35,7 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/virt-test" />
- <option name="module-name" value="VirtualizationTestCases" />
+ <option name="module-name" value="VirtualizationTestCases.64" />
<!-- test-timeout unit is ms, value = 2 minutes -->
<option name="native-test-timeout" value="120000" />
</test>
diff --git a/tests/aidl/Android.bp b/tests/aidl/Android.bp
new file mode 100644
index 0000000..893ec0b
--- /dev/null
+++ b/tests/aidl/Android.bp
@@ -0,0 +1,18 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+aidl_interface {
+ name: "com.android.microdroid.testservice",
+ srcs: ["com/android/microdroid/testservice/**/*.aidl"],
+ unstable: true,
+ backend: {
+ java: {
+ platform_apis: true,
+ gen_rpc: true,
+ },
+ cpp: {
+ enabled: true,
+ },
+ },
+}
diff --git a/tests/aidl/com/android/microdroid/testservice/ITestService.aidl b/tests/aidl/com/android/microdroid/testservice/ITestService.aidl
new file mode 100644
index 0000000..0913fe3
--- /dev/null
+++ b/tests/aidl/com/android/microdroid/testservice/ITestService.aidl
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.microdroid.testservice;
+
+/** {@hide} */
+interface ITestService {
+ const int SERVICE_PORT = 5678;
+
+ /* add two integers. */
+ int addInteger(int a, int b);
+
+ /* read a system property. */
+ String readProperty(String prop);
+
+ /* get the VM's stable secret, this is _only_ done for testing. */
+ byte[] insecurelyExposeSealingCdi();
+
+ /* get the VM's attestation secret, this is _only_ done for testing. */
+ byte[] insecurelyExposeAttestationCdi();
+
+ /* get the VM's boot certificate chain (BCC). */
+ byte[] getBcc();
+}
diff --git a/tests/benchmark/Android.bp b/tests/benchmark/Android.bp
new file mode 100644
index 0000000..cf9d16e
--- /dev/null
+++ b/tests/benchmark/Android.bp
@@ -0,0 +1,23 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+cc_binary {
+ name: "fs_benchmark",
+ static_executable: true,
+ static_libs: ["libbase"],
+ srcs: ["fs_benchmark.cpp"],
+}
+
+cc_library_shared {
+ name: "empty_payload",
+ srcs: ["empty_payload.cpp"],
+}
+
+android_app {
+ name: "MicrodroidFilesystemBenchmarkApp",
+ srcs: [],
+ jni_libs: ["empty_payload"],
+ platform_apis: true,
+ use_embedded_native_libs: true,
+}
diff --git a/tests/benchmark/AndroidManifest.xml b/tests/benchmark/AndroidManifest.xml
new file mode 100644
index 0000000..9fd7347
--- /dev/null
+++ b/tests/benchmark/AndroidManifest.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.microdroid.benchmark">
+ <application android:label="Microdroid Filesystem Benchmark" />
+</manifest>
diff --git a/tests/benchmark/assets/benchmark.pem b/tests/benchmark/assets/benchmark.pem
new file mode 100644
index 0000000..2716fde
--- /dev/null
+++ b/tests/benchmark/assets/benchmark.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCsXMc0YBu8ch0E
+ACd9xchbrAHulBYggzsQ9sEj6paC211QfRoYt2kCukAvj4uqutIBACLO44FPUfjf
+Pzh6woSXU8NdLiMR23Quyg8JChChdTvvl83aTCnKdUAO8YnmjWxdebuIeisv0QGQ
+VbZWhZP+VTXWBgbBQ426Klkx/GZXBGT5TpKBf4/Y9x3iOaYKSK0F5uSbl5BSD9rG
+iizWQAgBe2TfyzYQFOdjjiu6/sJ92d/Y5bVWo4efhWg8ZPyhi6oLyfnm/hbn5t3H
+IIYptmdoqFy2xgZf++EWoPwfvaG3YB6mmRwt/bvuGyab43duzSCjR6Sj93p7Y3Zb
+l4vdIG9TAgMBAAECggEADO/dx9Ga41cRVHaKgEczFaZgcr7Mtl4DNe+/aWm3KFU8
+uMjbB5XknN7L6IX2rrbdAlQ3SJ4M255EdsUxAQ3Ok+kmlbzbxwBYxRZHcJ8/xk6d
+VAtp2QO0c0y/pR9+AT8GLjHv4HuC+FDQtDuqtu3JwOI2az0Cjsj6P0nkbfsK12cO
+eKQnCH8dYSvmdPWF2GimBgJVhsfoeY9gQ44aR9sMSSwWMO7M58CkRuur9QvKYB/C
+fLjcA7dqodmLNMbiLAe/RWfg+WfdR9DUgbK3zB2h+2+qFyFCfMFt03I+DkVzg/ej
+ICNmgv4D9eRJaKdIXaCKA8FqHqQN+/a6cmDUi8qQ6QKBgQDbnrkxJAoLJ6gPBFOu
+Eml/XVczll8F4EEyQz0uPBgADkE5CV6Zh4gaBMj3b6iuUq7sQRldeDb3C/g5zcUZ
+U940gmzlJv4FPf0itJ46JNkIrCSuf0/NhDb2qIvrb/j+VTzd350YgMIG34B9tLxu
+W+eHuDTDSMsS0YZHAVZzGmhFRQKBgQDI6gisOKgpp4KZnvqxJCJ/54PMO6Kks7Oa
+4ZVyc8iTn1B6k+njOy98xzk29pI3+o1v822WImwGs0njAUcAPm7jPEer657rva8C
+ZVmSzme/YHfxhOI7QGzGyXdmh+3Da4ywAUwgfPY7b+lv+I0J9pcijpIh1ayApKy2
+I32TIjZvtwKBgQDGzLrenLzqtA8Q6N3GqKwOurOA4xFJBGJ/2RW8kHE5O64Wr0CO
+wXyV8NbqBI0wn2/wNE19qqA2qQMdcAKGlsCBz747ADzZCe/mRpEkGM7NZuYdfukC
+JDiMtq1RhZ5iu03Jme1ejM8V4aMyJzSawV6oIDrCu1X3xupBxBg5QSI58QKBgQCx
+/Ts/r1WyyTZW99NpWPTDUQuew/obZSOpA03NPiukNBAs95rNdqJkLW5PdfMlam8g
+jYw45DfFW9IKLBiFa8n6v21TLgL1H27KdZT8DKU2krTPnwR4r2NuXA7OI3+Mj1vs
+lMmnQm01TLiGPLBd8joEID/vf4c51Ck5lolp7nZBUwKBgQCmS5R2fsH0XYCMvfWR
+hHUre/zxNMj6+FrxiglecdJLPAAIEUmP2No/KRnLezi36TdL9aXGLBhTt9KQeQnv
+eoKIUFkYr6kTP/mXG9LM7yqE+ic37M4MZ2qL7DE8MSASy/aBKruueyLEUSWvjGxd
+aBW8JQ/zbcsdZKwV1as6St5kyQ==
+-----END PRIVATE KEY-----
diff --git a/tests/benchmark/assets/benchmark.pk8 b/tests/benchmark/assets/benchmark.pk8
new file mode 100644
index 0000000..a78fa9b
--- /dev/null
+++ b/tests/benchmark/assets/benchmark.pk8
Binary files differ
diff --git a/tests/benchmark/assets/benchmark.x509.der b/tests/benchmark/assets/benchmark.x509.der
new file mode 100644
index 0000000..d137bc0
--- /dev/null
+++ b/tests/benchmark/assets/benchmark.x509.der
Binary files differ
diff --git a/tests/benchmark/assets/benchmark.x509.pem b/tests/benchmark/assets/benchmark.x509.pem
new file mode 100644
index 0000000..7bc794b
--- /dev/null
+++ b/tests/benchmark/assets/benchmark.x509.pem
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIIECzCCAvOgAwIBAgIUXL3rcLOhlqZ9IDu04sF+FGo3OtIwDQYJKoZIhvcNAQEL
+BQAwgZQxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQH
+DA1Nb3VudGFpbiBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRy
+b2lkMRAwDgYDVQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFu
+ZHJvaWQuY29tMB4XDTIxMTAyNzEzNDE1NloXDTQ5MDMxNDEzNDE1NlowgZQxCzAJ
+BgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFp
+biBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRAwDgYD
+VQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFuZHJvaWQuY29t
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArFzHNGAbvHIdBAAnfcXI
+W6wB7pQWIIM7EPbBI+qWgttdUH0aGLdpArpAL4+LqrrSAQAizuOBT1H43z84esKE
+l1PDXS4jEdt0LsoPCQoQoXU775fN2kwpynVADvGJ5o1sXXm7iHorL9EBkFW2VoWT
+/lU11gYGwUONuipZMfxmVwRk+U6SgX+P2Pcd4jmmCkitBebkm5eQUg/axoos1kAI
+AXtk38s2EBTnY44ruv7Cfdnf2OW1VqOHn4VoPGT8oYuqC8n55v4W5+bdxyCGKbZn
+aKhctsYGX/vhFqD8H72ht2AeppkcLf277hsmm+N3bs0go0eko/d6e2N2W5eL3SBv
+UwIDAQABo1MwUTAdBgNVHQ4EFgQU8eS6/fiyZMqPN1erLU8POJHci4swHwYDVR0j
+BBgwFoAU8eS6/fiyZMqPN1erLU8POJHci4swDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAQEAagHQT+tZ5RE2V9U/3gXdqyQzpIjIAWBdA1HJ0obi+mqr
+n5BtftKHK2li/f6jp64oCxwBOtZZEWj8J4m53EWunG9oktjfiCq1wKASdfrhSN6J
+hz+YSBURsrrDOVzVCcKgzwlEgYYMsAt+NnGGp9UlSaJMpQSghrDNkKmDLB1zfkN1
+sRG71UbqqxSun/3k0HcwWIRy6WTDXoPeyYWuCaksdzqPHMvn0bbgf1Jw6jI5UNXG
+3ZSteqhLseS6jhlYOmfLaINHpBfdZXdzqsEjlg6Qt2pCNaRfVp2+fIfNjsWhrfOJ
+8uoz3I/u5Nd3S2ET/jYqpqsB3g9ngjbilclKYjL1bg==
+-----END CERTIFICATE-----
diff --git a/tests/benchmark/assets/vm_config.json b/tests/benchmark/assets/vm_config.json
new file mode 100644
index 0000000..d431877
--- /dev/null
+++ b/tests/benchmark/assets/vm_config.json
@@ -0,0 +1,11 @@
+{
+ "os": {
+ "name": "microdroid"
+ },
+ "task": {
+ "type": "microdroid_launcher",
+ "command": "empty_payload.so",
+ "args": []
+ }
+}
+
diff --git a/tests/benchmark/benchmark_example.sh b/tests/benchmark/benchmark_example.sh
new file mode 100755
index 0000000..7ba0c6d
--- /dev/null
+++ b/tests/benchmark/benchmark_example.sh
@@ -0,0 +1,60 @@
+# This script runs a 256 MB file benchmark, both on host and on authfs.
+# Usage: after connecting the device with adb, run:
+# $ packages/modules/Virtualization/tests/benchmark/benchmark_example.sh <target> (e.g. aosp_oriole_pkvm-userdebug)
+
+set -e
+
+# Prerequisite: we need root to flush disk cache.
+adb root
+
+# 1. Build needed artifacts, and install it to device
+source build/make/rbesetup.sh
+lunch $1
+m fs_benchmark MicrodroidFilesystemBenchmarkApp fsverity fsverity_metadata_generator
+adb push $OUT/system/bin/fs_benchmark /data/local/tmp
+adb install $OUT/system/app/MicrodroidFilesystemBenchmarkApp/MicrodroidFilesystemBenchmarkApp.apk
+
+# 2. Generate testcases
+# /data/local/tmp/testcase: 256 MB, signed by fsverity.
+# /data/local/tmp/testcase2: empty file, used for authfs write test.
+adb shell 'rm -rf /data/local/tmp/virt /data/local/tmp/testcase*'
+adb shell 'mkdir -p /data/local/tmp/virt'
+dd if=/dev/zero of=/tmp/testcase bs=1048576 count=256
+fsverity_metadata_generator --fsverity-path $(which fsverity) --signature none --hash-alg sha256 --out /tmp/testcase.fsv_meta /tmp/testcase
+adb shell 'dd if=/dev/zero of=/data/local/tmp/testcase bs=1048576 count=256'
+adb push /tmp/testcase.fsv_meta /data/local/tmp
+
+# 3. Run fd_server from host
+adb shell 'exec 3</data/local/tmp/testcase 4</data/local/tmp/testcase.fsv_meta 6</data/local/tmp/testcase 7<>/data/local/tmp/testcase2 /apex/com.android.virt/bin/fd_server --ro-fds 3:4 --ro-fds 6 --rw-fds 7' &
+
+# 4. Run VM and get the CID
+result=$(adb shell "/apex/com.android.virt/bin/vm run-app --debug full --daemonize --log /data/local/tmp/virt/log.txt $(adb shell pm path com.android.microdroid.benchmark | cut -d':' -f2) /data/local/tmp/virt/MicrodroidFilesystemBenchmarkApp.apk.idsig /data/local/tmp/virt/instance.img assets/vm_config.json")
+cid=$(echo $result | grep -P "with CID \d+" --only-matching --color=none | cut -d' ' -f3)
+echo "CID IS $cid"
+
+# 5. Run host tests
+echo "Running host read/write test..."
+adb shell 'dd if=/dev/zero of=/data/local/tmp/testcase_host bs=1048576 count=256'
+adb shell '/data/local/tmp/fs_benchmark /data/local/tmp/testcase_host 268435456 both 5'
+
+# 6. Connect to the VM
+# We are cheating here. The VM is expected to finish booting, while the host tests are running.
+adb forward tcp:8000 vsock:$cid:5555
+adb connect localhost:8000
+adb -s localhost:8000 root
+sleep 10
+
+# 7. Install artifacts and run authfs
+adb -s localhost:8000 push $OUT/system/bin/fs_benchmark /data/local/tmp
+adb -s localhost:8000 shell "mkdir -p /data/local/tmp/authfs"
+adb -s localhost:8000 shell "/system/bin/authfs /data/local/tmp/authfs --cid 2 --remote-ro-file 3:sha256-$(fsverity digest /tmp/testcase --hash-alg sha256 --compact) --remote-ro-file-unverified 6 --remote-new-rw-file 7" &
+
+# 8. Run guest tests
+echo "Running guest block device read test..."
+adb -s localhost:8000 shell "/data/local/tmp/fs_benchmark /dev/block/vda $(adb -s localhost:8000 shell blockdev --getsize64 /dev/block/vda) read 5"
+echo "Running guest authfs read test..."
+adb -s localhost:8000 shell "/data/local/tmp/fs_benchmark /data/local/tmp/authfs/3 268435456 read 5"
+echo "Running guest authfs unverified read test..."
+adb -s localhost:8000 shell "/data/local/tmp/fs_benchmark /data/local/tmp/authfs/6 268435456 read 5"
+echo "Running guest authfs write test..."
+adb -s localhost:8000 shell "/data/local/tmp/fs_benchmark /data/local/tmp/authfs/7 268435456 write 5"
diff --git a/compos/aidl/com/android/compos/Metadata.aidl b/tests/benchmark/empty_payload.cpp
similarity index 70%
copy from compos/aidl/com/android/compos/Metadata.aidl
copy to tests/benchmark/empty_payload.cpp
index a15214d..afcd653 100644
--- a/compos/aidl/com/android/compos/Metadata.aidl
+++ b/tests/benchmark/empty_payload.cpp
@@ -14,13 +14,7 @@
* limitations under the License.
*/
-package com.android.compos;
-
-import com.android.compos.InputFdAnnotation;
-import com.android.compos.OutputFdAnnotation;
-
-/** {@hide} */
-parcelable Metadata {
- InputFdAnnotation[] input_fd_annotations;
- OutputFdAnnotation[] output_fd_annotations;
+extern "C" int android_native_main([[maybe_unused]] int argc, [[maybe_unused]] char* argv[]) {
+ // do nothing
+ return 0;
}
diff --git a/tests/benchmark/fs_benchmark.cpp b/tests/benchmark/fs_benchmark.cpp
new file mode 100644
index 0000000..220e004
--- /dev/null
+++ b/tests/benchmark/fs_benchmark.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/result.h>
+#include <android-base/unique_fd.h>
+#include <linux/vm_sockets.h>
+#include <strings.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <cerrno>
+#include <cinttypes>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <random>
+#include <string>
+#include <vector>
+
+using android::base::ErrnoError;
+using android::base::Error;
+using android::base::Result;
+using android::base::unique_fd;
+
+namespace {
+
+constexpr int kBlockSize = 4096;
+
+[[noreturn]] void PrintUsage(const char* exe_name) {
+ std::printf("Usage: %s path size (read|write|both) [rounds]\n", exe_name);
+ std::exit(EXIT_FAILURE);
+}
+
+void DropCache() {
+ system("echo 1 > /proc/sys/vm/drop_caches");
+}
+
+struct BenchmarkResult {
+ struct timespec elapsed;
+ std::uint64_t size;
+};
+
+enum class BenchmarkOption {
+ READ = 0,
+ WRITE = 1,
+ RANDREAD = 2,
+ RANDWRITE = 3,
+};
+
+Result<BenchmarkResult> runTest(const char* path, BenchmarkOption option, std::uint64_t size) {
+ bool is_read = (option == BenchmarkOption::READ || option == BenchmarkOption::RANDREAD);
+ bool is_rand = (option == BenchmarkOption::RANDREAD || option == BenchmarkOption::RANDWRITE);
+
+ unique_fd fd(open(path, is_read ? O_RDONLY : O_WRONLY | O_CREAT, 0644));
+ if (fd.get() == -1) {
+ return ErrnoError() << "opening " << path << " failed";
+ }
+
+ uint64_t block_count = (size + kBlockSize - 1) / kBlockSize;
+ std::vector<uint64_t> offsets;
+ if (is_rand) {
+ std::mt19937 rd{std::random_device{}()};
+ offsets.reserve(block_count);
+ for (uint64_t i = 0; i < block_count; i++) offsets.push_back(i * kBlockSize);
+ std::shuffle(offsets.begin(), offsets.end(), rd);
+ }
+
+ uint64_t total_processed = 0;
+ char buf[kBlockSize] = {};
+
+ struct timespec start;
+ if (clock_gettime(CLOCK_REALTIME, &start) < 0) {
+ return ErrnoError() << "failed to get start time";
+ }
+
+ for (uint64_t i = 0; i < block_count; i++) {
+ if (!offsets.empty()) {
+ if (lseek(fd.get(), offsets[i], SEEK_SET) == -1) {
+ return ErrnoError() << "failed to lseek";
+ }
+ }
+
+ auto ret = is_read ? read(fd.get(), buf, kBlockSize) : write(fd.get(), buf, kBlockSize);
+ if (ret == 0) {
+ return Error() << "unexpected end of file";
+ } else if (ret == -1) {
+ return ErrnoError() << "file io failed";
+ }
+ total_processed += ret;
+ }
+
+ struct timespec stop;
+ if (clock_gettime(CLOCK_REALTIME, &stop) < 0) {
+ return ErrnoError() << "failed to get finish time";
+ }
+
+ struct timespec elapsed;
+ if ((stop.tv_nsec - start.tv_nsec) < 0) {
+ elapsed.tv_sec = stop.tv_sec - start.tv_sec - 1;
+ elapsed.tv_nsec = stop.tv_nsec - start.tv_nsec + 1000000000;
+ } else {
+ elapsed.tv_sec = stop.tv_sec - start.tv_sec;
+ elapsed.tv_nsec = stop.tv_nsec - start.tv_nsec;
+ }
+
+ return BenchmarkResult{elapsed, total_processed};
+}
+
+} // namespace
+
+int main(int argc, char* argv[]) {
+ // without this, stdout isn't immediately flushed when running via "adb shell"
+ std::setvbuf(stdout, nullptr, _IONBF, 0);
+ std::setvbuf(stderr, nullptr, _IONBF, 0);
+
+ if (argc < 4 || argc > 5) {
+ PrintUsage(argv[0]);
+ }
+
+ const char* path = argv[1];
+
+ std::uint64_t size = std::strtoull(argv[2], nullptr, 0);
+ if (size == 0 || size == UINT64_MAX) {
+ std::fprintf(stderr, "invalid size %s\n", argv[2]);
+ PrintUsage(argv[0]);
+ }
+
+ std::vector<std::pair<BenchmarkOption, std::string>> benchmarkList;
+ if (strcmp(argv[3], "read") != 0) {
+ benchmarkList.emplace_back(BenchmarkOption::WRITE, "write");
+ benchmarkList.emplace_back(BenchmarkOption::RANDWRITE, "randwrite");
+ }
+ if (strcmp(argv[3], "write") != 0) {
+ benchmarkList.emplace_back(BenchmarkOption::READ, "read");
+ benchmarkList.emplace_back(BenchmarkOption::RANDREAD, "randread");
+ }
+
+ std::shuffle(benchmarkList.begin(), benchmarkList.end(), std::mt19937{std::random_device{}()});
+
+ int rounds = 1;
+ if (argc == 5) {
+ rounds = std::atoi(argv[4]);
+ if (rounds <= 0) {
+ std::fprintf(stderr, "invalid round %s\n", argv[4]);
+ PrintUsage(argv[0]);
+ }
+ }
+
+ for (auto [option, name] : benchmarkList) {
+ std::printf("%s test:\n", name.c_str());
+
+ for (int i = 0; i < rounds; i++) {
+ DropCache();
+ auto res = runTest(path, option, size);
+ if (!res.ok()) {
+ std::fprintf(stderr, "Error while benchmarking: %s\n",
+ res.error().message().c_str());
+ return EXIT_FAILURE;
+ }
+
+ double elapsed_time = res->elapsed.tv_sec + res->elapsed.tv_nsec / 1e9;
+ std::printf("total %" PRIu64 " bytes, took %.3g seconds ", res->size, elapsed_time);
+
+ double speed = res->size / elapsed_time;
+ const char* unit = "bytes";
+ if (speed >= 1000) {
+ speed /= 1024;
+ unit = "KB";
+ }
+ if (speed >= 1000) {
+ speed /= 1024;
+ unit = "MB";
+ }
+ if (speed >= 1000) {
+ speed /= 1024;
+ unit = "GB";
+ }
+ std::printf("(%.3g %s/s)\n", speed, unit);
+ }
+ std::printf("\n");
+ }
+}
diff --git a/tests/common.cc b/tests/common.cc
index a9f0807..5d32351 100644
--- a/tests/common.cc
+++ b/tests/common.cc
@@ -14,11 +14,38 @@
* limitations under the License.
*/
+#include <android/sysprop/HypervisorProperties.sysprop.h>
+
#include "virt/VirtualizationTest.h"
+using android::sysprop::HypervisorProperties::hypervisor_protected_vm_supported;
+using android::sysprop::HypervisorProperties::hypervisor_vm_supported;
+
+namespace {
+
+bool isVmSupported() {
+ bool has_capability = hypervisor_vm_supported().value_or(false) ||
+ hypervisor_protected_vm_supported().value_or(false);
+ if (!has_capability) {
+ return false;
+ }
+ const std::array<const char *, 2> needed_files = {
+ "/apex/com.android.virt/bin/crosvm",
+ "/apex/com.android.virt/bin/virtualizationservice",
+ };
+ return std::all_of(needed_files.begin(), needed_files.end(),
+ [](const char *file) { return access(file, F_OK) == 0; });
+}
+
+} // namespace
+
namespace virt {
void VirtualizationTest::SetUp() {
+ if (!isVmSupported()) {
+ GTEST_SKIP() << "Device doesn't support KVM.";
+ }
+
mVirtualizationService = waitForService<IVirtualizationService>(
String16("android.system.virtualizationservice"));
ASSERT_NE(mVirtualizationService, nullptr);
diff --git a/tests/hostside/Android.bp b/tests/hostside/Android.bp
index 968c991..dfc2f2b 100644
--- a/tests/hostside/Android.bp
+++ b/tests/hostside/Android.bp
@@ -5,12 +5,48 @@
java_test_host {
name: "MicrodroidHostTestCases",
srcs: ["java/**/*.java"],
- test_suites: ["device-tests"],
+ test_suites: [
+ "cts",
+ "general-tests",
+ ],
libs: [
"tradefed",
],
static_libs: [
"VirtualizationTestHelper",
],
- data: [":MicrodroidTestApp.signed"],
+ per_testcase_directory: true,
+ data: [
+ ":MicrodroidTestApp",
+ ":microdroid_general_sepolicy.conf",
+ ":test.com.android.virt.pem",
+ ":test2.com.android.virt.pem",
+ ":test-payload-metadata",
+ ":com.android.adbd{.apex}",
+ ":com.android.os.statsd{.apex}",
+ ],
+ data_native_bins: [
+ "sepolicy-analyze",
+ // For re-sign test
+ "avbtool",
+ "img2simg",
+ "lpmake",
+ "lpunpack",
+ "sign_virt_apex",
+ "simg2img",
+ ],
+ // java_test_host doesn't have data_native_libs but jni_libs can be used to put
+ // native modules under ./lib directory.
+ // This works because host tools have rpath (../lib and ./lib).
+ jni_libs: [
+ "libbase",
+ "libc++",
+ "libcrypto_utils",
+ "libcrypto",
+ "libext4_utils",
+ "liblog",
+ "liblp",
+ "libsparse",
+ "libz",
+ ],
}
diff --git a/tests/hostside/AndroidTest.xml b/tests/hostside/AndroidTest.xml
index e8aced6..79428ce 100644
--- a/tests/hostside/AndroidTest.xml
+++ b/tests/hostside/AndroidTest.xml
@@ -14,6 +14,11 @@
limitations under the License.
-->
<configuration description="Tests for microdroid">
+ <option name="test-suite-tag" value="cts" />
+ <option name="config-descriptor:metadata" key="component" value="security" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_instant_app" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_multi_abi" />
+ <option name="config-descriptor:metadata" key="parameter" value="secondary_user" />
<test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
<option name="jar" value="MicrodroidHostTestCases.jar" />
</test>
diff --git a/tests/hostside/helper/Android.bp b/tests/hostside/helper/Android.bp
index 48dbcac..4ca0bf0 100644
--- a/tests/hostside/helper/Android.bp
+++ b/tests/hostside/helper/Android.bp
@@ -2,12 +2,9 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-java_test_helper_library {
+java_library_host {
name: "VirtualizationTestHelper",
- host_supported: true,
- device_supported: false,
srcs: ["java/**/*.java"],
- test_suites: ["device-tests"],
libs: [
"tradefed",
"compatibility-tradefed",
diff --git a/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java b/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
index a9e5040..40be248 100644
--- a/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
+++ b/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
@@ -16,16 +16,22 @@
package android.virt.test;
+import static com.android.tradefed.testtype.DeviceJUnit4ClassRunner.TestLogData;
+
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
import com.android.compatibility.common.tradefed.build.CompatibilityBuildHelper;
import com.android.tradefed.build.IBuildInfo;
import com.android.tradefed.device.DeviceNotAvailableException;
import com.android.tradefed.device.ITestDevice;
+import com.android.tradefed.device.TestDevice;
import com.android.tradefed.log.LogUtil.CLog;
+import com.android.tradefed.result.FileInputStreamSource;
+import com.android.tradefed.result.LogDataType;
import com.android.tradefed.testtype.junit4.BaseHostJUnit4Test;
import com.android.tradefed.util.CommandResult;
import com.android.tradefed.util.CommandStatus;
@@ -33,7 +39,9 @@
import java.io.File;
import java.io.FileNotFoundException;
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.regex.Matcher;
@@ -41,14 +49,15 @@
public abstract class VirtualizationTestCaseBase extends BaseHostJUnit4Test {
protected static final String TEST_ROOT = "/data/local/tmp/virt/";
- private static final String VIRT_APEX = "/apex/com.android.virt/";
+ protected static final String VIRT_APEX = "/apex/com.android.virt/";
+ protected static final String LOG_PATH = TEST_ROOT + "log.txt";
private static final int TEST_VM_ADB_PORT = 8000;
private static final String MICRODROID_SERIAL = "localhost:" + TEST_VM_ADB_PORT;
private static final String INSTANCE_IMG = "instance.img";
- // This is really slow on GCE (2m 40s) but fast on localhost or actual Android phones (< 10s)
- // Set the maximum timeout value big enough.
- private static final long MICRODROID_BOOT_TIMEOUT_MINUTES = 5;
+ // This is really slow on GCE (2m 40s) but fast on localhost or actual Android phones (< 10s).
+ // Then there is time to run the actual task. Set the maximum timeout value big enough.
+ private static final long MICRODROID_MAX_LIFETIME_MINUTES = 20;
private static final long MICRODROID_ADB_CONNECT_TIMEOUT_MINUTES = 5;
@@ -61,6 +70,9 @@
// disconnect from microdroid
tryRunOnHost("adb", "disconnect", MICRODROID_SERIAL);
+
+ // remove any leftover files under test root
+ android.tryRun("rm", "-rf", TEST_ROOT + "*");
}
public static void cleanUpVirtualizationTestSetup(ITestDevice androidDevice)
@@ -70,29 +82,42 @@
// disconnect from microdroid
tryRunOnHost("adb", "disconnect", MICRODROID_SERIAL);
- // Make sure we're connected to the host adb again (b/194219111)
- for (int retry = 0; retry < 3; ++retry) {
+ reconnectHostAdb(androidDevice);
+
+ // kill stale VMs and directories
+ android.tryRun("killall", "crosvm");
+ android.tryRun("stop", "virtualizationservice");
+ android.tryRun("rm", "-rf", "/data/misc/virtualizationservice/*");
+ }
+
+ public static void reconnectHostAdb(ITestDevice androidDevice)
+ throws DeviceNotAvailableException {
+ CommandRunner android = new CommandRunner(androidDevice);
+
+ // Make sure we're connected to the host adb; this connection seems to get dropped when a VM
+ // exits.
+ for (int retry = 0; retry < 10; ++retry) {
if (android.tryRun("true") != null) {
break;
}
androidDevice.waitForDeviceOnline(1000);
}
-
- // kill stale VMs and directories
- android.tryRun("killall", "crosvm");
- android.tryRun("rm", "-rf", "/data/misc/virtualizationservice/*");
- android.tryRun("stop", "virtualizationservice");
}
- public static void testIfDeviceIsCapable(ITestDevice androidDevice)
- throws DeviceNotAvailableException {
- CommandRunner android = new CommandRunner(androidDevice);
+ public static void testIfDeviceIsCapable(ITestDevice androidDevice) throws Exception {
+ assumeTrue("Need an actual TestDevice", androidDevice instanceof TestDevice);
+ TestDevice testDevice = (TestDevice) androidDevice;
+ assumeTrue("Requires VM support", testDevice.supportsMicrodroid());
+ }
- // Checks the preconditions to run microdroid. If the condition is not satisfied
- // don't run the test (instead of failing)
- android.assumeSuccess("ls /dev/kvm");
- android.assumeSuccess("ls /dev/vhost-vsock");
- android.assumeSuccess("ls /apex/com.android.virt");
+ public static void archiveLogThenDelete(TestLogData logs, ITestDevice device, String remotePath,
+ String localName) throws DeviceNotAvailableException {
+ File logFile = device.pullFile(remotePath);
+ if (logFile != null) {
+ logs.addTestLog(localName, LogDataType.TEXT, new FileInputStreamSource(logFile));
+ // Delete to avoid confusing logs from a previous run, just in case.
+ device.deleteFile(remotePath);
+ }
}
// Run an arbitrary command in the host side and returns the result
@@ -136,9 +161,32 @@
}
public static CommandResult runOnMicrodroidForResult(String... cmd) {
- final long timeout = 30000; // 30 sec. Microdroid is extremely slow on GCE-on-CF.
+ final long timeoutMs = 30000; // 30 sec. Microdroid is extremely slow on GCE-on-CF.
return RunUtil.getDefault()
- .runTimedCmd(timeout, "adb", "-s", MICRODROID_SERIAL, "shell", join(cmd));
+ .runTimedCmd(timeoutMs, "adb", "-s", MICRODROID_SERIAL, "shell", join(cmd));
+ }
+
+ public static void pullMicrodroidFile(String path, File target) {
+ final long timeoutMs = 30000; // 30 sec. Microdroid is extremely slow on GCE-on-CF.
+ CommandResult result =
+ RunUtil.getDefault()
+ .runTimedCmd(
+ timeoutMs,
+ "adb",
+ "-s",
+ MICRODROID_SERIAL,
+ "pull",
+ path,
+ target.getPath());
+ if (result.getStatus() != CommandStatus.SUCCESS) {
+ fail("pulling " + path + " has failed: " + result);
+ }
+ }
+
+ // Asserts the command will fail on Microdroid.
+ public static void assertFailedOnMicrodroid(String... cmd) {
+ CommandResult result = runOnMicrodroidForResult(cmd);
+ assertThat(result.getStatus(), is(CommandStatus.FAILED));
}
private static String join(String... strs) {
@@ -158,57 +206,121 @@
}
}
+ public String getPathForPackage(String packageName)
+ throws DeviceNotAvailableException {
+ return getPathForPackage(getDevice(), packageName);
+ }
+
+ // Get the path to the installed apk. Note that
+ // getDevice().getAppPackageInfo(...).getCodePath() doesn't work due to the incorrect
+ // parsing of the "=" character. (b/190975227). So we use the `pm path` command directly.
+ private static String getPathForPackage(ITestDevice device, String packageName)
+ throws DeviceNotAvailableException {
+ CommandRunner android = new CommandRunner(device);
+ String pathLine = android.run("pm", "path", packageName);
+ assertTrue("package not found", pathLine.startsWith("package:"));
+ return pathLine.substring("package:".length());
+ }
+
public static String startMicrodroid(
ITestDevice androidDevice,
IBuildInfo buildInfo,
String apkName,
String packageName,
String configPath,
- boolean debug)
+ boolean debug,
+ int memoryMib,
+ Optional<Integer> numCpus,
+ Optional<String> cpuAffinity)
+ throws DeviceNotAvailableException {
+ return startMicrodroid(androidDevice, buildInfo, apkName, packageName, null, configPath,
+ debug, memoryMib, numCpus, cpuAffinity);
+ }
+
+ public static String startMicrodroid(
+ ITestDevice androidDevice,
+ IBuildInfo buildInfo,
+ String apkName,
+ String packageName,
+ String[] extraIdsigPaths,
+ String configPath,
+ boolean debug,
+ int memoryMib,
+ Optional<Integer> numCpus,
+ Optional<String> cpuAffinity)
+ throws DeviceNotAvailableException {
+ return startMicrodroid(androidDevice, buildInfo, apkName, null, packageName,
+ extraIdsigPaths, configPath, debug,
+ memoryMib, numCpus, cpuAffinity);
+ }
+
+ public static String startMicrodroid(
+ ITestDevice androidDevice,
+ IBuildInfo buildInfo,
+ String apkName,
+ String apkPath,
+ String packageName,
+ String[] extraIdsigPaths,
+ String configPath,
+ boolean debug,
+ int memoryMib,
+ Optional<Integer> numCpus,
+ Optional<String> cpuAffinity)
throws DeviceNotAvailableException {
CommandRunner android = new CommandRunner(androidDevice);
- // Install APK
- File apkFile = findTestFile(buildInfo, apkName);
- androidDevice.installPackage(apkFile, /* reinstall */ true);
+ // Install APK if necessary
+ if (apkName != null) {
+ File apkFile = findTestFile(buildInfo, apkName);
+ androidDevice.installPackage(apkFile, /* reinstall */ true);
+ }
- // Get the path to the installed apk. Note that
- // getDevice().getAppPackageInfo(...).getCodePath() doesn't work due to the incorrect
- // parsing of the "=" character. (b/190975227). So we use the `pm path` command directly.
- String apkPath = android.run("pm", "path", packageName);
- assertTrue(apkPath.startsWith("package:"));
- apkPath = apkPath.substring("package:".length());
+ if (apkPath == null) {
+ apkPath = getPathForPackage(androidDevice, packageName);
+ }
- // Push the idsig file to the device
- File idsigOnHost = findTestFile(buildInfo, apkName + ".idsig");
- final String apkIdsigPath = TEST_ROOT + apkName + ".idsig";
- androidDevice.pushFile(idsigOnHost, apkIdsigPath);
+ android.run("mkdir", "-p", TEST_ROOT);
+
+ // This file is not what we provide. It will be created by the vm tool.
+ final String outApkIdsigPath = TEST_ROOT + apkName + ".idsig";
final String instanceImg = TEST_ROOT + INSTANCE_IMG;
- final String logPath = TEST_ROOT + "log.txt";
- final String debugFlag = debug ? "--debug " : "";
+ final String logPath = LOG_PATH;
+ final String debugFlag = debug ? "--debug full" : "";
// Run the VM
- String ret =
- android.run(
- VIRT_APEX + "bin/vm",
- "run-app",
- "--daemonize",
- "--log " + logPath,
- debugFlag,
- apkPath,
- apkIdsigPath,
- instanceImg,
- configPath);
+ ArrayList<String> args = new ArrayList<>(Arrays.asList(
+ VIRT_APEX + "bin/vm",
+ "run-app",
+ "--daemonize",
+ "--log " + logPath,
+ "--mem " + memoryMib,
+ numCpus.isPresent() ? "--cpus " + numCpus.get() : "",
+ cpuAffinity.isPresent() ? "--cpu-affinity " + cpuAffinity.get() : "",
+ debugFlag,
+ apkPath,
+ outApkIdsigPath,
+ instanceImg,
+ configPath));
+ if (extraIdsigPaths != null) {
+ for (String path : extraIdsigPaths) {
+ args.add("--extra-idsig");
+ args.add(path);
+ }
+ }
+ String ret = android.run(args.toArray(new String[0]));
// Redirect log.txt to logd using logwrapper
ExecutorService executor = Executors.newFixedThreadPool(1);
executor.execute(
() -> {
try {
- // Keep redirecting sufficiently long enough
+ // Keep redirecting for the expected maximum test time. When an adb
+ // command times out, it may trigger the device recovery process, which
+ // disconnects adb, which terminates any live adb commands. See an example at
+ // b/194974010#comment25.
android.runWithTimeout(
- MICRODROID_BOOT_TIMEOUT_MINUTES * 60 * 1000,
+ MICRODROID_MAX_LIFETIME_MINUTES * 60 * 1000,
"logwrapper",
"tail",
"-f",
@@ -230,16 +342,20 @@
throws DeviceNotAvailableException {
CommandRunner android = new CommandRunner(androidDevice);
- // Close the connection before shutting the VM down. Otherwise, b/192660485.
- tryRunOnHost("adb", "disconnect", MICRODROID_SERIAL);
- final String serial = androidDevice.getSerialNumber();
- tryRunOnHost("adb", "-s", serial, "forward", "--remove", "tcp:" + TEST_VM_ADB_PORT);
-
// Shutdown the VM
android.run(VIRT_APEX + "bin/vm", "stop", cid);
+
+ // TODO(192660485): Figure out why shutting down the VM disconnects adb on cuttlefish
+ // temporarily. Without this wait, the rest of `runOnAndroid/skipIfFail` fails due to the
+ // connection loss, and results in assumption error exception for the rest of the tests.
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
}
- public static void rootMicrodroid() throws DeviceNotAvailableException {
+ public static void rootMicrodroid() {
runOnHost("adb", "-s", MICRODROID_SERIAL, "root");
// TODO(192660959): Figure out the root cause and remove the sleep. For unknown reason,
@@ -260,8 +376,7 @@
// Establish an adb connection to microdroid by letting Android forward the connection to
// microdroid. Wait until the connection is established and microdroid is booted.
- public static void adbConnectToMicrodroid(ITestDevice androidDevice, String cid)
- throws DeviceNotAvailableException {
+ public static void adbConnectToMicrodroid(ITestDevice androidDevice, String cid) {
long start = System.currentTimeMillis();
long timeoutMillis = MICRODROID_ADB_CONNECT_TIMEOUT_MINUTES * 60 * 1000;
long elapsed = 0;
diff --git a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
index c05a841..5b71eba 100644
--- a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
+++ b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
@@ -16,21 +16,339 @@
package android.virt.test;
+import static com.android.tradefed.testtype.DeviceJUnit4ClassRunner.TestLogData;
+
+import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+import com.android.tradefed.device.DeviceNotAvailableException;
+import com.android.tradefed.result.TestDescription;
+import com.android.tradefed.result.TestResult;
import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
+import com.android.tradefed.testtype.junit4.DeviceTestRunOptions;
+import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.CommandStatus;
+import com.android.tradefed.util.FileUtil;
+import com.android.tradefed.util.RunUtil;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
import org.junit.After;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.TestName;
import org.junit.runner.RunWith;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.Callable;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
@RunWith(DeviceJUnit4ClassRunner.class)
public class MicrodroidTestCase extends VirtualizationTestCaseBase {
private static final String APK_NAME = "MicrodroidTestApp.apk";
private static final String PACKAGE_NAME = "com.android.microdroid.test";
+ private static final int MIN_MEM_ARM64 = 145;
+ private static final int MIN_MEM_X86_64 = 196;
+
+ // Number of vCPUs and their affinity to host CPUs for testing purpose
+ private static final int NUM_VCPUS = 3;
+ private static final String CPU_AFFINITY = "0,1,2";
+
+ @Rule public TestLogData mTestLogs = new TestLogData();
+ @Rule public TestName mTestName = new TestName();
+
+ // TODO(b/176805428): remove this
+ private boolean isCuttlefish() throws Exception {
+ String productName = getDevice().getProperty("ro.product.name");
+ return (null != productName)
+ && (productName.startsWith("aosp_cf_x86")
+ || productName.startsWith("aosp_cf_arm")
+ || productName.startsWith("cf_x86")
+ || productName.startsWith("cf_arm"));
+ }
+
+ private int minMemorySize() throws DeviceNotAvailableException {
+ CommandRunner android = new CommandRunner(getDevice());
+ String abi = android.run("getprop", "ro.product.cpu.abi");
+ assertTrue(abi != null && !abi.isEmpty());
+ if (abi.startsWith("arm64")) {
+ return MIN_MEM_ARM64;
+ } else if (abi.startsWith("x86_64")) {
+ return MIN_MEM_X86_64;
+ }
+ fail("Unsupported ABI: " + abi);
+ return 0;
+ }
+
+ private boolean isProtectedVmSupported() throws DeviceNotAvailableException {
+ return getDevice().getBooleanProperty("ro.boot.hypervisor.protected_vm.supported",
+ false);
+ }
+
+ @Test
+ public void testCreateVmRequiresPermission() throws Exception {
+ // Revoke the MANAGE_VIRTUAL_MACHINE permission for the test app
+ CommandRunner android = new CommandRunner(getDevice());
+ android.run("pm", "revoke", PACKAGE_NAME, "android.permission.MANAGE_VIRTUAL_MACHINE");
+
+ // Run MicrodroidTests#connectToVmService test, which should fail
+ final DeviceTestRunOptions options = new DeviceTestRunOptions(PACKAGE_NAME)
+ .setTestClassName(PACKAGE_NAME + ".MicrodroidTests")
+ .setTestMethodName("connectToVmService[protectedVm=false]")
+ .setCheckResults(false);
+ assertFalse(runDeviceTests(options));
+
+ Map<TestDescription, TestResult> results = getLastDeviceRunResults().getTestResults();
+ assertThat(results.size(), is(1));
+ TestResult result = results.values().toArray(new TestResult[0])[0];
+ assertTrue("The test should fail with a permission error",
+ result.getStackTrace()
+ .contains("android.permission.MANAGE_VIRTUAL_MACHINE permission"));
+ }
+
+ private static JSONObject newPartition(String label, String path) {
+ return new JSONObject(Map.of("label", label, "path", path));
+ }
+
+ private void resignVirtApex(File virtApexDir, File signingKey, Map<String, File> keyOverrides) {
+ File signVirtApex = findTestFile("sign_virt_apex");
+
+ RunUtil runUtil = new RunUtil();
+ // Set the parent dir on the PATH (e.g. <workdir>/bin)
+ String separator = System.getProperty("path.separator");
+ String path = signVirtApex.getParentFile().getPath() + separator + System.getenv("PATH");
+ runUtil.setEnvVariable("PATH", path);
+
+ List<String> command = new ArrayList<String>();
+ command.add("sign_virt_apex");
+ for (Map.Entry<String, File> entry : keyOverrides.entrySet()) {
+ String filename = entry.getKey();
+ File overridingKey = entry.getValue();
+ command.add("--key_override " + filename + "=" + overridingKey.getPath());
+ }
+ command.add(signingKey.getPath());
+ command.add(virtApexDir.getPath());
+
+ CommandResult result = runUtil.runTimedCmd(
+ 20 * 1000,
+ "/bin/bash",
+ "-c",
+ String.join(" ", command));
+ String out = result.getStdout();
+ String err = result.getStderr();
+ assertEquals(
+ "resigning the Virt APEX failed:\n\tout: " + out + "\n\terr: " + err + "\n",
+ CommandStatus.SUCCESS, result.getStatus());
+ }
+
+ private static <T> void assertThatEventually(long timeoutMillis, Callable<T> callable,
+ org.hamcrest.Matcher<T> matcher) throws Exception {
+ long start = System.currentTimeMillis();
+ while (true) {
+ try {
+ assertThat(callable.call(), matcher);
+ return;
+ } catch (Throwable e) {
+ if (System.currentTimeMillis() - start < timeoutMillis) {
+ Thread.sleep(500);
+ } else {
+ throw e;
+ }
+ }
+ }
+ }
+
+ private String runMicrodroidWithResignedImages(File key, Map<String, File> keyOverrides,
+ boolean isProtected, boolean daemonize, String consolePath)
+ throws DeviceNotAvailableException, IOException, JSONException {
+ CommandRunner android = new CommandRunner(getDevice());
+
+ File virtApexDir = FileUtil.createTempDir("virt_apex");
+
+ // Pull the virt apex's etc/ directory (which contains images and microdroid.json)
+ File virtApexEtcDir = new File(virtApexDir, "etc");
+ // We need only etc/ directory for images
+ assertTrue(virtApexEtcDir.mkdirs());
+ assertTrue(getDevice().pullDir(VIRT_APEX + "etc", virtApexEtcDir));
+
+ resignVirtApex(virtApexDir, key, keyOverrides);
+
+ // Push back re-signed virt APEX contents and updated microdroid.json
+ getDevice().pushDir(virtApexDir, TEST_ROOT);
+
+ // Create the idsig file for the APK
+ final String apkPath = getPathForPackage(PACKAGE_NAME);
+ final String idSigPath = TEST_ROOT + "idsig";
+ android.run(VIRT_APEX + "bin/vm", "create-idsig", apkPath, idSigPath);
+
+ // Create the instance image for the VM
+ final String instanceImgPath = TEST_ROOT + "instance.img";
+ android.run(VIRT_APEX + "bin/vm", "create-partition", "--type instance",
+ instanceImgPath, Integer.toString(10 * 1024 * 1024));
+
+ // payload-metadata is prepared on host with the two APEXes and APK
+ final String payloadMetadataPath = TEST_ROOT + "payload-metadata.img";
+ getDevice().pushFile(findTestFile("test-payload-metadata.img"), payloadMetadataPath);
+
+ // push APEXes required for the VM.
+ final String statsdApexPath = TEST_ROOT + "com.android.os.statsd.apex";
+ final String adbdApexPath = TEST_ROOT + "com.android.adbd.apex";
+ getDevice().pushFile(findTestFile("com.android.os.statsd.apex"), statsdApexPath);
+ getDevice().pushFile(findTestFile("com.android.adbd.apex"), adbdApexPath);
+
+ // Since Java APP can't start a VM with a custom image, here, we start a VM using `vm run`
+ // command with a VM Raw config which is equiv. to what virtualizationservice creates with
+ // a VM App config.
+ //
+ // 1. use etc/microdroid.json as base
+ // 2. add partitions: bootconfig, vbmeta, instance image
+ // 3. add a payload image disk with
+ // - payload-metadata
+ // - apexes
+ // - test apk
+ // - its idsig
+
+ // Load etc/microdroid.json
+ File microdroidConfigFile = new File(virtApexEtcDir, "microdroid.json");
+ JSONObject config = new JSONObject(FileUtil.readStringFromFile(microdroidConfigFile));
+
+ // Replace paths so that the config uses re-signed images from TEST_ROOT
+ config.put("bootloader", config.getString("bootloader").replace(VIRT_APEX, TEST_ROOT));
+ JSONArray disks = config.getJSONArray("disks");
+ for (int diskIndex = 0; diskIndex < disks.length(); diskIndex++) {
+ JSONObject disk = disks.getJSONObject(diskIndex);
+ JSONArray partitions = disk.getJSONArray("partitions");
+ for (int partIndex = 0; partIndex < partitions.length(); partIndex++) {
+ JSONObject part = partitions.getJSONObject(partIndex);
+ part.put("path", part.getString("path").replace(VIRT_APEX, TEST_ROOT));
+ }
+ }
+
+ // Add partitions to the second disk
+ final String vbmetaPath = TEST_ROOT + "etc/fs/microdroid_vbmeta_bootconfig.img";
+ final String bootconfigPath = TEST_ROOT + "etc/microdroid_bootconfig.full_debuggable";
+ disks.getJSONObject(1).getJSONArray("partitions")
+ .put(newPartition("vbmeta", vbmetaPath))
+ .put(newPartition("bootconfig", bootconfigPath))
+ .put(newPartition("vm-instance", instanceImgPath));
+
+ // Add payload image disk with partitions:
+ // - payload-metadata
+ // - apexes: com.android.os.statsd, com.android.adbd
+ // - apk and idsig
+ disks.put(new JSONObject().put("writable", false).put("partitions", new JSONArray()
+ .put(newPartition("payload-metadata", payloadMetadataPath))
+ .put(newPartition("microdroid-apex-0", statsdApexPath))
+ .put(newPartition("microdroid-apex-1", adbdApexPath))
+ .put(newPartition("microdroid-apk", apkPath))
+ .put(newPartition("microdroid-apk-idsig", idSigPath))));
+
+ config.put("protected", isProtected);
+
+ // Write updated raw config
+ final String configPath = TEST_ROOT + "raw_config.json";
+ getDevice().pushString(config.toString(), configPath);
+
+ final String logPath = LOG_PATH;
+ final String ret = android.runWithTimeout(
+ 60 * 1000,
+ VIRT_APEX + "bin/vm run",
+ daemonize ? "--daemonize" : "",
+ (consolePath != null) ? "--console " + consolePath : "",
+ "--log " + logPath,
+ configPath);
+ Pattern pattern = Pattern.compile("with CID (\\d+)");
+ Matcher matcher = pattern.matcher(ret);
+ assertTrue(matcher.find());
+ return matcher.group(1);
+ }
+
+ @Test
+ public void testBootFailsWhenProtectedVmStartsWithImagesSignedWithDifferentKey()
+ throws Exception {
+ assumeTrue(isProtectedVmSupported());
+
+ File key = findTestFile("test.com.android.virt.pem");
+ Map<String, File> keyOverrides = Map.of();
+ boolean isProtected = true;
+ boolean daemonize = false; // VM should shut down due to boot failure.
+ String consolePath = TEST_ROOT + "console";
+ runMicrodroidWithResignedImages(key, keyOverrides, isProtected, daemonize, consolePath);
+ assertThat(getDevice().pullFileContents(consolePath),
+ containsString("pvmfw boot failed"));
+ }
+
+ @Test
+ public void testBootSucceedsWhenNonProtectedVmStartsWithImagesSignedWithDifferentKey()
+ throws Exception {
+ File key = findTestFile("test.com.android.virt.pem");
+ Map<String, File> keyOverrides = Map.of();
+ boolean isProtected = false;
+ boolean daemonize = true;
+ String consolePath = TEST_ROOT + "console";
+ String cid = runMicrodroidWithResignedImages(key, keyOverrides, isProtected, daemonize,
+ consolePath);
+ // Adb connection to the microdroid means that boot succeeded.
+ adbConnectToMicrodroid(getDevice(), cid);
+ shutdownMicrodroid(getDevice(), cid);
+ }
+
+ @Test
+ public void testBootFailsWhenBootloaderAndVbMetaAreSignedWithDifferentKeys()
+ throws Exception {
+ // Sign everything with key1 except vbmeta
+ File key = findTestFile("test.com.android.virt.pem");
+ File key2 = findTestFile("test2.com.android.virt.pem");
+ Map<String, File> keyOverrides = Map.of(
+ "microdroid_vbmeta.img", key2);
+ boolean isProtected = false; // Not interested in pvmfw
+ boolean daemonize = true; // Bootloader fails and enters prompts.
+ // To be able to stop it, it should be a daemon.
+ String consolePath = TEST_ROOT + "console";
+ String cid = runMicrodroidWithResignedImages(key, keyOverrides, isProtected, daemonize,
+ consolePath);
+ // Wait a while so that bootloader prints errors to console
+ assertThatEventually(10000, () -> getDevice().pullFileContents(consolePath),
+ containsString("Public key was rejected"));
+ shutdownMicrodroid(getDevice(), cid);
+ }
+
+ @Test
+ public void testBootSucceedsWhenBootloaderAndVbmetaHaveSameSigningKeys()
+ throws Exception {
+ // Sign everything with key1 except bootloader and vbmeta
+ File key = findTestFile("test.com.android.virt.pem");
+ File key2 = findTestFile("test2.com.android.virt.pem");
+ Map<String, File> keyOverrides = Map.of(
+ "microdroid_bootloader", key2,
+ "microdroid_vbmeta.img", key2,
+ "microdroid_vbmeta_bootconfig.img", key2);
+ boolean isProtected = false; // Not interested in pvmfw
+ boolean daemonize = true; // Bootloader should succeed.
+ // To be able to stop it, it should be a daemon.
+ String consolePath = TEST_ROOT + "console";
+ String cid = runMicrodroidWithResignedImages(key, keyOverrides, isProtected, daemonize,
+ consolePath);
+ // Adb connection to the microdroid means that boot succeeded.
+ adbConnectToMicrodroid(getDevice(), cid);
+ shutdownMicrodroid(getDevice(), cid);
+ }
+
@Test
public void testMicrodroidBoots() throws Exception {
final String configPath = "assets/vm_config.json"; // path inside the APK
@@ -41,9 +359,17 @@
APK_NAME,
PACKAGE_NAME,
configPath,
- /* debug */ false);
+ /* debug */ true,
+ minMemorySize(),
+ Optional.of(NUM_VCPUS),
+ Optional.of(CPU_AFFINITY));
adbConnectToMicrodroid(getDevice(), cid);
+        // Wait until logd-reinit starts. The service is one of the last services that are started in
+ // the microdroid boot procedure. Therefore, waiting for the service means that we wait for
+ // the boot to complete. TODO: we need a better marker eventually.
+ tryRunOnMicrodroid("watch -e \"getprop init.svc.logd-reinit | grep '^$'\"");
+
// Test writing to /data partition
runOnMicrodroid("echo MicrodroidTest > /data/local/tmp/test.txt");
assertThat(runOnMicrodroid("cat /data/local/tmp/test.txt"), is("MicrodroidTest"));
@@ -64,32 +390,39 @@
final String label = "u:object_r:system_file:s0";
assertThat(runOnMicrodroid("ls", "-Z", testLib), is(label + " " + testLib));
- // Check if the command in vm_config.json was executed by examining the side effect of the
- // command
- assertThat(runOnMicrodroid("getprop", "debug.microdroid.app.run"), is("true"));
- assertThat(runOnMicrodroid("getprop", "debug.microdroid.app.sublib.run"), is("true"));
+ // Check that no denials have happened so far
+ assertThat(runOnMicrodroid("logcat -d -e 'avc:[[:space:]]{1,2}denied'"), is(""));
- // Manually execute the library and check the output
- final String microdroidLauncher = "system/bin/microdroid_launcher";
- assertThat(
- runOnMicrodroid(microdroidLauncher, testLib, "arg1", "arg2"),
- is("Hello Microdroid " + testLib + " arg1 arg2"));
+ assertThat(runOnMicrodroid("cat /proc/cpuinfo | grep processor | wc -l"),
+ is(Integer.toString(NUM_VCPUS)));
- // Check that keystore was found by the payload
- assertThat(runOnMicrodroid("getprop", "debug.microdroid.test.keystore"), is("PASS"));
+ // Check that selinux is enabled
+ assertThat(runOnMicrodroid("getenforce"), is("Enforcing"));
- shutdownMicrodroid(getDevice(), cid);
- }
+ // TODO(b/176805428): adb is broken for nested VM
+ if (!isCuttlefish()) {
+ // Check neverallow rules on microdroid
+ File policyFile = FileUtil.createTempFile("microdroid_sepolicy", "");
+ pullMicrodroidFile("/sys/fs/selinux/policy", policyFile);
- @Test
- public void testDebugMode() throws Exception {
- final String configPath = "assets/vm_config.json"; // path inside the APK
- final boolean debug = true;
- final String cid =
- startMicrodroid(getDevice(), getBuild(), APK_NAME, PACKAGE_NAME, configPath, debug);
- adbConnectToMicrodroid(getDevice(), cid);
+ File generalPolicyConfFile = findTestFile("microdroid_general_sepolicy.conf");
+ File sepolicyAnalyzeBin = findTestFile("sepolicy-analyze");
- assertThat(runOnMicrodroid("getenforce"), is("Permissive"));
+ CommandResult result =
+ RunUtil.getDefault()
+ .runTimedCmd(
+ 10000,
+ sepolicyAnalyzeBin.getPath(),
+ policyFile.getPath(),
+ "neverallow",
+ "-w",
+ "-f",
+ generalPolicyConfFile.getPath());
+ assertEquals(
+ "neverallow check failed: " + result.getStderr().trim(),
+ result.getStatus(),
+ CommandStatus.SUCCESS);
+ }
shutdownMicrodroid(getDevice(), cid);
}
@@ -110,6 +443,9 @@
public void shutdown() throws Exception {
cleanUpVirtualizationTestSetup(getDevice());
+ archiveLogThenDelete(mTestLogs, getDevice(), LOG_PATH,
+ "vm.log-" + mTestName.getMethodName());
+
getDevice().uninstallPackage(PACKAGE_NAME);
}
}
diff --git a/tests/test-payload-metadata-config.json b/tests/test-payload-metadata-config.json
new file mode 100644
index 0000000..3c56e5f
--- /dev/null
+++ b/tests/test-payload-metadata-config.json
@@ -0,0 +1,19 @@
+{
+ "_comment": "This file is to create a payload-metadata partition for payload.img which is for MicrodroidTestApp to run with assets/vm_config.json",
+ "apexes": [
+ {
+ "name": "com.android.os.statsd",
+ "path": ""
+ },
+ {
+ "name": "com.android.adbd",
+ "path": ""
+ }
+ ],
+ "apk": {
+ "name": "microdroid-apk",
+ "path": "",
+ "idsig_path": ""
+ },
+ "payload_config_path": "/mnt/apk/assets/vm_config.json"
+}
\ No newline at end of file
diff --git a/tests/testapk/Android.bp b/tests/testapk/Android.bp
index 7332149..818c05a 100644
--- a/tests/testapk/Android.bp
+++ b/tests/testapk/Android.bp
@@ -2,15 +2,26 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-android_test_helper_app {
+android_test {
name: "MicrodroidTestApp",
- srcs: ["src/java/**/*.java"],
- libs: [
- "android.system.virtualmachine",
+ test_suites: [
+ "cts",
+ "general-tests",
],
+ srcs: ["src/java/**/*.java"],
+ static_libs: [
+ "androidx.test.runner",
+ "androidx.test.ext.junit",
+ "cbor-java",
+ "com.android.microdroid.testservice-java",
+ "truth-prebuilt",
+ ],
+ libs: ["android.system.virtualmachine"],
jni_libs: ["MicrodroidTestNativeLib"],
platform_apis: true,
use_embedded_native_libs: true,
+ // We only support 64-bit ABI, but CTS demands all APKs to be multi-ABI.
+ compile_multilib: "both",
}
// TODO(jiyong): make this a binary, not a shared library
@@ -18,31 +29,22 @@
name: "MicrodroidTestNativeLib",
srcs: ["src/native/testbinary.cpp"],
shared_libs: [
- "android.system.keystore2-V1-ndk_platform",
+ "android.security.dice-ndk",
+ "android.system.virtualmachineservice-ndk",
+ "com.android.microdroid.testservice-ndk",
"libbase",
"libbinder_ndk",
+ "libbinder_rpc_unstable",
"MicrodroidTestNativeLibSub",
],
+ static_libs: [
+ "libfsverity_digests_proto_cc",
+ "liblog",
+ "libprotobuf-cpp-lite-ndk",
+ ],
}
cc_library_shared {
name: "MicrodroidTestNativeLibSub",
srcs: ["src/native/testlib.cpp"],
}
-
-genrule {
- name: "MicrodroidTestApp.signed",
- out: [
- "MicrodroidTestApp.apk",
- "MicrodroidTestApp.apk.idsig",
- ],
- srcs: [":MicrodroidTestApp"],
- tools: ["apksigner"],
- tool_files: ["test.keystore"],
- cmd: "$(location apksigner) sign " +
- "--ks $(location test.keystore) " +
- "--ks-pass=pass:testkey --key-pass=pass:testkey " +
- "--in $(in) " +
- "--out $(genDir)/MicrodroidTestApp.apk",
- // $(genDir)/MicrodroidTestApp.apk.idsig is generated implicitly
-}
diff --git a/tests/testapk/AndroidManifest.xml b/tests/testapk/AndroidManifest.xml
index 94f49dd..bc955d2 100644
--- a/tests/testapk/AndroidManifest.xml
+++ b/tests/testapk/AndroidManifest.xml
@@ -1,5 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2021 The Android Open Source Project
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -14,13 +15,11 @@
-->
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.android.microdroid.test">
- <application android:label="Microdroid Test">
- <uses-library android:name="android.system.virtualmachine" android:required="true" />
- <activity android:name="TestActivity" android:exported="true">
- <intent-filter>
- <action android:name="android.intent.action.MAIN" />
- <category android:name="android.intent.category.LAUNCHER" />
- </intent-filter>
- </activity>
+ <uses-permission android:name="android.permission.MANAGE_VIRTUAL_MACHINE" />
+ <application>
+ <uses-library android:name="android.system.virtualmachine" android:required="false" />
</application>
+ <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+ android:targetPackage="com.android.microdroid.test"
+ android:label="Microdroid Test" />
</manifest>
diff --git a/tests/testapk/AndroidTest.xml b/tests/testapk/AndroidTest.xml
new file mode 100644
index 0000000..e8bb1aa
--- /dev/null
+++ b/tests/testapk/AndroidTest.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs sample instrumentation test.">
+ <option name="test-suite-tag" value="cts" />
+ <option name="config-descriptor:metadata" key="component" value="security" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_instant_app" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_multi_abi" />
+ <option name="config-descriptor:metadata" key="parameter" value="secondary_user" />
+ <target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
+ <option name="test-file-name" value="MicrodroidTestApp.apk" />
+ </target_preparer>
+ <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
+ <option
+ name="run-command"
+ value="pm grant com.android.microdroid.test android.permission.MANAGE_VIRTUAL_MACHINE" />
+ </target_preparer>
+ <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
+ <option name="package" value="com.android.microdroid.test" />
+ <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
+ <option name="shell-timeout" value="300000" />
+ <option name="test-timeout" value="300000" />
+ </test>
+</configuration>
diff --git a/tests/testapk/assets/vm_config_extra_apk.json b/tests/testapk/assets/vm_config_extra_apk.json
new file mode 100644
index 0000000..a5bae63
--- /dev/null
+++ b/tests/testapk/assets/vm_config_extra_apk.json
@@ -0,0 +1,18 @@
+{
+ "os": {
+ "name": "microdroid"
+ },
+ "task": {
+ "type": "microdroid_launcher",
+ "command": "MicrodroidTestNativeLib.so",
+ "args": [
+ "hello",
+ "microdroid"
+ ]
+ },
+ "extra_apks": [
+ {
+ "path": "/system/etc/security/fsverity/BuildManifest.apk"
+ }
+ ]
+}
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
new file mode 100644
index 0000000..36bea72
--- /dev/null
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -0,0 +1,583 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.microdroid.test;
+
+import static com.google.common.truth.Truth.assertThat;
+import static com.google.common.truth.TruthJUnit.assume;
+
+import static org.junit.Assume.assumeNoException;
+
+import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
+
+import android.content.Context;
+import android.os.Build;
+import android.os.ParcelFileDescriptor;
+import android.os.SystemProperties;
+import android.sysprop.HypervisorProperties;
+import android.system.virtualmachine.VirtualMachine;
+import android.system.virtualmachine.VirtualMachineCallback;
+import android.system.virtualmachine.VirtualMachineConfig;
+import android.system.virtualmachine.VirtualMachineConfig.DebugLevel;
+import android.system.virtualmachine.VirtualMachineException;
+import android.system.virtualmachine.VirtualMachineManager;
+
+import androidx.annotation.CallSuper;
+import androidx.test.core.app.ApplicationProvider;
+
+import com.android.microdroid.testservice.ITestService;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.file.Files;
+import java.util.List;
+import java.util.OptionalLong;
+import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import co.nstant.in.cbor.CborDecoder;
+import co.nstant.in.cbor.CborException;
+import co.nstant.in.cbor.model.Array;
+import co.nstant.in.cbor.model.DataItem;
+import co.nstant.in.cbor.model.MajorType;
+
+@RunWith(Parameterized.class)
+public class MicrodroidTests {
+ @Rule public Timeout globalTimeout = Timeout.seconds(300);
+
+ private static final String KERNEL_VERSION = SystemProperties.get("ro.kernel.version");
+
+ private static class Inner {
+ public boolean mProtectedVm;
+ public Context mContext;
+ public VirtualMachineManager mVmm;
+ public VirtualMachine mVm;
+
+ Inner(boolean protectedVm) {
+ mProtectedVm = protectedVm;
+ }
+
+ /** Create a new VirtualMachineConfig.Builder with the parameterized protection mode. */
+ public VirtualMachineConfig.Builder newVmConfigBuilder(String payloadConfigPath) {
+ return new VirtualMachineConfig.Builder(mContext, payloadConfigPath)
+ .protectedVm(mProtectedVm);
+ }
+ }
+
+ @Parameterized.Parameters(name = "protectedVm={0}")
+ public static Object[] protectedVmConfigs() {
+ return new Object[] { false, true };
+ }
+
+ @Parameterized.Parameter
+ public boolean mProtectedVm;
+
+ private boolean mPkvmSupported = false;
+ private Inner mInner;
+
+ @Before
+ public void setup() {
+ // In case when the virt APEX doesn't exist on the device, classes in the
+ // android.system.virtualmachine package can't be loaded. Therefore, before using the
+ // classes, check the existence of a class in the package and skip this test if not exist.
+ try {
+ Class.forName("android.system.virtualmachine.VirtualMachineManager");
+ mPkvmSupported = true;
+ } catch (ClassNotFoundException e) {
+ assumeNoException(e);
+ return;
+ }
+ if (mProtectedVm) {
+ assume()
+ .withMessage("Skip where protected VMs aren't support")
+ .that(HypervisorProperties.hypervisor_protected_vm_supported().orElse(false))
+ .isTrue();
+ } else {
+ assume()
+ .withMessage("Skip where VMs aren't support")
+ .that(HypervisorProperties.hypervisor_vm_supported().orElse(false))
+ .isTrue();
+ }
+ mInner = new Inner(mProtectedVm);
+ mInner.mContext = ApplicationProvider.getApplicationContext();
+ mInner.mVmm = VirtualMachineManager.getInstance(mInner.mContext);
+ }
+
+ @After
+ public void cleanup() throws VirtualMachineException {
+ if (!mPkvmSupported) {
+ return;
+ }
+ if (mInner == null) {
+ return;
+ }
+ if (mInner.mVm == null) {
+ return;
+ }
+ mInner.mVm.stop();
+ mInner.mVm.delete();
+ }
+
+ private abstract static class VmEventListener implements VirtualMachineCallback {
+ private ExecutorService mExecutorService = Executors.newSingleThreadExecutor();
+
+ void runToFinish(VirtualMachine vm) throws VirtualMachineException, InterruptedException {
+ vm.setCallback(mExecutorService, this);
+ vm.run();
+ mExecutorService.awaitTermination(300, TimeUnit.SECONDS);
+ }
+
+ void forceStop(VirtualMachine vm) {
+ try {
+ vm.clearCallback();
+ vm.stop();
+ mExecutorService.shutdown();
+ } catch (VirtualMachineException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void onPayloadStarted(VirtualMachine vm, ParcelFileDescriptor stream) {}
+
+ @Override
+ public void onPayloadReady(VirtualMachine vm) {}
+
+ @Override
+ public void onPayloadFinished(VirtualMachine vm, int exitCode) {}
+
+ @Override
+ public void onError(VirtualMachine vm, int errorCode, String message) {}
+
+ @Override
+ @CallSuper
+ public void onDied(VirtualMachine vm, @DeathReason int reason) {
+ mExecutorService.shutdown();
+ }
+ }
+
+ private static final int MIN_MEM_ARM64 = 145;
+ private static final int MIN_MEM_X86_64 = 196;
+
+ @Test
+ public void connectToVmService() throws VirtualMachineException, InterruptedException {
+ assume()
+ .withMessage("SKip on 5.4 kernel. b/218303240")
+ .that(KERNEL_VERSION)
+ .isNotEqualTo("5.4");
+
+ VirtualMachineConfig.Builder builder =
+ mInner.newVmConfigBuilder("assets/vm_config_extra_apk.json");
+ if (Build.SUPPORTED_ABIS.length > 0) {
+ String primaryAbi = Build.SUPPORTED_ABIS[0];
+ switch(primaryAbi) {
+ case "x86_64":
+ builder.memoryMib(MIN_MEM_X86_64);
+ break;
+ case "arm64-v8a":
+ builder.memoryMib(MIN_MEM_ARM64);
+ break;
+ }
+ }
+ VirtualMachineConfig config = builder.build();
+
+ mInner.mVm = mInner.mVmm.getOrCreate("test_vm_extra_apk", config);
+
+ class TestResults {
+ Exception mException;
+ Integer mAddInteger;
+ String mAppRunProp;
+ String mSublibRunProp;
+ String mExtraApkTestProp;
+ }
+ final CompletableFuture<Boolean> payloadStarted = new CompletableFuture<>();
+ final CompletableFuture<Boolean> payloadReady = new CompletableFuture<>();
+ final TestResults testResults = new TestResults();
+ VmEventListener listener =
+ new VmEventListener() {
+ private void testVMService(VirtualMachine vm) {
+ try {
+ ITestService testService = ITestService.Stub.asInterface(
+ vm.connectToVsockServer(ITestService.SERVICE_PORT).get());
+ testResults.mAddInteger = testService.addInteger(123, 456);
+ testResults.mAppRunProp =
+ testService.readProperty("debug.microdroid.app.run");
+ testResults.mSublibRunProp =
+ testService.readProperty("debug.microdroid.app.sublib.run");
+ testResults.mExtraApkTestProp =
+ testService.readProperty("debug.microdroid.test.extra_apk");
+ } catch (Exception e) {
+ testResults.mException = e;
+ }
+ }
+
+ @Override
+ public void onPayloadReady(VirtualMachine vm) {
+ payloadReady.complete(true);
+ testVMService(vm);
+ forceStop(vm);
+ }
+
+ @Override
+ public void onPayloadStarted(VirtualMachine vm, ParcelFileDescriptor stream) {
+ payloadStarted.complete(true);
+ }
+ };
+ listener.runToFinish(mInner.mVm);
+ assertThat(payloadStarted.getNow(false)).isTrue();
+ assertThat(payloadReady.getNow(false)).isTrue();
+ assertThat(testResults.mException).isNull();
+ assertThat(testResults.mAddInteger).isEqualTo(123 + 456);
+ assertThat(testResults.mAppRunProp).isEqualTo("true");
+ assertThat(testResults.mSublibRunProp).isEqualTo("true");
+ assertThat(testResults.mExtraApkTestProp).isEqualTo("PASS");
+ }
+
+ @Test
+ public void changingDebugLevelInvalidatesVmIdentity()
+ throws VirtualMachineException, InterruptedException, IOException {
+ assume()
+ .withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ assume()
+ .withMessage("SKip on 5.4 kernel. b/218303240")
+ .that(KERNEL_VERSION)
+ .isNotEqualTo("5.4");
+
+ VirtualMachineConfig.Builder builder = mInner.newVmConfigBuilder("assets/vm_config.json");
+ VirtualMachineConfig normalConfig = builder.debugLevel(DebugLevel.NONE).build();
+ mInner.mVm = mInner.mVmm.getOrCreate("test_vm", normalConfig);
+ VmEventListener listener =
+ new VmEventListener() {
+ @Override
+ public void onPayloadReady(VirtualMachine vm) {
+ forceStop(vm);
+ }
+ };
+ listener.runToFinish(mInner.mVm);
+
+ // Launch the same VM with different debug level. The Java API prohibits this (thankfully).
+ // For testing, we do that by creating another VM with debug level, and copy the config file
+ // from the new VM directory to the old VM directory.
+ VirtualMachineConfig debugConfig = builder.debugLevel(DebugLevel.FULL).build();
+ VirtualMachine newVm = mInner.mVmm.getOrCreate("test_debug_vm", debugConfig);
+ File vmRoot = new File(mInner.mContext.getFilesDir(), "vm");
+ File newVmConfig = new File(new File(vmRoot, "test_debug_vm"), "config.xml");
+ File oldVmConfig = new File(new File(vmRoot, "test_vm"), "config.xml");
+ Files.copy(newVmConfig.toPath(), oldVmConfig.toPath(), REPLACE_EXISTING);
+ newVm.delete();
+ mInner.mVm = mInner.mVmm.get("test_vm"); // re-load with the copied-in config file.
+ final CompletableFuture<Boolean> payloadStarted = new CompletableFuture<>();
+ listener =
+ new VmEventListener() {
+ @Override
+ public void onPayloadStarted(VirtualMachine vm, ParcelFileDescriptor stream) {
+ payloadStarted.complete(true);
+ forceStop(vm);
+ }
+ };
+ listener.runToFinish(mInner.mVm);
+ assertThat(payloadStarted.getNow(false)).isFalse();
+ }
+
+ private class VmCdis {
+ public byte[] cdiAttest;
+ public byte[] cdiSeal;
+ }
+
+ private VmCdis launchVmAndGetCdis(String instanceName)
+ throws VirtualMachineException, InterruptedException {
+ VirtualMachineConfig normalConfig = mInner.newVmConfigBuilder("assets/vm_config.json")
+ .debugLevel(DebugLevel.NONE)
+ .build();
+ mInner.mVm = mInner.mVmm.getOrCreate(instanceName, normalConfig);
+ final VmCdis vmCdis = new VmCdis();
+ final CompletableFuture<Exception> exception = new CompletableFuture<>();
+ VmEventListener listener =
+ new VmEventListener() {
+ @Override
+ public void onPayloadReady(VirtualMachine vm) {
+ try {
+ ITestService testService = ITestService.Stub.asInterface(
+ vm.connectToVsockServer(ITestService.SERVICE_PORT).get());
+ vmCdis.cdiAttest = testService.insecurelyExposeAttestationCdi();
+ vmCdis.cdiSeal = testService.insecurelyExposeSealingCdi();
+ forceStop(vm);
+ } catch (Exception e) {
+ exception.complete(e);
+ }
+ }
+ };
+ listener.runToFinish(mInner.mVm);
+ assertThat(exception.getNow(null)).isNull();
+ return vmCdis;
+ }
+
+ @Test
+ public void instancesOfSameVmHaveDifferentCdis()
+ throws VirtualMachineException, InterruptedException {
+ assume()
+ .withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ assume()
+ .withMessage("SKip on 5.4 kernel. b/218303240")
+ .that(KERNEL_VERSION)
+ .isNotEqualTo("5.4");
+
+ VmCdis vm_a_cdis = launchVmAndGetCdis("test_vm_a");
+ VmCdis vm_b_cdis = launchVmAndGetCdis("test_vm_b");
+ assertThat(vm_a_cdis.cdiAttest).isNotNull();
+ assertThat(vm_b_cdis.cdiAttest).isNotNull();
+ assertThat(vm_a_cdis.cdiAttest).isNotEqualTo(vm_b_cdis.cdiAttest);
+ assertThat(vm_a_cdis.cdiSeal).isNotNull();
+ assertThat(vm_b_cdis.cdiSeal).isNotNull();
+ assertThat(vm_a_cdis.cdiSeal).isNotEqualTo(vm_b_cdis.cdiSeal);
+ assertThat(vm_a_cdis.cdiAttest).isNotEqualTo(vm_b_cdis.cdiSeal);
+ }
+
+ @Test
+ public void sameInstanceKeepsSameCdis()
+ throws VirtualMachineException, InterruptedException {
+ assume()
+ .withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ assume()
+ .withMessage("SKip on 5.4 kernel. b/218303240")
+ .that(KERNEL_VERSION)
+ .isNotEqualTo("5.4");
+
+ VmCdis first_boot_cdis = launchVmAndGetCdis("test_vm");
+ VmCdis second_boot_cdis = launchVmAndGetCdis("test_vm");
+ // The attestation CDI isn't specified to be stable, though it might be
+ assertThat(first_boot_cdis.cdiSeal).isNotNull();
+ assertThat(second_boot_cdis.cdiSeal).isNotNull();
+ assertThat(first_boot_cdis.cdiSeal).isEqualTo(second_boot_cdis.cdiSeal);
+ }
+
+ @Test
+ public void bccIsSuperficiallyWellFormed()
+ throws VirtualMachineException, InterruptedException, CborException {
+ assume()
+ .withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ assume()
+ .withMessage("SKip on 5.4 kernel. b/218303240")
+ .that(KERNEL_VERSION)
+ .isNotEqualTo("5.4");
+
+ VirtualMachineConfig normalConfig = mInner.newVmConfigBuilder("assets/vm_config.json")
+ .debugLevel(DebugLevel.NONE)
+ .build();
+ mInner.mVm = mInner.mVmm.getOrCreate("bcc_vm", normalConfig);
+ final VmCdis vmCdis = new VmCdis();
+ final CompletableFuture<byte[]> bcc = new CompletableFuture<>();
+ final CompletableFuture<Exception> exception = new CompletableFuture<>();
+ VmEventListener listener =
+ new VmEventListener() {
+ @Override
+ public void onPayloadReady(VirtualMachine vm) {
+ try {
+ ITestService testService = ITestService.Stub.asInterface(
+ vm.connectToVsockServer(ITestService.SERVICE_PORT).get());
+ bcc.complete(testService.getBcc());
+ forceStop(vm);
+ } catch (Exception e) {
+ exception.complete(e);
+ }
+ }
+ };
+ listener.runToFinish(mInner.mVm);
+ byte[] bccBytes = bcc.getNow(null);
+ assertThat(exception.getNow(null)).isNull();
+ assertThat(bccBytes).isNotNull();
+
+ ByteArrayInputStream bais = new ByteArrayInputStream(bccBytes);
+ List<DataItem> dataItems = new CborDecoder(bais).decode();
+ assertThat(dataItems.size()).isEqualTo(1);
+ assertThat(dataItems.get(0).getMajorType()).isEqualTo(MajorType.ARRAY);
+ List<DataItem> rootArrayItems = ((Array) dataItems.get(0)).getDataItems();
+ assertThat(rootArrayItems.size()).isAtLeast(2); // Public key and one certificate
+ if (mProtectedVm) {
+ // When a true BCC is created, microdroid expects entries for at least: the root public
+ // key, pvmfw, u-boot, u-boot-env, microdroid, app payload and the service process.
+ assertThat(rootArrayItems.size()).isAtLeast(7);
+ }
+ }
+
+ private static final UUID MICRODROID_PARTITION_UUID =
+ UUID.fromString("cf9afe9a-0662-11ec-a329-c32663a09d75");
+ private static final UUID U_BOOT_AVB_PARTITION_UUID =
+ UUID.fromString("7e8221e7-03e6-4969-948b-73a4c809a4f2");
+ private static final UUID U_BOOT_ENV_PARTITION_UUID =
+ UUID.fromString("0ab72d30-86ae-4d05-81b2-c1760be2b1f9");
+ private static final UUID PVM_FW_PARTITION_UUID =
+ UUID.fromString("90d2174a-038a-4bc6-adf3-824848fc5825");
+ private static final long BLOCK_SIZE = 512;
+
+ // Find the starting offset which holds the data of a partition having UUID.
+ // This is a kind of hack; rather than parsing QCOW2 we exploit the fact that the cluster size
+ // is normally greater than 512. It implies that the partition data should exist at a block
+ // which follows the header block
+ private OptionalLong findPartitionDataOffset(RandomAccessFile file, UUID uuid)
+ throws IOException {
+ // For each 512-byte block in file, check header
+ long fileSize = file.length();
+
+ for (long idx = 0; idx + BLOCK_SIZE < fileSize; idx += BLOCK_SIZE) {
+ file.seek(idx);
+ long high = file.readLong();
+ long low = file.readLong();
+ if (uuid.equals(new UUID(high, low))) return OptionalLong.of(idx + BLOCK_SIZE);
+ }
+ return OptionalLong.empty();
+ }
+
+ private void flipBit(RandomAccessFile file, long offset) throws IOException {
+ file.seek(offset);
+ int b = file.readByte();
+ file.seek(offset);
+ file.writeByte(b ^ 1);
+ }
+
+ private boolean tryBootVm(String vmName)
+ throws VirtualMachineException, InterruptedException {
+ mInner.mVm = mInner.mVmm.get(vmName); // re-load the vm before running tests
+ final CompletableFuture<Boolean> payloadStarted = new CompletableFuture<>();
+ VmEventListener listener =
+ new VmEventListener() {
+ @Override
+ public void onPayloadStarted(VirtualMachine vm, ParcelFileDescriptor stream) {
+ payloadStarted.complete(true);
+ forceStop(vm);
+ }
+ };
+ listener.runToFinish(mInner.mVm);
+ return payloadStarted.getNow(false);
+ }
+
+ private RandomAccessFile prepareInstanceImage(String vmName)
+ throws VirtualMachineException, InterruptedException, IOException {
+ VirtualMachineConfig config = mInner.newVmConfigBuilder("assets/vm_config.json")
+ .debugLevel(DebugLevel.NONE)
+ .build();
+
+ // Remove any existing VM so we can start from scratch
+ VirtualMachine oldVm = mInner.mVmm.getOrCreate(vmName, config);
+ oldVm.delete();
+ mInner.mVmm.getOrCreate(vmName, config);
+
+ assertThat(tryBootVm(vmName)).isTrue();
+
+ File vmRoot = new File(mInner.mContext.getFilesDir(), "vm");
+ File vmDir = new File(vmRoot, vmName);
+ File instanceImgPath = new File(vmDir, "instance.img");
+ return new RandomAccessFile(instanceImgPath, "rw");
+
+ }
+
+ private void assertThatPartitionIsMissing(UUID partitionUuid)
+ throws VirtualMachineException, InterruptedException, IOException {
+ RandomAccessFile instanceFile = prepareInstanceImage("test_vm_integrity");
+ assertThat(findPartitionDataOffset(instanceFile, partitionUuid).isPresent())
+ .isFalse();
+ }
+
+ // Flips a bit of given partition, and then see if boot fails.
+ private void assertThatBootFailsAfterCompromisingPartition(UUID partitionUuid)
+ throws VirtualMachineException, InterruptedException, IOException {
+ RandomAccessFile instanceFile = prepareInstanceImage("test_vm_integrity");
+ OptionalLong offset = findPartitionDataOffset(instanceFile, partitionUuid);
+ assertThat(offset.isPresent()).isTrue();
+
+ flipBit(instanceFile, offset.getAsLong());
+ assertThat(tryBootVm("test_vm_integrity")).isFalse();
+ }
+
+ @Test
+ public void bootFailsWhenMicrodroidDataIsCompromised()
+ throws VirtualMachineException, InterruptedException, IOException {
+ assume().withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ assertThatBootFailsAfterCompromisingPartition(MICRODROID_PARTITION_UUID);
+ }
+
+ @Test
+ public void bootFailsWhenUBootAvbDataIsCompromised()
+ throws VirtualMachineException, InterruptedException, IOException {
+ assume().withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ if (mProtectedVm) {
+ assertThatBootFailsAfterCompromisingPartition(U_BOOT_AVB_PARTITION_UUID);
+ } else {
+ // non-protected VM shouldn't have u-boot avb data
+ assertThatPartitionIsMissing(U_BOOT_AVB_PARTITION_UUID);
+ }
+ }
+
+ @Test
+ public void bootFailsWhenUBootEnvDataIsCompromised()
+ throws VirtualMachineException, InterruptedException, IOException {
+ assume().withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ if (mProtectedVm) {
+ assertThatBootFailsAfterCompromisingPartition(U_BOOT_ENV_PARTITION_UUID);
+ } else {
+ // non-protected VM shouldn't have u-boot env data
+ assertThatPartitionIsMissing(U_BOOT_ENV_PARTITION_UUID);
+ }
+ }
+
+ @Test
+ public void bootFailsWhenPvmFwDataIsCompromised()
+ throws VirtualMachineException, InterruptedException, IOException {
+ assume().withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ if (mProtectedVm) {
+ assertThatBootFailsAfterCompromisingPartition(PVM_FW_PARTITION_UUID);
+ } else {
+ // non-protected VM shouldn't have pvmfw data
+ assertThatPartitionIsMissing(PVM_FW_PARTITION_UUID);
+ }
+ }
+}
diff --git a/tests/testapk/src/java/com/android/microdroid/test/TestActivity.java b/tests/testapk/src/java/com/android/microdroid/test/TestActivity.java
deleted file mode 100644
index f73772e..0000000
--- a/tests/testapk/src/java/com/android/microdroid/test/TestActivity.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.android.microdroid.test;
-
-import android.app.Activity;
-import android.os.Bundle;
-import android.system.virtualmachine.VirtualMachine;
-import android.system.virtualmachine.VirtualMachineConfig;
-import android.system.virtualmachine.VirtualMachineException;
-import android.system.virtualmachine.VirtualMachineManager;
-
-public class TestActivity extends Activity {
-
- @Override
- public void onCreate(Bundle savedInstanceState) {
- super.onCreate(savedInstanceState);
-
- VirtualMachine vm1 = createAndRunVirtualMachine("vm1");
- VirtualMachine vm2 = createAndRunVirtualMachine("vm2");
- }
-
- private VirtualMachine createAndRunVirtualMachine(String name) {
- VirtualMachine vm;
- try {
- VirtualMachineConfig config =
- new VirtualMachineConfig.Builder(this, "assets/vm_config.json")
- .idsigPath("/data/local/tmp/virt/MicrodroidTestApp.apk.idsig")
- .build();
-
- VirtualMachineManager vmm = VirtualMachineManager.getInstance(this);
- vm = vmm.create(name, config);
- vm.run();
- } catch (VirtualMachineException e) {
- throw new RuntimeException(e);
- }
- return vm;
- }
-}
diff --git a/tests/testapk/src/native/testbinary.cpp b/tests/testapk/src/native/testbinary.cpp
index 1572021..89570c0 100644
--- a/tests/testapk/src/native/testbinary.cpp
+++ b/tests/testapk/src/native/testbinary.cpp
@@ -13,28 +13,32 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include <aidl/android/system/keystore2/IKeystoreService.h>
+#include <aidl/android/security/dice/IDiceNode.h>
+#include <aidl/android/system/virtualmachineservice/IVirtualMachineService.h>
+#include <aidl/com/android/microdroid/testservice/BnTestService.h>
+#include <android-base/file.h>
+#include <android-base/properties.h>
#include <android-base/result.h>
#include <android/binder_auto_utils.h>
#include <android/binder_manager.h>
+#include <fcntl.h>
+#include <fsverity_digests.pb.h>
+#include <linux/vm_sockets.h>
+#include <stdint.h>
#include <stdio.h>
+#include <sys/ioctl.h>
#include <sys/system_properties.h>
+#include <unistd.h>
-using aidl::android::hardware::security::keymint::Algorithm;
-using aidl::android::hardware::security::keymint::Digest;
-using aidl::android::hardware::security::keymint::KeyParameter;
-using aidl::android::hardware::security::keymint::KeyParameterValue;
-using aidl::android::hardware::security::keymint::KeyPurpose;
-using aidl::android::hardware::security::keymint::SecurityLevel;
-using aidl::android::hardware::security::keymint::Tag;
+#include <binder_rpc_unstable.hpp>
+#include <string>
-using aidl::android::system::keystore2::CreateOperationResponse;
-using aidl::android::system::keystore2::Domain;
-using aidl::android::system::keystore2::IKeystoreSecurityLevel;
-using aidl::android::system::keystore2::IKeystoreService;
-using aidl::android::system::keystore2::KeyDescriptor;
-using aidl::android::system::keystore2::KeyMetadata;
+using aidl::android::hardware::security::dice::BccHandover;
+using aidl::android::security::dice::IDiceNode;
+using aidl::android::system::virtualmachineservice::IVirtualMachineService;
+
+using android::base::ErrnoError;
using android::base::Error;
using android::base::Result;
@@ -42,133 +46,6 @@
namespace {
-Result<void> test_keystore() {
- // Connect to Keystore.
- ndk::SpAIBinder binder(
- AServiceManager_waitForService("android.system.keystore2.IKeystoreService/default"));
- auto service = IKeystoreService::fromBinder(binder);
- if (service == nullptr) {
- return Error() << "Failed to find Keystore";
- }
- std::shared_ptr<IKeystoreSecurityLevel> securityLevel;
- auto status = service->getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT, &securityLevel);
- if (!status.isOk()) {
- return Error() << "Failed to get security level";
- }
-
- // Create a signing key.
- std::vector<KeyParameter> params;
-
- KeyParameter algo;
- algo.tag = Tag::ALGORITHM;
- algo.value = KeyParameterValue::make<KeyParameterValue::algorithm>(Algorithm::HMAC);
- params.push_back(algo);
-
- KeyParameter key_size;
- key_size.tag = Tag::KEY_SIZE;
- key_size.value = KeyParameterValue::make<KeyParameterValue::integer>(256);
- params.push_back(key_size);
-
- KeyParameter min_mac_length;
- min_mac_length.tag = Tag::MIN_MAC_LENGTH;
- min_mac_length.value = KeyParameterValue::make<KeyParameterValue::integer>(256);
- params.push_back(min_mac_length);
-
- KeyParameter digest;
- digest.tag = Tag::DIGEST;
- digest.value = KeyParameterValue::make<KeyParameterValue::digest>(Digest::SHA_2_256);
- params.push_back(digest);
-
- KeyParameter purposeSign;
- purposeSign.tag = Tag::PURPOSE;
- purposeSign.value = KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::SIGN);
- params.push_back(purposeSign);
-
- KeyParameter purposeVerify;
- purposeVerify.tag = Tag::PURPOSE;
- purposeVerify.value =
- KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::VERIFY);
- params.push_back(purposeVerify);
-
- KeyParameter auth;
- auth.tag = Tag::NO_AUTH_REQUIRED;
- auth.value = KeyParameterValue::make<KeyParameterValue::boolValue>(true);
- params.push_back(auth);
-
- KeyDescriptor descriptor;
- descriptor.domain = Domain::SELINUX;
- descriptor.alias = "payload-test-key";
- descriptor.nspace = 140; // vm_payload_key
-
- KeyMetadata metadata;
- status = securityLevel->generateKey(descriptor, {}, params, 0, {}, &metadata);
- if (!status.isOk()) {
- return Error() << "Failed to create new HMAC key";
- }
-
- // Sign something.
- params.clear();
- params.push_back(algo);
- params.push_back(digest);
- params.push_back(purposeSign);
-
- KeyParameter mac_length;
- mac_length.tag = Tag::MAC_LENGTH;
- mac_length.value = KeyParameterValue::make<KeyParameterValue::integer>(256);
- params.push_back(mac_length);
-
- CreateOperationResponse opResponse;
- status = securityLevel->createOperation(descriptor, params, false, &opResponse);
- if (!status.isOk()) {
- return Error() << "Failed to create keystore signing operation: "
- << status.getServiceSpecificError();
- }
- auto operation = opResponse.iOperation;
-
- std::string message = "This is the message to sign";
- std::optional<std::vector<uint8_t>> out;
- status = operation->update({message.begin(), message.end()}, &out);
- if (!status.isOk()) {
- return Error() << "Failed to call keystore update operation.";
- }
-
- std::optional<std::vector<uint8_t>> signature;
- status = operation->finish({}, {}, &signature);
- if (!status.isOk()) {
- return Error() << "Failed to call keystore finish operation.";
- }
-
- if (!signature.has_value()) {
- return Error() << "Didn't receive a signature from keystore finish operation.";
- }
-
- // Verify the signature.
- params.clear();
- params.push_back(algo);
- params.push_back(digest);
- params.push_back(purposeVerify);
-
- status = securityLevel->createOperation(descriptor, params, false, &opResponse);
- if (!status.isOk()) {
- return Error() << "Failed to create keystore verification operation: "
- << status.getServiceSpecificError();
- }
- operation = opResponse.iOperation;
-
- status = operation->update({message.begin(), message.end()}, &out);
- if (!status.isOk()) {
- return Error() << "Failed to call keystore update operation.";
- }
-
- std::optional<std::vector<uint8_t>> out_signature;
- status = operation->finish({}, signature.value(), &out_signature);
- if (!status.isOk()) {
- return Error() << "Failed to call keystore finish operation.";
- }
-
- return {};
-}
-
template <typename T>
Result<T> report_test(std::string name, Result<T> result) {
auto property = "debug.microdroid.test." + name;
@@ -177,16 +54,130 @@
outcome << "PASS";
} else {
outcome << "FAIL: " << result.error();
- // Pollute stdout with the error in case the property is truncated.
- std::cout << "[" << name << "] test failed: " << result.error() << "\n";
+ // Pollute stderr with the error in case the property is truncated.
+ std::cerr << "[" << name << "] test failed: " << result.error() << "\n";
}
__system_property_set(property.c_str(), outcome.str().c_str());
return result;
}
+Result<void> start_test_service() {
+ class TestService : public aidl::com::android::microdroid::testservice::BnTestService {
+ ndk::ScopedAStatus addInteger(int32_t a, int32_t b, int32_t* out) override {
+ *out = a + b;
+ return ndk::ScopedAStatus::ok();
+ }
+
+ ndk::ScopedAStatus readProperty(const std::string& prop, std::string* out) override {
+ *out = android::base::GetProperty(prop, "");
+ if (out->empty()) {
+ std::string msg = "cannot find property " + prop;
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_SERVICE_SPECIFIC,
+ msg.c_str());
+ }
+
+ return ndk::ScopedAStatus::ok();
+ }
+
+ ndk::ScopedAStatus insecurelyExposeSealingCdi(std::vector<uint8_t>* out) override {
+ ndk::SpAIBinder binder(AServiceManager_getService("android.security.dice.IDiceNode"));
+ auto service = IDiceNode::fromBinder(binder);
+ if (service == nullptr) {
+ return ndk::ScopedAStatus::
+ fromServiceSpecificErrorWithMessage(0, "Failed to find diced");
+ }
+ BccHandover handover;
+ auto deriveStatus = service->derive({}, &handover);
+ if (!deriveStatus.isOk()) {
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(0,
+ "Failed call diced");
+ }
+ *out = {handover.cdiSeal.begin(), handover.cdiSeal.end()};
+ return ndk::ScopedAStatus::ok();
+ }
+
+ ndk::ScopedAStatus insecurelyExposeAttestationCdi(std::vector<uint8_t>* out) override {
+ ndk::SpAIBinder binder(AServiceManager_getService("android.security.dice.IDiceNode"));
+ auto service = IDiceNode::fromBinder(binder);
+ if (service == nullptr) {
+ return ndk::ScopedAStatus::
+ fromServiceSpecificErrorWithMessage(0, "Failed to find diced");
+ }
+ BccHandover handover;
+ auto deriveStatus = service->derive({}, &handover);
+ if (!deriveStatus.isOk()) {
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(0,
+ "Failed call diced");
+ }
+ *out = {handover.cdiAttest.begin(), handover.cdiAttest.end()};
+ return ndk::ScopedAStatus::ok();
+ }
+
+ ndk::ScopedAStatus getBcc(std::vector<uint8_t>* out) override {
+ ndk::SpAIBinder binder(AServiceManager_getService("android.security.dice.IDiceNode"));
+ auto service = IDiceNode::fromBinder(binder);
+ if (service == nullptr) {
+ return ndk::ScopedAStatus::
+ fromServiceSpecificErrorWithMessage(0, "Failed to find diced");
+ }
+ BccHandover handover;
+ auto deriveStatus = service->derive({}, &handover);
+ if (!deriveStatus.isOk()) {
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(0,
+ "Failed call diced");
+ }
+ *out = {handover.bcc.data.begin(), handover.bcc.data.end()};
+ return ndk::ScopedAStatus::ok();
+ }
+ };
+ auto testService = ndk::SharedRefBase::make<TestService>();
+
+ auto callback = []([[maybe_unused]] void* param) {
+ // Tell microdroid_manager that we're ready.
+ // Failing to notify is not a fatal error; the payload can continue.
+ ndk::SpAIBinder binder(
+ RpcClient(VMADDR_CID_HOST, IVirtualMachineService::VM_BINDER_SERVICE_PORT));
+ auto virtualMachineService = IVirtualMachineService::fromBinder(binder);
+ if (virtualMachineService == nullptr) {
+ std::cerr << "failed to connect VirtualMachineService";
+ return;
+ }
+ if (!virtualMachineService->notifyPayloadReady().isOk()) {
+ std::cerr << "failed to notify payload ready to virtualizationservice";
+ }
+ };
+
+ if (!RunRpcServerCallback(testService->asBinder().get(), testService->SERVICE_PORT, callback,
+ nullptr)) {
+ return Error() << "RPC Server failed to run";
+ }
+
+ return {};
+}
+
+Result<void> verify_apk() {
+ const char* path = "/mnt/extra-apk/0/assets/build_manifest.pb";
+
+ std::string str;
+ if (!android::base::ReadFileToString(path, &str)) {
+ return ErrnoError() << "failed to read build_manifest.pb";
+ }
+
+ if (!android::security::fsverity::FSVerityDigests().ParseFromString(str)) {
+ return Error() << "invalid build_manifest.pb";
+ }
+
+ return {};
+}
+
} // Anonymous namespace
extern "C" int android_native_main(int argc, char* argv[]) {
+ // disable buffering to communicate seamlessly
+ setvbuf(stdin, nullptr, _IONBF, 0);
+ setvbuf(stdout, nullptr, _IONBF, 0);
+ setvbuf(stderr, nullptr, _IONBF, 0);
+
printf("Hello Microdroid ");
for (int i = 0; i < argc; i++) {
printf("%s", argv[i]);
@@ -198,8 +189,15 @@
testlib_sub();
printf("\n");
- __system_property_set("debug.microdroid.app.run", "true");
- if (!report_test("keystore", test_keystore()).ok()) return 1;
+ // Extra apks may be missing; this is not a fatal error
+ report_test("extra_apk", verify_apk());
- return 0;
+ __system_property_set("debug.microdroid.app.run", "true");
+
+ if (auto res = start_test_service(); res.ok()) {
+ return 0;
+ } else {
+ std::cerr << "starting service failed: " << res.error();
+ return 1;
+ }
}
diff --git a/tests/testapk/test.keystore b/tests/testapk/test.keystore
index 2f024d8..2946641 100644
--- a/tests/testapk/test.keystore
+++ b/tests/testapk/test.keystore
Binary files differ
diff --git a/tests/vsock_test.cc b/tests/vsock_test.cc
index d9b8f21..0fc451d 100644
--- a/tests/vsock_test.cc
+++ b/tests/vsock_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <android/sysprop/HypervisorProperties.sysprop.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
@@ -47,25 +48,18 @@
static constexpr const char kVmInitrdPath[] = "/data/local/tmp/virt-test/initramfs";
static constexpr const char kVmParams[] = "rdinit=/bin/init bin/vsock_client 2 45678 HelloWorld";
static constexpr const char kTestMessage[] = "HelloWorld";
+static constexpr const char kPlatformVersion[] = "~1.0";
-bool isVmSupported() {
- const std::array<const char *, 4> needed_files = {
- "/dev/kvm",
- "/dev/vhost-vsock",
- "/apex/com.android.virt/bin/crosvm",
- "/apex/com.android.virt/bin/virtualizationservice",
- };
- return std::all_of(needed_files.begin(), needed_files.end(),
- [](const char *file) { return access(file, F_OK) == 0; });
+/** Returns true if the kernel supports unprotected VMs. */
+bool isUnprotectedVmSupported() {
+ return android::sysprop::HypervisorProperties::hypervisor_vm_supported().value_or(false);
}
-/** Returns true if the kernel supports Protected KVM. */
-bool isPkvmSupported() {
- unique_fd kvm_fd(open("/dev/kvm", O_NONBLOCK | O_CLOEXEC));
- return kvm_fd != 0 && ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PROTECTED_VM) == 1;
-}
+TEST_F(VirtualizationTest, TestVsock) {
+ if (!isUnprotectedVmSupported()) {
+ GTEST_SKIP() << "Skipping as unprotected VMs are not supported on this device.";
+ }
-void runTest(sp<IVirtualizationService> virtualization_service, bool protected_vm) {
binder::Status status;
unique_fd server_fd(TEMP_FAILURE_RETRY(socket(AF_VSOCK, SOCK_STREAM, 0)));
@@ -88,18 +82,22 @@
raw_config.kernel = ParcelFileDescriptor(unique_fd(open(kVmKernelPath, O_RDONLY | O_CLOEXEC)));
raw_config.initrd = ParcelFileDescriptor(unique_fd(open(kVmInitrdPath, O_RDONLY | O_CLOEXEC)));
raw_config.params = kVmParams;
- raw_config.protected_vm = protected_vm;
+ raw_config.protectedVm = false;
+ raw_config.platformVersion = kPlatformVersion;
VirtualMachineConfig config(std::move(raw_config));
sp<IVirtualMachine> vm;
- status = virtualization_service->startVm(config, std::nullopt, &vm);
- ASSERT_TRUE(status.isOk()) << "Error starting VM: " << status;
+ status = mVirtualizationService->createVm(config, std::nullopt, std::nullopt, &vm);
+ ASSERT_TRUE(status.isOk()) << "Error creating VM: " << status;
int32_t cid;
status = vm->getCid(&cid);
ASSERT_TRUE(status.isOk()) << "Error getting CID: " << status;
LOG(INFO) << "VM starting with CID " << cid;
+ status = vm->start();
+ ASSERT_TRUE(status.isOk()) << "Error starting VM: " << status;
+
LOG(INFO) << "Accepting connection...";
struct sockaddr_vm client_sa;
socklen_t client_sa_len = sizeof(client_sa);
@@ -116,22 +114,17 @@
ASSERT_EQ(msg, kTestMessage);
}
-TEST_F(VirtualizationTest, TestVsock) {
- if (!isVmSupported()) {
- GTEST_SKIP() << "Device doesn't support KVM.";
- }
+TEST_F(VirtualizationTest, RejectIncompatiblePlatformVersion) {
+ VirtualMachineRawConfig raw_config;
+ raw_config.kernel = ParcelFileDescriptor(unique_fd(open(kVmKernelPath, O_RDONLY | O_CLOEXEC)));
+ raw_config.initrd = ParcelFileDescriptor(unique_fd(open(kVmInitrdPath, O_RDONLY | O_CLOEXEC)));
+ raw_config.params = kVmParams;
+ raw_config.platformVersion = "~2.0"; // The current platform version is 1.0.0.
- runTest(mVirtualizationService, false);
-}
-
-TEST_F(VirtualizationTest, TestVsockProtected) {
- if (!isVmSupported()) {
- GTEST_SKIP() << "Device doesn't support KVM.";
- } else if (!isPkvmSupported()) {
- GTEST_SKIP() << "Skipping as pKVM is not supported on this device.";
- }
-
- runTest(mVirtualizationService, true);
+ VirtualMachineConfig config(std::move(raw_config));
+ sp<IVirtualMachine> vm;
+ auto status = mVirtualizationService->createVm(config, std::nullopt, std::nullopt, &vm);
+ ASSERT_FALSE(status.isOk());
}
} // namespace virt
diff --git a/virtualizationservice/Android.bp b/virtualizationservice/Android.bp
index 239d729..9b2b740 100644
--- a/virtualizationservice/Android.bp
+++ b/virtualizationservice/Android.bp
@@ -21,26 +21,39 @@
prefer_rlib: true,
rustlibs: [
"android.system.virtualizationservice-rust",
+ "android.system.virtualmachineservice-rust",
"android.os.permissions_aidl-rust",
"libandroid_logger",
"libanyhow",
+ "libbinder_common",
+ "libbinder_rpc_unstable_bindgen",
+ "libbinder_rs",
"libcommand_fds",
- "libcrc32fast",
"libdisk",
+ "libidsig",
"liblog_rust",
"libmicrodroid_metadata",
"libmicrodroid_payload_config",
+ "libnix",
"libonce_cell",
- "libprotobuf",
- "libprotos",
+ "libregex",
+ "librustutils",
+ "libsemver",
+ "libselinux_bindgen",
+ "libserde",
"libserde_json",
"libserde_xml_rs",
- "libserde",
"libshared_child",
- "libuuid",
+ "libstatslog_virtualization_rust",
"libvmconfig",
"libzip",
"libvsock",
+ // TODO(b/202115393) stabilize the interface
+ "packagemanager_aidl-rust",
+ ],
+ shared_libs: [
+ "libbinder_rpc_unstable",
+ "libselinux",
],
}
diff --git a/virtualizationservice/aidl/Android.bp b/virtualizationservice/aidl/Android.bp
index f7cb339..30a4b03 100644
--- a/virtualizationservice/aidl/Android.bp
+++ b/virtualizationservice/aidl/Android.bp
@@ -4,7 +4,7 @@
aidl_interface {
name: "android.system.virtualizationservice",
- srcs: ["**/*.aidl"],
+ srcs: ["android/system/virtualizationservice/**/*.aidl"],
// This is never accessed directly. Apps are expected to use this indirectly via the Java
// wrapper android.system.virtualmachine.
unstable: true,
@@ -16,9 +16,33 @@
cpp: {
enabled: true,
},
+ ndk: {
+ apex_available: [
+ "com.android.virt",
+ "com.android.compos",
+ ],
+ },
rust: {
enabled: true,
- apex_available: ["com.android.virt"],
+ apex_available: [
+ "com.android.virt",
+ "com.android.compos",
+ ],
+ },
+ },
+}
+
+aidl_interface {
+ name: "android.system.virtualmachineservice",
+ srcs: ["android/system/virtualmachineservice/**/*.aidl"],
+ unstable: true,
+ backend: {
+ rust: {
+ enabled: true,
+ apex_available: [
+ "com.android.virt",
+ "com.android.compos",
+ ],
},
},
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/DeathReason.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/DeathReason.aidl
new file mode 100644
index 0000000..7b80fc9
--- /dev/null
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/DeathReason.aidl
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.virtualizationservice;
+
+/**
+ * The reason why a VM died.
+ */
+@Backing(type="int")
+enum DeathReason {
+ /** There was an error waiting for the VM. */
+ INFRASTRUCTURE_ERROR = 0,
+ /** The VM was killed. */
+ KILLED = 1,
+ /** The VM died for an unknown reason. */
+ UNKNOWN = 2,
+ /** The VM requested to shut down. */
+ SHUTDOWN = 3,
+ /** crosvm had an error starting the VM. */
+ ERROR = 4,
+ /** The VM requested to reboot, possibly as the result of a kernel panic. */
+ REBOOT = 5,
+ /** The VM or crosvm crashed. */
+ CRASH = 6,
+ /** The pVM firmware failed to verify the VM because the public key doesn't match. */
+ PVM_FIRMWARE_PUBLIC_KEY_MISMATCH = 7,
+ /** The pVM firmware failed to verify the VM because the instance image changed. */
+ PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED = 8,
+ /** The bootloader failed to verify the VM because the public key doesn't match. */
+ BOOTLOADER_PUBLIC_KEY_MISMATCH = 9,
+ /** The bootloader failed to verify the VM because the instance image changed. */
+ BOOTLOADER_INSTANCE_IMAGE_CHANGED = 10,
+}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachine.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachine.aidl
index 33c9716..6f3d4f0 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachine.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachine.aidl
@@ -16,20 +16,24 @@
package android.system.virtualizationservice;
import android.system.virtualizationservice.IVirtualMachineCallback;
+import android.system.virtualizationservice.VirtualMachineState;
interface IVirtualMachine {
/** Get the CID allocated to the VM. */
int getCid();
- /** Returns true if the VM is still running, or false if it has exited for any reason. */
- boolean isRunning();
+ /** Returns the current lifecycle state of the VM. */
+ VirtualMachineState getState();
/**
* Register a Binder object to get callbacks when the state of the VM changes, such as if it
* dies.
- *
- * TODO(jiyong): this should be registered when IVirtualizationService.run is called. Otherwise,
- * we might miss some events that happen before the registration is done.
*/
void registerCallback(IVirtualMachineCallback callback);
+
+ /** Starts running the VM. */
+ void start();
+
+ /** Open a vsock connection to the CID of the VM on the given port. */
+ ParcelFileDescriptor connectVsock(int port);
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachineCallback.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachineCallback.aidl
index 7bb18a4..12a056c 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachineCallback.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualMachineCallback.aidl
@@ -15,7 +15,7 @@
*/
package android.system.virtualizationservice;
-import android.system.virtualizationservice.IVirtualMachine;
+import android.system.virtualizationservice.DeathReason;
/**
* An object which a client may register with the VirtualizationService to get callbacks about the
@@ -23,14 +23,28 @@
*/
oneway interface IVirtualMachineCallback {
/**
- * Called when the payload starts in the VM. `stdout` is the stdout of the payload.
+ * Called when the payload starts in the VM. `stream` is the input/output stream of the payload.
*
* <p>Note: when the virtual machine object is shared to multiple processes and they register
- * this callback to the same virtual machine object, the processes will compete to read from the
- * same payload stdout. As a result, each process might get only a part of the entire output
- * stream. To avoid such a case, keep only one process to read from the stdout.
+ * this callback to the same virtual machine object, the processes will compete to access the
+ * same payload stream. Keep only one process to access the stream.
*/
- void onPayloadStarted(int cid, in ParcelFileDescriptor stdout);
+ void onPayloadStarted(int cid, in @nullable ParcelFileDescriptor stream);
+
+ /**
+ * Called when the payload in the VM is ready to serve.
+ */
+ void onPayloadReady(int cid);
+
+ /**
+ * Called when the payload has finished in the VM. `exitCode` is the exit code of the payload.
+ */
+ void onPayloadFinished(int cid, int exitCode);
+
+ /**
+ * Called when an error occurs in the VM.
+ */
+ void onError(int cid, int errorCode, in String message);
/**
* Called when the VM dies.
@@ -38,5 +52,5 @@
* Note that this will not be called if the VirtualizationService itself dies, so you should
* also use `link_to_death` to handle that.
*/
- void onDied(int cid);
+ void onDied(int cid, in DeathReason reason);
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
index 8affaad..e417ec4 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
@@ -16,23 +16,37 @@
package android.system.virtualizationservice;
import android.system.virtualizationservice.IVirtualMachine;
+import android.system.virtualizationservice.PartitionType;
import android.system.virtualizationservice.VirtualMachineConfig;
import android.system.virtualizationservice.VirtualMachineDebugInfo;
interface IVirtualizationService {
/**
- * Start the VM with the given config file, and return a handle to it. If `logFd` is provided
- * then console logs from the VM will be sent to it.
+ * Create the VM with the given config file, and return a handle to it ready to start it. If
+ * `consoleFd` is provided then console output from the VM will be sent to it. If `osLogFd` is
+ * provided then the OS-level logs will be sent to it. `osLogFd` is supported only when the OS
+ * running in the VM has the logging system. In case of Microdroid, the logging system is logd.
*/
- IVirtualMachine startVm(
- in VirtualMachineConfig config, in @nullable ParcelFileDescriptor logFd);
+ IVirtualMachine createVm(in VirtualMachineConfig config,
+ in @nullable ParcelFileDescriptor consoleFd,
+ in @nullable ParcelFileDescriptor osLogFd);
/**
* Initialise an empty partition image of the given size to be used as a writable partition.
*
* The file must be open with both read and write permissions, and should be a new empty file.
*/
- void initializeWritablePartition(in ParcelFileDescriptor imageFd, long size);
+ void initializeWritablePartition(
+ in ParcelFileDescriptor imageFd, long size, PartitionType type);
+
+ /**
+ * Create or update an idsig file that digests the given APK file. The idsig file follows the
+ * idsig format that is defined by the APK Signature Scheme V4. The idsig file is not updated
+ * when it is up to date with the input file, which is checked by comparing the
+ * signing_info.apk_digest field in the idsig file with the signer.signed_data.digests.digest
+ * field in the input APK file.
+ */
+ void createOrUpdateIdsigFile(in ParcelFileDescriptor inputFd, in ParcelFileDescriptor idsigFd);
/**
* Get a list of all currently running VMs. This method is only intended for debug purposes,
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/Partition.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/Partition.aidl
index 9b8658b..825c3da 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/Partition.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/Partition.aidl
@@ -20,8 +20,8 @@
/** A label for the partition. */
@utf8InCpp String label;
- /** The backing file descriptors of the partition images. */
- ParcelFileDescriptor[] images;
+ /** The backing file descriptor of the partition image. */
+ ParcelFileDescriptor image;
/** Whether the partition should be writable by the VM. */
boolean writable;
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/PartitionType.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/PartitionType.aidl
new file mode 100644
index 0000000..f25e674
--- /dev/null
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/PartitionType.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.virtualizationservice;
+
+/**
+ * Type of the writable partition that virtualizationservice creates via
+ * initializeWritablePartition.
+ */
+@Backing(type="int")
+enum PartitionType {
+ /**
+ * The partition is simply initialized as all zeros
+ */
+ RAW = 0,
+ /**
+ * The partition is initialized as an instance image which is formatted to hold per-VM secrets
+ */
+ ANDROID_VM_INSTANCE = 1,
+}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
index 9339f82..c36e561 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
@@ -23,18 +23,48 @@
/** idsig for an APK */
ParcelFileDescriptor idsig;
+ /** Idsigs for the extra APKs. Must match with the extra_apks in the payload config. */
+ List<ParcelFileDescriptor> extraIdsigs;
+
/** instance.img that has per-instance data */
ParcelFileDescriptor instanceImage;
/** Path to a configuration in an APK. This is the actual configuration for a VM. */
@utf8InCpp String configPath;
- /** Whether to run the VM in debug mode or not */
- boolean debug;
+ enum DebugLevel {
+ /** Not debuggable at all */
+ NONE,
+ /** Only the logs from app is shown */
+ APP_ONLY,
+ /**
+ * Fully debuggable. All logs are shown, kernel messages are shown, and adb shell is
+ * supported
+ */
+ FULL,
+ }
+
+ /** Debug level of the VM */
+ DebugLevel debugLevel;
+
+ /** Whether the VM should be a protected VM. */
+ boolean protectedVm;
/**
* The amount of RAM to give the VM, in MiB. If this is 0 or negative then it will default to
* the value in microdroid.json, if any, or the crosvm default.
*/
- int memory_mib;
+ int memoryMib;
+
+ /**
+ * Number of vCPUs in the VM. Defaults to 1.
+ */
+ int numCpus = 1;
+
+ /**
+ * Comma-separated list of CPUs or CPU ranges to run vCPUs on (e.g. 0,1-3,5), or
+ * colon-separated list of vCPU-to-host-CPU assignments (e.g. 0=0:1=1:2=2).
+ * Default is no mask which means a vCPU can run on any host CPU.
+ */
+ @nullable String cpuAffinity;
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineDebugInfo.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineDebugInfo.aidl
index d081b8d..672c41a 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineDebugInfo.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineDebugInfo.aidl
@@ -15,6 +15,8 @@
*/
package android.system.virtualizationservice;
+import android.system.virtualizationservice.VirtualMachineState;
+
/** Information about a running VM, for debug purposes only. */
parcelable VirtualMachineDebugInfo {
/** The CID assigned to the VM. */
@@ -35,6 +37,6 @@
*/
int requesterPid;
- /** Whether the VM is still running. */
- boolean running;
+ /** The current lifecycle state of the VM. */
+ VirtualMachineState state;
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl
index 612c498..dfd3bff 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl
@@ -41,8 +41,26 @@
DiskImage[] disks;
/** Whether the VM should be a protected VM. */
- boolean protected_vm;
+ boolean protectedVm;
/** The amount of RAM to give the VM, in MiB. 0 or negative to use the default. */
- int memory_mib;
+ int memoryMib;
+
+ /**
+ * Number of vCPUs in the VM. Defaults to 1.
+ */
+ int numCpus = 1;
+
+ /**
+ * Comma-separated list of CPUs or CPU ranges to run vCPUs on (e.g. 0,1-3,5), or
+ * colon-separated list of vCPU-to-host-CPU assignments (e.g. 0=0:1=1:2=2).
+ * Default is no mask which means a vCPU can run on any host CPU.
+ */
+ @nullable String cpuAffinity;
+
+ /**
+ * A version or range of versions of the virtual platform that this config is compatible with.
+ * The format follows SemVer.
+ */
+ @utf8InCpp String platformVersion;
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineState.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineState.aidl
new file mode 100644
index 0000000..d85b3c1
--- /dev/null
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineState.aidl
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.virtualizationservice;
+
+/**
+ * The lifecycle state of a VM.
+ */
+@Backing(type="int")
+enum VirtualMachineState {
+ /**
+ * The VM has been created but not yet started.
+ */
+ NOT_STARTED = 0,
+ /**
+ * The VM is running, but the payload has not yet started.
+ */
+ STARTING = 1,
+ /**
+ * The VM is running and the payload has been started, but it has not yet indicated that it is
+ * ready.
+ */
+ STARTED = 2,
+ /**
+ * The VM payload has indicated that it is ready to serve requests.
+ */
+ READY = 3,
+ /**
+ * The VM payload has finished but the VM itself is still running.
+ */
+ FINISHED = 4,
+ /**
+ * The VM has died.
+ */
+ DEAD = 6,
+}
diff --git a/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl b/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
new file mode 100644
index 0000000..1a16f2a
--- /dev/null
+++ b/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.virtualmachineservice;
+
+/** {@hide} */
+interface IVirtualMachineService {
+ /**
+ * Port number that VirtualMachineService listens on connections from the guest VMs for the
+ * payload input and output.
+ */
+ const int VM_STREAM_SERVICE_PORT = 3000;
+
+ /**
+ * Port number that VirtualMachineService listens on connections from the guest VMs for the
+ * VirtualMachineService binder service.
+ */
+ const int VM_BINDER_SERVICE_PORT = 5000;
+
+ /**
+ * Notifies that the payload has started.
+ */
+ void notifyPayloadStarted();
+
+ /**
+ * Notifies that the payload is ready to serve.
+ */
+ void notifyPayloadReady();
+
+ /**
+ * Notifies that the payload has finished.
+ */
+ void notifyPayloadFinished(int exitCode);
+
+ /**
+ * Notifies that an error has occurred. See the ERROR_* constants.
+ */
+ void notifyError(int errorCode, in String message);
+
+ /**
+ * Error code for all other errors not listed below.
+ */
+ const int ERROR_UNKNOWN = 0;
+
+ /**
+ * Error code indicating that the payload can't be verified due to various reasons (e.g invalid
+ * merkle tree, invalid formats, etc).
+ */
+ const int ERROR_PAYLOAD_VERIFICATION_FAILED = 1;
+
+ /**
+ * Error code indicating that the payload is verified, but has changed since the last boot.
+ */
+ const int ERROR_PAYLOAD_CHANGED = 2;
+
+ /**
+ * Error code indicating that the payload config is invalid.
+ */
+ const int ERROR_PAYLOAD_INVALID_CONFIG = 3;
+}
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index 8c5eb97..d9825dc 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -15,39 +15,57 @@
//! Implementation of the AIDL interface of the VirtualizationService.
use crate::composite::make_composite_image;
-use crate::crosvm::{CrosvmConfig, DiskFile, VmInstance};
+use crate::crosvm::{CrosvmConfig, DiskFile, PayloadState, VmInstance, VmState};
use crate::payload::add_microdroid_images;
-use crate::{Cid, FIRST_GUEST_CID};
-
+use crate::{Cid, FIRST_GUEST_CID, SYSPROP_LAST_CID};
+use crate::selinux::{SeContext, getfilecon};
+use ::binder::unstable_api::AsNative;
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
-use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualizationService::IVirtualizationService;
-use android_system_virtualizationservice::aidl::android::system::virtualizationservice::DiskImage::DiskImage;
-use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualMachine::{
- BnVirtualMachine, IVirtualMachine,
-};
-use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualMachineCallback::IVirtualMachineCallback;
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
+ DeathReason::DeathReason,
+ DiskImage::DiskImage,
+ IVirtualMachine::{BnVirtualMachine, IVirtualMachine},
+ IVirtualMachineCallback::IVirtualMachineCallback,
+ IVirtualizationService::IVirtualizationService,
+ Partition::Partition,
+ PartitionType::PartitionType,
VirtualMachineAppConfig::VirtualMachineAppConfig,
VirtualMachineConfig::VirtualMachineConfig,
+ VirtualMachineDebugInfo::VirtualMachineDebugInfo,
VirtualMachineRawConfig::VirtualMachineRawConfig,
+ VirtualMachineState::VirtualMachineState,
};
-use android_system_virtualizationservice::aidl::android::system::virtualizationservice::VirtualMachineDebugInfo::VirtualMachineDebugInfo;
use android_system_virtualizationservice::binder::{
- self, BinderFeatures, ExceptionCode, Interface, ParcelFileDescriptor, Status, Strong, ThreadState,
+ self, BinderFeatures, ExceptionCode, Interface, ParcelFileDescriptor, Status, StatusCode, Strong,
+ ThreadState,
};
-use anyhow::{bail, Context, Result};
+use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::{
+ IVirtualMachineService::{
+ BnVirtualMachineService, IVirtualMachineService, VM_BINDER_SERVICE_PORT,
+ VM_STREAM_SERVICE_PORT,
+ },
+};
+use anyhow::{anyhow, bail, Context, Result};
+use binder_common::{lazy_service::LazyServiceGuard, new_binder_exception};
use disk::QcowFile;
-use log::{debug, error, warn, info};
+use idsig::{HashAlgorithm, V4Signature};
+use log::{debug, error, info, warn, trace};
use microdroid_payload_config::VmPayloadConfig;
+use rustutils::system_properties;
+use semver::VersionReq;
+use statslog_virtualization_rust::vm_creation_requested::{stats_write, Hypervisor};
use std::convert::TryInto;
-use std::ffi::CString;
-use std::fs::{File, OpenOptions, create_dir};
+use std::ffi::CStr;
+use std::fs::{create_dir, File, OpenOptions};
+use std::io::{Error, ErrorKind, Write};
use std::num::NonZeroU32;
-use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
+use std::os::raw;
+use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::path::{Path, PathBuf};
+use std::ptr::null_mut;
use std::sync::{Arc, Mutex, Weak};
use vmconfig::VmConfig;
-use vsock::{VsockListener, SockAddr, VsockStream};
+use vsock::{SockAddr, VsockListener, VsockStream};
use zip::ZipArchive;
pub const BINDER_SERVICE_IDENTIFIER: &str = "android.system.virtualizationservice";
@@ -58,38 +76,67 @@
/// The CID representing the host VM
const VMADDR_CID_HOST: u32 = 2;
-/// Port number that virtualizationservice listens on connections from the guest VMs for the
-/// payload output
-const PORT_VIRT_SERVICE: u32 = 3000;
-
/// The size of zero.img.
/// Gaps in composite disk images are filled with a shared zero.img.
const ZERO_FILLER_SIZE: u64 = 4096;
+/// Magic string for the instance image
+const ANDROID_VM_INSTANCE_MAGIC: &str = "Android-VM-instance";
+
+/// Version of the instance image format
+const ANDROID_VM_INSTANCE_VERSION: u16 = 1;
+
/// Implementation of `IVirtualizationService`, the entry point of the AIDL service.
#[derive(Debug, Default)]
pub struct VirtualizationService {
state: Arc<Mutex<State>>,
}
-impl Interface for VirtualizationService {}
+impl Interface for VirtualizationService {
+ fn dump(&self, mut file: &File, _args: &[&CStr]) -> Result<(), StatusCode> {
+ check_permission("android.permission.DUMP").or(Err(StatusCode::PERMISSION_DENIED))?;
+ let state = &mut *self.state.lock().unwrap();
+ let vms = state.vms();
+ writeln!(file, "Running {0} VMs:", vms.len()).or(Err(StatusCode::UNKNOWN_ERROR))?;
+ for vm in vms {
+ writeln!(file, "VM CID: {}", vm.cid).or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(file, "\tState: {:?}", vm.vm_state.lock().unwrap())
+ .or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(file, "\tPayload state {:?}", vm.payload_state())
+ .or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(file, "\tProtected: {}", vm.protected).or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(file, "\ttemporary_directory: {}", vm.temporary_directory.to_string_lossy())
+ .or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(file, "\trequester_uid: {}", vm.requester_uid)
+ .or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(file, "\trequester_sid: {}", vm.requester_sid)
+ .or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(file, "\trequester_debug_pid: {}", vm.requester_debug_pid)
+ .or(Err(StatusCode::UNKNOWN_ERROR))?;
+ }
+ Ok(())
+ }
+}
impl IVirtualizationService for VirtualizationService {
- /// Create and start a new VM with the given configuration, assigning it the next available CID.
+ /// Creates (but does not start) a new VM with the given configuration, assigning it the next
+ /// available CID.
///
/// Returns a binder `IVirtualMachine` object referring to it, as a handle for the client.
- fn startVm(
+ fn createVm(
&self,
config: &VirtualMachineConfig,
+ console_fd: Option<&ParcelFileDescriptor>,
log_fd: Option<&ParcelFileDescriptor>,
) -> binder::Result<Strong<dyn IVirtualMachine>> {
check_manage_access()?;
let state = &mut *self.state.lock().unwrap();
+ let console_fd = console_fd.map(clone_file).transpose()?;
let log_fd = log_fd.map(clone_file).transpose()?;
let requester_uid = ThreadState::get_calling_uid();
let requester_sid = get_calling_sid()?;
let requester_debug_pid = ThreadState::get_calling_pid();
- let cid = state.allocate_cid()?;
+ let cid = next_cid().or(Err(ExceptionCode::ILLEGAL_STATE))?;
// Counter to generate unique IDs for temporary image files.
let mut next_temporary_image_id = 0;
@@ -100,6 +147,9 @@
// Make directory for temporary files.
let temporary_directory: PathBuf = format!("{}/{}", TEMPORARY_DIRECTORY, cid).into();
create_dir(&temporary_directory).map_err(|e| {
+ // At this point, we do not know the protected status of Vm
+ // setting it to false, though this may not be correct.
+ write_vm_creation_stats(false, false);
error!(
"Failed to create temporary directory {:?} for VM files: {}",
temporary_directory, e
@@ -113,10 +163,13 @@
)
})?;
+ let is_app_config = matches!(config, VirtualMachineConfig::AppConfig(_));
+
let config = match config {
VirtualMachineConfig::AppConfig(config) => BorrowedOrOwned::Owned(
load_app_config(config, &temporary_directory).map_err(|e| {
error!("Failed to load app config from {}: {}", &config.configPath, e);
+ write_vm_creation_stats(config.protectedVm, false);
new_binder_exception(
ExceptionCode::SERVICE_SPECIFIC,
format!("Failed to load app config from {}: {}", &config.configPath, e),
@@ -126,16 +179,36 @@
VirtualMachineConfig::RawConfig(config) => BorrowedOrOwned::Borrowed(config),
};
let config = config.as_ref();
+ let protected = config.protectedVm;
+
+ // Check if partition images are labeled incorrectly. This is to prevent random images
+ // which are not protected by the Android Verified Boot (e.g. bits downloaded by apps) from
+ // being loaded in a pVM. Specifically, for images in the raw config, nothing is allowed
+ // to be labeled as app_data_file. For images in the app config, nothing but the instance
+ // partition is allowed to be labeled as such.
+ config
+ .disks
+ .iter()
+ .flat_map(|disk| disk.partitions.iter())
+ .filter(|partition| {
+ if is_app_config {
+ partition.label != "vm-instance"
+ } else {
+ true // all partitions are checked
+ }
+ })
+ .try_for_each(check_label_for_partition)
+ .map_err(|e| new_binder_exception(ExceptionCode::SERVICE_SPECIFIC, e.to_string()))?;
let zero_filler_path = temporary_directory.join("zero.img");
- let zero_filler_file = write_zero_filler(&zero_filler_path).map_err(|e| {
+ write_zero_filler(&zero_filler_path).map_err(|e| {
error!("Failed to make composite image: {}", e);
+ write_vm_creation_stats(protected, false);
new_binder_exception(
ExceptionCode::SERVICE_SPECIFIC,
format!("Failed to make composite image: {}", e),
)
})?;
- indirect_files.push(zero_filler_file);
// Assemble disk images if needed.
let disks = config
@@ -155,33 +228,39 @@
// Actually start the VM.
let crosvm_config = CrosvmConfig {
cid,
- bootloader: as_asref(&config.bootloader),
- kernel: as_asref(&config.kernel),
- initrd: as_asref(&config.initrd),
+ bootloader: maybe_clone_file(&config.bootloader)?,
+ kernel: maybe_clone_file(&config.kernel)?,
+ initrd: maybe_clone_file(&config.initrd)?,
disks,
params: config.params.to_owned(),
- protected: config.protected_vm,
- memory_mib: config.memory_mib.try_into().ok().and_then(NonZeroU32::new),
- };
- let composite_disk_fds: Vec<_> =
- indirect_files.iter().map(|file| file.as_raw_fd()).collect();
- let instance = VmInstance::start(
- &crosvm_config,
+ protected,
+ memory_mib: config.memoryMib.try_into().ok().and_then(NonZeroU32::new),
+ cpus: config.numCpus.try_into().ok().and_then(NonZeroU32::new),
+ cpu_affinity: config.cpuAffinity.clone(),
+ console_fd,
log_fd,
- &composite_disk_fds,
- temporary_directory,
- requester_uid,
- requester_sid,
- requester_debug_pid,
- )
- .map_err(|e| {
- error!("Failed to start VM with config {:?}: {}", config, e);
- new_binder_exception(
- ExceptionCode::SERVICE_SPECIFIC,
- format!("Failed to start VM: {}", e),
+ indirect_files,
+ platform_version: parse_platform_version_req(&config.platformVersion)?,
+ };
+ let instance = Arc::new(
+ VmInstance::new(
+ crosvm_config,
+ temporary_directory,
+ requester_uid,
+ requester_sid,
+ requester_debug_pid,
)
- })?;
+ .map_err(|e| {
+ error!("Failed to create VM with config {:?}: {}", config, e);
+ write_vm_creation_stats(protected, false);
+ new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("Failed to create VM: {}", e),
+ )
+ })?,
+ );
state.add_vm(Arc::downgrade(&instance));
+ write_vm_creation_stats(protected, true);
Ok(VirtualMachine::create(instance))
}
@@ -190,6 +269,7 @@
&self,
image_fd: &ParcelFileDescriptor,
size: i64,
+ partition_type: PartitionType,
) -> binder::Result<()> {
check_manage_access()?;
let size = size.try_into().map_err(|e| {
@@ -199,14 +279,53 @@
)
})?;
let image = clone_file(image_fd)?;
-
- QcowFile::new(image, size).map_err(|e| {
+ // initialize the file. Any data in the file will be erased.
+ image.set_len(0).map_err(|e| {
+ new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("Failed to reset a file: {}", e),
+ )
+ })?;
+ let mut part = QcowFile::new(image, size).map_err(|e| {
new_binder_exception(
ExceptionCode::SERVICE_SPECIFIC,
format!("Failed to create QCOW2 image: {}", e),
)
})?;
+ match partition_type {
+ PartitionType::RAW => Ok(()),
+ PartitionType::ANDROID_VM_INSTANCE => format_as_android_vm_instance(&mut part),
+ _ => Err(Error::new(
+ ErrorKind::Unsupported,
+ format!("Unsupported partition type {:?}", partition_type),
+ )),
+ }
+ .map_err(|e| {
+ new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("Failed to initialize partition as {:?}: {}", partition_type, e),
+ )
+ })?;
+
+ Ok(())
+ }
+
+ /// Creates or update the idsig file by digesting the input APK file.
+ fn createOrUpdateIdsigFile(
+ &self,
+ input_fd: &ParcelFileDescriptor,
+ idsig_fd: &ParcelFileDescriptor,
+ ) -> binder::Result<()> {
+ // TODO(b/193504400): do this only when (1) idsig_fd is empty or (2) the APK digest in
+ // idsig_fd is different from APK digest in input_fd
+
+ let mut input = clone_file(input_fd)?;
+ let mut sig = V4Signature::create(&mut input, 4096, &[], HashAlgorithm::SHA256).unwrap();
+
+ let mut output = clone_file(idsig_fd)?;
+ output.set_len(0).unwrap();
+ sig.write_into(&mut output).unwrap();
Ok(())
}
@@ -225,7 +344,7 @@
requesterUid: vm.requester_uid as i32,
requesterSid: vm.requester_sid.clone(),
requesterPid: vm.requester_debug_pid,
- running: vm.running(),
+ state: get_state(&vm),
})
.collect();
Ok(cids)
@@ -255,18 +374,56 @@
impl VirtualizationService {
pub fn init() -> VirtualizationService {
let service = VirtualizationService::default();
+
+ // server for payload output
let state = service.state.clone(); // reference to state (not the state itself) is copied
std::thread::spawn(move || {
- handle_connection_from_vm(state).unwrap();
+ handle_stream_connection_from_vm(state).unwrap();
+ });
+
+ // binder server for vm
+ let mut state = service.state.clone(); // reference to state (not the state itself) is copied
+ std::thread::spawn(move || {
+ let state_ptr = &mut state as *mut _ as *mut raw::c_void;
+
+ debug!("virtual machine service is starting as an RPC service.");
+ // SAFETY: factory function is only ever called by RunRpcServerWithFactory, within the
+ // lifetime of the state, with context taking the pointer value above (so a properly
+ // aligned non-null pointer to an initialized instance).
+ let retval = unsafe {
+ binder_rpc_unstable_bindgen::RunRpcServerWithFactory(
+ Some(VirtualMachineService::factory),
+ state_ptr,
+ VM_BINDER_SERVICE_PORT as u32,
+ )
+ };
+ if retval {
+ debug!("RPC server has shut down gracefully");
+ } else {
+ bail!("Premature termination of RPC server");
+ }
+
+ Ok(retval)
});
service
}
}
-/// Waits for incoming connections from VM. If a new connection is made, notify the event to the
-/// client via the callback (if registered).
-fn handle_connection_from_vm(state: Arc<Mutex<State>>) -> Result<()> {
- let listener = VsockListener::bind_with_cid_port(VMADDR_CID_HOST, PORT_VIRT_SERVICE)?;
+/// Write the stats of VMCreation to statsd
+fn write_vm_creation_stats(protected: bool, success: bool) {
+ match stats_write(Hypervisor::Pkvm, protected, success) {
+ Err(e) => {
+ warn!("statslog_rust failed with error: {}", e);
+ }
+ Ok(_) => trace!("statslog_rust succeeded for virtualization service"),
+ }
+}
+
+/// Waits for incoming connections from VM. If a new connection is made, stores the stream in the
+/// corresponding `VmInstance`.
+fn handle_stream_connection_from_vm(state: Arc<Mutex<State>>) -> Result<()> {
+ let listener =
+ VsockListener::bind_with_cid_port(VMADDR_CID_HOST, VM_STREAM_SERVICE_PORT as u32)?;
for stream in listener.incoming() {
let stream = match stream {
Err(e) => {
@@ -278,20 +435,18 @@
if let Ok(SockAddr::Vsock(addr)) = stream.peer_addr() {
let cid = addr.cid();
let port = addr.port();
- info!("connected from cid={}, port={}", cid, port);
- if cid < FIRST_GUEST_CID {
- warn!("connection is not from a guest VM");
- continue;
- }
+ info!("payload stream connected from cid={}, port={}", cid, port);
if let Some(vm) = state.lock().unwrap().get_vm(cid) {
- vm.callbacks.notify_payload_started(cid, stream);
+ *vm.stream.lock().unwrap() = Some(stream);
+ } else {
+ error!("connection from cid={} is not from a guest VM", cid);
}
}
}
Ok(())
}
-fn write_zero_filler(zero_filler_path: &Path) -> Result<File> {
+fn write_zero_filler(zero_filler_path: &Path) -> Result<()> {
let file = OpenOptions::new()
.create_new(true)
.read(true)
@@ -299,7 +454,13 @@
.open(zero_filler_path)
.with_context(|| "Failed to create zero.img")?;
file.set_len(ZERO_FILLER_SIZE)?;
- Ok(file)
+ Ok(())
+}
+
+fn format_as_android_vm_instance(part: &mut dyn Write) -> std::io::Result<()> {
+ part.write_all(ANDROID_VM_INSTANCE_MAGIC.as_bytes())?;
+ part.write_all(&ANDROID_VM_INSTANCE_VERSION.to_le_bytes())?;
+ part.flush()
}
/// Given the configuration for a disk image, assembles the `DiskFile` to pass to crosvm.
@@ -382,20 +543,23 @@
let vm_config_file = File::open(vm_config_path)?;
let mut vm_config = VmConfig::load(&vm_config_file)?.to_parcelable()?;
- if config.memory_mib > 0 {
- vm_config.memory_mib = config.memory_mib;
+ if config.memoryMib > 0 {
+ vm_config.memoryMib = config.memoryMib;
}
+ vm_config.protectedVm = config.protectedVm;
+ vm_config.numCpus = config.numCpus;
+ vm_config.cpuAffinity = config.cpuAffinity.clone();
+
// Microdroid requires an additional payload disk image and the bootconfig partition.
if os_name == "microdroid" {
- let apexes = vm_payload_config.apexes.clone();
add_microdroid_images(
config,
temporary_directory,
apk_file,
idsig_file,
instance_file,
- apexes,
+ &vm_payload_config,
&mut vm_config,
)?;
}
@@ -443,8 +607,8 @@
}
}
} else {
- error!("Missing SID on startVm");
- Err(new_binder_exception(ExceptionCode::SECURITY, "Missing SID on startVm"))
+ error!("Missing SID on createVm");
+ Err(new_binder_exception(ExceptionCode::SECURITY, "Missing SID on createVm"))
}
})
}
@@ -479,15 +643,28 @@
check_permission("android.permission.MANAGE_VIRTUAL_MACHINE")
}
+/// Check if a partition has selinux labels that are not allowed
+fn check_label_for_partition(partition: &Partition) -> Result<()> {
+ let ctx = getfilecon(partition.image.as_ref().unwrap().as_ref())?;
+ if ctx == SeContext::new("u:object_r:app_data_file:s0").unwrap() {
+ Err(anyhow!("Partition {} shouldn't be labeled as {}", &partition.label, ctx))
+ } else {
+ Ok(())
+ }
+}
+
/// Implementation of the AIDL `IVirtualMachine` interface. Used as a handle to a VM.
#[derive(Debug)]
struct VirtualMachine {
instance: Arc<VmInstance>,
+ /// Keeps our service process running as long as this VM instance exists.
+ #[allow(dead_code)]
+ lazy_service_guard: LazyServiceGuard,
}
impl VirtualMachine {
fn create(instance: Arc<VmInstance>) -> Strong<dyn IVirtualMachine> {
- let binder = VirtualMachine { instance };
+ let binder = VirtualMachine { instance, lazy_service_guard: Default::default() };
BnVirtualMachine::new_binder(binder, BinderFeatures::default())
}
}
@@ -501,10 +678,10 @@
Ok(self.instance.cid as i32)
}
- fn isRunning(&self) -> binder::Result<bool> {
+ fn getState(&self) -> binder::Result<VirtualMachineState> {
// Don't check permission. The owner of the VM might have passed this binder object to
// others.
- Ok(self.instance.running())
+ Ok(get_state(&self.instance))
}
fn registerCallback(
@@ -518,6 +695,27 @@
self.instance.callbacks.add(callback.clone());
Ok(())
}
+
+ fn start(&self) -> binder::Result<()> {
+ self.instance.start().map_err(|e| {
+ error!("Error starting VM with CID {}: {:?}", self.instance.cid, e);
+ new_binder_exception(ExceptionCode::SERVICE_SPECIFIC, e.to_string())
+ })
+ }
+
+ fn connectVsock(&self, port: i32) -> binder::Result<ParcelFileDescriptor> {
+ if !matches!(&*self.instance.vm_state.lock().unwrap(), VmState::Running { .. }) {
+ return Err(new_binder_exception(ExceptionCode::SERVICE_SPECIFIC, "VM is not running"));
+ }
+ let stream =
+ VsockStream::connect_with_cid_port(self.instance.cid, port as u32).map_err(|e| {
+ new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("Failed to connect: {}", e),
+ )
+ })?;
+ Ok(vsock_stream_to_pfd(stream))
+ }
}
impl Drop for VirtualMachine {
@@ -534,23 +732,51 @@
impl VirtualMachineCallbacks {
/// Call all registered callbacks to notify that the payload has started.
- pub fn notify_payload_started(&self, cid: Cid, stream: VsockStream) {
+ pub fn notify_payload_started(&self, cid: Cid, stream: Option<VsockStream>) {
let callbacks = &*self.0.lock().unwrap();
- // SAFETY: ownership is transferred from stream to f
- let f = unsafe { File::from_raw_fd(stream.into_raw_fd()) };
- let pfd = ParcelFileDescriptor::new(f);
+ let pfd = stream.map(vsock_stream_to_pfd);
for callback in callbacks {
- if let Err(e) = callback.onPayloadStarted(cid as i32, &pfd) {
+ if let Err(e) = callback.onPayloadStarted(cid as i32, pfd.as_ref()) {
error!("Error notifying payload start event from VM CID {}: {}", cid, e);
}
}
}
- /// Call all registered callbacks to say that the VM has died.
- pub fn callback_on_died(&self, cid: Cid) {
+ /// Call all registered callbacks to notify that the payload is ready to serve.
+ pub fn notify_payload_ready(&self, cid: Cid) {
let callbacks = &*self.0.lock().unwrap();
for callback in callbacks {
- if let Err(e) = callback.onDied(cid as i32) {
+ if let Err(e) = callback.onPayloadReady(cid as i32) {
+ error!("Error notifying payload ready event from VM CID {}: {}", cid, e);
+ }
+ }
+ }
+
+ /// Call all registered callbacks to notify that the payload has finished.
+ pub fn notify_payload_finished(&self, cid: Cid, exit_code: i32) {
+ let callbacks = &*self.0.lock().unwrap();
+ for callback in callbacks {
+ if let Err(e) = callback.onPayloadFinished(cid as i32, exit_code) {
+ error!("Error notifying payload finish event from VM CID {}: {}", cid, e);
+ }
+ }
+ }
+
+ /// Call all registered callbacks to say that the VM encountered an error.
+ pub fn notify_error(&self, cid: Cid, error_code: i32, message: &str) {
+ let callbacks = &*self.0.lock().unwrap();
+ for callback in callbacks {
+ if let Err(e) = callback.onError(cid as i32, error_code, message) {
+ error!("Error notifying error event from VM CID {}: {}", cid, e);
+ }
+ }
+ }
+
+ /// Call all registered callbacks to say that the VM has died.
+ pub fn callback_on_died(&self, cid: Cid, reason: DeathReason) {
+ let callbacks = &*self.0.lock().unwrap();
+ for callback in callbacks {
+ if let Err(e) = callback.onDied(cid as i32, reason) {
error!("Error notifying exit of VM CID {}: {}", cid, e);
}
}
@@ -564,11 +790,8 @@
/// The mutable state of the VirtualizationService. There should only be one instance of this
/// struct.
-#[derive(Debug)]
+#[derive(Debug, Default)]
struct State {
- /// The next available unused CID.
- next_cid: Cid,
-
/// The VMs which have been started. When VMs are started a weak reference is added to this list
/// while a strong reference is returned to the caller over Binder. Once all copies of the
/// Binder client are dropped the weak reference here will become invalid, and will be removed
@@ -609,27 +832,45 @@
/// Retrieve and remove a strong VM reference.
fn debug_drop_vm(&mut self, cid: i32) -> Option<Strong<dyn IVirtualMachine>> {
let pos = self.debug_held_vms.iter().position(|vm| vm.getCid() == Ok(cid))?;
- Some(self.debug_held_vms.swap_remove(pos))
- }
-
- /// Get the next available CID, or an error if we have run out.
- fn allocate_cid(&mut self) -> binder::Result<Cid> {
- // TODO(qwandor): keep track of which CIDs are currently in use so that we can reuse them.
- let cid = self.next_cid;
- self.next_cid = self.next_cid.checked_add(1).ok_or(ExceptionCode::ILLEGAL_STATE)?;
- Ok(cid)
+ let vm = self.debug_held_vms.swap_remove(pos);
+ Some(vm)
}
}
-impl Default for State {
- fn default() -> Self {
- State { next_cid: FIRST_GUEST_CID, vms: vec![], debug_held_vms: vec![] }
- }
+/// Get the next available CID, or an error if we have run out. The last CID used is stored in
+/// a system property so that restart of virtualizationservice doesn't reuse CID while the host
+/// Android is up.
+fn next_cid() -> Result<Cid> {
+ let next = if let Some(val) = system_properties::read(SYSPROP_LAST_CID)? {
+ if let Ok(num) = val.parse::<u32>() {
+ num.checked_add(1).ok_or_else(|| anyhow!("run out of CID"))?
+ } else {
+ error!("Invalid last CID {}. Using {}", &val, FIRST_GUEST_CID);
+ FIRST_GUEST_CID
+ }
+ } else {
+ // First VM since the boot
+ FIRST_GUEST_CID
+ };
+ // Persist the last value for next use
+ let str_val = format!("{}", next);
+ system_properties::write(SYSPROP_LAST_CID, &str_val)?;
+ Ok(next)
}
-/// Converts an `&Option<T>` to an `Option<U>` where `T` implements `AsRef<U>`.
-fn as_asref<T: AsRef<U>, U>(option: &Option<T>) -> Option<&U> {
- option.as_ref().map(|t| t.as_ref())
+/// Gets the `VirtualMachineState` of the given `VmInstance`.
+fn get_state(instance: &VmInstance) -> VirtualMachineState {
+ match &*instance.vm_state.lock().unwrap() {
+ VmState::NotStarted { .. } => VirtualMachineState::NOT_STARTED,
+ VmState::Running { .. } => match instance.payload_state() {
+ PayloadState::Starting => VirtualMachineState::STARTING,
+ PayloadState::Started => VirtualMachineState::STARTED,
+ PayloadState::Ready => VirtualMachineState::READY,
+ PayloadState::Finished => VirtualMachineState::FINISHED,
+ },
+ VmState::Dead => VirtualMachineState::DEAD,
+ VmState::Failed => VirtualMachineState::DEAD,
+ }
}
/// Converts a `&ParcelFileDescriptor` to a `File` by cloning the file.
@@ -642,9 +883,26 @@
})
}
-/// Constructs a new Binder error `Status` with the given `ExceptionCode` and message.
-fn new_binder_exception<T: AsRef<str>>(exception: ExceptionCode, message: T) -> Status {
- Status::new_exception(exception, CString::new(message.as_ref()).ok().as_deref())
+/// Converts an `&Option<ParcelFileDescriptor>` to an `Option<File>` by cloning the file.
+fn maybe_clone_file(file: &Option<ParcelFileDescriptor>) -> Result<Option<File>, Status> {
+ file.as_ref().map(clone_file).transpose()
+}
+
+/// Converts a `VsockStream` to a `ParcelFileDescriptor`.
+fn vsock_stream_to_pfd(stream: VsockStream) -> ParcelFileDescriptor {
+ // SAFETY: ownership is transferred from stream to f
+ let f = unsafe { File::from_raw_fd(stream.into_raw_fd()) };
+ ParcelFileDescriptor::new(f)
+}
+
+/// Parses the platform version requirement string.
+fn parse_platform_version_req(s: &str) -> Result<VersionReq, Status> {
+ VersionReq::parse(s).map_err(|e| {
+ new_binder_exception(
+ ExceptionCode::BAD_PARCELABLE,
+ format!("Invalid platform version requirement {}: {}", s, e),
+ )
+ })
}
/// Simple utility for referencing Borrowed or Owned. Similar to std::borrow::Cow, but
@@ -658,7 +916,113 @@
fn as_ref(&self) -> &T {
match self {
Self::Borrowed(b) => b,
- Self::Owned(o) => &o,
+ Self::Owned(o) => o,
}
}
}
+
+/// Implementation of `IVirtualMachineService`, the entry point of the AIDL service.
+#[derive(Debug, Default)]
+struct VirtualMachineService {
+ state: Arc<Mutex<State>>,
+ cid: Cid,
+}
+
+impl Interface for VirtualMachineService {}
+
+impl IVirtualMachineService for VirtualMachineService {
+ fn notifyPayloadStarted(&self) -> binder::Result<()> {
+ let cid = self.cid;
+ if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
+ info!("VM having CID {} started payload", cid);
+ vm.update_payload_state(PayloadState::Started)
+ .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))?;
+ let stream = vm.stream.lock().unwrap().take();
+ vm.callbacks.notify_payload_started(cid, stream);
+ Ok(())
+ } else {
+ error!("notifyPayloadStarted is called from an unknown CID {}", cid);
+ Err(new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("cannot find a VM with CID {}", cid),
+ ))
+ }
+ }
+
+ fn notifyPayloadReady(&self) -> binder::Result<()> {
+ let cid = self.cid;
+ if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
+ info!("VM having CID {} payload is ready", cid);
+ vm.update_payload_state(PayloadState::Ready)
+ .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))?;
+ vm.callbacks.notify_payload_ready(cid);
+ Ok(())
+ } else {
+ error!("notifyPayloadReady is called from an unknown CID {}", cid);
+ Err(new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("cannot find a VM with CID {}", cid),
+ ))
+ }
+ }
+
+ fn notifyPayloadFinished(&self, exit_code: i32) -> binder::Result<()> {
+ let cid = self.cid;
+ if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
+ info!("VM having CID {} finished payload", cid);
+ vm.update_payload_state(PayloadState::Finished)
+ .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))?;
+ vm.callbacks.notify_payload_finished(cid, exit_code);
+ Ok(())
+ } else {
+ error!("notifyPayloadFinished is called from an unknown CID {}", cid);
+ Err(new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("cannot find a VM with CID {}", cid),
+ ))
+ }
+ }
+
+ fn notifyError(&self, error_code: i32, message: &str) -> binder::Result<()> {
+ let cid = self.cid;
+ if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
+ info!("VM having CID {} encountered an error", cid);
+ vm.update_payload_state(PayloadState::Finished)
+ .map_err(|e| new_binder_exception(ExceptionCode::ILLEGAL_STATE, e.to_string()))?;
+ vm.callbacks.notify_error(cid, error_code, message);
+ Ok(())
+ } else {
+ error!("notifyPayloadStarted is called from an unknown CID {}", cid);
+ Err(new_binder_exception(
+ ExceptionCode::SERVICE_SPECIFIC,
+ format!("cannot find a VM with CID {}", cid),
+ ))
+ }
+ }
+}
+
+impl VirtualMachineService {
+ // SAFETY: Service ownership is held by state, and the binder objects are threadsafe.
+ pub unsafe extern "C" fn factory(
+ cid: Cid,
+ context: *mut raw::c_void,
+ ) -> *mut binder_rpc_unstable_bindgen::AIBinder {
+ let state_ptr = context as *mut Arc<Mutex<State>>;
+ let state = state_ptr.as_ref().unwrap();
+ if let Some(vm) = state.lock().unwrap().get_vm(cid) {
+ let mut vm_service = vm.vm_service.lock().unwrap();
+ let service = vm_service.get_or_insert_with(|| Self::new_binder(state.clone(), cid));
+ service.as_binder().as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder
+ } else {
+ error!("connection from cid={} is not from a guest VM", cid);
+ null_mut()
+ }
+ }
+
+ fn new_binder(state: Arc<Mutex<State>>, cid: Cid) -> Strong<dyn IVirtualMachineService> {
+ BnVirtualMachineService::new_binder(
+ VirtualMachineService { state, cid },
+ BinderFeatures::default(),
+ )
+ }
+}
diff --git a/virtualizationservice/src/composite.rs b/virtualizationservice/src/composite.rs
index 378ed78..cb814f3 100644
--- a/virtualizationservice/src/composite.rs
+++ b/virtualizationservice/src/composite.rs
@@ -14,299 +14,20 @@
//! Functions for creating a composite disk image.
-use crate::gpt::{
- write_gpt_header, write_protective_mbr, GptPartitionEntry, GPT_BEGINNING_SIZE, GPT_END_SIZE,
- GPT_HEADER_SIZE, GPT_NUM_PARTITIONS, GPT_PARTITION_ENTRY_SIZE, SECTOR_SIZE,
-};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::Partition::Partition;
-use anyhow::{anyhow, bail, Context, Error};
-use crc32fast::Hasher;
-use disk::create_disk_file;
-use log::{trace, warn};
-use protobuf::Message;
-use protos::cdisk_spec::{ComponentDisk, CompositeDisk, ReadWriteCapability};
-use std::convert::TryInto;
+use anyhow::{anyhow, Context, Error};
+use disk::{
+ create_composite_disk, create_disk_file, ImagePartitionType, PartitionInfo, MAX_NESTING_DEPTH,
+};
use std::fs::{File, OpenOptions};
-use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
-use uuid::Uuid;
-
-/// A magic string placed at the beginning of a composite disk file to identify it.
-const CDISK_MAGIC: &str = "composite_disk\x1d";
-/// The version of the composite disk format supported by this implementation.
-const COMPOSITE_DISK_VERSION: u64 = 1;
-/// The amount of padding needed between the last partition entry and the first partition, to align
-/// the partition appropriately. The two sectors are for the MBR and the GPT header.
-const PARTITION_ALIGNMENT_SIZE: usize = GPT_BEGINNING_SIZE as usize
- - 2 * SECTOR_SIZE as usize
- - GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize;
-const HEADER_PADDING_LENGTH: usize = SECTOR_SIZE as usize - GPT_HEADER_SIZE as usize;
-// Keep all partitions 4k aligned for performance.
-const PARTITION_SIZE_SHIFT: u8 = 12;
-// Keep the disk size a multiple of 64k for crosvm's virtio_blk driver.
-const DISK_SIZE_SHIFT: u8 = 16;
-
-const LINUX_FILESYSTEM_GUID: Uuid = Uuid::from_u128(0x0FC63DAF_8483_4772_8E79_3D69D8477DE4);
-const EFI_SYSTEM_PARTITION_GUID: Uuid = Uuid::from_u128(0xC12A7328_F81F_11D2_BA4B_00A0C93EC93B);
-
-/// Information about a single image file to be included in a partition.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct PartitionFileInfo {
- path: PathBuf,
- size: u64,
-}
-
-/// Information about a partition to create, including the set of image files which make it up.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct PartitionInfo {
- label: String,
- files: Vec<PartitionFileInfo>,
- partition_type: ImagePartitionType,
- writable: bool,
-}
-
-/// Round `val` up to the next multiple of 2**`align_log`.
-fn align_to_power_of_2(val: u64, align_log: u8) -> u64 {
- let align = 1 << align_log;
- ((val + (align - 1)) / align) * align
-}
-
-/// Round `val` to partition size(4K)
-fn align_to_partition_size(val: u64) -> u64 {
- align_to_power_of_2(val, PARTITION_SIZE_SHIFT)
-}
-
-impl PartitionInfo {
- fn aligned_size(&self) -> u64 {
- align_to_partition_size(self.files.iter().map(|file| file.size).sum())
- }
-}
-
-/// The type of partition.
-#[allow(dead_code)]
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum ImagePartitionType {
- LinuxFilesystem,
- EfiSystemPartition,
-}
-
-impl ImagePartitionType {
- fn guid(self) -> Uuid {
- match self {
- Self::LinuxFilesystem => LINUX_FILESYSTEM_GUID,
- Self::EfiSystemPartition => EFI_SYSTEM_PARTITION_GUID,
- }
- }
-}
-
-/// Write protective MBR and primary GPT table.
-fn write_beginning(
- file: &mut impl Write,
- disk_guid: Uuid,
- partitions: &[u8],
- partition_entries_crc32: u32,
- secondary_table_offset: u64,
- disk_size: u64,
-) -> Result<(), Error> {
- // Write the protective MBR to the first sector.
- write_protective_mbr(file, disk_size)?;
-
- // Write the GPT header, and pad out to the end of the sector.
- write_gpt_header(file, disk_guid, partition_entries_crc32, secondary_table_offset, false)?;
- file.write_all(&[0; HEADER_PADDING_LENGTH])?;
-
- // Write partition entries, including unused ones.
- file.write_all(partitions)?;
-
- // Write zeroes to align the first partition appropriately.
- file.write_all(&[0; PARTITION_ALIGNMENT_SIZE])?;
-
- Ok(())
-}
-
-/// Write secondary GPT table.
-fn write_end(
- file: &mut impl Write,
- disk_guid: Uuid,
- partitions: &[u8],
- partition_entries_crc32: u32,
- secondary_table_offset: u64,
- disk_size: u64,
-) -> Result<(), Error> {
- // Write partition entries, including unused ones.
- file.write_all(partitions)?;
-
- // Write the GPT header, and pad out to the end of the sector.
- write_gpt_header(file, disk_guid, partition_entries_crc32, secondary_table_offset, true)?;
- file.write_all(&[0; HEADER_PADDING_LENGTH])?;
-
- // Pad out to the aligned disk size.
- let used_disk_size = secondary_table_offset + GPT_END_SIZE;
- let padding = disk_size - used_disk_size;
- file.write_all(&vec![0; padding as usize])?;
-
- Ok(())
-}
-
-/// Create the `GptPartitionEntry` for the given partition.
-fn create_gpt_entry(partition: &PartitionInfo, offset: u64) -> GptPartitionEntry {
- let mut partition_name: Vec<u16> = partition.label.encode_utf16().collect();
- partition_name.resize(36, 0);
-
- GptPartitionEntry {
- partition_type_guid: partition.partition_type.guid(),
- unique_partition_guid: Uuid::new_v4(),
- first_lba: offset / SECTOR_SIZE,
- last_lba: (offset + partition.aligned_size()) / SECTOR_SIZE - 1,
- attributes: 0,
- partition_name: partition_name.try_into().unwrap(),
- }
-}
-
-/// Create one or more `ComponentDisk` proto messages for the given partition.
-fn create_component_disks(
- partition: &PartitionInfo,
- offset: u64,
- zero_filler_path: &str,
-) -> Result<Vec<ComponentDisk>, Error> {
- let aligned_size = partition.aligned_size();
-
- if partition.files.is_empty() {
- bail!("No image files for partition {:?}", partition);
- }
- let mut file_size_sum = 0;
- let mut component_disks = vec![];
- for file in &partition.files {
- component_disks.push(ComponentDisk {
- offset: offset + file_size_sum,
- file_path: file.path.to_str().context("Invalid partition path")?.to_string(),
- read_write_capability: if partition.writable {
- ReadWriteCapability::READ_WRITE
- } else {
- ReadWriteCapability::READ_ONLY
- },
- ..ComponentDisk::new()
- });
- file_size_sum += file.size;
- }
-
- if file_size_sum != aligned_size {
- if partition.writable {
- bail!(
- "Read-write partition {:?} size is not a multiple of {}.",
- partition,
- 1 << PARTITION_SIZE_SHIFT
- );
- } else {
- // Fill in the gap by reusing the header file, because we know it is always bigger
- // than the alignment size (i.e. GPT_BEGINNING_SIZE > 1 << PARTITION_SIZE_SHIFT).
- warn!(
- "Read-only partition {:?} size is not a multiple of {}, filling gap.",
- partition,
- 1 << PARTITION_SIZE_SHIFT
- );
- component_disks.push(ComponentDisk {
- offset: offset + file_size_sum,
- file_path: zero_filler_path.to_owned(),
- read_write_capability: ReadWriteCapability::READ_ONLY,
- ..ComponentDisk::new()
- });
- }
- }
-
- Ok(component_disks)
-}
-
-/// Create a new composite disk containing the given partitions, and write it out to the given
-/// files.
-pub fn create_composite_disk(
- partitions: &[PartitionInfo],
- zero_filler_path: &Path,
- header_path: &Path,
- header_file: &mut File,
- footer_path: &Path,
- footer_file: &mut File,
- output_composite: &mut File,
-) -> Result<(), Error> {
- let zero_filler_path =
- zero_filler_path.to_str().context("Invalid zero filler path")?.to_string();
- let header_path = header_path.to_str().context("Invalid header path")?.to_string();
- let footer_path = footer_path.to_str().context("Invalid footer path")?.to_string();
-
- let mut composite_proto = CompositeDisk::new();
- composite_proto.version = COMPOSITE_DISK_VERSION;
- composite_proto.component_disks.push(ComponentDisk {
- file_path: header_path,
- offset: 0,
- read_write_capability: ReadWriteCapability::READ_ONLY,
- ..ComponentDisk::new()
- });
-
- // Write partitions to a temporary buffer so that we can calculate the CRC, and construct the
- // ComponentDisk proto messages at the same time.
- let mut partitions_buffer =
- [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
- let mut writer: &mut [u8] = &mut partitions_buffer;
- let mut next_disk_offset = GPT_BEGINNING_SIZE;
- for partition in partitions {
- create_gpt_entry(partition, next_disk_offset).write_bytes(&mut writer)?;
-
- for component_disk in
- create_component_disks(partition, next_disk_offset, &zero_filler_path)?
- {
- composite_proto.component_disks.push(component_disk);
- }
-
- next_disk_offset += partition.aligned_size();
- }
- let secondary_table_offset = next_disk_offset;
- let disk_size = align_to_power_of_2(secondary_table_offset + GPT_END_SIZE, DISK_SIZE_SHIFT);
- trace!("Partitions: {:#?}", partitions);
- trace!("Secondary table offset: {} disk size: {}", secondary_table_offset, disk_size);
-
- composite_proto.component_disks.push(ComponentDisk {
- file_path: footer_path,
- offset: secondary_table_offset,
- read_write_capability: ReadWriteCapability::READ_ONLY,
- ..ComponentDisk::new()
- });
-
- // Calculate CRC32 of partition entries.
- let mut hasher = Hasher::new();
- hasher.update(&partitions_buffer);
- let partition_entries_crc32 = hasher.finalize();
-
- let disk_guid = Uuid::new_v4();
- write_beginning(
- header_file,
- disk_guid,
- &partitions_buffer,
- partition_entries_crc32,
- secondary_table_offset,
- disk_size,
- )?;
- write_end(
- footer_file,
- disk_guid,
- &partitions_buffer,
- partition_entries_crc32,
- secondary_table_offset,
- disk_size,
- )?;
-
- composite_proto.length = disk_size;
- output_composite.write_all(CDISK_MAGIC.as_bytes())?;
- composite_proto.write_to_writer(output_composite)?;
-
- Ok(())
-}
/// Constructs a composite disk image for the given list of partitions, and opens it ready to use.
///
-/// Returns the composite disk image file, and a list of FD mappings which must be applied to any
-/// process which wants to use it. This is necessary because the composite image contains paths of
-/// the form `/proc/self/fd/N` for the partition images.
+/// Returns the composite disk image file, and a list of files whose file descriptors must be passed
+/// to any process which wants to use it. This is necessary because the composite image contains
+/// paths of the form `/proc/self/fd/N` for the partition images.
pub fn make_composite_image(
partitions: &[Partition],
zero_filler_path: &Path,
@@ -314,7 +35,7 @@
header_path: &Path,
footer_path: &Path,
) -> Result<(File, Vec<File>), Error> {
- let (partitions, files) = convert_partitions(partitions)?;
+ let (partitions, mut files) = convert_partitions(partitions)?;
let mut composite_image = OpenOptions::new()
.create_new(true)
@@ -330,13 +51,16 @@
OpenOptions::new().create_new(true).read(true).write(true).open(footer_path).with_context(
|| format!("Failed to create composite image header {:?}", footer_path),
)?;
+ let zero_filler_file = File::open(&zero_filler_path).with_context(|| {
+ format!("Failed to open composite image zero filler {:?}", zero_filler_path)
+ })?;
create_composite_disk(
&partitions,
- zero_filler_path,
- header_path,
+ &fd_path_for_file(&zero_filler_file),
+ &fd_path_for_file(&header_file),
&mut header_file,
- footer_path,
+ &fd_path_for_file(&footer_file),
&mut footer_file,
&mut composite_image,
)?;
@@ -345,12 +69,16 @@
let composite_image = File::open(&output_path)
.with_context(|| format!("Failed to open composite image {:?}", output_path))?;
+ files.push(header_file);
+ files.push(footer_file);
+ files.push(zero_filler_file);
+
Ok((composite_image, files))
}
-/// Given the AIDL config containing a list of partitions, with [`ParcelFileDescriptor`]s for each
-/// partition, return the list of file descriptors which must be passed to the composite disk image
-/// partition configuration for it.
+/// Given the AIDL config containing a list of partitions, with a [`ParcelFileDescriptor`] for each
+/// partition, returns the corresponding list of PartitionInfo and the list of files whose file
+/// descriptors must be passed to any process using the composite image.
fn convert_partitions(partitions: &[Partition]) -> Result<(Vec<PartitionInfo>, Vec<File>), Error> {
// File descriptors to pass to child process.
let mut files = vec![];
@@ -358,29 +86,24 @@
let partitions = partitions
.iter()
.map(|partition| {
- let image_files = partition
- .images
- .iter()
- .map(|image| {
- let file = image
- .as_ref()
- .try_clone()
- .context("Failed to clone partition image file descriptor")?;
-
- let size = get_partition_size(&file)?;
- let fd = file.as_raw_fd();
- let partition_info_file =
- PartitionFileInfo { path: format!("/proc/self/fd/{}", fd).into(), size };
- files.push(file);
- Ok(partition_info_file)
- })
- .collect::<Result<Vec<_>, Error>>()?;
+ // TODO(b/187187765): This shouldn't be an Option.
+ let file = partition
+ .image
+ .as_ref()
+ .context("Invalid partition image file descriptor")?
+ .as_ref()
+ .try_clone()
+ .context("Failed to clone partition image file descriptor")?;
+ let size = get_partition_size(&file)?;
+ let path = fd_path_for_file(&file);
+ files.push(file);
Ok(PartitionInfo {
label: partition.label.to_owned(),
- files: image_files,
+ path,
partition_type: ImagePartitionType::LinuxFilesystem,
writable: partition.writable,
+ size,
})
})
.collect::<Result<_, Error>>()?;
@@ -388,72 +111,17 @@
Ok((partitions, files))
}
+fn fd_path_for_file(file: &File) -> PathBuf {
+ let fd = file.as_raw_fd();
+ format!("/proc/self/fd/{}", fd).into()
+}
+
/// Find the size of the partition image in the given file by parsing the header.
///
/// This will work for raw, QCOW2, composite and Android sparse images.
fn get_partition_size(partition: &File) -> Result<u64, Error> {
// TODO: Use `context` once disk::Error implements std::error::Error.
- Ok(create_disk_file(partition.try_clone()?)
+ Ok(create_disk_file(partition.try_clone()?, MAX_NESTING_DEPTH)
.map_err(|e| anyhow!("Failed to open partition image: {}", e))?
.get_len()?)
}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn beginning_size() {
- let mut buffer = vec![];
- let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
- let disk_size = 1000 * SECTOR_SIZE;
- write_beginning(
- &mut buffer,
- Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
- &partitions,
- 42,
- disk_size - GPT_END_SIZE,
- disk_size,
- )
- .unwrap();
-
- assert_eq!(buffer.len(), GPT_BEGINNING_SIZE as usize);
- }
-
- #[test]
- fn end_size() {
- let mut buffer = vec![];
- let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
- let disk_size = 1000 * SECTOR_SIZE;
- write_end(
- &mut buffer,
- Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
- &partitions,
- 42,
- disk_size - GPT_END_SIZE,
- disk_size,
- )
- .unwrap();
-
- assert_eq!(buffer.len(), GPT_END_SIZE as usize);
- }
-
- #[test]
- fn end_size_with_padding() {
- let mut buffer = vec![];
- let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
- let disk_size = 1000 * SECTOR_SIZE;
- let padding = 3 * SECTOR_SIZE;
- write_end(
- &mut buffer,
- Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
- &partitions,
- 42,
- disk_size - GPT_END_SIZE - padding,
- disk_size,
- )
- .unwrap();
-
- assert_eq!(buffer.len(), GPT_END_SIZE as usize + padding as usize);
- }
-}
diff --git a/virtualizationservice/src/crosvm.rs b/virtualizationservice/src/crosvm.rs
index 5873cd9..f1b179e 100644
--- a/virtualizationservice/src/crosvm.rs
+++ b/virtualizationservice/src/crosvm.rs
@@ -19,29 +19,55 @@
use anyhow::{bail, Error};
use command_fds::CommandFdExt;
use log::{debug, error, info};
+use semver::{Version, VersionReq};
+use nix::{fcntl::OFlag, unistd::pipe2};
use shared_child::SharedChild;
use std::fs::{remove_dir_all, File};
+use std::io::{self, Read};
+use std::mem;
use std::num::NonZeroU32;
-use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::io::{AsRawFd, RawFd, FromRawFd};
use std::path::PathBuf;
-use std::process::Command;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::Arc;
+use std::process::{Command, ExitStatus};
+use std::sync::{Arc, Mutex};
use std::thread;
+use vsock::VsockStream;
+use android_system_virtualizationservice::aidl::android::system::virtualizationservice::DeathReason::DeathReason;
+use android_system_virtualmachineservice::binder::Strong;
+use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::IVirtualMachineService::IVirtualMachineService;
const CROSVM_PATH: &str = "/apex/com.android.virt/bin/crosvm";
+/// Version of the platform that crosvm currently implements. The format follows SemVer. This
+/// should be updated when there is a platform change in the crosvm side. Having this value here is
+/// fine because virtualizationservice and crosvm are supposed to be updated together in the virt
+/// APEX.
+const CROSVM_PLATFORM_VERSION: &str = "1.0.0";
+
+/// The exit status which crosvm returns when it has an error starting a VM.
+const CROSVM_ERROR_STATUS: i32 = 1;
+/// The exit status which crosvm returns when a VM requests a reboot.
+const CROSVM_REBOOT_STATUS: i32 = 32;
+/// The exit status which crosvm returns when it crashes due to an error.
+const CROSVM_CRASH_STATUS: i32 = 33;
+
/// Configuration for a VM to run with crosvm.
#[derive(Debug)]
-pub struct CrosvmConfig<'a> {
+pub struct CrosvmConfig {
pub cid: Cid,
- pub bootloader: Option<&'a File>,
- pub kernel: Option<&'a File>,
- pub initrd: Option<&'a File>,
+ pub bootloader: Option<File>,
+ pub kernel: Option<File>,
+ pub initrd: Option<File>,
pub disks: Vec<DiskFile>,
pub params: Option<String>,
pub protected: bool,
pub memory_mib: Option<NonZeroU32>,
+ pub cpus: Option<NonZeroU32>,
+ pub cpu_affinity: Option<String>,
+ pub console_fd: Option<File>,
+ pub log_fd: Option<File>,
+ pub indirect_files: Vec<File>,
+ pub platform_version: VersionReq,
}
/// A disk image to pass to crosvm for a VM.
@@ -51,11 +77,69 @@
pub writable: bool,
}
-/// Information about a particular instance of a VM which is running.
+/// The lifecycle state which the payload in the VM has reported itself to be in.
+///
+/// Note that the order of enum variants is significant; only forward transitions are allowed by
+/// [`VmInstance::update_payload_state`].
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum PayloadState {
+ Starting,
+ Started,
+ Ready,
+ Finished,
+}
+
+/// The current state of the VM itself.
+#[derive(Debug)]
+pub enum VmState {
+ /// The VM has not yet tried to start.
+ NotStarted {
+ ///The configuration needed to start the VM, if it has not yet been started.
+ config: CrosvmConfig,
+ },
+ /// The VM has been started.
+ Running {
+ /// The crosvm child process.
+ child: Arc<SharedChild>,
+ },
+ /// The VM died or was killed.
+ Dead,
+ /// The VM failed to start.
+ Failed,
+}
+
+impl VmState {
+ /// Tries to start the VM, if it is in the `NotStarted` state.
+ ///
+ /// Returns an error if the VM is in the wrong state, or fails to start.
+ fn start(&mut self, instance: Arc<VmInstance>) -> Result<(), Error> {
+ let state = mem::replace(self, VmState::Failed);
+ if let VmState::NotStarted { config } = state {
+ let (failure_pipe_read, failure_pipe_write) = create_pipe()?;
+
+ // If this fails and returns an error, `self` will be left in the `Failed` state.
+ let child = Arc::new(run_vm(config, failure_pipe_write)?);
+
+ let child_clone = child.clone();
+ thread::spawn(move || {
+ instance.monitor(child_clone, failure_pipe_read);
+ });
+
+ // If it started correctly, update the state.
+ *self = VmState::Running { child };
+ Ok(())
+ } else {
+ *self = state;
+ bail!("VM already started or failed")
+ }
+ }
+}
+
+/// Information about a particular instance of a VM which may be running.
#[derive(Debug)]
pub struct VmInstance {
- /// The crosvm child process.
- child: SharedChild,
+ /// The current state of the VM.
+ pub vm_state: Mutex<VmState>,
/// The CID assigned to the VM for vsock communication.
pub cid: Cid,
/// Whether the VM is a protected VM.
@@ -69,75 +153,76 @@
/// The PID of the process which requested the VM. Note that this process may no longer exist
/// and the PID may have been reused for a different process, so this should not be trusted.
pub requester_debug_pid: i32,
- /// Whether the VM is still running.
- running: AtomicBool,
/// Callbacks to clients of the VM.
pub callbacks: VirtualMachineCallbacks,
+ /// Input/output stream of the payload run in the VM.
+ pub stream: Mutex<Option<VsockStream>>,
+ /// VirtualMachineService binder object for the VM.
+ pub vm_service: Mutex<Option<Strong<dyn IVirtualMachineService>>>,
+ /// The latest lifecycle state which the payload reported itself to be in.
+ payload_state: Mutex<PayloadState>,
}
impl VmInstance {
- /// Create a new `VmInstance` for the given process.
- fn new(
- child: SharedChild,
- cid: Cid,
- protected: bool,
+ /// Validates the given config and creates a new `VmInstance` but doesn't start running it.
+ pub fn new(
+ config: CrosvmConfig,
temporary_directory: PathBuf,
requester_uid: u32,
requester_sid: String,
requester_debug_pid: i32,
- ) -> VmInstance {
- VmInstance {
- child,
+ ) -> Result<VmInstance, Error> {
+ validate_config(&config)?;
+ let cid = config.cid;
+ let protected = config.protected;
+ Ok(VmInstance {
+ vm_state: Mutex::new(VmState::NotStarted { config }),
cid,
protected,
temporary_directory,
requester_uid,
requester_sid,
requester_debug_pid,
- running: AtomicBool::new(true),
callbacks: Default::default(),
- }
+ stream: Mutex::new(None),
+ vm_service: Mutex::new(None),
+ payload_state: Mutex::new(PayloadState::Starting),
+ })
}
- /// Start an instance of `crosvm` to manage a new VM. The `crosvm` instance will be killed when
+ /// Starts an instance of `crosvm` to manage the VM. The `crosvm` instance will be killed when
/// the `VmInstance` is dropped.
- pub fn start(
- config: &CrosvmConfig,
- log_fd: Option<File>,
- composite_disk_fds: &[RawFd],
- temporary_directory: PathBuf,
- requester_uid: u32,
- requester_sid: String,
- requester_debug_pid: i32,
- ) -> Result<Arc<VmInstance>, Error> {
- let child = run_vm(config, log_fd, composite_disk_fds)?;
- let instance = Arc::new(VmInstance::new(
- child,
- config.cid,
- config.protected,
- temporary_directory,
- requester_uid,
- requester_sid,
- requester_debug_pid,
- ));
-
- let instance_clone = instance.clone();
- thread::spawn(move || {
- instance_clone.monitor();
- });
-
- Ok(instance)
+ pub fn start(self: &Arc<Self>) -> Result<(), Error> {
+ self.vm_state.lock().unwrap().start(self.clone())
}
- /// Wait for the crosvm child process to finish, then mark the VM as no longer running and call
- /// any callbacks.
- fn monitor(&self) {
- match self.child.wait() {
- Err(e) => error!("Error waiting for crosvm instance to die: {}", e),
- Ok(status) => info!("crosvm exited with status {}", status),
+ /// Waits for the crosvm child process to finish, then marks the VM as no longer running and
+ /// calls any callbacks.
+ ///
+ /// This takes a separate reference to the `SharedChild` rather than using the one in
+ /// `self.vm_state` to avoid holding the lock on `vm_state` while it is running.
+ fn monitor(&self, child: Arc<SharedChild>, mut failure_pipe_read: File) {
+ let result = child.wait();
+ match &result {
+ Err(e) => error!("Error waiting for crosvm({}) instance to die: {}", child.id(), e),
+ Ok(status) => info!("crosvm({}) exited with status {}", child.id(), status),
}
- self.running.store(false, Ordering::Release);
- self.callbacks.callback_on_died(self.cid);
+
+ let mut vm_state = self.vm_state.lock().unwrap();
+ *vm_state = VmState::Dead;
+ // Ensure that the mutex is released before calling the callbacks.
+ drop(vm_state);
+
+ let mut failure_string = String::new();
+ let failure_read_result = failure_pipe_read.read_to_string(&mut failure_string);
+ if let Err(e) = &failure_read_result {
+ error!("Error reading VM failure reason from pipe: {}", e);
+ }
+ if !failure_string.is_empty() {
+ info!("VM returned failure reason '{}'", failure_string);
+ }
+
+ self.callbacks.callback_on_died(self.cid, death_reason(&result, &failure_string));
// Delete temporary files.
if let Err(e) = remove_dir_all(&self.temporary_directory) {
@@ -145,49 +230,132 @@
}
}
- /// Return whether `crosvm` is still running the VM.
- pub fn running(&self) -> bool {
- self.running.load(Ordering::Acquire)
+ /// Returns the last reported state of the VM payload.
+ pub fn payload_state(&self) -> PayloadState {
+ *self.payload_state.lock().unwrap()
}
- /// Kill the crosvm instance.
+ /// Updates the payload state to the given value, if it is a valid state transition.
+ pub fn update_payload_state(&self, new_state: PayloadState) -> Result<(), Error> {
+ let mut state_locked = self.payload_state.lock().unwrap();
+ // Only allow forward transitions, e.g. from starting to started or finished, not back in
+ // the other direction.
+ if new_state > *state_locked {
+ *state_locked = new_state;
+ Ok(())
+ } else {
+ bail!("Invalid payload state transition from {:?} to {:?}", *state_locked, new_state)
+ }
+ }
+
+ /// Kills the crosvm instance, if it is running.
pub fn kill(&self) {
- // TODO: Talk to crosvm to shutdown cleanly.
- if let Err(e) = self.child.kill() {
- error!("Error killing crosvm instance: {}", e);
+ let vm_state = &*self.vm_state.lock().unwrap();
+ if let VmState::Running { child } = vm_state {
+ let id = child.id();
+ debug!("Killing crosvm({})", id);
+ // TODO: Talk to crosvm to shutdown cleanly.
+ if let Err(e) = child.kill() {
+ error!("Error killing crosvm({}) instance: {}", id, e);
+ }
}
}
}
-/// Start an instance of `crosvm` to manage a new VM.
-fn run_vm(
- config: &CrosvmConfig,
- log_fd: Option<File>,
- composite_disk_fds: &[RawFd],
-) -> Result<SharedChild, Error> {
- validate_config(config)?;
+fn death_reason(result: &Result<ExitStatus, io::Error>, failure_reason: &str) -> DeathReason {
+ if let Ok(status) = result {
+ match failure_reason {
+ "PVM_FIRMWARE_PUBLIC_KEY_MISMATCH" => {
+ return DeathReason::PVM_FIRMWARE_PUBLIC_KEY_MISMATCH
+ }
+ "PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED" => {
+ return DeathReason::PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED
+ }
+ "BOOTLOADER_PUBLIC_KEY_MISMATCH" => return DeathReason::BOOTLOADER_PUBLIC_KEY_MISMATCH,
+ "BOOTLOADER_INSTANCE_IMAGE_CHANGED" => {
+ return DeathReason::BOOTLOADER_INSTANCE_IMAGE_CHANGED
+ }
+ _ => {}
+ }
+ match status.code() {
+ None => DeathReason::KILLED,
+ Some(0) => DeathReason::SHUTDOWN,
+ Some(CROSVM_ERROR_STATUS) => DeathReason::ERROR,
+ Some(CROSVM_REBOOT_STATUS) => DeathReason::REBOOT,
+ Some(CROSVM_CRASH_STATUS) => DeathReason::CRASH,
+ Some(_) => DeathReason::UNKNOWN,
+ }
+ } else {
+ DeathReason::INFRASTRUCTURE_ERROR
+ }
+}
+
+/// Starts an instance of `crosvm` to manage a new VM.
+fn run_vm(config: CrosvmConfig, failure_pipe_write: File) -> Result<SharedChild, Error> {
+ validate_config(&config)?;
let mut command = Command::new(CROSVM_PATH);
// TODO(qwandor): Remove --disable-sandbox.
- command.arg("run").arg("--disable-sandbox").arg("--cid").arg(config.cid.to_string());
+ command
+ .arg("--extended-status")
+ .arg("run")
+ .arg("--disable-sandbox")
+ .arg("--cid")
+ .arg(config.cid.to_string());
if config.protected {
command.arg("--protected-vm");
+
+ // 3 virtio-console devices + vsock = 4.
+ let virtio_pci_device_count = 4 + config.disks.len();
+ // crosvm virtio queue has 256 entries, so 2 MiB per device (2 pages per entry) should be
+ // enough.
+ let swiotlb_size_mib = 2 * virtio_pci_device_count;
+ command.arg("--swiotlb").arg(swiotlb_size_mib.to_string());
}
if let Some(memory_mib) = config.memory_mib {
command.arg("--mem").arg(memory_mib.to_string());
}
- if let Some(log_fd) = log_fd {
- command.stdout(log_fd);
- } else {
- // Ignore console output.
- command.arg("--serial=type=sink");
+ if let Some(cpus) = config.cpus {
+ command.arg("--cpus").arg(cpus.to_string());
+ }
+
+ if let Some(cpu_affinity) = config.cpu_affinity {
+ command.arg("--cpu-affinity").arg(cpu_affinity);
}
// Keep track of what file descriptors should be mapped to the crosvm process.
- let mut preserved_fds = composite_disk_fds.to_vec();
+ let mut preserved_fds = config.indirect_files.iter().map(|file| file.as_raw_fd()).collect();
+
+ // Setup the serial devices.
+ // 1. uart device: used as the output device by bootloaders and as early console by linux
+ // 2. uart device: used to report the reason for the VM failing.
+ // 3. virtio-console device: used as the console device where kmsg is redirected to
+ // 4. virtio-console device: used as the androidboot.console device (not used currently)
+ // 5. virtio-console device: used as the logcat output
+ //
+ // When [console|log]_fd is not specified, the devices are attached to sink, which means what's
+ // written there is discarded.
+ let console_arg = format_serial_arg(&mut preserved_fds, &config.console_fd);
+ let log_arg = format_serial_arg(&mut preserved_fds, &config.log_fd);
+ let failure_serial_path = add_preserved_fd(&mut preserved_fds, &failure_pipe_write);
+
+ // Warning: Adding more serial devices requires you to shift the PCI device ID of the boot
+ // disks in bootconfig.x86_64. This is because x86 crosvm puts serial devices and the block
+ // devices in the same PCI bus and serial devices comes before the block devices. Arm crosvm
+ // doesn't have the issue.
+ // /dev/ttyS0
+ command.arg(format!("--serial={},hardware=serial,num=1", &console_arg));
+ // /dev/ttyS1
+ command.arg(format!("--serial=type=file,path={},hardware=serial,num=2", &failure_serial_path));
+ // /dev/hvc0
+ command.arg(format!("--serial={},hardware=virtio-console,num=1", &console_arg));
+ // /dev/hvc1 (not used currently)
+ command.arg("--serial=type=sink,hardware=virtio-console,num=2");
+ // /dev/hvc2
+ command.arg(format!("--serial={},hardware=virtio-console,num=3", &log_arg));
if let Some(bootloader) = &config.bootloader {
command.arg("--bios").arg(add_preserved_fd(&mut preserved_fds, bootloader));
@@ -216,6 +384,7 @@
info!("Running {:?}", command);
let result = SharedChild::spawn(&mut command)?;
+ debug!("Spawned crosvm({}).", result.id());
Ok(result)
}
@@ -227,6 +396,16 @@
if config.bootloader.is_some() && (config.kernel.is_some() || config.initrd.is_some()) {
bail!("Can't have both bootloader and kernel/initrd image.");
}
+ let version = Version::parse(CROSVM_PLATFORM_VERSION).unwrap();
+ if !config.platform_version.matches(&version) {
+ bail!(
+ "Incompatible platform version. The config is compatible with platform version(s) \
+ {}, but the actual platform version is {}",
+ config.platform_version,
+ version
+ );
+ }
+
Ok(())
}
@@ -237,3 +416,22 @@
preserved_fds.push(fd);
format!("/proc/self/fd/{}", fd)
}
+
+/// Adds the file descriptor for `file` (if any) to `preserved_fds`, and returns the appropriate
+/// string for a crosvm `--serial` flag. If `file` is none, creates a dummy sink device.
+fn format_serial_arg(preserved_fds: &mut Vec<RawFd>, file: &Option<File>) -> String {
+ if let Some(file) = file {
+ format!("type=file,path={}", add_preserved_fd(preserved_fds, file))
+ } else {
+ "type=sink".to_string()
+ }
+}
+
+/// Creates a new pipe with the `O_CLOEXEC` flag set, and returns the read side and write side.
+fn create_pipe() -> Result<(File, File), Error> {
+ let (raw_read, raw_write) = pipe2(OFlag::O_CLOEXEC)?;
+ // SAFETY: We are the sole owners of these fds as they were just created.
+ let read_fd = unsafe { File::from_raw_fd(raw_read) };
+ let write_fd = unsafe { File::from_raw_fd(raw_write) };
+ Ok((read_fd, write_fd))
+}
diff --git a/virtualizationservice/src/gpt.rs b/virtualizationservice/src/gpt.rs
deleted file mode 100644
index 346a40a..0000000
--- a/virtualizationservice/src/gpt.rs
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Functions for writing GUID Partition Tables for use in a composite disk image.
-
-use anyhow::Error;
-use crc32fast::Hasher;
-use std::convert::TryInto;
-use std::io::Write;
-use uuid::Uuid;
-
-/// The size in bytes of a disk sector (also called a block).
-pub const SECTOR_SIZE: u64 = 1 << 9;
-/// The size in bytes on an MBR partition entry.
-const MBR_PARTITION_ENTRY_SIZE: usize = 16;
-/// The size in bytes of a GPT header.
-pub const GPT_HEADER_SIZE: u32 = 92;
-/// The number of partition entries in the GPT, which is the maximum number of partitions which are
-/// supported.
-pub const GPT_NUM_PARTITIONS: u32 = 128;
-/// The size in bytes of a single GPT partition entry.
-pub const GPT_PARTITION_ENTRY_SIZE: u32 = 128;
-/// The size in bytes of everything before the first partition: i.e. the MBR, GPT header and GPT
-/// partition entries.
-pub const GPT_BEGINNING_SIZE: u64 = SECTOR_SIZE * 40;
-/// The size in bytes of everything after the last partition: i.e. the GPT partition entries and GPT
-/// footer.
-pub const GPT_END_SIZE: u64 = SECTOR_SIZE * 33;
-
-/// Write a protective MBR for a disk of the given total size (in bytes).
-///
-/// This should be written at the start of the disk, before the GPT header. It
-/// is one `SECTOR_SIZE` long.
-pub fn write_protective_mbr(file: &mut impl Write, disk_size: u64) -> Result<(), Error> {
- // Bootstrap code
- file.write_all(&[0; 446])?;
-
- // Partition status
- file.write_all(&[0x00])?;
- // Begin CHS
- file.write_all(&[0; 3])?;
- // Partition type
- file.write_all(&[0xEE])?;
- // End CHS
- file.write_all(&[0; 3])?;
- let first_lba: u32 = 1;
- file.write_all(&first_lba.to_le_bytes())?;
- let number_of_sectors: u32 = (disk_size / SECTOR_SIZE).try_into()?;
- file.write_all(&number_of_sectors.to_le_bytes())?;
-
- // Three more empty partitions
- file.write_all(&[0; MBR_PARTITION_ENTRY_SIZE * 3])?;
-
- // Boot signature
- file.write_all(&[0x55, 0xAA])?;
-
- Ok(())
-}
-
-#[derive(Clone, Debug, Default, Eq, PartialEq)]
-struct GptHeader {
- signature: [u8; 8],
- revision: [u8; 4],
- header_size: u32,
- header_crc32: u32,
- current_lba: u64,
- backup_lba: u64,
- first_usable_lba: u64,
- last_usable_lba: u64,
- disk_guid: Uuid,
- partition_entries_lba: u64,
- num_partition_entries: u32,
- partition_entry_size: u32,
- partition_entries_crc32: u32,
-}
-
-impl GptHeader {
- fn write_bytes(&self, out: &mut impl Write) -> Result<(), Error> {
- out.write_all(&self.signature)?;
- out.write_all(&self.revision)?;
- out.write_all(&self.header_size.to_le_bytes())?;
- out.write_all(&self.header_crc32.to_le_bytes())?;
- // Reserved
- out.write_all(&[0; 4])?;
- out.write_all(&self.current_lba.to_le_bytes())?;
- out.write_all(&self.backup_lba.to_le_bytes())?;
- out.write_all(&self.first_usable_lba.to_le_bytes())?;
- out.write_all(&self.last_usable_lba.to_le_bytes())?;
-
- // GUID is mixed-endian for some reason, so we can't just use `Uuid::as_bytes()`.
- write_guid(out, self.disk_guid)?;
-
- out.write_all(&self.partition_entries_lba.to_le_bytes())?;
- out.write_all(&self.num_partition_entries.to_le_bytes())?;
- out.write_all(&self.partition_entry_size.to_le_bytes())?;
- out.write_all(&self.partition_entries_crc32.to_le_bytes())?;
- Ok(())
- }
-}
-
-/// Write a GPT header for the disk.
-///
-/// It may either be a primary header (which should go at LBA 1) or a secondary header (which should
-/// go at the end of the disk).
-pub fn write_gpt_header(
- out: &mut impl Write,
- disk_guid: Uuid,
- partition_entries_crc32: u32,
- secondary_table_offset: u64,
- secondary: bool,
-) -> Result<(), Error> {
- let primary_header_lba = 1;
- let secondary_header_lba = (secondary_table_offset + GPT_END_SIZE) / SECTOR_SIZE - 1;
- let mut gpt_header = GptHeader {
- signature: *b"EFI PART",
- revision: [0, 0, 1, 0],
- header_size: GPT_HEADER_SIZE,
- current_lba: if secondary { secondary_header_lba } else { primary_header_lba },
- backup_lba: if secondary { primary_header_lba } else { secondary_header_lba },
- first_usable_lba: GPT_BEGINNING_SIZE / SECTOR_SIZE,
- last_usable_lba: secondary_table_offset / SECTOR_SIZE - 1,
- disk_guid,
- partition_entries_lba: 2,
- num_partition_entries: GPT_NUM_PARTITIONS,
- partition_entry_size: GPT_PARTITION_ENTRY_SIZE,
- partition_entries_crc32,
- header_crc32: 0,
- };
-
- // Write once to a temporary buffer to calculate the CRC.
- let mut header_without_crc = [0u8; GPT_HEADER_SIZE as usize];
- gpt_header.write_bytes(&mut &mut header_without_crc[..])?;
- let mut hasher = Hasher::new();
- hasher.update(&header_without_crc);
- gpt_header.header_crc32 = hasher.finalize();
-
- gpt_header.write_bytes(out)?;
-
- Ok(())
-}
-
-/// A GPT entry for a particular partition.
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct GptPartitionEntry {
- pub partition_type_guid: Uuid,
- pub unique_partition_guid: Uuid,
- pub first_lba: u64,
- pub last_lba: u64,
- pub attributes: u64,
- /// UTF-16LE
- pub partition_name: [u16; 36],
-}
-
-// TODO: Derive this once arrays of more than 32 elements have default values.
-impl Default for GptPartitionEntry {
- fn default() -> Self {
- Self {
- partition_type_guid: Default::default(),
- unique_partition_guid: Default::default(),
- first_lba: 0,
- last_lba: 0,
- attributes: 0,
- partition_name: [0; 36],
- }
- }
-}
-
-impl GptPartitionEntry {
- /// Write out the partition table entry. It will take
- /// `GPT_PARTITION_ENTRY_SIZE` bytes.
- pub fn write_bytes(&self, out: &mut impl Write) -> Result<(), Error> {
- write_guid(out, self.partition_type_guid)?;
- write_guid(out, self.unique_partition_guid)?;
- out.write_all(&self.first_lba.to_le_bytes())?;
- out.write_all(&self.last_lba.to_le_bytes())?;
- out.write_all(&self.attributes.to_le_bytes())?;
- for code_unit in &self.partition_name {
- out.write_all(&code_unit.to_le_bytes())?;
- }
- Ok(())
- }
-}
-
-/// Write a UUID in the mixed-endian format which GPT uses for GUIDs.
-fn write_guid(out: &mut impl Write, guid: Uuid) -> Result<(), Error> {
- let guid_fields = guid.as_fields();
- out.write_all(&guid_fields.0.to_le_bytes())?;
- out.write_all(&guid_fields.1.to_le_bytes())?;
- out.write_all(&guid_fields.2.to_le_bytes())?;
- out.write_all(guid_fields.3)?;
-
- Ok(())
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn protective_mbr_size() {
- let mut buffer = vec![];
- write_protective_mbr(&mut buffer, 1000 * SECTOR_SIZE).unwrap();
-
- assert_eq!(buffer.len(), SECTOR_SIZE as usize);
- }
-
- #[test]
- fn header_size() {
- let mut buffer = vec![];
- write_gpt_header(
- &mut buffer,
- Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
- 42,
- 1000 * SECTOR_SIZE,
- false,
- )
- .unwrap();
-
- assert_eq!(buffer.len(), GPT_HEADER_SIZE as usize);
- }
-
- #[test]
- fn partition_entry_size() {
- let mut buffer = vec![];
- GptPartitionEntry::default().write_bytes(&mut buffer).unwrap();
-
- assert_eq!(buffer.len(), GPT_PARTITION_ENTRY_SIZE as usize);
- }
-}
diff --git a/virtualizationservice/src/main.rs b/virtualizationservice/src/main.rs
index c9cc029..7bfb531 100644
--- a/virtualizationservice/src/main.rs
+++ b/virtualizationservice/src/main.rs
@@ -17,12 +17,12 @@
mod aidl;
mod composite;
mod crosvm;
-mod gpt;
mod payload;
+mod selinux;
use crate::aidl::{VirtualizationService, BINDER_SERVICE_IDENTIFIER, TEMPORARY_DIRECTORY};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualizationService::BnVirtualizationService;
-use android_system_virtualizationservice::binder::{add_service, BinderFeatures, ProcessState};
+use android_system_virtualizationservice::binder::{register_lazy_service, BinderFeatures, ProcessState};
use anyhow::Error;
use log::{info, Level};
use std::fs::{remove_dir_all, remove_file, read_dir};
@@ -31,6 +31,8 @@
/// are reserved for the host or other usage.
const FIRST_GUEST_CID: Cid = 10;
+const SYSPROP_LAST_CID: &str = "virtualizationservice.state.last_cid";
+
const LOG_TAG: &str = "VirtualizationService";
/// The unique ID of a VM used (together with a port number) for vsock communication.
@@ -38,7 +40,10 @@
fn main() {
android_logger::init_once(
- android_logger::Config::default().with_tag(LOG_TAG).with_min_level(Level::Trace),
+ android_logger::Config::default()
+ .with_tag(LOG_TAG)
+ .with_min_level(Level::Info)
+ .with_log_id(android_logger::LogId::System),
);
clear_temporary_files().expect("Failed to delete old temporary files");
@@ -48,7 +53,7 @@
service,
BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
);
- add_service(BINDER_SERVICE_IDENTIFIER, service.as_binder()).unwrap();
+ register_lazy_service(BINDER_SERVICE_IDENTIFIER, service.as_binder()).unwrap();
info!("Registered Binder service, joining threadpool.");
ProcessState::join_thread_pool();
}
@@ -66,3 +71,12 @@
}
Ok(())
}
+
+#[cfg(test)]
+mod tests {
+ /// We need to have at least one test to avoid errors running the test suite, so this is a
+ /// placeholder until we have real tests.
+ #[test]
+ #[ignore]
+ fn placeholder() {}
+}
diff --git a/virtualizationservice/src/payload.rs b/virtualizationservice/src/payload.rs
index a176e71..7b8cb7f 100644
--- a/virtualizationservice/src/payload.rs
+++ b/virtualizationservice/src/payload.rs
@@ -15,40 +15,66 @@
//! Payload disk image
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
- DiskImage::DiskImage, Partition::Partition, VirtualMachineAppConfig::VirtualMachineAppConfig,
+ DiskImage::DiskImage, Partition::Partition, VirtualMachineAppConfig::DebugLevel::DebugLevel,
+ VirtualMachineAppConfig::VirtualMachineAppConfig,
VirtualMachineRawConfig::VirtualMachineRawConfig,
};
use android_system_virtualizationservice::binder::ParcelFileDescriptor;
-use anyhow::{anyhow, Context, Result};
+use anyhow::{anyhow, bail, Context, Result};
+use binder::wait_for_interface;
+use log::{info, warn};
use microdroid_metadata::{ApexPayload, ApkPayload, Metadata};
-use microdroid_payload_config::ApexConfig;
+use microdroid_payload_config::{ApexConfig, VmPayloadConfig};
use once_cell::sync::OnceCell;
+use packagemanager_aidl::aidl::android::content::pm::IPackageManagerNative::IPackageManagerNative;
+use regex::Regex;
use serde::Deserialize;
use serde_xml_rs::from_reader;
-use std::fs::{File, OpenOptions};
+use std::collections::HashSet;
+use std::fs::{metadata, File, OpenOptions};
use std::path::{Path, PathBuf};
+use std::process::Command;
+use std::time::SystemTime;
use vmconfig::open_parcel_file;
/// The list of APEXes which microdroid requires.
// TODO(b/192200378) move this to microdroid.json?
-const MICRODROID_REQUIRED_APEXES: [&str; 3] =
- ["com.android.adbd", "com.android.i18n", "com.android.os.statsd"];
+const MICRODROID_REQUIRED_APEXES: [&str; 1] = ["com.android.os.statsd"];
+const MICRODROID_REQUIRED_APEXES_DEBUG: [&str; 1] = ["com.android.adbd"];
const APEX_INFO_LIST_PATH: &str = "/apex/apex-info-list.xml";
+const PACKAGE_MANAGER_NATIVE_SERVICE: &str = "package_native";
+
/// Represents the list of APEXes
-#[derive(Debug, Deserialize)]
+#[derive(Clone, Debug, Deserialize)]
struct ApexInfoList {
#[serde(rename = "apex-info")]
list: Vec<ApexInfo>,
}
-#[derive(Debug, Deserialize)]
+#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq)]
struct ApexInfo {
#[serde(rename = "moduleName")]
name: String,
#[serde(rename = "modulePath")]
path: PathBuf,
+
+ #[serde(default)]
+ has_classpath_jar: bool,
+
+ // The field claims to be milliseconds but is actually seconds.
+ #[serde(rename = "lastUpdateMillis")]
+ last_update_seconds: u64,
+
+ #[serde(rename = "isFactory")]
+ is_factory: bool,
+
+ #[serde(rename = "isActive")]
+ is_active: bool,
+
+ #[serde(rename = "provideSharedApexLibs")]
+ provide_shared_apex_libs: bool,
}
impl ApexInfoList {
@@ -58,35 +84,96 @@
INSTANCE.get_or_try_init(|| {
let apex_info_list = File::open(APEX_INFO_LIST_PATH)
.context(format!("Failed to open {}", APEX_INFO_LIST_PATH))?;
- let apex_info_list: ApexInfoList = from_reader(apex_info_list)
+ let mut apex_info_list: ApexInfoList = from_reader(apex_info_list)
.context(format!("Failed to parse {}", APEX_INFO_LIST_PATH))?;
+
+ // For active APEXes, we run derive_classpath and parse its output to see if it
+ // contributes to the classpath(s). (This allows us to handle any new classpath env
+ // vars seamlessly.)
+ let classpath_vars = run_derive_classpath()?;
+ let classpath_apexes = find_apex_names_in_classpath(&classpath_vars)?;
+
+ for apex_info in apex_info_list.list.iter_mut() {
+ apex_info.has_classpath_jar = classpath_apexes.contains(&apex_info.name);
+ }
+
Ok(apex_info_list)
})
}
+}
- fn get_path_for(&self, apex_name: &str) -> Result<PathBuf> {
- Ok(self
- .list
- .iter()
- .find(|apex| apex.name == apex_name)
- .ok_or_else(|| anyhow!("{} not found.", apex_name))?
- .path
- .clone())
+impl ApexInfo {
+ fn matches(&self, apex_config: &ApexConfig) -> bool {
+ // Match with pseudo name "{CLASSPATH}" which represents APEXes contributing
+ // to any derive_classpath environment variable
+ if apex_config.name == "{CLASSPATH}" && self.has_classpath_jar {
+ return true;
+ }
+ if apex_config.name == self.name {
+ return true;
+ }
+ false
+ }
+}
+
+struct PackageManager {
+ apex_info_list: &'static ApexInfoList,
+}
+
+impl PackageManager {
+ fn new() -> Result<Self> {
+ let apex_info_list = ApexInfoList::load()?;
+ Ok(Self { apex_info_list })
+ }
+
+ fn get_apex_list(&self, prefer_staged: bool) -> Result<ApexInfoList> {
+ // get the list of active apexes
+ let mut list = self.apex_info_list.clone();
+ // When prefer_staged, we override ApexInfo by consulting "package_native"
+ if prefer_staged {
+ let pm =
+ wait_for_interface::<dyn IPackageManagerNative>(PACKAGE_MANAGER_NATIVE_SERVICE)
+ .context("Failed to get service when prefer_staged is set.")?;
+ let staged = pm.getStagedApexModuleNames()?;
+ for apex_info in list.list.iter_mut() {
+ if staged.contains(&apex_info.name) {
+ if let Some(staged_apex_info) = pm.getStagedApexInfo(&apex_info.name)? {
+ apex_info.path = PathBuf::from(staged_apex_info.diskImagePath);
+ apex_info.has_classpath_jar = staged_apex_info.hasClassPathJars;
+ let metadata = metadata(&apex_info.path)?;
+ apex_info.last_update_seconds =
+ metadata.modified()?.duration_since(SystemTime::UNIX_EPOCH)?.as_secs();
+ // by definition, staged apex can't be a factory apex.
+ apex_info.is_factory = false;
+ }
+ }
+ }
+ }
+ Ok(list)
}
}
fn make_metadata_file(
config_path: &str,
- apexes: &[ApexConfig],
+ apex_infos: &[&ApexInfo],
temporary_directory: &Path,
) -> Result<ParcelFileDescriptor> {
let metadata_path = temporary_directory.join("metadata");
let metadata = Metadata {
version: 1,
- apexes: apexes
+ apexes: apex_infos
.iter()
- .map(|apex| ApexPayload { name: apex.name.clone(), ..Default::default() })
- .collect(),
+ .enumerate()
+ .map(|(i, apex_info)| {
+ Ok(ApexPayload {
+ name: apex_info.name.clone(),
+ partition_name: format!("microdroid-apex-{}", i),
+ last_update_seconds: apex_info.last_update_seconds,
+ is_factory: apex_info.is_factory,
+ ..Default::default()
+ })
+ })
+ .collect::<Result<_>>()?,
apk: Some(ApkPayload {
name: "apk".to_owned(),
payload_partition_name: "microdroid-apk".to_owned(),
@@ -118,84 +205,328 @@
/// ..
/// microdroid-apk: apk
/// microdroid-apk-idsig: idsig
+/// extra-apk-0: additional apk 0
+/// extra-idsig-0: additional idsig 0
+/// extra-apk-1: additional apk 1
+/// extra-idsig-1: additional idsig 1
+/// ..
fn make_payload_disk(
+ app_config: &VirtualMachineAppConfig,
apk_file: File,
idsig_file: File,
- config_path: &str,
- apexes: &[ApexConfig],
+ vm_payload_config: &VmPayloadConfig,
temporary_directory: &Path,
) -> Result<DiskImage> {
- let metadata_file = make_metadata_file(config_path, apexes, temporary_directory)?;
+ if vm_payload_config.extra_apks.len() != app_config.extraIdsigs.len() {
+ bail!(
+ "payload config has {} apks, but app config has {} idsigs",
+ vm_payload_config.extra_apks.len(),
+ app_config.extraIdsigs.len()
+ );
+ }
+
+ let pm = PackageManager::new()?;
+ let apex_list = pm.get_apex_list(vm_payload_config.prefer_staged)?;
+
+ // collect APEXes from config
+ let apex_infos =
+ collect_apex_infos(&apex_list, &vm_payload_config.apexes, app_config.debugLevel);
+ info!("Microdroid payload APEXes: {:?}", apex_infos.iter().map(|ai| &ai.name));
+
+ let metadata_file =
+ make_metadata_file(&app_config.configPath, &apex_infos, temporary_directory)?;
// put metadata at the first partition
let mut partitions = vec![Partition {
label: "payload-metadata".to_owned(),
- images: vec![metadata_file],
+ image: Some(metadata_file),
writable: false,
}];
- let apex_info_list = ApexInfoList::load()?;
- for (i, apex) in apexes.iter().enumerate() {
- let apex_path = apex_info_list.get_path_for(&apex.name)?;
- let apex_file = open_parcel_file(&apex_path, false)?;
+ for (i, apex_info) in apex_infos.iter().enumerate() {
+ let apex_file = open_parcel_file(&apex_info.path, false)?;
partitions.push(Partition {
label: format!("microdroid-apex-{}", i),
- images: vec![apex_file],
+ image: Some(apex_file),
writable: false,
});
}
partitions.push(Partition {
label: "microdroid-apk".to_owned(),
- images: vec![ParcelFileDescriptor::new(apk_file)],
+ image: Some(ParcelFileDescriptor::new(apk_file)),
writable: false,
});
partitions.push(Partition {
label: "microdroid-apk-idsig".to_owned(),
- images: vec![ParcelFileDescriptor::new(idsig_file)],
+ image: Some(ParcelFileDescriptor::new(idsig_file)),
writable: false,
});
+ // we've already checked that extra_apks and extraIdsigs are in the same size.
+ let extra_apks = &vm_payload_config.extra_apks;
+ let extra_idsigs = &app_config.extraIdsigs;
+ for (i, (extra_apk, extra_idsig)) in extra_apks.iter().zip(extra_idsigs.iter()).enumerate() {
+ partitions.push(Partition {
+ label: format!("extra-apk-{}", i),
+ image: Some(ParcelFileDescriptor::new(File::open(PathBuf::from(&extra_apk.path))?)),
+ writable: false,
+ });
+
+ partitions.push(Partition {
+ label: format!("extra-idsig-{}", i),
+ image: Some(ParcelFileDescriptor::new(extra_idsig.as_ref().try_clone()?)),
+ writable: false,
+ });
+ }
+
Ok(DiskImage { image: None, partitions, writable: false })
}
+fn run_derive_classpath() -> Result<String> {
+ let result = Command::new("/apex/com.android.sdkext/bin/derive_classpath")
+ .arg("/proc/self/fd/1")
+ .output()
+ .context("Failed to run derive_classpath")?;
+
+ if !result.status.success() {
+ bail!("derive_classpath returned {}", result.status);
+ }
+
+ String::from_utf8(result.stdout).context("Converting derive_classpath output")
+}
+
+fn find_apex_names_in_classpath(classpath_vars: &str) -> Result<HashSet<String>> {
+ // Each line should be in the format "export <var name> <paths>", where <paths> is a
+ // colon-separated list of paths to JARs. We don't care about the var names, and we're only
+ // interested in paths that look like "/apex/<apex name>/<anything>" so we know which APEXes
+ // contribute to at least one var.
+ let mut apexes = HashSet::new();
+
+ let pattern = Regex::new(r"^export [^ ]+ ([^ ]+)$").context("Failed to construct Regex")?;
+ for line in classpath_vars.lines() {
+ if let Some(captures) = pattern.captures(line) {
+ if let Some(paths) = captures.get(1) {
+ apexes.extend(paths.as_str().split(':').filter_map(|path| {
+ let path = path.strip_prefix("/apex/")?;
+ Some(path[..path.find('/')?].to_owned())
+ }));
+ continue;
+ }
+ }
+ warn!("Malformed line from derive_classpath: {}", line);
+ }
+
+ Ok(apexes)
+}
+
+// Collect ApexInfos from VM config
+fn collect_apex_infos<'a>(
+ apex_list: &'a ApexInfoList,
+ apex_configs: &[ApexConfig],
+ debug_level: DebugLevel,
+) -> Vec<&'a ApexInfo> {
+ let mut additional_apexes: Vec<&str> = MICRODROID_REQUIRED_APEXES.to_vec();
+ if debug_level != DebugLevel::NONE {
+ additional_apexes.extend(MICRODROID_REQUIRED_APEXES_DEBUG.to_vec());
+ }
+
+ apex_list
+ .list
+ .iter()
+ .filter(|ai| {
+ apex_configs.iter().any(|cfg| ai.matches(cfg) && ai.is_active)
+ || additional_apexes.iter().any(|name| name == &ai.name && ai.is_active)
+ || ai.provide_shared_apex_libs
+ })
+ .collect()
+}
+
pub fn add_microdroid_images(
config: &VirtualMachineAppConfig,
temporary_directory: &Path,
apk_file: File,
idsig_file: File,
instance_file: File,
- mut apexes: Vec<ApexConfig>,
+ vm_payload_config: &VmPayloadConfig,
vm_config: &mut VirtualMachineRawConfig,
) -> Result<()> {
- apexes.extend(
- MICRODROID_REQUIRED_APEXES.iter().map(|name| ApexConfig { name: name.to_string() }),
- );
- apexes.dedup_by(|a, b| a.name == b.name);
-
vm_config.disks.push(make_payload_disk(
+ config,
apk_file,
idsig_file,
- &config.configPath,
- &apexes,
+ vm_payload_config,
temporary_directory,
)?);
- if config.debug {
- vm_config.disks[1].partitions.push(Partition {
- label: "bootconfig".to_owned(),
- images: vec![open_parcel_file(
- Path::new("/apex/com.android.virt/etc/microdroid_bootconfig.debug"),
- false,
- )?],
- writable: false,
- });
- }
+ vm_config.disks[1].partitions.push(Partition {
+ label: "vbmeta".to_owned(),
+ image: Some(open_parcel_file(
+ Path::new("/apex/com.android.virt/etc/fs/microdroid_vbmeta_bootconfig.img"),
+ false,
+ )?),
+ writable: false,
+ });
+ let bootconfig_image = "/apex/com.android.virt/etc/microdroid_bootconfig.".to_owned()
+ + match config.debugLevel {
+ DebugLevel::NONE => "normal",
+ DebugLevel::APP_ONLY => "app_debuggable",
+ DebugLevel::FULL => "full_debuggable",
+ _ => return Err(anyhow!("unsupported debug level: {:?}", config.debugLevel)),
+ };
+ vm_config.disks[1].partitions.push(Partition {
+ label: "bootconfig".to_owned(),
+ image: Some(open_parcel_file(Path::new(&bootconfig_image), false)?),
+ writable: false,
+ });
// instance image is at the second partition in the second disk.
vm_config.disks[1].partitions.push(Partition {
label: "vm-instance".to_owned(),
- images: vec![ParcelFileDescriptor::new(instance_file)],
+ image: Some(ParcelFileDescriptor::new(instance_file)),
writable: true,
});
Ok(())
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_find_apex_names_in_classpath() {
+ let vars = r#"
+export FOO /apex/unterminated
+export BAR /apex/valid.apex/something
+wrong
+export EMPTY
+export OTHER /foo/bar:/baz:/apex/second.valid.apex/:gibberish:"#;
+ let expected = vec!["valid.apex", "second.valid.apex"];
+ let expected: HashSet<_> = expected.into_iter().map(ToString::to_string).collect();
+
+ assert_eq!(find_apex_names_in_classpath(vars).unwrap(), expected);
+ }
+
+ #[test]
+ fn test_collect_apexes() {
+ let apex_info_list = ApexInfoList {
+ list: vec![
+ ApexInfo {
+ // 0
+ name: "com.android.adbd".to_string(),
+ path: PathBuf::from("adbd"),
+ has_classpath_jar: false,
+ last_update_seconds: 12345678,
+ is_factory: true,
+ is_active: true,
+ ..Default::default()
+ },
+ ApexInfo {
+ // 1
+ name: "com.android.os.statsd".to_string(),
+ path: PathBuf::from("statsd"),
+ has_classpath_jar: false,
+ last_update_seconds: 12345678,
+ is_factory: true,
+ is_active: false,
+ ..Default::default()
+ },
+ ApexInfo {
+ // 2
+ name: "com.android.os.statsd".to_string(),
+ path: PathBuf::from("statsd/updated"),
+ has_classpath_jar: false,
+ last_update_seconds: 12345678 + 1,
+ is_factory: false,
+ is_active: true,
+ ..Default::default()
+ },
+ ApexInfo {
+ // 3
+ name: "no_classpath".to_string(),
+ path: PathBuf::from("no_classpath"),
+ has_classpath_jar: false,
+ last_update_seconds: 12345678,
+ is_factory: true,
+ is_active: true,
+ ..Default::default()
+ },
+ ApexInfo {
+ // 4
+ name: "has_classpath".to_string(),
+ path: PathBuf::from("has_classpath"),
+ has_classpath_jar: true,
+ last_update_seconds: 87654321,
+ is_factory: true,
+ is_active: false,
+ ..Default::default()
+ },
+ ApexInfo {
+ // 5
+ name: "has_classpath".to_string(),
+ path: PathBuf::from("has_classpath/updated"),
+ has_classpath_jar: true,
+ last_update_seconds: 87654321 + 1,
+ is_factory: false,
+ is_active: true,
+ ..Default::default()
+ },
+ ApexInfo {
+ // 6
+ name: "apex-foo".to_string(),
+ path: PathBuf::from("apex-foo"),
+ has_classpath_jar: false,
+ last_update_seconds: 87654321,
+ is_factory: true,
+ is_active: false,
+ ..Default::default()
+ },
+ ApexInfo {
+ // 7
+ name: "apex-foo".to_string(),
+ path: PathBuf::from("apex-foo/updated"),
+ has_classpath_jar: false,
+ last_update_seconds: 87654321 + 1,
+ is_factory: false,
+ is_active: true,
+ ..Default::default()
+ },
+ ApexInfo {
+ // 8
+ name: "sharedlibs".to_string(),
+ path: PathBuf::from("apex-foo"),
+ last_update_seconds: 87654321,
+ is_factory: true,
+ provide_shared_apex_libs: true,
+ ..Default::default()
+ },
+ ApexInfo {
+ // 9
+ name: "sharedlibs".to_string(),
+ path: PathBuf::from("apex-foo/updated"),
+ last_update_seconds: 87654321 + 1,
+ is_active: true,
+ provide_shared_apex_libs: true,
+ ..Default::default()
+ },
+ ],
+ };
+ let apex_configs = vec![
+ ApexConfig { name: "apex-foo".to_string() },
+ ApexConfig { name: "{CLASSPATH}".to_string() },
+ ];
+ assert_eq!(
+ collect_apex_infos(&apex_info_list, &apex_configs, DebugLevel::FULL),
+ vec![
+ // Pass active/required APEXes
+ &apex_info_list.list[0],
+ &apex_info_list.list[2],
+ // Pass active APEXes specified in the config
+ &apex_info_list.list[5],
+ &apex_info_list.list[7],
+ // Pass both preinstalled(inactive) and updated(active) for "sharedlibs" APEXes
+ &apex_info_list.list[8],
+ &apex_info_list.list[9],
+ ]
+ );
+ }
+}
diff --git a/virtualizationservice/src/selinux.rs b/virtualizationservice/src/selinux.rs
new file mode 100644
index 0000000..e450dee
--- /dev/null
+++ b/virtualizationservice/src/selinux.rs
@@ -0,0 +1,104 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Wrapper to libselinux
+
+use anyhow::{anyhow, Context, Result};
+use std::ffi::{CStr, CString};
+use std::fmt;
+use std::fs::File;
+use std::io;
+use std::ops::Deref;
+use std::os::raw::c_char;
+use std::os::unix::io::AsRawFd;
+use std::ptr;
+
+// Partially copied from system/security/keystore2/selinux/src/lib.rs
+/// SeContext represents an SELinux context string. It can take ownership of a raw
+/// s-string as allocated by `getcon` or `selabel_lookup`. In this case it uses
+/// `freecon` to free the resources when dropped. In its second variant it stores
+/// an `std::ffi::CString` that can be initialized from a Rust string slice.
+#[derive(Debug)]
+pub enum SeContext {
+ /// Wraps a raw context c-string as returned by libselinux.
+ Raw(*mut ::std::os::raw::c_char),
+ /// Stores a context string as `std::ffi::CString`.
+ CString(CString),
+}
+
+impl PartialEq for SeContext {
+ fn eq(&self, other: &Self) -> bool {
+ // We dereference both and thereby delegate the comparison
+ // to `CStr`'s implementation of `PartialEq`.
+ **self == **other
+ }
+}
+
+impl Eq for SeContext {}
+
+impl fmt::Display for SeContext {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.to_str().unwrap_or("Invalid context"))
+ }
+}
+
+impl Drop for SeContext {
+ fn drop(&mut self) {
+ if let Self::Raw(p) = self {
+ // SAFETY: SeContext::Raw is created only with a pointer that is set by libselinux and
+ // has to be freed with freecon.
+ unsafe { selinux_bindgen::freecon(*p) };
+ }
+ }
+}
+
+impl Deref for SeContext {
+ type Target = CStr;
+
+ fn deref(&self) -> &Self::Target {
+ match self {
+ // SAFETY: the non-owned C string pointed by `p` is guaranteed to be valid (non-null
+ // and shorter than i32::MAX). It is freed when SeContext is dropped.
+ Self::Raw(p) => unsafe { CStr::from_ptr(*p) },
+ Self::CString(cstr) => cstr,
+ }
+ }
+}
+
+impl SeContext {
+ /// Initializes the `SeContext::CString` variant from a Rust string slice.
+ pub fn new(con: &str) -> Result<Self> {
+ Ok(Self::CString(
+ CString::new(con)
+ .with_context(|| format!("Failed to create SeContext with \"{}\"", con))?,
+ ))
+ }
+}
+
+pub fn getfilecon(file: &File) -> Result<SeContext> {
+ let fd = file.as_raw_fd();
+ let mut con: *mut c_char = ptr::null_mut();
+ // SAFETY: the returned pointer `con` is wrapped in SeContext::Raw which is freed with
+ // `freecon` when it is dropped.
+ match unsafe { selinux_bindgen::fgetfilecon(fd, &mut con) } {
+ 1.. => {
+ if !con.is_null() {
+ Ok(SeContext::Raw(con))
+ } else {
+ Err(anyhow!("fgetfilecon returned a NULL context"))
+ }
+ }
+ _ => Err(anyhow!(io::Error::last_os_error())).context("fgetfilecon failed"),
+ }
+}
diff --git a/vm/Android.bp b/vm/Android.bp
index 734f2d3..d1d53d0 100644
--- a/vm/Android.bp
+++ b/vm/Android.bp
@@ -7,16 +7,20 @@
crate_name: "vm",
srcs: ["src/main.rs"],
edition: "2018",
+ prefer_rlib: true,
rustlibs: [
"android.system.virtualizationservice-rust",
"libanyhow",
"libenv_logger",
"liblibc",
"liblog_rust",
+ "libmicrodroid_payload_config",
+ "librustutils",
"libserde_json",
"libserde",
"libstructopt",
"libvmconfig",
+ "libzip",
],
apex_available: [
"com.android.virt",
diff --git a/vm/src/create_idsig.rs b/vm/src/create_idsig.rs
new file mode 100644
index 0000000..a0d64d5
--- /dev/null
+++ b/vm/src/create_idsig.rs
@@ -0,0 +1,44 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Command to create or update an idsig for APK
+
+use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualizationService::IVirtualizationService;
+use android_system_virtualizationservice::binder::{ParcelFileDescriptor, Strong};
+use anyhow::{Context, Error};
+use std::fs::{File, OpenOptions};
+use std::path::Path;
+
+/// Creates or update the idsig file by digesting the input APK file.
+pub fn command_create_idsig(
+ service: Strong<dyn IVirtualizationService>,
+ apk: &Path,
+ idsig: &Path,
+) -> Result<(), Error> {
+ let apk_file = File::open(apk).with_context(|| format!("Failed to open {:?}", apk))?;
+ let idsig_file = OpenOptions::new()
+ .create(true)
+ .truncate(true)
+ .read(true)
+ .write(true)
+ .open(idsig)
+ .with_context(|| format!("Failed to create/open {:?}", idsig))?;
+ service
+ .createOrUpdateIdsigFile(
+ &ParcelFileDescriptor::new(apk_file),
+ &ParcelFileDescriptor::new(idsig_file),
+ )
+ .with_context(|| format!("Failed to create/update idsig for {:?}", apk))?;
+ Ok(())
+}
diff --git a/vm/src/create_partition.rs b/vm/src/create_partition.rs
index acebbf2..22f7bea 100644
--- a/vm/src/create_partition.rs
+++ b/vm/src/create_partition.rs
@@ -15,6 +15,7 @@
//! Command to create an empty partition
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualizationService::IVirtualizationService;
+use android_system_virtualizationservice::aidl::android::system::virtualizationservice::PartitionType::PartitionType;
use android_system_virtualizationservice::binder::{ParcelFileDescriptor, Strong};
use anyhow::{Context, Error};
use std::convert::TryInto;
@@ -26,6 +27,7 @@
service: Strong<dyn IVirtualizationService>,
image_path: &Path,
size: u64,
+ partition_type: PartitionType,
) -> Result<(), Error> {
let image = OpenOptions::new()
.create_new(true)
@@ -34,7 +36,14 @@
.open(image_path)
.with_context(|| format!("Failed to create {:?}", image_path))?;
service
- .initializeWritablePartition(&ParcelFileDescriptor::new(image), size.try_into()?)
- .context("Failed to initialize partition with size {}, size")?;
+ .initializeWritablePartition(
+ &ParcelFileDescriptor::new(image),
+ size.try_into()?,
+ partition_type,
+ )
+ .context(format!(
+ "Failed to initialize partition type: {:?}, size: {}",
+ partition_type, size
+ ))?;
Ok(())
}
diff --git a/vm/src/main.rs b/vm/src/main.rs
index 09f11d5..80ea9be 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -14,22 +14,31 @@
//! Android VM control tool.
+mod create_idsig;
mod create_partition;
mod run;
mod sync;
-use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualizationService::IVirtualizationService;
+use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
+ IVirtualizationService::IVirtualizationService, PartitionType::PartitionType,
+ VirtualMachineAppConfig::DebugLevel::DebugLevel,
+};
use android_system_virtualizationservice::binder::{wait_for_interface, ProcessState, Strong};
use anyhow::{Context, Error};
+use create_idsig::command_create_idsig;
use create_partition::command_create_partition;
use run::{command_run, command_run_app};
-use std::path::PathBuf;
+use rustutils::system_properties;
+use std::path::{Path, PathBuf};
use structopt::clap::AppSettings;
use structopt::StructOpt;
const VIRTUALIZATION_SERVICE_BINDER_SERVICE_IDENTIFIER: &str =
"android.system.virtualizationservice";
+#[derive(Debug)]
+struct Idsigs(Vec<PathBuf>);
+
#[derive(StructOpt)]
#[structopt(no_version, global_settings = &[AppSettings::DisableVersion])]
enum Opt {
@@ -54,13 +63,38 @@
#[structopt(short, long)]
daemonize: bool,
+ /// Path to file for VM console output.
+ #[structopt(long)]
+ console: Option<PathBuf>,
+
/// Path to file for VM log output.
- #[structopt(short, long)]
+ #[structopt(long)]
log: Option<PathBuf>,
- /// Whether to run VM in debug mode.
+ /// Debug level of the VM. Supported values: "none" (default), "app_only", and "full".
+ #[structopt(long, default_value = "none", parse(try_from_str=parse_debug_level))]
+ debug: DebugLevel,
+
+ /// Run VM in protected mode.
#[structopt(short, long)]
- debug: bool,
+ protected: bool,
+
+ /// Memory size (in MiB) of the VM. If unspecified, defaults to the value of `memory_mib`
+ /// in the VM config file.
+ #[structopt(short, long)]
+ mem: Option<u32>,
+
+ /// Number of vCPUs in the VM. If unspecified, defaults to 1.
+ #[structopt(long)]
+ cpus: Option<u32>,
+
+ /// Host CPUs where vCPUs are run on. If unspecified, vCPU runs on any host CPU.
+ #[structopt(long)]
+ cpu_affinity: Option<String>,
+
+ /// Paths to extra idsig files.
+ #[structopt(long = "extra-idsig")]
+ extra_idsigs: Vec<PathBuf>,
},
/// Run a virtual machine
Run {
@@ -72,8 +106,24 @@
#[structopt(short, long)]
daemonize: bool,
+ /// Number of vCPUs in the VM. If unspecified, defaults to 1.
+ #[structopt(long)]
+ cpus: Option<u32>,
+
+ /// Host CPUs where vCPUs are run on. If unspecified, vCPU runs on any host CPU. The format
+ /// can be either a comma-separated list of CPUs or CPU ranges to run vCPUs on (e.g.
+ /// "0,1-3,5" to choose host CPUs 0, 1, 2, 3, and 5, or a colon-separated list of
+ /// vCPU-to-host-CPU assignments, e.g. "0=0:1=1:2=2" to map vCPU 0 to host
+ /// CPU 0 and so on).
+ #[structopt(long)]
+ cpu_affinity: Option<String>,
+
+ /// Path to file for VM console output.
+ #[structopt(long)]
+ console: Option<PathBuf>,
+
/// Path to file for VM log output.
- #[structopt(short, long)]
+ #[structopt(long)]
log: Option<PathBuf>,
},
/// Stop a virtual machine running in the background
@@ -83,6 +133,8 @@
},
/// List running virtual machines
List,
+ /// Print information about virtual machine support
+ Info,
/// Create a new empty partition to be used as a writable partition for a VM
CreatePartition {
/// Path at which to create the image file
@@ -91,7 +143,37 @@
/// The desired size of the partition, in bytes.
size: u64,
+
+ /// Type of the partition
+ #[structopt(short="t", long="type", default_value="raw", parse(try_from_str=parse_partition_type))]
+ partition_type: PartitionType,
},
+ /// Creates or updates the idsig file by digesting the input APK file.
+ CreateIdsig {
+ /// Path to VM Payload APK
+ #[structopt(parse(from_os_str))]
+ apk: PathBuf,
+ /// Path to idsig of the APK
+ #[structopt(parse(from_os_str))]
+ path: PathBuf,
+ },
+}
+
+fn parse_debug_level(s: &str) -> Result<DebugLevel, String> {
+ match s {
+ "none" => Ok(DebugLevel::NONE),
+ "app_only" => Ok(DebugLevel::APP_ONLY),
+ "full" => Ok(DebugLevel::FULL),
+ _ => Err(format!("Invalid debug level {}", s)),
+ }
+}
+
+fn parse_partition_type(s: &str) -> Result<PartitionType, String> {
+ match s {
+ "raw" => Ok(PartitionType::RAW),
+ "instance" => Ok(PartitionType::ANDROID_VM_INSTANCE),
+ _ => Err(format!("Invalid partition type {}", s)),
+ }
}
fn main() -> Result<(), Error> {
@@ -105,24 +187,55 @@
.context("Failed to find VirtualizationService")?;
match opt {
- Opt::RunApp { apk, idsig, instance, config_path, daemonize, log, debug } => {
- command_run_app(
+ Opt::RunApp {
+ apk,
+ idsig,
+ instance,
+ config_path,
+ daemonize,
+ console,
+ log,
+ debug,
+ protected,
+ mem,
+ cpus,
+ cpu_affinity,
+ extra_idsigs,
+ } => command_run_app(
+ service,
+ &apk,
+ &idsig,
+ &instance,
+ &config_path,
+ daemonize,
+ console.as_deref(),
+ log.as_deref(),
+ debug,
+ protected,
+ mem,
+ cpus,
+ cpu_affinity,
+ &extra_idsigs,
+ ),
+ Opt::Run { config, daemonize, cpus, cpu_affinity, console, log } => {
+ command_run(
service,
- &apk,
- &idsig,
- &instance,
- &config_path,
+ &config,
daemonize,
+ console.as_deref(),
log.as_deref(),
- debug,
+ /* mem */ None,
+ cpus,
+ cpu_affinity,
)
}
- Opt::Run { config, daemonize, log } => {
- command_run(service, &config, daemonize, log.as_deref())
- }
Opt::Stop { cid } => command_stop(service, cid),
Opt::List => command_list(service),
- Opt::CreatePartition { path, size } => command_create_partition(service, &path, size),
+ Opt::Info => command_info(),
+ Opt::CreatePartition { path, size, partition_type } => {
+ command_create_partition(service, &path, size, partition_type)
+ }
+ Opt::CreateIdsig { apk, path } => command_create_idsig(service, &apk, &path),
}
}
@@ -141,3 +254,31 @@
println!("Running VMs: {:#?}", vms);
Ok(())
}
+
+/// Print information about supported VM types.
+fn command_info() -> Result<(), Error> {
+ let unprotected_vm_supported =
+ system_properties::read_bool("ro.boot.hypervisor.vm.supported", false)?;
+ let protected_vm_supported =
+ system_properties::read_bool("ro.boot.hypervisor.protected_vm.supported", false)?;
+ match (unprotected_vm_supported, protected_vm_supported) {
+ (false, false) => println!("VMs are not supported."),
+ (false, true) => println!("Only protected VMs are supported."),
+ (true, false) => println!("Only unprotected VMs are supported."),
+ (true, true) => println!("Both protected and unprotected VMs are supported."),
+ }
+
+ if let Some(version) = system_properties::read("ro.boot.hypervisor.version")? {
+ println!("Hypervisor version: {}", version);
+ } else {
+ println!("Hypervisor version not set.");
+ }
+
+ if Path::new("/dev/kvm").exists() {
+ println!("/dev/kvm exists.");
+ } else {
+ println!("/dev/kvm does not exist.");
+ }
+
+ Ok(())
+}
diff --git a/vm/src/run.rs b/vm/src/run.rs
index 1b1d5a3..ef38d7d 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -16,25 +16,27 @@
use crate::create_partition::command_create_partition;
use crate::sync::AtomicFlag;
-use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualizationService::IVirtualizationService;
-use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualMachine::IVirtualMachine;
-use android_system_virtualizationservice::aidl::android::system::virtualizationservice::IVirtualMachineCallback::{
- BnVirtualMachineCallback, IVirtualMachineCallback,
-};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
- VirtualMachineAppConfig::VirtualMachineAppConfig,
- VirtualMachineConfig::VirtualMachineConfig,
+ DeathReason::DeathReason, IVirtualMachine::IVirtualMachine,
+ IVirtualMachineCallback::BnVirtualMachineCallback,
+ IVirtualMachineCallback::IVirtualMachineCallback,
+ IVirtualizationService::IVirtualizationService, PartitionType::PartitionType,
+ VirtualMachineAppConfig::DebugLevel::DebugLevel,
+ VirtualMachineAppConfig::VirtualMachineAppConfig, VirtualMachineConfig::VirtualMachineConfig,
+ VirtualMachineState::VirtualMachineState,
};
use android_system_virtualizationservice::binder::{
BinderFeatures, DeathRecipient, IBinder, ParcelFileDescriptor, Strong,
};
use android_system_virtualizationservice::binder::{Interface, Result as BinderResult};
-use anyhow::{Context, Error};
+use anyhow::{bail, Context, Error};
+use microdroid_payload_config::VmPayloadConfig;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::os::unix::io::{AsRawFd, FromRawFd};
-use std::path::Path;
+use std::path::{Path, PathBuf};
use vmconfig::{open_parcel_file, VmConfig};
+use zip::ZipArchive;
/// Run a VM from the given APK, idsig, and config.
#[allow(clippy::too_many_arguments)]
@@ -45,56 +47,138 @@
instance: &Path,
config_path: &str,
daemonize: bool,
+ console_path: Option<&Path>,
log_path: Option<&Path>,
- debug: bool,
+ debug_level: DebugLevel,
+ protected: bool,
+ mem: Option<u32>,
+ cpus: Option<u32>,
+ cpu_affinity: Option<String>,
+ extra_idsigs: &[PathBuf],
) -> Result<(), Error> {
+ let extra_apks = parse_extra_apk_list(apk, config_path)?;
+ if extra_apks.len() != extra_idsigs.len() {
+ bail!(
+ "Found {} extra apks, but there are {} extra idsigs",
+ extra_apks.len(),
+ extra_idsigs.len()
+ )
+ }
+
+ for i in 0..extra_apks.len() {
+ let extra_apk_fd = ParcelFileDescriptor::new(File::open(&extra_apks[i])?);
+ let extra_idsig_fd = ParcelFileDescriptor::new(File::create(&extra_idsigs[i])?);
+ service.createOrUpdateIdsigFile(&extra_apk_fd, &extra_idsig_fd)?;
+ }
+
let apk_file = File::open(apk).context("Failed to open APK file")?;
+ let idsig_file = File::create(idsig).context("Failed to create idsig file")?;
+
+ let apk_fd = ParcelFileDescriptor::new(apk_file);
+ let idsig_fd = ParcelFileDescriptor::new(idsig_file);
+ service.createOrUpdateIdsigFile(&apk_fd, &idsig_fd)?;
+
let idsig_file = File::open(idsig).context("Failed to open idsig file")?;
+ let idsig_fd = ParcelFileDescriptor::new(idsig_file);
if !instance.exists() {
const INSTANCE_FILE_SIZE: u64 = 10 * 1024 * 1024;
- command_create_partition(service.clone(), instance, INSTANCE_FILE_SIZE)?;
+ command_create_partition(
+ service.clone(),
+ instance,
+ INSTANCE_FILE_SIZE,
+ PartitionType::ANDROID_VM_INSTANCE,
+ )?;
}
+ let extra_idsig_files: Result<Vec<File>, _> = extra_idsigs.iter().map(File::open).collect();
+ let extra_idsig_fds = extra_idsig_files?.into_iter().map(ParcelFileDescriptor::new).collect();
+
let config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
- apk: ParcelFileDescriptor::new(apk_file).into(),
- idsig: ParcelFileDescriptor::new(idsig_file).into(),
+ apk: apk_fd.into(),
+ idsig: idsig_fd.into(),
+ extraIdsigs: extra_idsig_fds,
instanceImage: open_parcel_file(instance, true /* writable */)?.into(),
configPath: config_path.to_owned(),
- debug,
- // Use the default.
- memory_mib: 0,
+ debugLevel: debug_level,
+ protectedVm: protected,
+ memoryMib: mem.unwrap_or(0) as i32, // 0 means use the VM default
+ numCpus: cpus.unwrap_or(1) as i32,
+ cpuAffinity: cpu_affinity,
});
- run(service, &config, &format!("{:?}!{:?}", apk, config_path), daemonize, log_path)
+ run(
+ service,
+ &config,
+ &format!("{:?}!{:?}", apk, config_path),
+ daemonize,
+ console_path,
+ log_path,
+ )
}
/// Run a VM from the given configuration file.
+#[allow(clippy::too_many_arguments)]
pub fn command_run(
service: Strong<dyn IVirtualizationService>,
config_path: &Path,
daemonize: bool,
+ console_path: Option<&Path>,
log_path: Option<&Path>,
+ mem: Option<u32>,
+ cpus: Option<u32>,
+ cpu_affinity: Option<String>,
) -> Result<(), Error> {
let config_file = File::open(config_path).context("Failed to open config file")?;
- let config =
+ let mut config =
VmConfig::load(&config_file).context("Failed to parse config file")?.to_parcelable()?;
+ if let Some(mem) = mem {
+ config.memoryMib = mem as i32;
+ }
+ if let Some(cpus) = cpus {
+ config.numCpus = cpus as i32;
+ }
+ config.cpuAffinity = cpu_affinity;
run(
service,
&VirtualMachineConfig::RawConfig(config),
&format!("{:?}", config_path),
daemonize,
+ console_path,
log_path,
)
}
+fn state_to_str(vm_state: VirtualMachineState) -> &'static str {
+ match vm_state {
+ VirtualMachineState::NOT_STARTED => "NOT_STARTED",
+ VirtualMachineState::STARTING => "STARTING",
+ VirtualMachineState::STARTED => "STARTED",
+ VirtualMachineState::READY => "READY",
+ VirtualMachineState::FINISHED => "FINISHED",
+ VirtualMachineState::DEAD => "DEAD",
+ _ => "(invalid state)",
+ }
+}
+
fn run(
service: Strong<dyn IVirtualizationService>,
config: &VirtualMachineConfig,
config_path: &str,
daemonize: bool,
+ console_path: Option<&Path>,
log_path: Option<&Path>,
) -> Result<(), Error> {
- let stdout = if let Some(log_path) = log_path {
+ let console = if let Some(console_path) = console_path {
+ Some(ParcelFileDescriptor::new(
+ File::create(console_path)
+ .with_context(|| format!("Failed to open console file {:?}", console_path))?,
+ ))
+ } else if daemonize {
+ None
+ } else {
+ Some(ParcelFileDescriptor::new(duplicate_stdout()?))
+ };
+ let log = if let Some(log_path) = log_path {
Some(ParcelFileDescriptor::new(
File::create(log_path)
.with_context(|| format!("Failed to open log file {:?}", log_path))?,
@@ -104,10 +188,19 @@
} else {
Some(ParcelFileDescriptor::new(duplicate_stdout()?))
};
- let vm = service.startVm(config, stdout.as_ref()).context("Failed to start VM")?;
+
+ let vm =
+ service.createVm(config, console.as_ref(), log.as_ref()).context("Failed to create VM")?;
let cid = vm.getCid().context("Failed to get CID")?;
- println!("Started VM from {} with CID {}.", config_path, cid);
+ println!(
+ "Created VM from {} with CID {}, state is {}.",
+ config_path,
+ cid,
+ state_to_str(vm.getState()?)
+ );
+ vm.start()?;
+ println!("Started VM, state now {}.", state_to_str(vm.getState()?));
if daemonize {
// Pass the VM reference back to VirtualizationService and have it hold it in the
@@ -148,6 +241,13 @@
Ok(death_recipient)
}
+fn parse_extra_apk_list(apk: &Path, config_path: &str) -> Result<Vec<String>, Error> {
+ let mut archive = ZipArchive::new(File::open(apk)?)?;
+ let config_file = archive.by_name(config_path)?;
+ let config: VmPayloadConfig = serde_json::from_reader(config_file)?;
+ Ok(config.extra_apks.into_iter().map(|x| x.path).collect())
+}
+
#[derive(Debug)]
struct VirtualMachineCallback {
dead: AtomicFlag,
@@ -156,27 +256,54 @@
impl Interface for VirtualMachineCallback {}
impl IVirtualMachineCallback for VirtualMachineCallback {
- fn onPayloadStarted(&self, _cid: i32, stdout: &ParcelFileDescriptor) -> BinderResult<()> {
- // Show the stdout of the payload
- let mut reader = BufReader::new(stdout.as_ref());
- loop {
- let mut s = String::new();
- match reader.read_line(&mut s) {
- Ok(0) => break,
- Ok(_) => print!("{}", s),
- Err(e) => eprintln!("error reading from virtual machine: {}", e),
- };
+ fn onPayloadStarted(
+ &self,
+ _cid: i32,
+ stream: Option<&ParcelFileDescriptor>,
+ ) -> BinderResult<()> {
+ // Show the output of the payload
+ if let Some(stream) = stream {
+ let mut reader = BufReader::new(stream.as_ref());
+ loop {
+ let mut s = String::new();
+ match reader.read_line(&mut s) {
+ Ok(0) => break,
+ Ok(_) => print!("{}", s),
+ Err(e) => eprintln!("error reading from virtual machine: {}", e),
+ };
+ }
}
Ok(())
}
- fn onDied(&self, _cid: i32) -> BinderResult<()> {
- // No need to explicitly report the event to the user (e.g. via println!) because this
- // callback is registered only when the vm tool is invoked as interactive mode (e.g. not
- // --daemonize) in which case the tool will exit to the shell prompt upon VM shutdown.
- // Printing something will actually even confuse the user as the output from the app
- // payload is printed.
+ fn onPayloadReady(&self, _cid: i32) -> BinderResult<()> {
+ eprintln!("payload is ready");
+ Ok(())
+ }
+
+ fn onPayloadFinished(&self, _cid: i32, exit_code: i32) -> BinderResult<()> {
+ eprintln!("payload finished with exit code {}", exit_code);
+ Ok(())
+ }
+
+ fn onError(&self, _cid: i32, error_code: i32, message: &str) -> BinderResult<()> {
+ eprintln!("VM encountered an error: code={}, message={}", error_code, message);
+ Ok(())
+ }
+
+ fn onDied(&self, _cid: i32, reason: DeathReason) -> BinderResult<()> {
self.dead.raise();
+
+ match reason {
+ DeathReason::INFRASTRUCTURE_ERROR => println!("Error waiting for VM to finish."),
+ DeathReason::KILLED => println!("VM was killed."),
+ DeathReason::UNKNOWN => println!("VM died for an unknown reason."),
+ DeathReason::SHUTDOWN => println!("VM shutdown cleanly."),
+ DeathReason::ERROR => println!("Error starting VM."),
+ DeathReason::REBOOT => println!("VM tried to reboot, possibly due to a kernel panic."),
+ DeathReason::CRASH => println!("VM crashed."),
+ _ => println!("VM died for an unrecognised reason."),
+ }
Ok(())
}
}
diff --git a/zipfuse/Android.bp b/zipfuse/Android.bp
index 24cfaa0..e10fc31 100644
--- a/zipfuse/Android.bp
+++ b/zipfuse/Android.bp
@@ -14,6 +14,8 @@
"libfuse_rust",
"liblibc",
"libzip",
+ "libscopeguard",
+ "liblog_rust",
],
// libfuse_rust, etc don't support 32-bit targets
multilib: {
@@ -26,7 +28,7 @@
rust_binary {
name: "zipfuse",
defaults: ["zipfuse.defaults"],
- init_rc: ["zipfuse.rc"],
+ bootstrap: true,
}
rust_test {
diff --git a/zipfuse/Cargo.toml b/zipfuse/Cargo.toml
index c8f2f3a..17fd293 100644
--- a/zipfuse/Cargo.toml
+++ b/zipfuse/Cargo.toml
@@ -12,7 +12,8 @@
zip = "0.5"
tempfile = "3.2"
nix = "0.20"
+scopeguard = "1.1"
+log = "0.4"
[dev-dependencies]
loopdev = "0.2"
-scopeguard = "1.1"
diff --git a/zipfuse/src/main.rs b/zipfuse/src/main.rs
index 9b70d08..a91642c 100644
--- a/zipfuse/src/main.rs
+++ b/zipfuse/src/main.rs
@@ -44,7 +44,7 @@
.short("o")
.takes_value(true)
.required(false)
- .help("Comma separated list of mount options")
+ .help("Comma separated list of mount options"),
)
.arg(Arg::with_name("ZIPFILE").required(true))
.arg(Arg::with_name("MOUNTPOINT").required(true))
@@ -87,20 +87,23 @@
struct ZipFuse {
zip_archive: Mutex<zip::ZipArchive<File>>,
+ raw_file: Mutex<File>,
inode_table: InodeTable,
- open_files: Mutex<HashMap<Handle, OpenFileBuf>>,
+ open_files: Mutex<HashMap<Handle, OpenFile>>,
open_dirs: Mutex<HashMap<Handle, OpenDirBuf>>,
}
-/// Holds the (decompressed) contents of a [`ZipFile`].
-///
-/// This buf is needed because `ZipFile` is in general not seekable due to the compression.
-///
-/// TODO(jiyong): do this only for compressed `ZipFile`s. Uncompressed (store) files don't need
-/// this; they can be directly read from `zip_archive`.
-struct OpenFileBuf {
+/// Represents a [`ZipFile`] that is opened.
+struct OpenFile {
open_count: u32, // multiple opens share the buf because this is a read-only filesystem
- buf: Box<[u8]>,
+ content: OpenFileContent,
+}
+
+/// Holds the content of a [`ZipFile`]. Depending on whether it is compressed or not, the
+/// entire content is stored, or only the zip index is stored.
+enum OpenFileContent {
+ Compressed(Box<[u8]>),
+ Uncompressed(usize), // zip index
}
/// Holds the directory entries in a directory opened by [`opendir`].
@@ -123,11 +126,15 @@
fn new(zip_file: &Path) -> Result<ZipFuse> {
// TODO(jiyong): Use O_DIRECT to avoid double caching.
// `.custom_flags(nix::fcntl::OFlag::O_DIRECT.bits())` currently doesn't work.
- let f = OpenOptions::new().read(true).open(zip_file)?;
+ let f = File::open(zip_file)?;
let mut z = zip::ZipArchive::new(f)?;
+ // Open the same file again so that we can directly access it when accessing
+ // uncompressed zip_file entries in it. `ZipFile` doesn't implement `Seek`.
+ let raw_file = File::open(zip_file)?;
let it = InodeTable::from_zip(&mut z)?;
Ok(ZipFuse {
zip_archive: Mutex::new(z),
+ raw_file: Mutex::new(raw_file),
inode_table: it,
open_files: Mutex::new(HashMap::new()),
open_dirs: Mutex::new(HashMap::new()),
@@ -208,21 +215,37 @@
// If the file is already opened, just increase the reference counter. If not, read the
// entire file content to the buffer. When `read` is called, a portion of the buffer is
// copied to the kernel.
- // TODO(jiyong): do this only for compressed zip files. Files that are not compressed
- // (store) can be directly read from zip_archive. That will help reduce the memory usage.
- if let Some(ofb) = open_files.get_mut(&handle) {
- if ofb.open_count == 0 {
+ if let Some(file) = open_files.get_mut(&handle) {
+ if file.open_count == 0 {
return Err(ebadf());
}
- ofb.open_count += 1;
+ file.open_count += 1;
} else {
let inode_data = self.find_inode(inode)?;
let zip_index = inode_data.get_zip_index().ok_or_else(ebadf)?;
let mut zip_archive = self.zip_archive.lock().unwrap();
let mut zip_file = zip_archive.by_index(zip_index)?;
- let mut buf = Vec::with_capacity(inode_data.size as usize);
- zip_file.read_to_end(&mut buf)?;
- open_files.insert(handle, OpenFileBuf { open_count: 1, buf: buf.into_boxed_slice() });
+ let content = match zip_file.compression() {
+ zip::CompressionMethod::Stored => OpenFileContent::Uncompressed(zip_index),
+ _ => {
+ if let Some(mode) = zip_file.unix_mode() {
+ let is_reg_file = zip_file.is_file();
+ let is_executable =
+ mode & (libc::S_IXUSR | libc::S_IXGRP | libc::S_IXOTH) != 0;
+ if is_reg_file && is_executable {
+ log::warn!(
+ "Executable file {:?} is stored compressed. Consider \
+ storing it uncompressed to save memory",
+ zip_file.mangled_name()
+ );
+ }
+ }
+ let mut buf = Vec::with_capacity(inode_data.size as usize);
+ zip_file.read_to_end(&mut buf)?;
+ OpenFileContent::Compressed(buf.into_boxed_slice())
+ }
+ };
+ open_files.insert(handle, OpenFile { open_count: 1, content });
}
// Note: we don't return `DIRECT_IO` here, because then applications wouldn't be able to
// mmap the files.
@@ -244,8 +267,8 @@
// again when the same file is opened in the future.
let mut open_files = self.open_files.lock().unwrap();
let handle = inode as Handle;
- if let Some(ofb) = open_files.get_mut(&handle) {
- if ofb.open_count.checked_sub(1).ok_or_else(ebadf)? == 0 {
+ if let Some(file) = open_files.get_mut(&handle) {
+ if file.open_count.checked_sub(1).ok_or_else(ebadf)? == 0 {
open_files.remove(&handle);
}
Ok(())
@@ -266,15 +289,28 @@
_flags: u32,
) -> io::Result<usize> {
let open_files = self.open_files.lock().unwrap();
- let ofb = open_files.get(&handle).ok_or_else(ebadf)?;
- if ofb.open_count == 0 {
+ let file = open_files.get(&handle).ok_or_else(ebadf)?;
+ if file.open_count == 0 {
return Err(ebadf());
}
- let start = offset as usize;
- let end = start + size as usize;
- let end = std::cmp::min(end, ofb.buf.len());
- let read_len = w.write(&ofb.buf[start..end])?;
- Ok(read_len)
+ Ok(match &file.content {
+ OpenFileContent::Uncompressed(zip_index) => {
+ let mut zip_archive = self.zip_archive.lock().unwrap();
+ let zip_file = zip_archive.by_index(*zip_index)?;
+ let start = zip_file.data_start() + offset;
+ let remaining_size = zip_file.size() - offset;
+ let size = std::cmp::min(remaining_size, size.into());
+
+ let mut raw_file = self.raw_file.lock().unwrap();
+ w.write_from(&mut raw_file, size as usize, start)?
+ }
+ OpenFileContent::Compressed(buf) => {
+ let start = offset as usize;
+ let end = start + size as usize;
+ let end = std::cmp::min(end, buf.len());
+ w.write(&buf[start..end])?
+ }
+ })
}
fn opendir(
@@ -650,7 +686,7 @@
let mnt_path = test_dir.join("mnt");
assert!(fs::create_dir(&mnt_path).is_ok());
- start_fuse(&zip_path, &mnt_path);
+ start_fuse(zip_path, &mnt_path);
// Give some time for the fuse to boot up
assert!(wait_for_mount(&mnt_path).is_ok());
@@ -669,7 +705,26 @@
let mut zip_file = File::create(&zip_path).unwrap();
zip_file.write_all(include_bytes!("../testdata/test.zip")).unwrap();
- run_fuse_and_check_test_zip(&test_dir.path(), &zip_path);
+ run_fuse_and_check_test_zip(test_dir.path(), &zip_path);
+ }
+
+ #[test]
+ fn supports_store() {
+ run_test(
+ |zip| {
+ let data = vec![10; 2 << 20];
+ zip.start_file(
+ "foo",
+ FileOptions::default().compression_method(zip::CompressionMethod::Stored),
+ )
+ .unwrap();
+ zip.write_all(&data).unwrap();
+ },
+ |root| {
+ let data = vec![10; 2 << 20];
+ check_file(root, "foo", &data);
+ },
+ );
}
#[cfg(not(target_os = "android"))] // Android doesn't have the loopdev crate
diff --git a/zipfuse/zipfuse.rc b/zipfuse/zipfuse.rc
deleted file mode 100644
index 1905705..0000000
--- a/zipfuse/zipfuse.rc
+++ /dev/null
@@ -1,2 +0,0 @@
-service zipfuse /system/bin/zipfuse -o fscontext=u:object_r:zipfusefs:s0,context=u:object_r:system_file:s0 /dev/block/mapper/microdroid-apk /mnt/apk
- disabled