Merge "Append dtbo_vendor into --device-tree-overlay crosvm option" into main
diff --git a/Android.bp b/Android.bp
index 550a6be..54919d4 100644
--- a/Android.bp
+++ b/Android.bp
@@ -22,6 +22,7 @@
module_type: "rust_defaults",
config_namespace: "ANDROID",
bool_variables: [
+ "release_avf_enable_device_assignment",
"release_avf_enable_dice_changes",
"release_avf_enable_llpvm_changes",
"release_avf_enable_multi_tenant_microdroid_vm",
@@ -36,6 +37,9 @@
avf_flag_aware_rust_defaults {
name: "avf_build_flags_rust",
soong_config_variables: {
+ release_avf_enable_device_assignment: {
+ cfgs: ["device_assignment"],
+ },
release_avf_enable_dice_changes: {
cfgs: ["dice_changes"],
},
@@ -57,5 +61,5 @@
genrule_defaults {
name: "dts_to_dtb",
tools: ["dtc"],
- cmd: "$(location dtc) -I dts -O dtb $(in) -o $(out)",
+ cmd: "FILES=($(in)) && $(location dtc) -I dts -O dtb $${FILES[-1]} -o $(out)",
}
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 77ccc1d..4da96c8 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -34,6 +34,9 @@
},
{
"name": "libapkzip.test"
+ },
+ {
+ "name": "libsecretkeeper_comm.test"
}
],
"avf-postsubmit": [
@@ -105,12 +108,18 @@
"path": "packages/modules/Virtualization/rialto"
},
{
+ "path": "packages/modules/Virtualization/service_vm/client_vm_csr"
+ },
+ {
"path": "packages/modules/Virtualization/service_vm/comm"
},
{
"path": "packages/modules/Virtualization/service_vm/requests"
},
{
+ "path": "packages/modules/Virtualization/virtualizationservice"
+ },
+ {
"path": "packages/modules/Virtualization/vm"
},
{
diff --git a/apex/Android.bp b/apex/Android.bp
index a4c8861..b09cf58 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -67,7 +67,23 @@
],
}
-apex_defaults {
+soong_config_module_type {
+ name: "avf_flag_aware_apex_defaults",
+ module_type: "apex_defaults",
+ config_namespace: "ANDROID",
+ bool_variables: [
+ "release_avf_enable_device_assignment",
+ "release_avf_enable_remote_attestation",
+ "release_avf_enable_vendor_modules",
+ ],
+ properties: [
+ "arch",
+ "prebuilts",
+ "vintf_fragments",
+ ],
+}
+
+avf_flag_aware_apex_defaults {
name: "com.android.virt_avf_enabled",
defaults: ["com.android.virt_common"],
@@ -79,7 +95,6 @@
arm64: {
binaries: [
"crosvm",
- "vfio_handler",
"virtmgr",
"virtualizationservice",
],
@@ -88,7 +103,6 @@
x86_64: {
binaries: [
"crosvm",
- "vfio_handler",
"virtmgr",
"virtualizationservice",
],
@@ -100,7 +114,6 @@
"vm",
],
prebuilts: [
- "com.android.virt.init.rc",
"features_com.android.virt.xml",
"microdroid_initrd_debuggable",
"microdroid_initrd_normal",
@@ -110,11 +123,42 @@
],
host_required: [
"vm_shell",
- "prepare_device_vfio",
],
apps: [
"EmptyPayloadApp",
],
+ soong_config_variables: {
+ release_avf_enable_device_assignment: {
+ prebuilts: [
+ "com.android.virt.vfio_handler.rc",
+ ],
+ arch: {
+ arm64: {
+ binaries: ["vfio_handler"],
+ },
+ x86_64: {
+ binaries: ["vfio_handler"],
+ },
+ },
+ },
+ release_avf_enable_vendor_modules: {
+ prebuilts: [
+ "microdroid_gki-android14-6.1_initrd_debuggable",
+ "microdroid_gki-android14-6.1_initrd_normal",
+ "microdroid_gki-android14-6.1_kernel",
+ "microdroid_gki-android14-6.1.json",
+ ],
+ },
+ release_avf_enable_remote_attestation: {
+ prebuilts: ["com.android.virt.init_attestation_enabled.rc"],
+ vintf_fragments: [
+ "virtualizationservice.xml",
+ ],
+ conditions_default: {
+ prebuilts: ["com.android.virt.init.rc"],
+ },
+ },
+ },
}
apex_defaults {
@@ -137,14 +181,22 @@
prebuilt_etc {
name: "com.android.virt.init.rc",
src: "virtualizationservice.rc",
- filename: "init.rc",
+ filename: "virtualizationservice.rc",
installable: false,
}
-sh_binary_host {
- name: "prepare_device_vfio",
- src: "prepare_device_vfio.sh",
- filename: "prepare_device_vfio.sh",
+prebuilt_etc {
+ name: "com.android.virt.init_attestation_enabled.rc",
+ src: "virtualizationservice_attestation_enabled.rc",
+ filename: "virtualizationservice.rc",
+ installable: false,
+}
+
+prebuilt_etc {
+ name: "com.android.virt.vfio_handler.rc",
+ src: "vfio_handler.rc",
+ filename: "vfio_handler.rc",
+ installable: false,
}
// Virt apex needs a custom signer for its payload
diff --git a/apex/prepare_device_vfio.sh b/apex/prepare_device_vfio.sh
deleted file mode 100755
index de2d502..0000000
--- a/apex/prepare_device_vfio.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/bin/bash
-
-# Copyright 2023 Google Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# prepare_device_vfio.sh: prepares a device for VFIO assignment by binding a VFIO driver to it
-
-adb="${ADB:="adb"}" # ADB command to use
-vfio_dir="/dev/vfio"
-platform_bus="/sys/bus/platform"
-vfio_reset_required="/sys/module/vfio_platform/parameters/reset_required"
-vfio_noiommu_param="/sys/module/vfio/parameters/enable_unsafe_noiommu_mode"
-vfio_unsafe_interrupts_param="/sys/module/vfio_iommu_type1/parameters/allow_unsafe_interrupts"
-
-function print_help() {
- echo "prepare_device_vfio.sh prepares a device for VFIO assignment"
- echo ""
- echo " Usage:"
- echo " $0 DEVICE_NAME"
- echo " Prepare device DEVICE_NAME for VFIO assignment."
- echo ""
- echo " help - prints this help message"
-}
-
-function cmd() {
- $adb shell $@
-}
-
-function tcmd() {
- trap "echo \"Error: adb shell command '$@' failed\" ; exit 1" ERR
- $adb shell $@
-}
-
-function ensure_root() {
- # Check user id
- if [ $(cmd "id -u") != 0 ]; then
- read -p "Must run as root; restart ADBD? [y/n] " answer
- case $answer in
- [Yy]* )
- $adb root && $adb wait-for-device && sleep 3 || exit 1
- ;;
- * )
- exit 1
- esac
- fi
-}
-
-function check_vfio() {
- cmd "[ -c $vfio_dir/vfio ]"
- if [ $? -ne 0 ]; then
- echo "cannot find $vfio_dir/vfio"
- exit 1
- fi
-
- cmd "[ -d $platform_bus/drivers/vfio-platform ]"
- if [ $? -ne 0 ]; then
- echo "VFIO-platform is not supported"
- exit 1
- fi
-}
-
-function check_device() {
- cmd "[ -d $device_sys ]"
- if [ $? -ne 0 ]; then
- echo "no device $device ($device_sys)"
- exit 1
- fi
-}
-
-function get_device_iommu_group() {
- local group=$(cmd "basename \$(readlink \"$device_sys/iommu_group\")")
- if [ $? -eq 0 ]; then
- echo $group
- else
- echo ""
- fi
-}
-
-function misc_setup() {
- # VFIO NOIOMMU check
- if [ -z "$group" ]; then
- echo "$device_sys does not have an IOMMU group - setting $vfio_noiommu_param"
- tcmd "echo y > \"$vfio_noiommu_param\""
- fi
-
- # Disable SELinux to allow virtualizationmanager and crosvm to access sysfs
- echo "[*WARN*] setenforce=0: SELinux is disabled"
- tcmd "setenforce 0"
-
- # Samsung IOMMU does not report interrupt remapping support, so enable unsafe uinterrupts
- if [ -n "$group" ]; then
- local iommu_drv=$(cmd "basename \$(readlink \"$device_sys/iommu/device/driver\")")
- if [ "$iommu_drv" = "samsung-sysmmu-v9" ]; then
- tcmd "echo y > \"$vfio_unsafe_interrupts_param\""
- fi
- fi
-}
-
-function bind_vfio_driver() {
- # Check if non-VFIO driver is currently bound, ie unbinding is needed
- cmd "[ -e \"$device_driver\" ] && \
- [ ! \$(basename \$(readlink \"$device_driver\")) = \"vfio-platform\" ]"
- if [ $? -eq 0 ]; then
- # Unbind current driver
- tcmd "echo \"$device\" > \"$device_driver/unbind\""
- fi
-
- # Bind to VFIO driver
- cmd "[ ! -e \"$device_driver\" ]"
- if [ $? -eq 0 ]; then
- # Bind vfio-platform driver
- tcmd "echo \"vfio-platform\" > \"$device_sys/driver_override\""
- tcmd "echo \"$device\" > \"$platform_bus/drivers_probe\""
- sleep 2
- fi
-}
-
-function verify_vfio_driver() {
- # Verify new VFIO file structure
- group=$(get_device_iommu_group)
- if [ -z "$group" ]; then
- echo "cannot setup VFIO-NOIOMMU for $device_sys"
- exit 1
- fi
-
- cmd "[ ! -c \"$vfio_dir/$group\" ] || \
- [ ! -e \"$device_driver\" ] || \
- [ ! \$(basename \$(readlink \"$device_driver\")) = \"vfio-platform\" ]"
- if [ $? -eq 0 ]; then
- echo "could not bind $device to VFIO platform driver"
-
- if [ $(cmd "cat $vfio_reset_required") = Y ]; then
- echo "VFIO device reset handler must be registered. Either unset $vfio_reset_required, \
-or register a reset handler for $device_sys"
- fi
- exit 1
- fi
-}
-
-function prepare_device() {
- device="$1"
- device_sys="/sys/bus/platform/devices/$device"
- device_driver="$device_sys/driver"
-
- ensure_root
- check_vfio
- check_device
- group=$(get_device_iommu_group)
- misc_setup
-
- bind_vfio_driver
- verify_vfio_driver
-
- echo "Device: $device_sys"
- echo "IOMMU group: $group"
- echo "VFIO group file: $vfio_dir/$group"
- echo "Ready!"
-}
-
-cmd=$1
-
-case $cmd in
- ""|help) print_help ;;
- *) prepare_device "$cmd" $@ ;;
-esac
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
index 8257aae..b21a355 100644
--- a/apex/sign_virt_apex.py
+++ b/apex/sign_virt_apex.py
@@ -27,6 +27,7 @@
- lpmake, lpunpack, simg2img, img2simg, initrd_bootconfig
"""
import argparse
+import builtins
import hashlib
import os
import re
@@ -107,6 +108,7 @@
action='store_true',
help='This will NOT update the vbmeta related bootconfigs while signing the apex.\
Used for testing only!!')
+ parser.add_argument('--do_not_validate_avb_version', action='store_true', help='Do not validate the avb_version when updating vbmeta bootconfig. Only use in tests!')
args = parser.parse_args(argv)
# preprocess --key_override into a map
args.key_overrides = {}
@@ -282,7 +284,7 @@
avb_version_bc = re.search(
r"androidboot.vbmeta.avb_version = \"([^\"]*)\"", bootconfigs).group(1)
if avb_version_curr != avb_version_bc:
- raise Exception(f'AVB version mismatch between current & one & \
+ raise builtins.Exception(f'AVB version mismatch between current & one & \
used to build bootconfigs:{avb_version_curr}&{avb_version_bc}')
def calc_vbmeta_digest():
@@ -327,7 +329,8 @@
detach_bootconfigs(initrd, tmp_initrd, tmp_bc)
bc_file = open(tmp_bc, "rt", encoding="utf-8")
bc_data = bc_file.read()
- validate_avb_version(bc_data)
+ if not args.do_not_validate_avb_version:
+ validate_avb_version(bc_data)
bc_data = update_vbmeta_digest(bc_data)
bc_data = update_vbmeta_size(bc_data)
bc_file.close()
@@ -409,8 +412,11 @@
'--output_vbmeta_image', output]
RunCommand(args, cmd)
+
+gki_versions = ['android14-6.1']
+
# dict of (key, file) for re-sign/verification. keys are un-versioned for readability.
-virt_apex_files = {
+virt_apex_non_gki_files = {
'kernel': 'etc/fs/microdroid_kernel',
'vbmeta.img': 'etc/fs/microdroid_vbmeta.img',
'super.img': 'etc/fs/microdroid_super.img',
@@ -418,9 +424,23 @@
'initrd_debuggable.img': 'etc/microdroid_initrd_debuggable.img',
}
-
def TargetFiles(input_dir):
- return {k: os.path.join(input_dir, v) for k, v in virt_apex_files.items()}
+ ret = {k: os.path.join(input_dir, v) for k, v in virt_apex_non_gki_files.items()}
+
+ for ver in gki_versions:
+ kernel = os.path.join(input_dir, f'etc/fs/microdroid_gki-{ver}_kernel')
+ initrd_normal = os.path.join(input_dir, f'etc/microdroid_gki-{ver}_initrd_normal.img')
+ initrd_debug = os.path.join(input_dir, f'etc/microdroid_gki-{ver}_initrd_debuggable.img')
+
+ if os.path.isfile(kernel):
+ ret[f'gki-{ver}_kernel'] = kernel
+ ret[f'gki-{ver}_initrd_normal.img'] = initrd_normal
+ ret[f'gki-{ver}_initrd_debuggable.img'] = initrd_debug
+
+ return ret
+
+def IsInitrdImage(path):
+ return path.endswith('initrd_normal.img') or path.endswith('initrd_debuggable.img')
def SignVirtApex(args):
@@ -430,42 +450,67 @@
# unpacked files (will be unpacked from super.img below)
system_a_img = os.path.join(unpack_dir.name, 'system_a.img')
+ vendor_a_img = os.path.join(unpack_dir.name, 'vendor_a.img')
# re-sign super.img
# 1. unpack super.img
- # 2. resign system
- # 3. repack super.img out of resigned system
+ # 2. resign system and vendor (if it exists)
+ # 3. repack super.img out of resigned system and vendor (if it exists)
UnpackSuperImg(args, files['super.img'], unpack_dir.name)
system_a_f = Async(AddHashTreeFooter, args, key, system_a_img)
partitions = {"system_a": system_a_img}
+ images = [system_a_img]
+ images_f = [system_a_f]
+
+ # if vendor_a.img exists, resign it
+ if os.path.exists(vendor_a_img):
+ partitions.update({'vendor_a': vendor_a_img})
+ images.append(vendor_a_img)
+ vendor_a_f = Async(AddHashTreeFooter, args, key, vendor_a_img)
+ images_f.append(vendor_a_f)
+
Async(MakeSuperImage, args, partitions,
- files['super.img'], wait=[system_a_f])
+ files['super.img'], wait=images_f)
# re-generate vbmeta from re-signed system_a.img
vbmeta_f = Async(MakeVbmetaImage, args, key, files['vbmeta.img'],
- images=[system_a_img],
- wait=[system_a_f])
+ images=images,
+ wait=images_f)
vbmeta_bc_f = None
if not args.do_not_update_bootconfigs:
- vbmeta_bc_f = Async(UpdateVbmetaBootconfig, args,
- [files['initrd_normal.img'],
- files['initrd_debuggable.img']], files['vbmeta.img'],
+ initrd_files = [v for k, v in files.items() if IsInitrdImage(k)]
+ vbmeta_bc_f = Async(UpdateVbmetaBootconfig, args, initrd_files,
+ files['vbmeta.img'],
wait=[vbmeta_f])
# Re-sign kernel. Note kernel's vbmeta contain addition descriptor from ramdisk(s)
- initrd_normal_hashdesc = tempfile.NamedTemporaryFile(delete=False).name
- initrd_debug_hashdesc = tempfile.NamedTemporaryFile(delete=False).name
- initrd_n_f = Async(GenVbmetaImage, args, files['initrd_normal.img'],
- initrd_normal_hashdesc, "initrd_normal",
- wait=[vbmeta_bc_f] if vbmeta_bc_f is not None else [])
- initrd_d_f = Async(GenVbmetaImage, args, files['initrd_debuggable.img'],
- initrd_debug_hashdesc, "initrd_debug",
- wait=[vbmeta_bc_f] if vbmeta_bc_f is not None else [])
- Async(AddHashFooter, args, key, files['kernel'], partition_name="boot",
- additional_descriptors=[
- initrd_normal_hashdesc, initrd_debug_hashdesc],
- wait=[initrd_n_f, initrd_d_f])
+ def resign_kernel(kernel, initrd_normal, initrd_debug):
+ kernel_file = files[kernel]
+ initrd_normal_file = files[initrd_normal]
+ initrd_debug_file = files[initrd_debug]
+
+ initrd_normal_hashdesc = tempfile.NamedTemporaryFile(delete=False).name
+ initrd_debug_hashdesc = tempfile.NamedTemporaryFile(delete=False).name
+ initrd_n_f = Async(GenVbmetaImage, args, initrd_normal_file,
+ initrd_normal_hashdesc, "initrd_normal",
+ wait=[vbmeta_bc_f] if vbmeta_bc_f is not None else [])
+ initrd_d_f = Async(GenVbmetaImage, args, initrd_debug_file,
+ initrd_debug_hashdesc, "initrd_debug",
+ wait=[vbmeta_bc_f] if vbmeta_bc_f is not None else [])
+ Async(AddHashFooter, args, key, kernel_file, partition_name="boot",
+ additional_descriptors=[
+ initrd_normal_hashdesc, initrd_debug_hashdesc],
+ wait=[initrd_n_f, initrd_d_f])
+
+ resign_kernel('kernel', 'initrd_normal.img', 'initrd_debuggable.img')
+
+ for ver in gki_versions:
+ if f'gki-{ver}_kernel' in files:
+ resign_kernel(
+ f'gki-{ver}_kernel',
+ f'gki-{ver}_initrd_normal.img',
+ f'gki-{ver}_initrd_debuggable.img')
def VerifyVirtApex(args):
@@ -489,11 +534,11 @@
assert info is not None, f'no avbinfo: {file}'
assert info['Public key (sha1)'] == pubkey_digest, f'pubkey mismatch: {file}'
- for f in files.values():
- if f in (files['initrd_normal.img'], files['initrd_debuggable.img']):
+ for k, f in files.items():
+ if IsInitrdImage(k):
# TODO(b/245277660): Verify that ramdisks contain the correct vbmeta digest
continue
- if f == files['super.img']:
+ if k == 'super.img':
Async(check_avb_pubkey, system_a_img)
else:
# Check pubkey for other files using avbtool
diff --git a/apex/vfio_handler.rc b/apex/vfio_handler.rc
new file mode 100644
index 0000000..419acef
--- /dev/null
+++ b/apex/vfio_handler.rc
@@ -0,0 +1,20 @@
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+service vfio_handler /apex/com.android.virt/bin/vfio_handler
+ user root
+ group system
+ interface aidl android.system.virtualizationservice_internal.IVfioHandler
+ disabled
+ oneshot
diff --git a/apex/virtualizationservice.rc b/apex/virtualizationservice.rc
index 8283594..02b2081 100644
--- a/apex/virtualizationservice.rc
+++ b/apex/virtualizationservice.rc
@@ -19,10 +19,3 @@
interface aidl android.system.virtualizationservice
disabled
oneshot
-
-service vfio_handler /apex/com.android.virt/bin/vfio_handler
- user root
- group system
- interface aidl android.system.virtualizationservice_internal.IVfioHandler
- disabled
- oneshot
diff --git a/apex/virtualizationservice.xml b/apex/virtualizationservice.xml
index 0ce1e10..60f466f 100644
--- a/apex/virtualizationservice.xml
+++ b/apex/virtualizationservice.xml
@@ -1,6 +1,6 @@
<manifest version="1.0" type="framework">
<hal format="aidl">
- <name>android.system.virtualization</name>
+ <name>android.hardware.security.keymint</name>
<version>3</version>
<fqname>IRemotelyProvisionedComponent/avf</fqname>
</hal>
diff --git a/apex/virtualizationservice_attestation_enabled.rc b/apex/virtualizationservice_attestation_enabled.rc
new file mode 100644
index 0000000..8eaccae
--- /dev/null
+++ b/apex/virtualizationservice_attestation_enabled.rc
@@ -0,0 +1,22 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+service virtualizationservice /apex/com.android.virt/bin/virtualizationservice
+ class main
+ user system
+ group system
+ interface aidl android.system.virtualizationservice
+ interface aidl android.hardware.security.keymint.IRemotelyProvisionedComponent/avf
+ disabled
+ oneshot
diff --git a/apkdmverity/Android.bp b/apkdmverity/Android.bp
index c4c90cd..0cb8ca1 100644
--- a/apkdmverity/Android.bp
+++ b/apkdmverity/Android.bp
@@ -15,6 +15,7 @@
"libbitflags",
"libclap",
"libdm_rust",
+ "libhex",
"libitertools",
"liblibc",
"libnix",
diff --git a/apkdmverity/src/main.rs b/apkdmverity/src/main.rs
index d9e9e2b..0ecb0ea 100644
--- a/apkdmverity/src/main.rs
+++ b/apkdmverity/src/main.rs
@@ -46,7 +46,7 @@
for (apk, idsig, name, roothash) in apks.tuples() {
let roothash = if roothash != "none" {
- Some(util::parse_hexstring(roothash).expect("failed to parse roothash"))
+ Some(hex::decode(roothash).expect("failed to parse roothash"))
} else {
None
};
@@ -108,8 +108,10 @@
bail!("The size of {:?} is not multiple of {}.", &apk, BLOCK_SIZE)
}
(
- loopdevice::attach(&apk, 0, apk_size, /*direct_io*/ true, /*writable*/ false)
- .context("Failed to attach APK to a loop device")?,
+ loopdevice::attach(
+ &apk, 0, apk_size, /* direct_io */ true, /* writable */ false,
+ )
+ .context("Failed to attach APK to a loop device")?,
apk_size,
)
};
@@ -123,9 +125,10 @@
// Due to unknown reason(b/191344832), we can't enable "direct IO" for the IDSIG file (backing
// the hash). For now we don't use "direct IO" but it seems OK since the IDSIG file is very
// small and the benefit of direct-IO would be negliable.
- let hash_device =
- loopdevice::attach(&idsig, offset, size, /*direct_io*/ false, /*writable*/ false)
- .context("Failed to attach idsig to a loop device")?;
+ let hash_device = loopdevice::attach(
+ &idsig, offset, size, /* direct_io */ false, /* writable */ false,
+ )
+ .context("Failed to attach idsig to a loop device")?;
// Build a dm-verity target spec from the information from the idsig file. The apk and the
// idsig files are used as the data device and the hash device, respectively.
@@ -338,7 +341,7 @@
// of the data device is done in the scopeguard for the return value of `enable_verity`
// below. Only the idsig_loop_device needs detatching.
let apk_loop_device = loopdevice::attach(
- &apk_path, 0, apk_size, /*direct_io*/ true, /*writable*/ false,
+ &apk_path, 0, apk_size, /* direct_io */ true, /* writable */ false,
)
.unwrap();
let idsig_loop_device = scopeguard::guard(
@@ -346,8 +349,8 @@
&idsig_path,
0,
idsig_size,
- /*direct_io*/ false,
- /*writable*/ false,
+ /* direct_io */ false,
+ /* writable */ false,
)
.unwrap(),
|dev| loopdevice::detach(dev).unwrap(),
diff --git a/authfs/Android.bp b/authfs/Android.bp
index a4151c2..8ac600d 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -19,6 +19,7 @@
"libclap",
"libfsverity_digests_proto_rust",
"libfuse_rust",
+ "libhex",
"liblibc",
"liblog_rust",
"libnix",
diff --git a/authfs/service/src/main.rs b/authfs/service/src/main.rs
index 78de07a..67e22a5 100644
--- a/authfs/service/src/main.rs
+++ b/authfs/service/src/main.rs
@@ -127,6 +127,7 @@
Ok(unsafe { OwnedFd::from_raw_fd(raw_fd) })
}
+#[allow(clippy::eq_op)]
fn try_main() -> Result<()> {
let debuggable = env!("TARGET_BUILD_VARIANT") != "user";
let log_level = if debuggable { log::Level::Trace } else { log::Level::Info };
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
index 8585fdf..6d724ca 100644
--- a/authfs/src/fsverity/builder.rs
+++ b/authfs/src/fsverity/builder.rs
@@ -159,7 +159,7 @@
#[test]
fn merkle_tree_empty_file() -> Result<()> {
assert_eq!(
- to_u8_vec("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95"),
+ hex::decode("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95")?,
generate_fsverity_digest_sequentially(&Vec::new())?
);
Ok(())
@@ -169,7 +169,7 @@
fn merkle_tree_file_size_less_than_or_equal_to_4k() -> Result<()> {
// Test a file that contains 4096 '\01's.
assert_eq!(
- to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af"),
+ hex::decode("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")?,
generate_fsverity_digest_sequentially(&vec![1; 4096])?
);
Ok(())
@@ -180,24 +180,24 @@
// Test files that contains >4096 '\01's.
assert_eq!(
- to_u8_vec("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52"),
+ hex::decode("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52")?,
generate_fsverity_digest_sequentially(&vec![1; 4097])?
);
assert_eq!(
- to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d"),
+ hex::decode("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")?,
generate_fsverity_digest_sequentially(&vec![1; 8192])?
);
// Test with max size that still fits in 2 levels.
assert_eq!(
- to_u8_vec("26b7c190a34e19f420808ee7ec233b09fa6c34543b5a9d2950530114c205d14f"),
+ hex::decode("26b7c190a34e19f420808ee7ec233b09fa6c34543b5a9d2950530114c205d14f")?,
generate_fsverity_digest_sequentially(&vec![1; 524288])?
);
// Test with data that requires 3 levels.
assert_eq!(
- to_u8_vec("316835d9be1c95b5cd55d07ae7965d651689efad186e26cbf680e40b683a3262"),
+ hex::decode("316835d9be1c95b5cd55d07ae7965d651689efad186e26cbf680e40b683a3262")?,
generate_fsverity_digest_sequentially(&vec![1; 524289])?
);
Ok(())
@@ -215,7 +215,7 @@
tree.update_hash(2, &hash, CHUNK_SIZE * 3);
assert_eq!(
- to_u8_vec("7d3c0d2e1dc54230b20ed875f5f3a4bd3f9873df601936b3ca8127d4db3548f3"),
+ hex::decode("7d3c0d2e1dc54230b20ed875f5f3a4bd3f9873df601936b3ca8127d4db3548f3")?,
tree.calculate_fsverity_digest()?
);
Ok(())
@@ -268,12 +268,4 @@
}
Ok(tree.calculate_fsverity_digest()?)
}
-
- fn to_u8_vec(hex_str: &str) -> Vec<u8> {
- assert!(hex_str.len() % 2 == 0);
- (0..hex_str.len())
- .step_by(2)
- .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap())
- .collect()
- }
}
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index 4af6e80..c84500b 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -373,7 +373,7 @@
let file = VerifiedFileEditor::new(InMemoryEditor::new());
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95")
+ hex::decode("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95")?
.as_slice()
);
Ok(())
@@ -386,7 +386,7 @@
assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
+ hex::decode("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")?
.as_slice()
);
@@ -395,7 +395,7 @@
assert_eq!(file.write_at(&[1; 4097], 0)?, 4097);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52")
+ hex::decode("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52")?
.as_slice()
);
@@ -404,7 +404,7 @@
assert_eq!(file.write_at(&[1; 10000], 0)?, 10000);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("7545409b556071554d18973a29b96409588c7cda4edd00d5586b27a11e1a523b")
+ hex::decode("7545409b556071554d18973a29b96409588c7cda4edd00d5586b27a11e1a523b")?
.as_slice()
);
Ok(())
@@ -417,7 +417,7 @@
assert_eq!(file.write_at(&[1; 5], 3)?, 5);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("a23fc5130d3d7b3323fc4b4a5e79d5d3e9ddf3a3f5872639e867713512c6702f")
+ hex::decode("a23fc5130d3d7b3323fc4b4a5e79d5d3e9ddf3a3f5872639e867713512c6702f")?
.as_slice()
);
@@ -426,7 +426,7 @@
assert_eq!(file.write_at(&[1; 6000], 4000)?, 6000);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("d16d4c1c186d757e646f76208b21254f50d7f07ea07b1505ff48b2a6f603f989")
+ hex::decode("d16d4c1c186d757e646f76208b21254f50d7f07ea07b1505ff48b2a6f603f989")?
.as_slice()
);
Ok(())
@@ -439,7 +439,7 @@
assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("4df2aefd8c2a9101d1d8770dca3ede418232eabce766bb8e020395eae2e97103")
+ hex::decode("4df2aefd8c2a9101d1d8770dca3ede418232eabce766bb8e020395eae2e97103")?
.as_slice()
);
@@ -448,7 +448,7 @@
assert_eq!(file.write_at(&[1; 5000], 6000)?, 5000);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("47d5da26f6934484e260630a69eb2eebb21b48f69bc8fbf8486d1694b7dba94f")
+ hex::decode("47d5da26f6934484e260630a69eb2eebb21b48f69bc8fbf8486d1694b7dba94f")?
.as_slice()
);
@@ -457,7 +457,7 @@
assert_eq!(file.write_at(&[1; 5], 16381)?, 5);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("8bd118821fb4aff26bb4b51d485cc481a093c68131b7f4f112e9546198449752")
+ hex::decode("8bd118821fb4aff26bb4b51d485cc481a093c68131b7f4f112e9546198449752")?
.as_slice()
);
Ok(())
@@ -470,34 +470,34 @@
assert_eq!(file.write_at(&[1; 2048], 4096 + 2048)?, 2048);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
+ hex::decode("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")?
.as_slice()
);
assert_eq!(file.write_at(&[1; 2048], 2048)?, 2048);
assert_eq!(file.write_at(&[1; 2048], 4096)?, 2048);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
+ hex::decode("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")?
.as_slice()
);
assert_eq!(file.write_at(&[0; 2048], 2048)?, 2048);
assert_eq!(file.write_at(&[0; 2048], 4096)?, 2048);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
+ hex::decode("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")?
.as_slice()
);
assert_eq!(file.write_at(&[1; 4096], 2048)?, 4096);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
+ hex::decode("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")?
.as_slice()
);
assert_eq!(file.write_at(&[1; 2048], 8192)?, 2048);
assert_eq!(file.write_at(&[1; 2048], 8192 + 2048)?, 2048);
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("23cbac08371e6ee838ebcc7ae6512b939d2226e802337be7b383c3e046047d24")
+ hex::decode("23cbac08371e6ee838ebcc7ae6512b939d2226e802337be7b383c3e046047d24")?
.as_slice()
);
Ok(())
@@ -555,7 +555,7 @@
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
+ hex::decode("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")?
.as_slice()
);
Ok(())
@@ -572,7 +572,7 @@
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("9e0e2745c21e4e74065240936d2047340d96a466680c3c9d177b82433e7a0bb1")
+ hex::decode("9e0e2745c21e4e74065240936d2047340d96a466680c3c9d177b82433e7a0bb1")?
.as_slice()
);
Ok(())
@@ -589,7 +589,7 @@
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
+ hex::decode("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")?
.as_slice()
);
Ok(())
@@ -621,17 +621,9 @@
assert_eq!(
file.calculate_fsverity_digest()?,
- to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
+ hex::decode("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")?
.as_slice()
);
Ok(())
}
-
- fn to_u8_vec(hex_str: &str) -> Vec<u8> {
- assert!(hex_str.len() % 2 == 0);
- (0..hex_str.len())
- .step_by(2)
- .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap())
- .collect()
- }
}
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 9ff0ae3..e14b771 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -169,21 +169,6 @@
})
}
-fn from_hex_string(s: &str) -> Result<Vec<u8>> {
- if s.len() % 2 == 1 {
- bail!("Incomplete hex string: {}", s);
- } else {
- let results = (0..s.len())
- .step_by(2)
- .map(|i| {
- u8::from_str_radix(&s[i..i + 2], 16)
- .map_err(|e| anyhow!("Cannot parse hex {}: {}", &s[i..i + 2], e))
- })
- .collect::<Result<Vec<_>>>();
- Ok(results?)
- }
-}
-
fn new_remote_verified_file_entry(
service: file::VirtFdService,
remote_fd: i32,
@@ -193,7 +178,7 @@
reader: LazyVerifiedReadonlyFile::prepare_by_fd(
service,
remote_fd,
- from_hex_string(expected_digest)?,
+ hex::decode(expected_digest)?,
),
})
}
@@ -332,18 +317,3 @@
std::process::exit(1);
}
}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn parse_hex_string() {
- assert_eq!(from_hex_string("deadbeef").unwrap(), vec![0xde, 0xad, 0xbe, 0xef]);
- assert_eq!(from_hex_string("DEADBEEF").unwrap(), vec![0xde, 0xad, 0xbe, 0xef]);
- assert_eq!(from_hex_string("").unwrap(), Vec::<u8>::new());
-
- assert!(from_hex_string("deadbee").is_err());
- assert!(from_hex_string("X").is_err());
- }
-}
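
The removed `from_hex_string` helper and its tests are superseded by the `hex` crate added to the dependencies above. A minimal sketch (illustrative only, not part of this change) of the equivalent behaviour the old tests checked for:

```rust
// Illustrative sketch: hex::decode matches the behaviour of the removed helper.
fn hex_decode_examples() {
    assert_eq!(hex::decode("deadbeef").unwrap(), vec![0xde, 0xad, 0xbe, 0xef]);
    assert_eq!(hex::decode("DEADBEEF").unwrap(), vec![0xde, 0xad, 0xbe, 0xef]);
    assert_eq!(hex::decode("").unwrap(), Vec::<u8>::new());
    // Odd-length or non-hex input is rejected, as before.
    assert!(hex::decode("deadbee").is_err());
    assert!(hex::decode("X").is_err());
}
```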
diff --git a/compos/composd/src/composd_main.rs b/compos/composd/src/composd_main.rs
index b558f06..9f6ce9c 100644
--- a/compos/composd/src/composd_main.rs
+++ b/compos/composd/src/composd_main.rs
@@ -31,6 +31,7 @@
use std::panic;
use std::sync::Arc;
+#[allow(clippy::eq_op)]
fn try_main() -> Result<()> {
let debuggable = env!("TARGET_BUILD_VARIANT") != "user";
let log_level = if debuggable { log::Level::Debug } else { log::Level::Info };
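
The `#[allow(clippy::eq_op)]` added here (and in authfs above) is needed because, on user builds, `env!("TARGET_BUILD_VARIANT")` expands to `"user"` and the comparison then has identical operands, which triggers clippy's `eq_op` lint. A minimal sketch of the pattern, assuming that lint behaviour:

```rust
// Sketch: on a user build this compiles to `"user" != "user"`, which the
// eq_op lint would otherwise flag.
#[allow(clippy::eq_op)]
fn is_debuggable() -> bool {
    env!("TARGET_BUILD_VARIANT") != "user"
}
```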
diff --git a/compos/src/compsvc_main.rs b/compos/src/compsvc_main.rs
index b0fc323..128d581 100644
--- a/compos/src/compsvc_main.rs
+++ b/compos/src/compsvc_main.rs
@@ -54,10 +54,7 @@
// SAFETY: We hold a strong pointer, so the raw pointer remains valid. The bindgen AIBinder
// is the same type as sys::AIBinder. It is safe for on_ready to be invoked at any time, with
// any parameter.
- unsafe {
- AVmPayload_runVsockRpcServer(service, COMPOS_VSOCK_PORT, Some(on_ready), param);
- }
- Ok(())
+ unsafe { AVmPayload_runVsockRpcServer(service, COMPOS_VSOCK_PORT, Some(on_ready), param) }
}
extern "C" fn on_ready(_param: *mut c_void) {
diff --git a/javalib/README.md b/javalib/README.md
index 4eb64df..cf7a6cb 100644
--- a/javalib/README.md
+++ b/javalib/README.md
@@ -15,6 +15,9 @@
`android.permission.MANAGE_VIRTUAL_MACHINE` permission, so they are not
available to third party apps.
+All of these APIs were introduced in API level 34 (Android 14). The classes may
+not exist in devices running an earlier version.
+
## Detecting AVF Support
The simplest way to detect whether a device has support for AVF is to retrieve
@@ -22,7 +25,7 @@
[`VirtualMachineManager`](src/android/system/virtualmachine/VirtualMachineManager.java)
class; if the result is not `null` then the device has support. You can then
find out whether protected, non-protected VMs, or both are supported using the
-`getCapabilities()` method:
+`getCapabilities()` method. Note that this code requires API level 34 or higher:
```Java
VirtualMachineManager vmm = context.getSystemService(VirtualMachineManager.class);
@@ -41,7 +44,8 @@
```
An alternative for detecting AVF support is to query support for the
-`android.software.virtualization_framework` system feature:
+`android.software.virtualization_framework` system feature. This method will
+work on any API level, and return false if it is below 34:
```Java
if (getPackageManager().hasSystemFeature(PackageManager.FEATURE_VIRTUALIZATION_FRAMEWORK)) {
@@ -116,7 +120,9 @@
reached - but there is some overhead proportional to the maximum size.)
- How many virtual CPUs the VM has.
- How much encrypted storage the VM has.
-- The path to the installed APK containing the code to run as the VM payload.
+- The path to the installed APK containing the code to run as the VM
+ payload. (Normally you don't need this; the APK path is determined from the
+ context passed to the config builder.)
## VM Life-cycle
@@ -244,7 +250,7 @@
### Binder
-The use of AIDL interfaces between the VM and app is support via Binder RPC,
+The use of AIDL interfaces between the VM and app is supported via Binder RPC,
which transmits messages over an underlying vsock socket.
Note that Binder RPC has some limitations compared to the kernel Binder used in
@@ -304,10 +310,12 @@
which includes the payload's exit code.
Use of `stop()` should be reserved as a recovery mechanism - for example if the
-VM has not stopped within a reasonable time after being requested to.
+VM has not stopped within a reasonable time (a few seconds, say) after being
+requested to.
-The status of a VM will be `STATUS_STOPPED` after a successful call to `stop()`,
-or if your `onPayloadStopped()` callback is invoked.
+The status of a VM will be `STATUS_STOPPED` if your `onStopped()` callback is
+invoked, or after a successful call to `stop()`. Note that your `onStopped()`
+will be called on the VM even if it ended as a result of a call to `stop()`.
# Encrypted Storage
@@ -333,9 +341,8 @@
# Transferring a VM
-It is possible to make a copy of a VM instance with a new name. This can be used
-to transfer a VM from one app to another, which can be useful in some
-circumstances.
+It is possible to make a copy of a VM instance. This can be used to transfer a
+VM from one app to another, which can be useful in some circumstances.
This should only be done while the VM is stopped. The first step is to call
`toDescriptor()` on the
diff --git a/libs/apexutil/Android.bp b/libs/apexutil/Android.bp
index 92d4e80..beff58d 100644
--- a/libs/apexutil/Android.bp
+++ b/libs/apexutil/Android.bp
@@ -6,11 +6,12 @@
name: "libapexutil_rust.defaults",
crate_name: "apexutil",
defaults: ["avf_build_flags_rust"],
- host_supported: true,
srcs: ["src/lib.rs"],
edition: "2021",
rustlibs: [
+ "libapex_manifest_rs",
"liblog_rust",
+ "libprotobuf",
"libthiserror",
"libvbmeta_rust",
"libzip",
@@ -27,18 +28,13 @@
defaults: ["libapexutil_rust.defaults"],
prefer_rlib: true,
test_suites: ["general-tests"],
- data: ["tests/data/*"],
+ // We're reusing test APEXes from system/apex/apexd
+ data: [
+ ":apex.apexd_test",
+ ":apex.apexd_test_v2_no_pb",
+ ":gen_key_mismatch_with_image_apex",
+ ],
rustlibs: [
"libhex",
],
- target: {
- host: {
- // TODO(b/204562227): remove once the build does this automatically
- data_libs: [
- "libc++",
- "libcrypto",
- "libz",
- ],
- },
- },
}
diff --git a/libs/apexutil/src/lib.rs b/libs/apexutil/src/lib.rs
index 8a934e2..639135f 100644
--- a/libs/apexutil/src/lib.rs
+++ b/libs/apexutil/src/lib.rs
@@ -14,6 +14,8 @@
//! Routines for handling APEX payload
+use apex_manifest::apex_manifest::ApexManifest;
+use protobuf::Message;
use std::fs::File;
use std::io::{self, Read};
use thiserror::Error;
@@ -23,28 +25,32 @@
const APEX_PUBKEY_ENTRY: &str = "apex_pubkey";
const APEX_PAYLOAD_ENTRY: &str = "apex_payload.img";
+const APEX_MANIFEST_ENTRY: &str = "apex_manifest.pb";
/// Errors from parsing an APEX.
#[derive(Debug, Error)]
pub enum ApexParseError {
/// There was an IO error.
- #[error("IO error")]
+ #[error("IO error: {0}")]
Io(#[from] io::Error),
/// The Zip archive was invalid.
- #[error("Cannot read zip archive")]
+ #[error("Cannot read zip archive: {0}")]
InvalidZip(&'static str),
- /// The apex_pubkey file was missing from the APEX.
- #[error("APEX doesn't contain apex_pubkey")]
- PubkeyMissing,
- /// The apex_payload.img file was missing from the APEX.
- #[error("APEX doesn't contain apex_payload.img")]
- PayloadMissing,
+ /// An expected file was missing from the APEX.
+ #[error("APEX doesn't contain {0}")]
+ MissingFile(&'static str),
/// There was no hashtree descriptor in the APEX payload's VBMeta image.
#[error("Non-hashtree descriptor found in payload's VBMeta image")]
DescriptorNotHashtree,
/// There was an error parsing the APEX payload's VBMeta image.
- #[error("Could not parse payload's VBMeta image")]
+ #[error("Could not parse payload's VBMeta image: {0}")]
PayloadVbmetaError(#[from] vbmeta::VbMetaImageParseError),
+ /// Data was missing from the VBMeta
+ #[error("Data missing from VBMeta: {0}")]
+ VbmetaMissingData(&'static str),
+ /// An error occurred parsing the APEX manifest as a protobuf
+ #[error("Error parsing manifest protobuf: {0}")]
+ ManifestProtobufError(#[from] protobuf::Error),
}
/// Errors from verifying an APEX.
@@ -58,29 +64,44 @@
PayloadVbmetaError(#[from] vbmeta::VbMetaImageVerificationError),
/// The APEX payload was not verified with the apex_pubkey.
#[error("APEX pubkey mismatch")]
- ApexPubkeyMistmatch,
+ ApexPubkeyMismatch,
}
-/// Verification result holds public key and root digest of apex_payload.img
+/// Information extracted from the APEX during AVB verification.
+#[derive(Debug)]
pub struct ApexVerificationResult {
+ /// The name of the APEX, from its manifest.
+ pub name: Option<String>,
+ /// The version of the APEX, from its manifest.
+ pub version: Option<i64>,
/// The public key that verifies the payload signature.
pub public_key: Vec<u8>,
/// The root digest of the payload hashtree.
pub root_digest: Vec<u8>,
}
-/// Verify APEX payload by AVB verification and return public key and root digest
+/// Verify APEX payload by AVB verification and return information about the APEX.
+/// This verifies that the VBMeta is correctly signed by the public key specified in the APEX.
+/// It doesn't verify that that is the correct key, nor does it verify that the payload matches
+/// the signed root hash - that is handled by dm-verity once apexd has mounted the APEX.
pub fn verify(path: &str) -> Result<ApexVerificationResult, ApexVerificationError> {
let apex_file = File::open(path).map_err(ApexParseError::Io)?;
- let (public_key, image_offset, image_size) = get_public_key_and_image_info(&apex_file)?;
+ let ApexZipInfo { public_key, image_offset, image_size, manifest } =
+ get_apex_zip_info(&apex_file)?;
let vbmeta = VbMetaImage::verify_reader_region(apex_file, image_offset, image_size)?;
let root_digest = find_root_digest(&vbmeta)?;
- match vbmeta.public_key() {
- Some(payload_public_key) if public_key == payload_public_key => {
- Ok(ApexVerificationResult { public_key, root_digest })
- }
- _ => Err(ApexVerificationError::ApexPubkeyMistmatch),
+ let vbmeta_public_key =
+ vbmeta.public_key().ok_or(ApexParseError::VbmetaMissingData("public key"))?;
+ if vbmeta_public_key != public_key {
+ return Err(ApexVerificationError::ApexPubkeyMismatch);
}
+ let (name, version) = if cfg!(dice_changes) {
+ let ApexManifestInfo { name, version } = decode_manifest(&manifest)?;
+ (Some(name), Some(version))
+ } else {
+ (None, None)
+ };
+ Ok(ApexVerificationResult { name, version, public_key, root_digest })
}
fn find_root_digest(vbmeta: &VbMetaImage) -> Result<Vec<u8>, ApexParseError> {
@@ -93,46 +114,52 @@
Err(ApexParseError::DescriptorNotHashtree)
}
-/// Gets the hash of the payload's verified VBMeta image data.
-pub fn get_payload_vbmeta_image_hash(path: &str) -> Result<Vec<u8>, ApexVerificationError> {
- let apex_file = File::open(path).map_err(ApexParseError::Io)?;
- let (_, offset, size) = get_public_key_and_image_info(&apex_file)?;
- let vbmeta = VbMetaImage::verify_reader_region(apex_file, offset, size)?;
- Ok(vbmeta.hash().ok_or(ApexVerificationError::ApexPubkeyMistmatch)?.to_vec())
+struct ApexZipInfo {
+ public_key: Vec<u8>,
+ image_offset: u64,
+ image_size: u64,
+ manifest: Vec<u8>,
}
-fn get_public_key_and_image_info(apex_file: &File) -> Result<(Vec<u8>, u64, u64), ApexParseError> {
- let mut z = ZipArchive::new(apex_file).map_err(|err| match err {
- ZipError::Io(err) => ApexParseError::Io(err),
- ZipError::InvalidArchive(s) | ZipError::UnsupportedArchive(s) => {
- ApexParseError::InvalidZip(s)
- }
- ZipError::FileNotFound => unreachable!(),
- })?;
+fn get_apex_zip_info(apex_file: &File) -> Result<ApexZipInfo, ApexParseError> {
+ let mut z = ZipArchive::new(apex_file).map_err(|err| from_zip_error(err, "?"))?;
let mut public_key = Vec::new();
z.by_name(APEX_PUBKEY_ENTRY)
- .map_err(|err| match err {
- ZipError::Io(err) => ApexParseError::Io(err),
- ZipError::FileNotFound => ApexParseError::PubkeyMissing,
- ZipError::InvalidArchive(s) | ZipError::UnsupportedArchive(s) => {
- ApexParseError::InvalidZip(s)
- }
- })?
+ .map_err(|err| from_zip_error(err, APEX_PUBKEY_ENTRY))?
.read_to_end(&mut public_key)?;
let (image_offset, image_size) = z
.by_name(APEX_PAYLOAD_ENTRY)
.map(|f| (f.data_start(), f.size()))
- .map_err(|err| match err {
- ZipError::Io(err) => ApexParseError::Io(err),
- ZipError::FileNotFound => ApexParseError::PayloadMissing,
- ZipError::InvalidArchive(s) | ZipError::UnsupportedArchive(s) => {
- ApexParseError::InvalidZip(s)
- }
- })?;
+ .map_err(|err| from_zip_error(err, APEX_PAYLOAD_ENTRY))?;
- Ok((public_key, image_offset, image_size))
+ let mut manifest = Vec::new();
+ z.by_name(APEX_MANIFEST_ENTRY)
+ .map_err(|err| from_zip_error(err, APEX_MANIFEST_ENTRY))?
+ .read_to_end(&mut manifest)?;
+
+ Ok(ApexZipInfo { public_key, image_offset, image_size, manifest })
+}
+
+struct ApexManifestInfo {
+ name: String,
+ version: i64,
+}
+
+fn decode_manifest(mut manifest: &[u8]) -> Result<ApexManifestInfo, ApexParseError> {
+ let manifest = ApexManifest::parse_from_reader(&mut manifest)?;
+ Ok(ApexManifestInfo { name: manifest.name, version: manifest.version })
+}
+
+fn from_zip_error(err: ZipError, name: &'static str) -> ApexParseError {
+ match err {
+ ZipError::Io(err) => ApexParseError::Io(err),
+ ZipError::InvalidArchive(s) | ZipError::UnsupportedArchive(s) => {
+ ApexParseError::InvalidZip(s)
+ }
+ ZipError::FileNotFound => ApexParseError::MissingFile(name),
+ }
}
#[cfg(test)]
@@ -141,20 +168,68 @@
#[test]
fn apex_verification_returns_valid_result() {
- let res = verify("tests/data/test.apex").unwrap();
- // The expected hex is generated when we ran the method the first time.
+ let res = verify("apex.apexd_test.apex").unwrap();
+ let (expected_name, expected_version) = if cfg!(dice_changes) {
+ (Some("com.android.apex.test_package"), Some(1))
+ } else {
+ (None, None)
+ };
+ assert_eq!(res.name.as_deref(), expected_name);
+ assert_eq!(res.version, expected_version);
+ // The expected hex values were generated when we ran the method the first time.
assert_eq!(
hex::encode(res.root_digest),
- "fe11ab17da0a3a738b54bdc3a13f6139cbdf91ec32f001f8d4bbbf8938e04e39"
+ "54265da77ae1fd619e39809ad99fedc576bb20c0c7a8002190fa64438436299f"
+ );
+ assert_eq!(
+ hex::encode(res.public_key),
+ "\
+ 00001000963a5527aaf0145b3bb5f899a05034ccc76dafdd671dbf4e42c04df2eeba15\
+ 6c884816d7d08ef8d834d4adc27979afed9eaf406694d0d600f0b6d31e3ab85da47d27\
+ 9c223a1630e02332d920587617ea766a136057a3a3232a7c42f83fb3763e853be4026c\
+ 067524a95fcbfcc6caadfb553210bb5385f5adc5caeb0e3f6a9aa56af88d8899d962eb\
+ 807864feabeeacdd868697935fb4cc4843957e0d90ee4293c715c4e5b970e6545a17d1\
+ 735f814c7d4dbdeaac97275a84f292e3715c158d38eb00eebd010dd2fa56595c0e5627\
+ 06c7a94e566912f993e5e35c04b2a314d1bce1ceb10de6c50f8101ddb6ee993fc79959\
+ 2e79ee73b77741ee5c076c89343684344a6d080e5529a046d506d104bf32903e39c363\
+ b020fee9d87e7c6ffdad120b630386e958416ac156bc2d7301836c79e926e8f185a640\
+ be05135e17018c88dde02cd7bd49655e9e9dff7f965fb8e68217236c18d23b6d7e7632\
+ 184acb95b088598601c809d5e66c19f5e06b5e5ff1bbae7e3142959d9380db2d4a25c8\
+ 757975232ea311016e830703a6023b0986e885f2eda066517fce09f33f359b6ef7cc5a\
+ 2fdaced74257661bad184a653ea2d80d1af68de5821c06a472635f0276dc42d699f588\
+ ea6c46189ca1ad544bbd4951a766bc4119b0ea671cb16556762721723bf1db47c83c76\
+ a7cc2fd3b6029efec9908d9d4640294f6ea46f6e1a3195e9252c393e35698911a7c496\
+ 138dc2dd8d9dcb470ae1c6d2224d13b160fb3ae4bc235f6133c2ff5f9232fb89adfdba\
+ 48dcc47cf29a22cd47dcec0b1a179f352c9848a8e04ac37f35777a24312c821febc591\
+ 84c8cdefc88e50b4d6bc9530ca743f4284c9773677d38527e6e8020fe367f0f16a6c49\
+ 9a7f2da95ec6471f7382e5c0da98b531702cb55a560de7cafc7b6111aae0f896fb1fed\
+ d4997a954c6c083ef1fd3bb13fef3f95022523fb1fbe7f4a49e12e54a5206f95daa316\
+ ac009b7bee4039f769fd28033db6013df841c86d8345d44418fbc9f669e4ee3294b2ff\
+ 29d048f53d768c0a41f9a280f0229d9912e8b2fb734617a9947be973ed1dc7bdeac9e2\
+ 6028d59317098a44bacdb3b10ccde6ef02f7c94124461032a033701ce523b13142658c\
+ 265385198903ccf227ad5ae88ec31e586cd8f855641fd2646dba8053d0d0924f132505\
+ 8141f1c7433aa9686f48e3f3a972b56776eaf8bf22a740d1aea2ef473184d697de1dab\
+ 9b62a227611c7500b11dea2e5eb8051807c0d1f2fe032acfd7701c017e629f99c74de5\
+ da4c2a542f17b9833beb14442aa7c2990b828473376ea03fdb4a650b88e821fe5026e8\
+ ffb7002d095c9877ee3a98a4488ed3287e9be4942a223f4e32bc26c2ebd02eec20dc82\
+ 7493b44f4efaf9b2e175d4de2b07c32d6d359e234c9e50ef905ffa7f6907c313a3c9f4\
+ 40d1efd5ec7cbeef06dcfd649f4c8219ad"
);
}
#[test]
- fn payload_vbmeta_has_valid_image_hash() {
- let result = get_payload_vbmeta_image_hash("tests/data/test.apex").unwrap();
- assert_eq!(
- hex::encode(result),
- "296e32a76544de9da01713e471403ab4667705ad527bb4f1fac0cf61e7ce122d"
- );
+ fn apex_no_manifest_fails_verification() {
+ match verify("apex.apexd_test_v2_no_pb.apex").unwrap_err() {
+ ApexVerificationError::ParseError(ApexParseError::MissingFile(_)) => (),
+ e => panic!("Unexpected error {e}"),
+ }
+ }
+
+ #[test]
+ fn apex_signature_mismatch_fails_verification() {
+ match verify("apex.apexd_test_wrong_public_key.apex").unwrap_err() {
+ ApexVerificationError::ApexPubkeyMismatch => (),
+ e => panic!("Unexpected error {e}"),
+ }
}
}
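
A caller-side sketch of the extended `verify` result (the path is a placeholder; `name` and `version` are only populated when the `dice_changes` cfg is enabled):

```rust
// Hypothetical usage, not part of this change.
fn log_apex_info(path: &str) -> Result<(), apexutil::ApexVerificationError> {
    let res = apexutil::verify(path)?;
    if let (Some(name), Some(version)) = (res.name.as_deref(), res.version) {
        log::info!("verified APEX {name} (version {version})");
    }
    log::info!("root digest: {}", hex::encode(&res.root_digest));
    Ok(())
}
```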
diff --git a/libs/apexutil/tests/data/README.md b/libs/apexutil/tests/data/README.md
deleted file mode 100644
index 82ebec6..0000000
--- a/libs/apexutil/tests/data/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Test data
-
-- test.apex: copied from system/apexshim/prebuilts/x86/com.android.apex.cts.shim.v1.apex
\ No newline at end of file
diff --git a/libs/apexutil/tests/data/test.apex b/libs/apexutil/tests/data/test.apex
deleted file mode 100644
index fd79365..0000000
--- a/libs/apexutil/tests/data/test.apex
+++ /dev/null
Binary files differ
diff --git a/libs/apkverify/Android.bp b/libs/apkverify/Android.bp
index 1c18d2d..4c5a622 100644
--- a/libs/apkverify/Android.bp
+++ b/libs/apkverify/Android.bp
@@ -48,10 +48,12 @@
test_suites: ["general-tests"],
rustlibs: [
"libandroid_logger",
+ "libanyhow",
"libapkverify",
"libapkzip",
"libbyteorder",
"liblog_rust",
+ "libopenssl",
"libzip",
],
data: ["tests/data/*"],
diff --git a/libs/apkverify/src/lib.rs b/libs/apkverify/src/lib.rs
index 6af8122..1f3c74f 100644
--- a/libs/apkverify/src/lib.rs
+++ b/libs/apkverify/src/lib.rs
@@ -26,5 +26,5 @@
mod v4;
pub use algorithms::{HashAlgorithm, SignatureAlgorithmID};
-pub use v3::{get_public_key_der, verify};
+pub use v3::{extract_signed_data, verify, SignedData};
pub use v4::{get_apk_digest, V4Signature};
diff --git a/libs/apkverify/src/v3.rs b/libs/apkverify/src/v3.rs
index 8a8ad73..88644c7 100644
--- a/libs/apkverify/src/v3.rs
+++ b/libs/apkverify/src/v3.rs
@@ -44,13 +44,9 @@
public_key: PKey<pkey::Public>,
}
-impl Signer {
- fn sdk_range(&self) -> RangeInclusive<u32> {
- self.min_sdk..=self.max_sdk
- }
-}
-
-struct SignedData {
+/// Contains the signed data part of an APK v3 signature.
+#[derive(Debug)]
+pub struct SignedData {
digests: LengthPrefixed<Vec<LengthPrefixed<Digest>>>,
certificates: LengthPrefixed<Vec<LengthPrefixed<X509Certificate>>>,
min_sdk: u32,
@@ -59,20 +55,6 @@
additional_attributes: LengthPrefixed<Vec<LengthPrefixed<AdditionalAttributes>>>,
}
-impl SignedData {
- fn sdk_range(&self) -> RangeInclusive<u32> {
- self.min_sdk..=self.max_sdk
- }
-
- fn find_digest_by_algorithm(&self, algorithm_id: SignatureAlgorithmID) -> Result<&Digest> {
- Ok(self
- .digests
- .iter()
- .find(|&dig| dig.signature_algorithm_id == Some(algorithm_id))
- .context(format!("Digest not found for algorithm: {:?}", algorithm_id))?)
- }
-}
-
#[derive(Debug)]
pub(crate) struct Signature {
/// Option is used here to allow us to ignore unsupported algorithm.
@@ -80,6 +62,7 @@
signature: LengthPrefixed<Bytes>,
}
+#[derive(Debug)]
struct Digest {
signature_algorithm_id: Option<SignatureAlgorithmID>,
digest: LengthPrefixed<Bytes>,
@@ -88,19 +71,19 @@
type X509Certificate = Bytes;
type AdditionalAttributes = Bytes;
-/// Verifies APK Signature Scheme v3 signatures of the provided APK and returns the public key
-/// associated with the signer in DER format.
-pub fn verify<P: AsRef<Path>>(apk_path: P, current_sdk: u32) -> Result<Box<[u8]>> {
+/// Verifies APK Signature Scheme v3 signatures of the provided APK and returns the SignedData from
+/// the signature.
+pub fn verify<P: AsRef<Path>>(apk_path: P, current_sdk: u32) -> Result<SignedData> {
let apk = File::open(apk_path.as_ref())?;
let (signer, mut sections) = extract_signer_and_apk_sections(apk, current_sdk)?;
signer.verify(&mut sections)
}
-/// Gets the public key (in DER format) that was used to sign the given APK/APEX file
-pub fn get_public_key_der<P: AsRef<Path>>(apk_path: P, current_sdk: u32) -> Result<Box<[u8]>> {
+/// Extracts the SignedData from the signature of the given APK. (The signature is not verified.)
+pub fn extract_signed_data<P: AsRef<Path>>(apk_path: P, current_sdk: u32) -> Result<SignedData> {
let apk = File::open(apk_path.as_ref())?;
let (signer, _) = extract_signer_and_apk_sections(apk, current_sdk)?;
- Ok(signer.public_key.public_key_to_der()?.into_boxed_slice())
+ signer.parse_signed_data()
}
pub(crate) fn extract_signer_and_apk_sections<R: Read + Seek>(
@@ -123,6 +106,10 @@
}
impl Signer {
+ fn sdk_range(&self) -> RangeInclusive<u32> {
+ self.min_sdk..=self.max_sdk
+ }
+
/// Selects the signature that has the strongest supported `SignatureAlgorithmID`.
/// The strongest signature is used in both v3 verification and v4 apk digest computation.
pub(crate) fn strongest_signature(&self) -> Result<&Signature> {
@@ -143,27 +130,33 @@
Ok(digest.digest.as_ref().to_vec().into_boxed_slice())
}
- /// Verifies the strongest signature from signatures against signed data using public key.
- /// Returns the verified signed data.
- fn verify_signature(&self, strongest: &Signature) -> Result<SignedData> {
- let mut verifier = strongest
+ /// Verifies a signature over the signed data using the public key.
+ fn verify_signature(&self, signature: &Signature) -> Result<()> {
+ let mut verifier = signature
.signature_algorithm_id
.context("Unsupported algorithm")?
.new_verifier(&self.public_key)?;
verifier.update(&self.signed_data)?;
- ensure!(verifier.verify(&strongest.signature)?, "Signature is invalid.");
- // It is now safe to parse signed data.
+ ensure!(verifier.verify(&signature.signature)?, "Signature is invalid.");
+ Ok(())
+ }
+
+ /// Returns the signed data, converted from bytes.
+ fn parse_signed_data(&self) -> Result<SignedData> {
self.signed_data.slice(..).read()
}
/// The steps in this method implements APK Signature Scheme v3 verification step 3.
- fn verify<R: Read + Seek>(&self, sections: &mut ApkSections<R>) -> Result<Box<[u8]>> {
+ fn verify<R: Read + Seek>(&self, sections: &mut ApkSections<R>) -> Result<SignedData> {
// 1. Choose the strongest supported signature algorithm ID from signatures.
let strongest = self.strongest_signature()?;
// 2. Verify the corresponding signature from signatures against signed data using public
// key.
- let verified_signed_data = self.verify_signature(strongest)?;
+ self.verify_signature(strongest)?;
+
+ // It is now safe to parse signed data.
+ let verified_signed_data = self.parse_signed_data()?;
// 3. Verify the min and max SDK versions in the signed data match those specified for the
// signer.
@@ -199,8 +192,7 @@
// 7. Verify that public key of the first certificate of certificates is identical to public
// key.
- let cert = verified_signed_data.certificates.first().context("No certificates listed")?;
- let cert = X509::from_der(cert.as_ref())?;
+ let cert = X509::from_der(verified_signed_data.first_certificate_der()?)?;
ensure!(
cert.public_key()?.public_eq(&self.public_key),
"Public key mismatch between certificate and signature record"
@@ -209,7 +201,29 @@
// TODO(b/245914104)
// 8. If the proof-of-rotation attribute exists for the signer verify that the
// struct is valid and this signer is the last certificate in the list.
- Ok(self.public_key.public_key_to_der()?.into_boxed_slice())
+
+ Ok(verified_signed_data)
+ }
+}
+
+impl SignedData {
+ /// Returns the first X.509 certificate in the signed data, encoded in DER form. (All other
+ /// certificates are ignored for v3; this certificate describes the public key that was actually
+ /// used to sign the APK.)
+ pub fn first_certificate_der(&self) -> Result<&[u8]> {
+ Ok(self.certificates.first().context("No certificates listed")?)
+ }
+
+ fn sdk_range(&self) -> RangeInclusive<u32> {
+ self.min_sdk..=self.max_sdk
+ }
+
+ fn find_digest_by_algorithm(&self, algorithm_id: SignatureAlgorithmID) -> Result<&Digest> {
+ Ok(self
+ .digests
+ .iter()
+ .find(|&dig| dig.signature_algorithm_id == Some(algorithm_id))
+ .context(format!("Digest not found for algorithm: {:?}", algorithm_id))?)
}
}
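
A migration sketch for callers of the removed `get_public_key_der` (path and SDK level are placeholders); it mirrors the helper added to the tests below:

```rust
use anyhow::Result;
use apkverify::verify;
use openssl::x509::X509;

// Hypothetical caller: recover the signer's public key in DER form via the
// new SignedData-based API.
fn public_key_der_of(apk_path: &str, current_sdk: u32) -> Result<Vec<u8>> {
    let signed_data = verify(apk_path, current_sdk)?;
    let cert = X509::from_der(signed_data.first_certificate_der()?)?;
    Ok(cert.public_key()?.public_key_to_der()?)
}
```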
diff --git a/libs/apkverify/tests/apkverify_test.rs b/libs/apkverify/tests/apkverify_test.rs
index 0d8e020..441b708 100644
--- a/libs/apkverify/tests/apkverify_test.rs
+++ b/libs/apkverify/tests/apkverify_test.rs
@@ -14,12 +14,14 @@
* limitations under the License.
*/
+use anyhow::Result;
use apkverify::{
- get_apk_digest, get_public_key_der, testing::assert_contains, verify, SignatureAlgorithmID,
+ extract_signed_data, get_apk_digest, testing::assert_contains, verify, SignatureAlgorithmID,
};
use apkzip::zip_sections;
use byteorder::{LittleEndian, ReadBytesExt};
use log::info;
+use openssl::x509::X509;
use std::fmt::Write;
use std::io::{Seek, SeekFrom};
use std::{fs, matches, path::Path};
@@ -286,22 +288,30 @@
/// * public key extracted from apk without verification
/// * expected public key from the corresponding .der file
fn validate_apk_public_key<P: AsRef<Path>>(apk_path: P) {
- let public_key_from_verification = verify(&apk_path, SDK_INT);
- let public_key_from_verification =
- public_key_from_verification.expect("Error in verification result");
+ let signed_data_from_verification =
+ verify(&apk_path, SDK_INT).expect("Error in verification result");
+ let cert_from_verification = signed_data_from_verification.first_certificate_der().unwrap();
+ let public_key_from_verification = public_key_der_from_cert(cert_from_verification).unwrap();
let expected_public_key_path = format!("{}.der", apk_path.as_ref().to_str().unwrap());
assert_bytes_eq_to_data_in_file(&public_key_from_verification, expected_public_key_path);
- let public_key_from_apk = get_public_key_der(&apk_path, SDK_INT);
- let public_key_from_apk =
- public_key_from_apk.expect("Error when extracting public key from apk");
+ let signed_data_from_apk = extract_signed_data(&apk_path, SDK_INT)
+ .expect("Error when extracting signed data from apk");
+ let cert_from_apk = signed_data_from_apk.first_certificate_der().unwrap();
+ // If the two certificates are byte for byte identical (which they should be), then so are
+ // the public keys embedded in them.
assert_eq!(
- public_key_from_verification, public_key_from_apk,
- "Public key extracted directly from apk does not match the public key from verification."
+ cert_from_verification, cert_from_apk,
+ "Certificate extracted directly from apk does not match the certificate from verification."
);
}
+fn public_key_der_from_cert(cert_der: &[u8]) -> Result<Vec<u8>> {
+ let cert = X509::from_der(cert_der)?;
+ Ok(cert.public_key()?.public_key_to_der()?)
+}
+
/// Validates that the following apk_digest are equal:
/// * apk_digest directly extracted from apk without computation
/// * computed apk_digest
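For reference, a minimal sketch of the new apkverify call pattern (illustrative only, not part of the change; it mirrors the test helper above, and the APK path and SDK level are hypothetical):

    use anyhow::Result;
    use apkverify::verify;
    use openssl::x509::X509;

    fn verified_public_key_der() -> Result<Vec<u8>> {
        // verify() now returns the SignedData; the signing certificate and its
        // public key are pulled out of it rather than returned directly.
        let signed_data = verify("test.apk", /* sdk_version */ 33)?;
        let cert_der = signed_data.first_certificate_der()?;
        Ok(X509::from_der(cert_der)?.public_key()?.public_key_to_der()?)
    }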
diff --git a/libs/bssl/Android.bp b/libs/bssl/Android.bp
index 0a2f334..bed3dfb 100644
--- a/libs/bssl/Android.bp
+++ b/libs/bssl/Android.bp
@@ -23,6 +23,8 @@
rustlibs: [
"libbssl_avf_error_nostd",
"libbssl_ffi_nostd",
+ "libcbor_util_nostd",
+ "libciborium_nostd",
"libcoset_nostd",
"liblog_rust_nostd",
"libzeroize_nostd",
@@ -44,5 +46,7 @@
defaults: ["libbssl_avf_test_defaults"],
rustlibs: [
"libbssl_avf_nostd",
+ "libcoset_nostd",
+ "libspki_nostd",
],
}
diff --git a/libs/bssl/error/Android.bp b/libs/bssl/error/Android.bp
index dc2902e..000e385 100644
--- a/libs/bssl/error/Android.bp
+++ b/libs/bssl/error/Android.bp
@@ -21,6 +21,8 @@
"libcore.rust_sysroot",
],
rustlibs: [
+ "libcoset_nostd",
+ "liblog_rust_nostd",
"libserde_nostd",
],
}
@@ -32,6 +34,8 @@
"std",
],
rustlibs: [
+ "libcoset",
+ "liblog_rust",
"libserde",
],
}
diff --git a/libs/bssl/error/src/code.rs b/libs/bssl/error/src/code.rs
index 9b661e9..a318a07 100644
--- a/libs/bssl/error/src/code.rs
+++ b/libs/bssl/error/src/code.rs
@@ -25,6 +25,8 @@
NoError,
Global(GlobalError),
Cipher(CipherError),
+ Ec(EcError),
+ Ecdsa(EcdsaError),
Unknown(BsslReasonCode, BsslLibraryCode),
}
@@ -102,3 +104,86 @@
write!(f, "An error occurred in a Cipher function: {self:?}")
}
}
+
+/// Errors that can occur in the EC functions.
+///
+/// The values are from:
+/// boringssl/src/include/openssl/ec.h
+#[allow(missing_docs)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub enum EcError {
+ BufferTooSmall,
+ CoordinatesOutOfRange,
+ D2IEcpkparametersFailure,
+ EcGroupNewByNameFailure,
+ Group2PkparametersFailure,
+ I2DEcpkparametersFailure,
+ IncompatibleObjects,
+ InvalidCompressedPoint,
+ InvalidCompressionBit,
+ InvalidEncoding,
+ InvalidField,
+ InvalidForm,
+ InvalidGroupOrder,
+ InvalidPrivateKey,
+ MissingParameters,
+ MissingPrivateKey,
+ NonNamedCurve,
+ NotInitialized,
+ Pkparameters2GroupFailure,
+ PointAtInfinity,
+ PointIsNotOnCurve,
+ SlotFull,
+ UndefinedGenerator,
+ UnknownGroup,
+ UnknownOrder,
+ WrongOrder,
+ BignumOutOfRange,
+ WrongCurveParameters,
+ DecodeError,
+ EncodeError,
+ GroupMismatch,
+ InvalidCofactor,
+ PublicKeyValidationFailed,
+ InvalidScalar,
+}
+
+impl From<EcError> for ReasonCode {
+ fn from(e: EcError) -> ReasonCode {
+ ReasonCode::Ec(e)
+ }
+}
+
+impl fmt::Display for EcError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "An error occurred in an EC function: {self:?}")
+ }
+}
+
+/// Errors that can occur in the ECDSA functions.
+///
+/// The values are from:
+/// boringssl/src/include/openssl/ecdsa.h
+#[allow(missing_docs)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub enum EcdsaError {
+ BadSignature,
+ MissingParameters,
+ NeedNewSetupValues,
+ NotImplemented,
+ RandomNumberGenerationFailed,
+ EncodeError,
+ TooManyIterations,
+}
+
+impl From<EcdsaError> for ReasonCode {
+ fn from(e: EcdsaError) -> ReasonCode {
+ ReasonCode::Ecdsa(e)
+ }
+}
+
+impl fmt::Display for EcdsaError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "An error occurred in an ECDSA function: {self:?}")
+ }
+}
diff --git a/libs/bssl/error/src/lib.rs b/libs/bssl/error/src/lib.rs
index 3766c41..82a2d5e 100644
--- a/libs/bssl/error/src/lib.rs
+++ b/libs/bssl/error/src/lib.rs
@@ -21,7 +21,7 @@
use core::{fmt, result};
use serde::{Deserialize, Serialize};
-pub use crate::code::{CipherError, GlobalError, ReasonCode};
+pub use crate::code::{CipherError, EcError, EcdsaError, GlobalError, ReasonCode};
/// libbssl_avf result type.
pub type Result<T> = result::Result<T, Error>;
@@ -34,6 +34,15 @@
/// An unexpected internal error occurred.
InternalError,
+
+ /// Failed to decode the COSE_Key.
+ CoseKeyDecodingFailed,
+
+ /// An error occurred when interacting with the coset crate.
+ CosetError,
+
+ /// Unimplemented operation.
+ Unimplemented,
}
impl fmt::Display for Error {
@@ -43,30 +52,59 @@
write!(f, "Failed to invoke the BoringSSL API: {api_name:?}. Reason: {reason}")
}
Self::InternalError => write!(f, "An unexpected internal error occurred"),
+ Self::CoseKeyDecodingFailed => write!(f, "Failed to decode the COSE_Key"),
+ Self::CosetError => {
+ write!(f, "An error occurred when interacting with the coset crate")
+ }
+ Self::Unimplemented => write!(f, "Unimplemented operation"),
}
}
}
+impl From<coset::CoseError> for Error {
+ fn from(e: coset::CoseError) -> Self {
+ log::error!("Coset error: {e}");
+ Self::CosetError
+ }
+}
+
/// BoringSSL API names.
#[allow(missing_docs)]
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum ApiName {
BN_new,
+ BN_bin2bn,
BN_bn2bin_padded,
CBB_flush,
CBB_len,
+ EC_GROUP_new_by_curve_name,
EC_KEY_check_key,
EC_KEY_generate_key,
EC_KEY_get0_group,
EC_KEY_get0_public_key,
EC_KEY_marshal_private_key,
+ EC_KEY_parse_private_key,
EC_KEY_new_by_curve_name,
+ EC_KEY_set_public_key_affine_coordinates,
EC_POINT_get_affine_coordinates,
+ ECDSA_sign,
+ ECDSA_size,
+ ECDSA_verify,
+ ED25519_verify,
EVP_AEAD_CTX_new,
EVP_AEAD_CTX_open,
EVP_AEAD_CTX_seal,
+ EVP_Digest,
+ EVP_MD_CTX_new,
+ EVP_PKEY_new,
+ EVP_PKEY_new_raw_public_key,
+ EVP_PKEY_set1_EC_KEY,
+ EVP_marshal_public_key,
+ EVP_DigestVerify,
+ EVP_DigestVerifyInit,
HKDF,
HMAC,
RAND_bytes,
+ SHA256,
}
diff --git a/libs/bssl/src/aead.rs b/libs/bssl/src/aead.rs
index e0c9fbb..1ac2c22 100644
--- a/libs/bssl/src/aead.rs
+++ b/libs/bssl/src/aead.rs
@@ -18,8 +18,8 @@
use bssl_avf_error::{ApiName, Result};
use bssl_ffi::{
EVP_AEAD_CTX_free, EVP_AEAD_CTX_new, EVP_AEAD_CTX_open, EVP_AEAD_CTX_seal,
- EVP_AEAD_max_overhead, EVP_AEAD_nonce_length, EVP_aead_aes_256_gcm, EVP_AEAD, EVP_AEAD_CTX,
- EVP_AEAD_DEFAULT_TAG_LENGTH,
+ EVP_AEAD_max_overhead, EVP_AEAD_nonce_length, EVP_aead_aes_256_gcm,
+ EVP_aead_aes_256_gcm_randnonce, EVP_AEAD, EVP_AEAD_CTX, EVP_AEAD_DEFAULT_TAG_LENGTH,
};
use core::ptr::NonNull;
@@ -51,6 +51,17 @@
Self(unsafe { &*p })
}
+ /// AES-256 in Galois Counter Mode with internal nonce generation.
+ /// The 12-byte nonce is appended to the tag and is generated internally.
+ pub fn aes_256_gcm_randnonce() -> Self {
+ // SAFETY: This function does not access any Rust variables and simply returns
+ // a pointer to the static variable in BoringSSL.
+ let p = unsafe { EVP_aead_aes_256_gcm_randnonce() };
+ // SAFETY: The returned pointer should always be valid and points to a static
+ // `EVP_AEAD`.
+ Self(unsafe { &*p })
+ }
+
/// Returns the maximum number of additional bytes added by the act of sealing data.
pub fn max_overhead(&self) -> usize {
// SAFETY: This function only reads from self.
diff --git a/libs/bssl/src/cbs.rs b/libs/bssl/src/cbs.rs
new file mode 100644
index 0000000..9718903
--- /dev/null
+++ b/libs/bssl/src/cbs.rs
@@ -0,0 +1,55 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Helpers for using BoringSSL CBS (crypto byte string) objects.
+
+use bssl_ffi::{CBS_init, CBS};
+use core::marker::PhantomData;
+use core::mem::MaybeUninit;
+
+/// CRYPTO ByteString.
+///
+/// Wraps a `CBS` that references an existing fixed-sized buffer; no memory is allocated, but the
+/// buffer cannot grow.
+pub struct Cbs<'a> {
+ cbs: CBS,
+ /// The CBS contains a mutable reference to the buffer, disguised as a pointer.
+ /// Make sure the borrow checker knows that.
+ _buffer: PhantomData<&'a [u8]>,
+}
+
+impl<'a> Cbs<'a> {
+ /// Creates a new CBS that points to the given buffer.
+ pub fn new(buffer: &'a [u8]) -> Self {
+ let mut cbs = MaybeUninit::uninit();
+ // SAFETY: `CBS_init()` only sets `cbs` to point to `buffer`. It doesn't take ownership
+ // of data.
+ unsafe { CBS_init(cbs.as_mut_ptr(), buffer.as_ptr(), buffer.len()) };
+ // SAFETY: `cbs` has just been initialized by `CBS_init()`.
+ let cbs = unsafe { cbs.assume_init() };
+ Self { cbs, _buffer: PhantomData }
+ }
+}
+
+impl<'a> AsRef<CBS> for Cbs<'a> {
+ fn as_ref(&self) -> &CBS {
+ &self.cbs
+ }
+}
+
+impl<'a> AsMut<CBS> for Cbs<'a> {
+ fn as_mut(&mut self) -> &mut CBS {
+ &mut self.cbs
+ }
+}
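A minimal sketch of how the new `Cbs` wrapper is meant to be held (illustrative only, not part of the change):

    use bssl_avf::Cbs;

    fn wrap_der(der_bytes: &[u8]) -> Cbs<'_> {
        // The returned Cbs only borrows `der_bytes`; the lifetime ties the borrow to
        // the buffer, so it stays alive while BoringSSL reads through the CBS view
        // (obtained later with `as_mut()`, e.g. by EC_KEY_parse_private_key below).
        Cbs::new(der_bytes)
    }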
diff --git a/libs/bssl/src/curve25519.rs b/libs/bssl/src/curve25519.rs
new file mode 100644
index 0000000..499a3d0
--- /dev/null
+++ b/libs/bssl/src/curve25519.rs
@@ -0,0 +1,39 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Wrappers of the Curve25519 related functions in BoringSSL curve25519.h.
+
+use crate::util::check_int_result;
+use bssl_avf_error::{ApiName, Result};
+
+const ED25519_PUBLIC_KEY_LEN: usize = bssl_ffi::ED25519_PUBLIC_KEY_LEN as usize;
+const ED25519_SIGNATURE_LEN: usize = bssl_ffi::ED25519_SIGNATURE_LEN as usize;
+
+/// Verifies the signature of a message with the given ED25519 public key.
+pub fn ed25519_verify(
+ message: &[u8],
+ signature: &[u8; ED25519_SIGNATURE_LEN],
+ public_key: &[u8; ED25519_PUBLIC_KEY_LEN],
+) -> Result<()> {
+ // SAFETY: The function only reads the parameters within their bounds.
+ let ret = unsafe {
+ bssl_ffi::ED25519_verify(
+ message.as_ptr(),
+ message.len(),
+ signature.as_ptr(),
+ public_key.as_ptr(),
+ )
+ };
+ check_int_result(ret, ApiName::ED25519_verify)
+}
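A minimal usage sketch of the new wrapper (illustrative only, not part of the change; the fixed array sizes are the standard Ed25519 signature and public key lengths, 64 and 32 bytes):

    use bssl_avf::{ed25519_verify, Result};

    fn check_ed25519(message: &[u8], signature: &[u8; 64], public_key: &[u8; 32]) -> Result<()> {
        // Returns Ok(()) only when the signature verifies against the message and key.
        ed25519_verify(message, signature, public_key)
    }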
diff --git a/libs/bssl/src/digest.rs b/libs/bssl/src/digest.rs
index 49e66e6..e986a38 100644
--- a/libs/bssl/src/digest.rs
+++ b/libs/bssl/src/digest.rs
@@ -14,7 +14,18 @@
//! Wrappers of the digest functions in BoringSSL digest.h.
-use bssl_ffi::{EVP_MD_size, EVP_sha256, EVP_sha512, EVP_MD};
+use crate::util::{check_int_result, to_call_failed_error};
+use alloc::vec;
+use alloc::vec::Vec;
+use bssl_avf_error::{ApiName, Error, Result};
+use bssl_ffi::{
+ EVP_Digest, EVP_MD_CTX_free, EVP_MD_CTX_new, EVP_MD_size, EVP_sha256, EVP_sha384, EVP_sha512,
+ EVP_MAX_MD_SIZE, EVP_MD, EVP_MD_CTX,
+};
+use core::ptr::{self, NonNull};
+use log::error;
+
+const MAX_DIGEST_SIZE: usize = EVP_MAX_MD_SIZE as usize;
/// Message digester wrapping `EVP_MD`.
#[derive(Clone, Debug)]
@@ -28,7 +39,17 @@
let p = unsafe { EVP_sha256() };
// SAFETY: The returned pointer should always be valid and points to a static
// `EVP_MD`.
- Self(unsafe { &*p })
+ Self(unsafe { p.as_ref().unwrap() })
+ }
+
+ /// Returns a `Digester` implementing `SHA-384` algorithm.
+ pub fn sha384() -> Self {
+ // SAFETY: This function does not access any Rust variables and simply returns
+ // a pointer to the static variable in BoringSSL.
+ let p = unsafe { EVP_sha384() };
+ // SAFETY: The returned pointer should always be valid and points to a static
+ // `EVP_MD`.
+ Self(unsafe { p.as_ref().unwrap() })
}
/// Returns a `Digester` implementing `SHA-512` algorithm.
@@ -38,7 +59,7 @@
let p = unsafe { EVP_sha512() };
// SAFETY: The returned pointer should always be valid and points to a static
// `EVP_MD`.
- Self(unsafe { &*p })
+ Self(unsafe { p.as_ref().unwrap() })
}
/// Returns the digest size in bytes.
@@ -46,4 +67,64 @@
// SAFETY: The inner pointer is fetched from EVP_* hash functions in BoringSSL digest.h
unsafe { EVP_MD_size(self.0) }
}
+
+ /// Computes the digest of the provided `data`.
+ pub fn digest(&self, data: &[u8]) -> Result<Vec<u8>> {
+ let mut out = vec![0u8; MAX_DIGEST_SIZE];
+ let mut out_size = 0;
+ let engine = ptr::null_mut(); // Use the default engine.
+ let ret =
+ // SAFETY: This function reads `data` and writes to `out` within its bounds.
+ // `out` has `MAX_DIGEST_SIZE` bytes of space for write as required in the
+ // BoringSSL spec.
+ // The digester is a valid pointer to a static `EVP_MD` as it is returned by
+ // BoringSSL API during the construction of this struct.
+ unsafe {
+ EVP_Digest(
+ data.as_ptr() as *const _,
+ data.len(),
+ out.as_mut_ptr(),
+ &mut out_size,
+ self.0,
+ engine,
+ )
+ };
+ check_int_result(ret, ApiName::EVP_Digest)?;
+ let out_size = usize::try_from(out_size).map_err(|e| {
+ error!("Failed to convert digest size to usize: {:?}", e);
+ Error::InternalError
+ })?;
+ if self.size() != out_size {
+ return Err(to_call_failed_error(ApiName::EVP_Digest));
+ }
+ out.truncate(out_size);
+ Ok(out)
+ }
+}
+
+/// Message digester context wrapping `EVP_MD_CTX`.
+#[derive(Clone, Debug)]
+pub struct DigesterContext(NonNull<EVP_MD_CTX>);
+
+impl Drop for DigesterContext {
+ fn drop(&mut self) {
+ // SAFETY: This function frees any resources owned by `EVP_MD_CTX` and resets it to a
+ // freshly initialised state and then frees the context.
+ // It is safe because `EVP_MD_CTX` has been allocated by BoringSSL and isn't used after
+ // this.
+ unsafe { EVP_MD_CTX_free(self.0.as_ptr()) }
+ }
+}
+
+impl DigesterContext {
+ /// Creates a new `DigesterContext` wrapping a freshly allocated and initialised `EVP_MD_CTX`.
+ pub fn new() -> Result<Self> {
+ // SAFETY: The returned pointer is checked below.
+ let ctx = unsafe { EVP_MD_CTX_new() };
+ NonNull::new(ctx).map(Self).ok_or(to_call_failed_error(ApiName::EVP_MD_CTX_new))
+ }
+
+ pub(crate) fn as_mut_ptr(&mut self) -> *mut EVP_MD_CTX {
+ self.0.as_ptr()
+ }
}
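A minimal sketch of the new one-shot digest API (illustrative only, not part of the change):

    use bssl_avf::{Digester, Result};

    fn sha256_of(data: &[u8]) -> Result<Vec<u8>> {
        let digester = Digester::sha256();
        let digest = digester.digest(data)?;
        // digest() truncates the output buffer to the size of the chosen algorithm.
        assert_eq!(digest.len(), digester.size());
        Ok(digest)
    }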
diff --git a/libs/bssl/src/ec_key.rs b/libs/bssl/src/ec_key.rs
index 7038e21..894934d 100644
--- a/libs/bssl/src/ec_key.rs
+++ b/libs/bssl/src/ec_key.rs
@@ -16,26 +16,37 @@
//! BoringSSL.
use crate::cbb::CbbFixed;
+use crate::cbs::Cbs;
use crate::util::{check_int_result, to_call_failed_error};
+use alloc::vec;
use alloc::vec::Vec;
use bssl_avf_error::{ApiName, Error, Result};
use bssl_ffi::{
- BN_bn2bin_padded, BN_clear_free, BN_new, CBB_flush, CBB_len, EC_KEY_free, EC_KEY_generate_key,
- EC_KEY_get0_group, EC_KEY_get0_public_key, EC_KEY_marshal_private_key,
- EC_KEY_new_by_curve_name, EC_POINT_get_affine_coordinates, NID_X9_62_prime256v1, BIGNUM,
- EC_GROUP, EC_KEY, EC_POINT,
+ BN_bin2bn, BN_bn2bin_padded, BN_clear_free, BN_new, CBB_flush, CBB_len, ECDSA_sign, ECDSA_size,
+ ECDSA_verify, EC_GROUP_get_curve_name, EC_GROUP_new_by_curve_name, EC_KEY_check_key,
+ EC_KEY_free, EC_KEY_generate_key, EC_KEY_get0_group, EC_KEY_get0_public_key,
+ EC_KEY_marshal_private_key, EC_KEY_new_by_curve_name, EC_KEY_parse_private_key,
+ EC_KEY_set_public_key_affine_coordinates, EC_POINT_get_affine_coordinates,
+ NID_X9_62_prime256v1, NID_secp384r1, BIGNUM, EC_GROUP, EC_KEY, EC_POINT,
};
+use cbor_util::{get_label_value, get_label_value_as_bytes};
+use ciborium::Value;
use core::ptr::{self, NonNull};
-use core::result;
-use coset::{iana, CoseKey, CoseKeyBuilder};
+use coset::{
+ iana::{self, EnumI64},
+ CborSerializable, CoseKey, CoseKeyBuilder, KeyType, Label,
+};
+use log::error;
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
+const ES256_ALGO: iana::Algorithm = iana::Algorithm::ES256;
+const P256_CURVE: iana::EllipticCurve = iana::EllipticCurve::P_256;
+const P384_CURVE: iana::EllipticCurve = iana::EllipticCurve::P_384;
const P256_AFFINE_COORDINATE_SIZE: usize = 32;
-
-type Coordinate = [u8; P256_AFFINE_COORDINATE_SIZE];
+const P384_AFFINE_COORDINATE_SIZE: usize = 48;
/// Wrapper of an `EC_KEY` object, representing a public or private EC key.
-pub struct EcKey(NonNull<EC_KEY>);
+pub struct EcKey(pub(crate) NonNull<EC_KEY>);
impl Drop for EcKey {
fn drop(&mut self) {
@@ -52,16 +63,151 @@
let ec_key = unsafe {
EC_KEY_new_by_curve_name(NID_X9_62_prime256v1) // EC P-256 CURVE Nid
};
- let mut ec_key = NonNull::new(ec_key)
+ NonNull::new(ec_key)
.map(Self)
- .ok_or(to_call_failed_error(ApiName::EC_KEY_new_by_curve_name))?;
- ec_key.generate_key()?;
+ .ok_or(to_call_failed_error(ApiName::EC_KEY_new_by_curve_name))
+ }
+
+ /// Creates a new EC P-384 key pair.
+ pub fn new_p384() -> Result<Self> {
+ // SAFETY: The returned pointer is checked below.
+ let ec_key = unsafe {
+ EC_KEY_new_by_curve_name(NID_secp384r1) // EC P-384 CURVE Nid
+ };
+ NonNull::new(ec_key)
+ .map(Self)
+ .ok_or(to_call_failed_error(ApiName::EC_KEY_new_by_curve_name))
+ }
+
+ /// Constructs an `EcKey` instance from the provided COSE_Key encoded public key slice.
+ pub fn from_cose_public_key_slice(cose_key: &[u8]) -> Result<Self> {
+ let cose_key = CoseKey::from_slice(cose_key).map_err(|e| {
+ error!("Failed to deserialize COSE_Key: {e:?}");
+ Error::CoseKeyDecodingFailed
+ })?;
+ Self::from_cose_public_key(&cose_key)
+ }
+
+ /// Constructs an `EcKey` instance from the provided `COSE_Key`.
+ ///
+ /// The lifetime of the returned `EcKey` is not tied to the lifetime of the `cose_key`,
+ /// because the affine coordinates stored in the `cose_key` are copied into the `EcKey`.
+ ///
+ /// Currently, only the EC P-256 and P-384 curves are supported.
+ pub fn from_cose_public_key(cose_key: &CoseKey) -> Result<Self> {
+ if cose_key.kty != KeyType::Assigned(iana::KeyType::EC2) {
+ error!("Only EC2 keys are supported. Key type in the COSE Key: {:?}", cose_key.kty);
+ return Err(Error::Unimplemented);
+ }
+ let ec_key =
+ match get_label_value(cose_key, Label::Int(iana::Ec2KeyParameter::Crv.to_i64()))? {
+ crv if crv == &Value::from(P256_CURVE.to_i64()) => EcKey::new_p256()?,
+ crv if crv == &Value::from(P384_CURVE.to_i64()) => EcKey::new_p384()?,
+ crv => {
+ error!(
+ "Only EC P-256 and P-384 curves are supported. \
+ Curve type in the COSE Key: {crv:?}"
+ );
+ return Err(Error::Unimplemented);
+ }
+ };
+ let x = get_label_value_as_bytes(cose_key, Label::Int(iana::Ec2KeyParameter::X.to_i64()))?;
+ let y = get_label_value_as_bytes(cose_key, Label::Int(iana::Ec2KeyParameter::Y.to_i64()))?;
+
+ let group = ec_key.ec_group()?;
+ group.check_affine_coordinate_size(x)?;
+ group.check_affine_coordinate_size(y)?;
+
+ let x = BigNum::from_slice(x)?;
+ let y = BigNum::from_slice(y)?;
+
+ // SAFETY: All the parameters are checked non-null and initialized.
+ // The function only reads the coordinates x and y within their bounds.
+ let ret = unsafe {
+ EC_KEY_set_public_key_affine_coordinates(ec_key.0.as_ptr(), x.as_ref(), y.as_ref())
+ };
+ check_int_result(ret, ApiName::EC_KEY_set_public_key_affine_coordinates)?;
+ ec_key.check_key()?;
Ok(ec_key)
}
+ /// Performs several checks on the key. See BoringSSL doc for more details:
+ ///
+ /// https://commondatastorage.googleapis.com/chromium-boringssl-docs/ec_key.h.html#EC_KEY_check_key
+ pub fn check_key(&self) -> Result<()> {
+ // SAFETY: This function only reads the `EC_KEY` pointer, the non-null check is performed
+ // within the function.
+ let ret = unsafe { EC_KEY_check_key(self.0.as_ptr()) };
+ check_int_result(ret, ApiName::EC_KEY_check_key)
+ }
+
+ /// Verifies the DER-encoded ECDSA `signature` of the `digest` with the current `EcKey`.
+ ///
+ /// Returns Ok(()) if the verification succeeds, otherwise an error will be returned.
+ pub fn ecdsa_verify(&self, signature: &[u8], digest: &[u8]) -> Result<()> {
+ // The `type` argument should be 0 as required in the BoringSSL spec.
+ const TYPE: i32 = 0;
+
+ // SAFETY: This function only reads the given data within its bounds.
+ // The `EC_KEY` passed to this function has been initialized and checked non-null.
+ let ret = unsafe {
+ ECDSA_verify(
+ TYPE,
+ digest.as_ptr(),
+ digest.len(),
+ signature.as_ptr(),
+ signature.len(),
+ self.0.as_ptr(),
+ )
+ };
+ check_int_result(ret, ApiName::ECDSA_verify)
+ }
+
+ /// Signs the `digest` with the current `EcKey` using ECDSA.
+ ///
+ /// Returns the DER-encoded ECDSA signature.
+ pub fn ecdsa_sign(&self, digest: &[u8]) -> Result<Vec<u8>> {
+ // The `type` argument should be 0 as required in the BoringSSL spec.
+ const TYPE: i32 = 0;
+
+ let mut signature = vec![0u8; self.ecdsa_size()?];
+ let mut signature_len = 0;
+ // SAFETY: This function only reads the given data within its bounds.
+ // The `EC_KEY` passed to this function has been initialized and checked non-null.
+ let ret = unsafe {
+ ECDSA_sign(
+ TYPE,
+ digest.as_ptr(),
+ digest.len(),
+ signature.as_mut_ptr(),
+ &mut signature_len,
+ self.0.as_ptr(),
+ )
+ };
+ check_int_result(ret, ApiName::ECDSA_sign)?;
+ if signature.len() < (signature_len as usize) {
+ Err(to_call_failed_error(ApiName::ECDSA_sign))
+ } else {
+ signature.truncate(signature_len as usize);
+ Ok(signature)
+ }
+ }
+
+ /// Returns the maximum size of an ECDSA signature using the current `EcKey`.
+ fn ecdsa_size(&self) -> Result<usize> {
+ // SAFETY: This function only reads the `EC_KEY` that has been initialized
+ // and checked non-null when this instance is created.
+ let size = unsafe { ECDSA_size(self.0.as_ptr()) };
+ if size == 0 {
+ Err(to_call_failed_error(ApiName::ECDSA_size))
+ } else {
+ Ok(size)
+ }
+ }
+
/// Generates a random, private key, calculates the corresponding public key and stores both
/// in the `EC_KEY`.
- fn generate_key(&mut self) -> Result<()> {
+ pub fn generate_key(&mut self) -> Result<()> {
// SAFETY: The non-null pointer is created with `EC_KEY_new_by_curve_name` and should
// point to a valid `EC_KEY`.
// The randomness is provided by `getentropy()` in `vmbase`.
@@ -71,17 +217,14 @@
/// Returns the `CoseKey` for the public key.
pub fn cose_public_key(&self) -> Result<CoseKey> {
- const ALGO: iana::Algorithm = iana::Algorithm::ES256;
- const CURVE: iana::EllipticCurve = iana::EllipticCurve::P_256;
-
let (x, y) = self.public_key_coordinates()?;
- let key =
- CoseKeyBuilder::new_ec2_pub_key(CURVE, x.to_vec(), y.to_vec()).algorithm(ALGO).build();
+ let curve = self.ec_group()?.coset_curve()?;
+ let key = CoseKeyBuilder::new_ec2_pub_key(curve, x, y).algorithm(ES256_ALGO).build();
Ok(key)
}
/// Returns the x and y coordinates of the public key.
- fn public_key_coordinates(&self) -> Result<(Coordinate, Coordinate)> {
+ fn public_key_coordinates(&self) -> Result<(Vec<u8>, Vec<u8>)> {
let ec_group = self.ec_group()?;
let ec_point = self.public_key_ec_point()?;
let mut x = BigNum::new()?;
@@ -90,10 +233,17 @@
// SAFETY: All the parameters are checked non-null and initialized when needed.
// The last parameter `ctx` is generated when needed inside the function.
let ret = unsafe {
- EC_POINT_get_affine_coordinates(ec_group, ec_point, x.as_mut_ptr(), y.as_mut_ptr(), ctx)
+ EC_POINT_get_affine_coordinates(
+ ec_group.as_ref(),
+ ec_point,
+ x.as_mut_ptr(),
+ y.as_mut_ptr(),
+ ctx,
+ )
};
check_int_result(ret, ApiName::EC_POINT_get_affine_coordinates)?;
- Ok((x.try_into()?, y.try_into()?))
+ let len = ec_group.affine_coordinate_size()?;
+ Ok((x.to_padded_vec(len)?, y.to_padded_vec(len)?))
}
/// Returns a pointer to the public key point inside `EC_KEY`. The memory region pointed
@@ -112,7 +262,7 @@
/// Returns a pointer to the `EC_GROUP` object inside `EC_KEY`. The memory region pointed
/// by the pointer is owned by the `EC_KEY`.
- fn ec_group(&self) -> Result<*const EC_GROUP> {
+ fn ec_group(&self) -> Result<EcGroup<'_>> {
let group =
// SAFETY: It is safe since the key pair has been generated and stored in the
// `EC_KEY` pointer.
@@ -120,14 +270,40 @@
if group.is_null() {
Err(to_call_failed_error(ApiName::EC_KEY_get0_group))
} else {
- Ok(group)
+ // SAFETY: The pointer should be valid and points to an initialized `EC_GROUP`
+ // since it is read from a valid `EC_KEY`.
+ Ok(EcGroup(unsafe { &*group }))
}
}
+ /// Constructs an `EcKey` instance from the provided DER-encoded ECPrivateKey slice.
+ ///
+ /// Currently, only the EC P-256 curve is supported.
+ pub fn from_ec_private_key(der_encoded_ec_private_key: &[u8]) -> Result<Self> {
+ // SAFETY: This function only returns a pointer to a static object, and the
+ // return is checked below.
+ let ec_group = unsafe {
+ EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1) // EC P-256 CURVE Nid
+ };
+ if ec_group.is_null() {
+ return Err(to_call_failed_error(ApiName::EC_GROUP_new_by_curve_name));
+ }
+ let mut cbs = Cbs::new(der_encoded_ec_private_key);
+ // SAFETY: The function only reads bytes from the buffer managed by the valid `CBS`
+ // object, and the returned EC_KEY is checked.
+ let ec_key = unsafe { EC_KEY_parse_private_key(cbs.as_mut(), ec_group) };
+
+ let ec_key = NonNull::new(ec_key)
+ .map(Self)
+ .ok_or(to_call_failed_error(ApiName::EC_KEY_parse_private_key))?;
+ ec_key.check_key()?;
+ Ok(ec_key)
+ }
+
/// Returns the DER-encoded ECPrivateKey structure described in RFC 5915 Section 3:
///
/// https://datatracker.ietf.org/doc/html/rfc5915#section-3
- pub fn private_key(&self) -> Result<ZVec> {
+ pub fn ec_private_key(&self) -> Result<ZVec> {
const CAPACITY: usize = 256;
let mut buf = Zeroizing::new([0u8; CAPACITY]);
let mut cbb = CbbFixed::new(buf.as_mut());
@@ -148,6 +324,62 @@
}
}
+/// Wrapper of an `EC_GROUP` reference.
+struct EcGroup<'a>(&'a EC_GROUP);
+
+impl<'a> EcGroup<'a> {
+ /// Returns the NID that identifies the EC group of the key.
+ fn curve_nid(&self) -> i32 {
+ // SAFETY: It is safe since the inner pointer is valid and points to an initialized
+ // instance of `EC_GROUP`.
+ unsafe { EC_GROUP_get_curve_name(self.as_ref()) }
+ }
+
+ fn coset_curve(&self) -> Result<iana::EllipticCurve> {
+ #[allow(non_upper_case_globals)]
+ match self.curve_nid() {
+ NID_X9_62_prime256v1 => Ok(P256_CURVE),
+ NID_secp384r1 => Ok(P384_CURVE),
+ name => {
+ error!("Unsupported curve NID: {}", name);
+ Err(Error::Unimplemented)
+ }
+ }
+ }
+
+ fn affine_coordinate_size(&self) -> Result<usize> {
+ #[allow(non_upper_case_globals)]
+ match self.curve_nid() {
+ NID_X9_62_prime256v1 => Ok(P256_AFFINE_COORDINATE_SIZE),
+ NID_secp384r1 => Ok(P384_AFFINE_COORDINATE_SIZE),
+ name => {
+ error!("Unsupported curve NID: {}", name);
+ Err(Error::Unimplemented)
+ }
+ }
+ }
+
+ fn check_affine_coordinate_size(&self, coordinate: &[u8]) -> Result<()> {
+ let expected_len = self.affine_coordinate_size()?;
+ if expected_len == coordinate.len() {
+ Ok(())
+ } else {
+ error!(
+ "The size of the affine coordinate '{}' does not match the expected size '{}'",
+ coordinate.len(),
+ expected_len
+ );
+ Err(Error::CoseKeyDecodingFailed)
+ }
+ }
+}
+
+impl<'a> AsRef<EC_GROUP> for EcGroup<'a> {
+ fn as_ref(&self) -> &EC_GROUP {
+ self.0
+ }
+}
+
/// A u8 vector that is zeroed when dropped.
#[derive(Zeroize, ZeroizeOnDrop)]
pub struct ZVec(Vec<u8>);
@@ -175,29 +407,38 @@
}
impl BigNum {
+ fn from_slice(x: &[u8]) -> Result<Self> {
+ // SAFETY: The function reads `x` within its bounds, and the returned
+ // pointer is checked below.
+ let bn = unsafe { BN_bin2bn(x.as_ptr(), x.len(), ptr::null_mut()) };
+ NonNull::new(bn).map(Self).ok_or(to_call_failed_error(ApiName::BN_bin2bn))
+ }
+
fn new() -> Result<Self> {
// SAFETY: The returned pointer is checked below.
let bn = unsafe { BN_new() };
NonNull::new(bn).map(Self).ok_or(to_call_failed_error(ApiName::BN_new))
}
+ /// Converts the `BigNum` to a big-endian integer. The integer is padded with leading zeros up
+ /// to size `len`. The conversion fails if `len` is smaller than the size of the integer.
+ fn to_padded_vec(&self, len: usize) -> Result<Vec<u8>> {
+ let mut num = vec![0u8; len];
+ // SAFETY: The `BIGNUM` pointer has been created with `BN_new`.
+ let ret = unsafe { BN_bn2bin_padded(num.as_mut_ptr(), num.len(), self.0.as_ptr()) };
+ check_int_result(ret, ApiName::BN_bn2bin_padded)?;
+ Ok(num)
+ }
+
fn as_mut_ptr(&mut self) -> *mut BIGNUM {
self.0.as_ptr()
}
}
-/// Converts the `BigNum` to a big-endian integer. The integer is padded with leading zeros up to
-/// size `N`. The conversion fails if `N` is smaller thanthe size of the integer.
-impl<const N: usize> TryFrom<BigNum> for [u8; N] {
- type Error = Error;
-
- fn try_from(bn: BigNum) -> result::Result<Self, Self::Error> {
- let mut num = [0u8; N];
- // SAFETY: The `BIGNUM` pointer has been created with `BN_new`.
- let ret = unsafe { BN_bn2bin_padded(num.as_mut_ptr(), num.len(), bn.0.as_ptr()) };
- check_int_result(ret, ApiName::BN_bn2bin_padded)?;
- Ok(num)
+impl AsRef<BIGNUM> for BigNum {
+ fn as_ref(&self) -> &BIGNUM {
+ // SAFETY: The pointer is valid and points to an initialized instance of `BIGNUM`
+ // when the instance was created.
+ unsafe { self.0.as_ref() }
}
}
-
-// TODO(b/301068421): Unit tests the EcKey.
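A minimal sketch of the reworked `EcKey` API (illustrative only, not part of the change; it follows the eckey_test.rs cases added later in this patch):

    use bssl_avf::{sha256, EcKey, Result};
    use coset::CoseKey;

    fn sign_and_export(message: &[u8]) -> Result<(Vec<u8>, CoseKey)> {
        // Key construction and key generation are now separate steps.
        let mut key = EcKey::new_p256()?;
        key.generate_key()?;
        let digest = sha256(message)?;
        let signature = key.ecdsa_sign(&digest)?; // DER-encoded ECDSA signature
        key.ecdsa_verify(&signature, &digest)?;
        Ok((signature, key.cose_public_key()?))
    }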
diff --git a/libs/bssl/src/err.rs b/libs/bssl/src/err.rs
index 1ee40c9..7040441 100644
--- a/libs/bssl/src/err.rs
+++ b/libs/bssl/src/err.rs
@@ -14,7 +14,7 @@
//! Wrappers of the error handling functions in BoringSSL err.h.
-use bssl_avf_error::{CipherError, GlobalError, ReasonCode};
+use bssl_avf_error::{CipherError, EcError, EcdsaError, GlobalError, ReasonCode};
use bssl_ffi::{self, ERR_get_error, ERR_GET_LIB_RUST, ERR_GET_REASON_RUST};
const NO_ERROR_REASON_CODE: i32 = 0;
@@ -75,6 +75,8 @@
fn map_library_reason_code(reason: i32, lib: i32) -> Option<ReasonCode> {
u32::try_from(lib).ok().and_then(|x| match x {
bssl_ffi::ERR_LIB_CIPHER => map_cipher_reason_code(reason).map(ReasonCode::Cipher),
+ bssl_ffi::ERR_LIB_EC => map_ec_reason_code(reason).map(ReasonCode::Ec),
+ bssl_ffi::ERR_LIB_ECDSA => map_ecdsa_reason_code(reason).map(ReasonCode::Ecdsa),
_ => None,
})
}
@@ -110,3 +112,60 @@
};
Some(error)
}
+
+fn map_ec_reason_code(reason: i32) -> Option<EcError> {
+ let error = match reason {
+ bssl_ffi::EC_R_BUFFER_TOO_SMALL => EcError::BufferTooSmall,
+ bssl_ffi::EC_R_COORDINATES_OUT_OF_RANGE => EcError::CoordinatesOutOfRange,
+ bssl_ffi::EC_R_D2I_ECPKPARAMETERS_FAILURE => EcError::D2IEcpkparametersFailure,
+ bssl_ffi::EC_R_EC_GROUP_NEW_BY_NAME_FAILURE => EcError::EcGroupNewByNameFailure,
+ bssl_ffi::EC_R_GROUP2PKPARAMETERS_FAILURE => EcError::Group2PkparametersFailure,
+ bssl_ffi::EC_R_I2D_ECPKPARAMETERS_FAILURE => EcError::I2DEcpkparametersFailure,
+ bssl_ffi::EC_R_INCOMPATIBLE_OBJECTS => EcError::IncompatibleObjects,
+ bssl_ffi::EC_R_INVALID_COMPRESSED_POINT => EcError::InvalidCompressedPoint,
+ bssl_ffi::EC_R_INVALID_COMPRESSION_BIT => EcError::InvalidCompressionBit,
+ bssl_ffi::EC_R_INVALID_ENCODING => EcError::InvalidEncoding,
+ bssl_ffi::EC_R_INVALID_FIELD => EcError::InvalidField,
+ bssl_ffi::EC_R_INVALID_FORM => EcError::InvalidForm,
+ bssl_ffi::EC_R_INVALID_GROUP_ORDER => EcError::InvalidGroupOrder,
+ bssl_ffi::EC_R_INVALID_PRIVATE_KEY => EcError::InvalidPrivateKey,
+ bssl_ffi::EC_R_MISSING_PARAMETERS => EcError::MissingParameters,
+ bssl_ffi::EC_R_MISSING_PRIVATE_KEY => EcError::MissingPrivateKey,
+ bssl_ffi::EC_R_NON_NAMED_CURVE => EcError::NonNamedCurve,
+ bssl_ffi::EC_R_NOT_INITIALIZED => EcError::NotInitialized,
+ bssl_ffi::EC_R_PKPARAMETERS2GROUP_FAILURE => EcError::Pkparameters2GroupFailure,
+ bssl_ffi::EC_R_POINT_AT_INFINITY => EcError::PointAtInfinity,
+ bssl_ffi::EC_R_POINT_IS_NOT_ON_CURVE => EcError::PointIsNotOnCurve,
+ bssl_ffi::EC_R_SLOT_FULL => EcError::SlotFull,
+ bssl_ffi::EC_R_UNDEFINED_GENERATOR => EcError::UndefinedGenerator,
+ bssl_ffi::EC_R_UNKNOWN_GROUP => EcError::UnknownGroup,
+ bssl_ffi::EC_R_UNKNOWN_ORDER => EcError::UnknownOrder,
+ bssl_ffi::EC_R_WRONG_ORDER => EcError::WrongOrder,
+ bssl_ffi::EC_R_BIGNUM_OUT_OF_RANGE => EcError::BignumOutOfRange,
+ bssl_ffi::EC_R_WRONG_CURVE_PARAMETERS => EcError::WrongCurveParameters,
+ bssl_ffi::EC_R_DECODE_ERROR => EcError::DecodeError,
+ bssl_ffi::EC_R_ENCODE_ERROR => EcError::EncodeError,
+ bssl_ffi::EC_R_GROUP_MISMATCH => EcError::GroupMismatch,
+ bssl_ffi::EC_R_INVALID_COFACTOR => EcError::InvalidCofactor,
+ bssl_ffi::EC_R_PUBLIC_KEY_VALIDATION_FAILED => EcError::PublicKeyValidationFailed,
+ bssl_ffi::EC_R_INVALID_SCALAR => EcError::InvalidScalar,
+ _ => return None,
+ };
+ Some(error)
+}
+
+fn map_ecdsa_reason_code(reason: i32) -> Option<EcdsaError> {
+ let error = match reason {
+ bssl_ffi::ECDSA_R_BAD_SIGNATURE => EcdsaError::BadSignature,
+ bssl_ffi::ECDSA_R_MISSING_PARAMETERS => EcdsaError::MissingParameters,
+ bssl_ffi::ECDSA_R_NEED_NEW_SETUP_VALUES => EcdsaError::NeedNewSetupValues,
+ bssl_ffi::ECDSA_R_NOT_IMPLEMENTED => EcdsaError::NotImplemented,
+ bssl_ffi::ECDSA_R_RANDOM_NUMBER_GENERATION_FAILED => {
+ EcdsaError::RandomNumberGenerationFailed
+ }
+ bssl_ffi::ECDSA_R_ENCODE_ERROR => EcdsaError::EncodeError,
+ bssl_ffi::ECDSA_R_TOO_MANY_ITERATIONS => EcdsaError::TooManyIterations,
+ _ => return None,
+ };
+ Some(error)
+}
diff --git a/libs/bssl/src/evp.rs b/libs/bssl/src/evp.rs
new file mode 100644
index 0000000..fe3d88e
--- /dev/null
+++ b/libs/bssl/src/evp.rs
@@ -0,0 +1,221 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Wrappers of the EVP functions in BoringSSL evp.h.
+
+use crate::cbb::CbbFixed;
+use crate::digest::{Digester, DigesterContext};
+use crate::ec_key::EcKey;
+use crate::util::{check_int_result, to_call_failed_error};
+use alloc::vec::Vec;
+use bssl_avf_error::{ApiName, Error, Result};
+use bssl_ffi::{
+ CBB_flush, CBB_len, EVP_DigestVerify, EVP_DigestVerifyInit, EVP_PKEY_free, EVP_PKEY_new,
+ EVP_PKEY_new_raw_public_key, EVP_PKEY_set1_EC_KEY, EVP_marshal_public_key, EVP_PKEY,
+ EVP_PKEY_ED25519, EVP_PKEY_X25519,
+};
+use cbor_util::{get_label_value, get_label_value_as_bytes};
+use ciborium::Value;
+use core::ptr::{self, NonNull};
+use coset::{
+ iana::{self, EnumI64},
+ CoseKey, KeyType, Label,
+};
+use log::error;
+
+/// Wrapper of an `EVP_PKEY` object, representing a public or private key.
+pub struct PKey {
+ pkey: NonNull<EVP_PKEY>,
+ /// If this struct owns the inner EC key, the inner EC key should remain valid as
+ /// long as the pointer to `EVP_PKEY` is valid.
+ _inner_ec_key: Option<EcKey>,
+}
+
+impl Drop for PKey {
+ fn drop(&mut self) {
+ // SAFETY: It is safe because `EVP_PKEY` has been allocated by BoringSSL and isn't
+ // used after this.
+ unsafe { EVP_PKEY_free(self.pkey.as_ptr()) }
+ }
+}
+
+/// Creates a new empty `EVP_PKEY`.
+fn new_pkey() -> Result<NonNull<EVP_PKEY>> {
+ // SAFETY: The returned pointer is checked below.
+ let key = unsafe { EVP_PKEY_new() };
+ NonNull::new(key).ok_or(to_call_failed_error(ApiName::EVP_PKEY_new))
+}
+
+impl TryFrom<EcKey> for PKey {
+ type Error = bssl_avf_error::Error;
+
+ fn try_from(key: EcKey) -> Result<Self> {
+ let pkey = new_pkey()?;
+ // SAFETY: The function only sets the inner EC key of the initialized and
+ // non-null `EVP_PKEY` to point to the given `EC_KEY`. It only reads from
+ // and writes to the initialized `EVP_PKEY`.
+ // Since this struct owns the inner key, the inner key remains valid as
+ // long as `EVP_PKEY` is valid.
+ let ret = unsafe { EVP_PKEY_set1_EC_KEY(pkey.as_ptr(), key.0.as_ptr()) };
+ check_int_result(ret, ApiName::EVP_PKEY_set1_EC_KEY)?;
+ Ok(Self { pkey, _inner_ec_key: Some(key) })
+ }
+}
+
+impl PKey {
+ /// Returns a DER-encoded SubjectPublicKeyInfo structure as specified
+ /// in RFC 5280 s4.1.2.7:
+ ///
+ /// https://www.rfc-editor.org/rfc/rfc5280.html#section-4.1.2.7
+ pub fn subject_public_key_info(&self) -> Result<Vec<u8>> {
+ const CAPACITY: usize = 256;
+ let mut buf = [0u8; CAPACITY];
+ let mut cbb = CbbFixed::new(buf.as_mut());
+ // SAFETY: The function only writes bytes to the buffer managed by the valid `CBB`.
+ // The inner key in `EVP_PKEY` was set to a valid key when the object was created.
+ // As this struct owns the inner key, the inner key is guaranteed to be valid
+ // throughout the execution of the function.
+ let ret = unsafe { EVP_marshal_public_key(cbb.as_mut(), self.pkey.as_ptr()) };
+ check_int_result(ret, ApiName::EVP_marshal_public_key)?;
+ // SAFETY: This is safe because the CBB pointer is a valid pointer initialized with
+ // `CBB_init_fixed()`.
+ check_int_result(unsafe { CBB_flush(cbb.as_mut()) }, ApiName::CBB_flush)?;
+ // SAFETY: This is safe because the CBB pointer is initialized with `CBB_init_fixed()`,
+ // and it has been flushed, thus it has no active children.
+ let len = unsafe { CBB_len(cbb.as_ref()) };
+ Ok(buf.get(0..len).ok_or(to_call_failed_error(ApiName::CBB_len))?.to_vec())
+ }
+
+ /// This function takes a raw public key data slice and creates a `PKey` instance wrapping
+ /// a freshly allocated `EVP_PKEY` object from it.
+ ///
+ /// The lifetime of the returned instance is not tied to the lifetime of the raw public
+ /// key slice because the raw data is copied into the `EVP_PKEY` object.
+ ///
+ /// Currently the only supported raw formats are X25519 and Ed25519, where the formats
+ /// are specified in RFC 7748 and RFC 8032 respectively.
+ pub fn new_raw_public_key(raw_public_key: &[u8], type_: PKeyType) -> Result<Self> {
+ let engine = ptr::null_mut(); // Engine is not used.
+ let pkey =
+ // SAFETY: The function only reads from the given raw public key within its bounds.
+ // The returned pointer is checked below.
+ unsafe {
+ EVP_PKEY_new_raw_public_key(
+ type_.0,
+ engine,
+ raw_public_key.as_ptr(),
+ raw_public_key.len(),
+ )
+ };
+ let pkey =
+ NonNull::new(pkey).ok_or(to_call_failed_error(ApiName::EVP_PKEY_new_raw_public_key))?;
+ Ok(Self { pkey, _inner_ec_key: None })
+ }
+
+ /// Creates a `PKey` from the given `cose_key`.
+ ///
+ /// The lifetime of the returned instance is not tied to the lifetime of the `cose_key` as the
+ /// data of `cose_key` is copied into the `EVP_PKEY` or `EC_KEY` object.
+ pub fn from_cose_public_key(cose_key: &CoseKey) -> Result<Self> {
+ match &cose_key.kty {
+ KeyType::Assigned(iana::KeyType::EC2) => {
+ EcKey::from_cose_public_key(cose_key)?.try_into()
+ }
+ KeyType::Assigned(iana::KeyType::OKP) => {
+ let curve_type =
+ get_label_value(cose_key, Label::Int(iana::OkpKeyParameter::Crv.to_i64()))?;
+ let curve_type = match curve_type {
+ crv if crv == &Value::from(iana::EllipticCurve::Ed25519.to_i64()) => {
+ PKeyType::ED25519
+ }
+ crv if crv == &Value::from(iana::EllipticCurve::X25519.to_i64()) => {
+ PKeyType::X25519
+ }
+ crv => {
+ error!("Unsupported curve type in OKP COSE key: {:?}", crv);
+ return Err(Error::Unimplemented);
+ }
+ };
+ let x = get_label_value_as_bytes(
+ cose_key,
+ Label::Int(iana::OkpKeyParameter::X.to_i64()),
+ )?;
+ Self::new_raw_public_key(x, curve_type)
+ }
+ kty => {
+ error!("Unsupported key type in COSE key: {:?}", kty);
+ Err(Error::Unimplemented)
+ }
+ }
+ }
+
+ /// Verifies the given `signature` of the `message` using the current public key.
+ ///
+ /// The `message` will be hashed using the given `digester` before verification.
+ ///
+ /// For algorithms like Ed25519 that do not use pre-hashed inputs, the `digester` should
+ /// be `None`.
+ pub fn verify(
+ &self,
+ signature: &[u8],
+ message: &[u8],
+ digester: Option<Digester>,
+ ) -> Result<()> {
+ let mut digester_context = DigesterContext::new()?;
+ // The `EVP_PKEY_CTX` is set to null as this function does not collect the context
+ // during the verification.
+ let pkey_context = ptr::null_mut();
+ let engine = ptr::null_mut(); // Use the default engine.
+ let ret =
+ // SAFETY: All the non-null parameters passed to this function have been properly
+ // initialized as required in the BoringSSL spec.
+ unsafe {
+ EVP_DigestVerifyInit(
+ digester_context.as_mut_ptr(),
+ pkey_context,
+ digester.map_or(ptr::null(), |d| d.0),
+ engine,
+ self.pkey.as_ptr(),
+ )
+ };
+ check_int_result(ret, ApiName::EVP_DigestVerifyInit)?;
+
+ // SAFETY: The function only reads from the given slices within their bounds.
+ // The `EVP_MD_CTX` is successfully initialized before this call.
+ let ret = unsafe {
+ EVP_DigestVerify(
+ digester_context.as_mut_ptr(),
+ signature.as_ptr(),
+ signature.len(),
+ message.as_ptr(),
+ message.len(),
+ )
+ };
+ check_int_result(ret, ApiName::EVP_DigestVerify)
+ }
+}
+
+/// Type of the keys supported by `PKey`.
+///
+/// It is a wrapper of the `EVP_PKEY_*` macros defined in BoringSSL evp.h, which are the
+/// NID values of the corresponding keys.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct PKeyType(i32);
+
+impl PKeyType {
+ /// EVP_PKEY_X25519 / NID_X25519
+ pub const X25519: PKeyType = PKeyType(EVP_PKEY_X25519);
+ /// EVP_PKEY_ED25519 / NID_ED25519
+ pub const ED25519: PKeyType = PKeyType(EVP_PKEY_ED25519);
+}
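A minimal sketch tying `PKey` to the `EcKey` changes above (illustrative only, not part of the change; it follows the eckey_test.rs cases added later in this patch):

    use bssl_avf::{sha256, Digester, EcKey, PKey, Result};

    fn sign_then_verify_via_evp(message: &[u8]) -> Result<()> {
        let mut key = EcKey::new_p256()?;
        key.generate_key()?;
        let signature = key.ecdsa_sign(&sha256(message)?)?;
        // Rebuild a verification key from the COSE_Key form and verify through EVP.
        let pkey = PKey::from_cose_public_key(&key.cose_public_key()?)?;
        pkey.verify(&signature, message, Some(Digester::sha256()))
    }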
diff --git a/libs/bssl/src/hmac.rs b/libs/bssl/src/hmac.rs
index ddbbe4a..1b3a403 100644
--- a/libs/bssl/src/hmac.rs
+++ b/libs/bssl/src/hmac.rs
@@ -15,15 +15,14 @@
//! Wrappers of the HMAC functions in BoringSSL hmac.h.
use crate::digest::Digester;
+use crate::sha::SHA256_DIGEST_LENGTH;
use crate::util::to_call_failed_error;
use bssl_avf_error::{ApiName, Result};
-use bssl_ffi::{HMAC, SHA256_DIGEST_LENGTH};
-
-const SHA256_LEN: usize = SHA256_DIGEST_LENGTH as usize;
+use bssl_ffi::HMAC;
/// Computes the HMAC using SHA-256 for the given `data` with the given `key`.
-pub fn hmac_sha256(key: &[u8], data: &[u8]) -> Result<[u8; SHA256_LEN]> {
- hmac::<SHA256_LEN>(key, data, Digester::sha256())
+pub fn hmac_sha256(key: &[u8], data: &[u8]) -> Result<[u8; SHA256_DIGEST_LENGTH]> {
+ hmac::<SHA256_DIGEST_LENGTH>(key, data, Digester::sha256())
}
/// Computes the HMAC for the given `data` with the given `key` and `digester`.
diff --git a/libs/bssl/src/lib.rs b/libs/bssl/src/lib.rs
index 709e8ad..ad51b61 100644
--- a/libs/bssl/src/lib.rs
+++ b/libs/bssl/src/lib.rs
@@ -20,20 +20,28 @@
mod aead;
mod cbb;
+mod cbs;
+mod curve25519;
mod digest;
mod ec_key;
mod err;
+mod evp;
mod hkdf;
mod hmac;
mod rand;
+mod sha;
mod util;
-pub use bssl_avf_error::{ApiName, CipherError, Error, ReasonCode, Result};
+pub use bssl_avf_error::{ApiName, CipherError, EcError, EcdsaError, Error, ReasonCode, Result};
pub use aead::{Aead, AeadContext, AES_GCM_NONCE_LENGTH};
pub use cbb::CbbFixed;
+pub use cbs::Cbs;
+pub use curve25519::ed25519_verify;
pub use digest::Digester;
pub use ec_key::{EcKey, ZVec};
+pub use evp::{PKey, PKeyType};
pub use hkdf::hkdf;
pub use hmac::hmac_sha256;
pub use rand::rand_bytes;
+pub use sha::sha256;
diff --git a/libs/bssl/src/sha.rs b/libs/bssl/src/sha.rs
new file mode 100644
index 0000000..6c65d7f
--- /dev/null
+++ b/libs/bssl/src/sha.rs
@@ -0,0 +1,35 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Wrappers of the SHA functions in BoringSSL sha.h.
+
+use crate::util::to_call_failed_error;
+use bssl_avf_error::{ApiName, Result};
+use bssl_ffi::SHA256;
+
+/// The length of a SHA256 digest.
+pub(crate) const SHA256_DIGEST_LENGTH: usize = bssl_ffi::SHA256_DIGEST_LENGTH as usize;
+
+/// Computes the SHA256 digest of the provided `data`.
+pub fn sha256(data: &[u8]) -> Result<[u8; SHA256_DIGEST_LENGTH]> {
+ let mut out = [0u8; SHA256_DIGEST_LENGTH];
+ // SAFETY: This function reads `data` and writes to `out` within its bounds.
+ // `out` has `SHA256_DIGEST_LENGTH` bytes of space for write.
+ let ret = unsafe { SHA256(data.as_ptr(), data.len(), out.as_mut_ptr()) };
+ if ret.is_null() {
+ Err(to_call_failed_error(ApiName::SHA256))
+ } else {
+ Ok(out)
+ }
+}
diff --git a/libs/bssl/tests/eckey_test.rs b/libs/bssl/tests/eckey_test.rs
new file mode 100644
index 0000000..3c0e45d
--- /dev/null
+++ b/libs/bssl/tests/eckey_test.rs
@@ -0,0 +1,144 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use bssl_avf::{sha256, ApiName, Digester, EcKey, EcdsaError, Error, PKey, Result};
+use coset::CborSerializable;
+use spki::{
+ der::{AnyRef, Decode, Encode},
+ AlgorithmIdentifier, ObjectIdentifier, SubjectPublicKeyInfoRef,
+};
+
+/// OID value for general-use NIST EC keys held in PKCS#8 and X.509; see RFC 5480 s2.1.1.
+const X509_NIST_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.2.840.10045.2.1");
+
+/// OID value in `AlgorithmIdentifier.parameters` for P-256; see RFC 5480 s2.1.1.1.
+const ALGO_PARAM_P256_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.2.840.10045.3.1.7");
+
+const MESSAGE1: &[u8] = b"test message 1";
+const MESSAGE2: &[u8] = b"test message 2";
+
+#[test]
+fn ec_private_key_serialization() -> Result<()> {
+ let mut ec_key = EcKey::new_p256()?;
+ ec_key.generate_key()?;
+ let der_encoded_ec_private_key = ec_key.ec_private_key()?;
+ let deserialized_ec_key = EcKey::from_ec_private_key(der_encoded_ec_private_key.as_slice())?;
+
+ assert_eq!(ec_key.cose_public_key()?, deserialized_ec_key.cose_public_key()?);
+ Ok(())
+}
+
+#[test]
+fn subject_public_key_info_serialization() -> Result<()> {
+ let mut ec_key = EcKey::new_p256()?;
+ ec_key.generate_key()?;
+ let pkey: PKey = ec_key.try_into()?;
+ let subject_public_key_info = pkey.subject_public_key_info()?;
+
+ let subject_public_key_info =
+ SubjectPublicKeyInfoRef::from_der(&subject_public_key_info).unwrap();
+ let expected_algorithm = AlgorithmIdentifier {
+ oid: X509_NIST_OID,
+ parameters: Some(AnyRef::from(&ALGO_PARAM_P256_OID)),
+ };
+ assert_eq!(expected_algorithm, subject_public_key_info.algorithm);
+ assert!(!subject_public_key_info.subject_public_key.to_der().unwrap().is_empty());
+ Ok(())
+}
+
+#[test]
+fn p256_cose_public_key_serialization() -> Result<()> {
+ let mut ec_key = EcKey::new_p256()?;
+ check_cose_public_key_serialization(&mut ec_key)
+}
+
+#[test]
+fn p384_cose_public_key_serialization() -> Result<()> {
+ let mut ec_key = EcKey::new_p384()?;
+ check_cose_public_key_serialization(&mut ec_key)
+}
+
+fn check_cose_public_key_serialization(ec_key: &mut EcKey) -> Result<()> {
+ ec_key.generate_key()?;
+ let cose_key = ec_key.cose_public_key()?;
+ let cose_key_data = cose_key.clone().to_vec().unwrap();
+ let deserialized_ec_key = EcKey::from_cose_public_key_slice(&cose_key_data)?;
+
+ assert_eq!(cose_key, deserialized_ec_key.cose_public_key()?);
+ Ok(())
+}
+
+#[test]
+fn ecdsa_p256_signing_and_verification_succeed() -> Result<()> {
+ let mut ec_key = EcKey::new_p256()?;
+ ec_key.generate_key()?;
+ let digester = Digester::sha256();
+ let digest = digester.digest(MESSAGE1)?;
+ assert_eq!(digest, sha256(MESSAGE1)?);
+
+ let signature = ec_key.ecdsa_sign(&digest)?;
+ ec_key.ecdsa_verify(&signature, &digest)?;
+ // Building a `PKey` from a temporary `CoseKey` should work as the lifetime
+ // of the `PKey` is not tied to the lifetime of the `CoseKey`.
+ let pkey = PKey::from_cose_public_key(&ec_key.cose_public_key()?)?;
+ pkey.verify(&signature, MESSAGE1, Some(digester))
+}
+
+#[test]
+fn ecdsa_p384_signing_and_verification_succeed() -> Result<()> {
+ let mut ec_key = EcKey::new_p384()?;
+ ec_key.generate_key()?;
+ let digester = Digester::sha384();
+ let digest = digester.digest(MESSAGE1)?;
+
+ let signature = ec_key.ecdsa_sign(&digest)?;
+ ec_key.ecdsa_verify(&signature, &digest)?;
+ let pkey = PKey::from_cose_public_key(&ec_key.cose_public_key()?)?;
+ pkey.verify(&signature, MESSAGE1, Some(digester))
+}
+
+#[test]
+fn verifying_ecdsa_p256_signed_with_a_different_key_fails() -> Result<()> {
+ let mut ec_key1 = EcKey::new_p256()?;
+ ec_key1.generate_key()?;
+ let digest = sha256(MESSAGE1)?;
+ let signature = ec_key1.ecdsa_sign(&digest)?;
+
+ let mut ec_key2 = EcKey::new_p256()?;
+ ec_key2.generate_key()?;
+ let err = ec_key2.ecdsa_verify(&signature, &digest).unwrap_err();
+ let expected_err = Error::CallFailed(ApiName::ECDSA_verify, EcdsaError::BadSignature.into());
+ assert_eq!(expected_err, err);
+
+ let pkey: PKey = ec_key2.try_into()?;
+ let err = pkey.verify(&signature, MESSAGE1, Some(Digester::sha256())).unwrap_err();
+ let expected_err =
+ Error::CallFailed(ApiName::EVP_DigestVerify, EcdsaError::BadSignature.into());
+ assert_eq!(expected_err, err);
+ Ok(())
+}
+
+#[test]
+fn verifying_ecdsa_p256_signed_with_a_different_message_fails() -> Result<()> {
+ let mut ec_key = EcKey::new_p256()?;
+ ec_key.generate_key()?;
+ let digest1 = sha256(MESSAGE1)?;
+ let signature = ec_key.ecdsa_sign(&digest1)?;
+ let digest2 = sha256(MESSAGE2)?;
+
+ let err = ec_key.ecdsa_verify(&signature, &digest2).unwrap_err();
+ let expected_err = Error::CallFailed(ApiName::ECDSA_verify, EcdsaError::BadSignature.into());
+ assert_eq!(expected_err, err);
+ Ok(())
+}
diff --git a/libs/bssl/tests/tests.rs b/libs/bssl/tests/tests.rs
index 4c0b0b0..02666d8 100644
--- a/libs/bssl/tests/tests.rs
+++ b/libs/bssl/tests/tests.rs
@@ -15,5 +15,6 @@
//! API tests of the crate `bssl_avf`.
mod aead_test;
+mod eckey_test;
mod hkdf_test;
mod hmac_test;
diff --git a/libs/cborutil/Android.bp b/libs/cborutil/Android.bp
index 4758c4b..96dbf09 100644
--- a/libs/cborutil/Android.bp
+++ b/libs/cborutil/Android.bp
@@ -24,6 +24,7 @@
rustlibs: [
"libciborium_nostd",
"libcoset_nostd",
+ "liblog_rust_nostd",
"libserde_nostd",
],
}
@@ -37,6 +38,7 @@
rustlibs: [
"libciborium",
"libcoset",
+ "liblog_rust",
"libserde",
],
}
diff --git a/libs/cborutil/src/lib.rs b/libs/cborutil/src/lib.rs
index 2ec5af4..6e834f1 100644
--- a/libs/cborutil/src/lib.rs
+++ b/libs/cborutil/src/lib.rs
@@ -18,8 +18,11 @@
extern crate alloc;
+use alloc::string::String;
use alloc::vec::Vec;
-use coset::{CoseError, Result};
+use ciborium::value::{Integer, Value};
+use coset::{CoseError, CoseKey, Label, Result};
+use log::error;
use serde::{de::DeserializeOwned, Serialize};
/// Serializes the given data to a CBOR-encoded byte vector.
@@ -39,3 +42,88 @@
Err(CoseError::ExtraneousData)
}
}
+
+/// Converts the provided value `v` to a value array.
+pub fn value_to_array(v: Value, context: &'static str) -> Result<Vec<Value>> {
+ v.into_array().map_err(|e| to_unexpected_item_error(&e, "array", context))
+}
+
+/// Converts the provided value `v` to a text string.
+pub fn value_to_text(v: Value, context: &'static str) -> Result<String> {
+ v.into_text().map_err(|e| to_unexpected_item_error(&e, "tstr", context))
+}
+
+/// Converts the provided value `v` to a map.
+pub fn value_to_map(v: Value, context: &'static str) -> Result<Vec<(Value, Value)>> {
+ v.into_map().map_err(|e| to_unexpected_item_error(&e, "map", context))
+}
+
+/// Converts the provided value `v` to a number.
+pub fn value_to_num<T: TryFrom<Integer>>(v: Value, context: &'static str) -> Result<T> {
+ let num = v.into_integer().map_err(|e| to_unexpected_item_error(&e, "int", context))?;
+ num.try_into().map_err(|_| {
+ error!("The provided value '{num:?}' is not a valid number: {context}");
+ CoseError::OutOfRangeIntegerValue
+ })
+}
+
+/// Converts the provided value `v` to a byte array of length `N`.
+pub fn value_to_byte_array<const N: usize>(v: Value, context: &'static str) -> Result<[u8; N]> {
+ let arr = value_to_bytes(v, context)?;
+ arr.try_into().map_err(|e| {
+ error!("The provided value '{context}' is not an array of length {N}: {e:?}");
+ CoseError::UnexpectedItem("bstr", "array of length {N}")
+ })
+}
+
+/// Converts the provided value `v` to a byte array.
+pub fn value_to_bytes(v: Value, context: &'static str) -> Result<Vec<u8>> {
+ v.into_bytes().map_err(|e| to_unexpected_item_error(&e, "bstr", context))
+}
+
+/// Builds a `CoseError::UnexpectedItem` error when the provided value `v` is not of the expected
+/// type `expected_type` and logs the error message with the provided `context`.
+pub fn to_unexpected_item_error(
+ v: &Value,
+ expected_type: &'static str,
+ context: &'static str,
+) -> CoseError {
+ let v_type = cbor_value_type(v);
+ assert!(v_type != expected_type);
+ error!("The provided value type '{v_type}' is not of type '{expected_type}': {context}");
+ CoseError::UnexpectedItem(v_type, expected_type)
+}
+
+/// Reads the type of the provided value `v`.
+pub fn cbor_value_type(v: &Value) -> &'static str {
+ match v {
+ Value::Integer(_) => "int",
+ Value::Bytes(_) => "bstr",
+ Value::Float(_) => "float",
+ Value::Text(_) => "tstr",
+ Value::Bool(_) => "bool",
+ Value::Null => "nul",
+ Value::Tag(_, _) => "tag",
+ Value::Array(_) => "array",
+ Value::Map(_) => "map",
+ _ => "other",
+ }
+}
+
+/// Returns the value of the given label in the given COSE key as bytes.
+pub fn get_label_value_as_bytes(key: &CoseKey, label: Label) -> Result<&[u8]> {
+ let v = get_label_value(key, label)?;
+ Ok(v.as_bytes().ok_or_else(|| {
+ to_unexpected_item_error(v, "bstr", "Get label value in CoseKey as bytes")
+ })?)
+}
+
+/// Returns the value of the given label in the given COSE key.
+pub fn get_label_value(key: &CoseKey, label: Label) -> Result<&Value> {
+ Ok(&key
+ .params
+ .iter()
+ .find(|(k, _)| k == &label)
+ .ok_or(CoseError::UnexpectedItem("", "Label not found in CoseKey"))?
+ .1)
+}
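A minimal sketch of the new cbor_util helpers (illustrative only, not part of the change; `cose_key` is any EC2 `CoseKey`, and the context strings are arbitrary labels that end up in error logs):

    use cbor_util::{get_label_value_as_bytes, value_to_num};
    use ciborium::Value;
    use coset::{
        iana::{self, EnumI64},
        CoseKey, Label, Result,
    };

    fn x_coordinate(cose_key: &CoseKey) -> Result<Vec<u8>> {
        // Pull the X coordinate out of an EC2 COSE_Key, as ec_key.rs does above.
        let x = get_label_value_as_bytes(cose_key, Label::Int(iana::Ec2KeyParameter::X.to_i64()))?;
        // Convert a CBOR integer to a concrete numeric type with a context string.
        let _n: u64 = value_to_num(Value::from(3), "example count")?;
        Ok(x.to_vec())
    }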
diff --git a/libs/cstr/Android.bp b/libs/cstr/Android.bp
new file mode 100644
index 0000000..4ea87df
--- /dev/null
+++ b/libs/cstr/Android.bp
@@ -0,0 +1,36 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library_rlib {
+ name: "libcstr",
+ crate_name: "cstr",
+ defaults: ["avf_build_flags_rust"],
+ srcs: ["src/lib.rs"],
+ edition: "2021",
+ host_supported: true,
+ prefer_rlib: true,
+ target: {
+ android: {
+ no_stdlibs: true,
+ stdlibs: [
+ "libcompiler_builtins.rust_sysroot",
+ "libcore.rust_sysroot",
+ ],
+ },
+ },
+ apex_available: [
+ "//apex_available:platform",
+ "//apex_available:anyapex",
+ ],
+}
+
+rust_test {
+ name: "libcstr.tests",
+ crate_name: "libcstr_test",
+ defaults: ["avf_build_flags_rust"],
+ srcs: ["src/lib.rs"],
+ test_suites: ["general-tests"],
+ prefer_rlib: true,
+ rustlibs: ["libcstr"],
+}
diff --git a/libs/cstr/src/lib.rs b/libs/cstr/src/lib.rs
new file mode 100644
index 0000000..ddf20fc
--- /dev/null
+++ b/libs/cstr/src/lib.rs
@@ -0,0 +1,50 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provide a safe const-compatible no_std macro for readable &'static CStr.
+
+#![no_std]
+
+/// Create &CStr out of &str literal
+#[macro_export]
+macro_rules! cstr {
+ ($str:literal) => {{
+ const S: &str = concat!($str, "\0");
+ const C: &::core::ffi::CStr = match ::core::ffi::CStr::from_bytes_with_nul(S.as_bytes()) {
+ Ok(v) => v,
+ Err(_) => panic!("string contains interior NUL"),
+ };
+ C
+ }};
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::ffi::CString;
+
+ #[test]
+ fn valid_input_string() {
+ let expected = CString::new("aaa").unwrap();
+ assert_eq!(cstr!("aaa"), expected.as_c_str());
+ }
+
+ #[test]
+ fn valid_empty_string() {
+ let expected = CString::new("").unwrap();
+ assert_eq!(cstr!(""), expected.as_c_str());
+ }
+
+ // As cstr!() panics at compile time, tests covering invalid inputs fail to compile!
+}
diff --git a/libs/devicemapper/src/util.rs b/libs/devicemapper/src/util.rs
index e8df424..cc071e4 100644
--- a/libs/devicemapper/src/util.rs
+++ b/libs/devicemapper/src/util.rs
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-use anyhow::{anyhow, bail, Result};
+use anyhow::{bail, Result};
use nix::sys::stat::FileStat;
use std::fs::File;
use std::os::unix::fs::FileTypeExt;
@@ -52,24 +52,6 @@
Ok(())
}
-/// Returns hexadecimal reprentation of a given byte array.
-pub fn hexstring_from(s: &[u8]) -> String {
- s.iter().map(|byte| format!("{:02x}", byte)).reduce(|i, j| i + &j).unwrap_or_default()
-}
-
-/// Parses a hexadecimal string into a byte array
-pub fn parse_hexstring(s: &str) -> Result<Vec<u8>> {
- let len = s.len();
- if len % 2 != 0 {
- bail!("length {} is not even", len)
- } else {
- (0..len)
- .step_by(2)
- .map(|i| u8::from_str_radix(&s[i..i + 2], 16).map_err(|e| anyhow!(e)))
- .collect()
- }
-}
-
/// fstat that accepts a path rather than FD
pub fn fstat(p: &Path) -> Result<FileStat> {
let f = File::open(p)?;
diff --git a/libs/devicemapper/src/verity.rs b/libs/devicemapper/src/verity.rs
index 24584f8..bbd9d38 100644
--- a/libs/devicemapper/src/verity.rs
+++ b/libs/devicemapper/src/verity.rs
@@ -151,7 +151,7 @@
};
let root_digest = if let Some(root_digest) = self.root_digest {
- hexstring_from(root_digest)
+ hex::encode(root_digest)
} else {
bail!("root digest is not set")
};
@@ -159,7 +159,7 @@
let salt = if self.salt.is_none() || self.salt.unwrap().is_empty() {
"-".to_string() // Note. It's not an empty string!
} else {
- hexstring_from(self.salt.unwrap())
+ hex::encode(self.salt.unwrap())
};
// Step2: serialize the information according to the spec, which is ...
diff --git a/libs/dice/open_dice/Android.bp b/libs/dice/open_dice/Android.bp
index 646080d..2d0f52c 100644
--- a/libs/dice/open_dice/Android.bp
+++ b/libs/dice/open_dice/Android.bp
@@ -27,12 +27,14 @@
],
visibility: [
"//packages/modules/Virtualization:__subpackages__",
+ "//system/authgraph/tests:__subpackages__",
],
}
rust_library {
name: "libdiced_open_dice",
defaults: ["libdiced_open_dice_defaults"],
+ host_supported: true,
vendor_available: true,
rustlibs: [
"libopen_dice_android_bindgen",
@@ -54,6 +56,7 @@
],
visibility: [
"//packages/modules/Virtualization:__subpackages__",
+ "//system/authgraph/tests:__subpackages__",
],
apex_available: [
"//apex_available:platform",
diff --git a/libs/dice/open_dice/src/bcc.rs b/libs/dice/open_dice/src/bcc.rs
index 199e1a9..9c9545b 100644
--- a/libs/dice/open_dice/src/bcc.rs
+++ b/libs/dice/open_dice/src/bcc.rs
@@ -20,7 +20,7 @@
DiceAndroidConfigValues, DiceAndroidFormatConfigDescriptor, DiceAndroidHandoverMainFlow,
DiceAndroidHandoverParse, DiceAndroidMainFlow, DICE_ANDROID_CONFIG_COMPONENT_NAME,
DICE_ANDROID_CONFIG_COMPONENT_VERSION, DICE_ANDROID_CONFIG_RESETTABLE,
- DICE_ANDROID_CONFIG_SECURITY_VERSION,
+ DICE_ANDROID_CONFIG_RKP_VM_MARKER, DICE_ANDROID_CONFIG_SECURITY_VERSION,
};
use std::{ffi::CStr, ptr};
@@ -36,6 +36,8 @@
pub resettable: bool,
/// Monotonically increasing version of the component.
pub security_version: Option<u64>,
+ /// Whether the component can take part in running the RKP VM.
+ pub rkp_vm_marker: bool,
}
/// Formats a configuration descriptor following the Android Profile for DICE specification.
@@ -58,6 +60,9 @@
configs |= DICE_ANDROID_CONFIG_SECURITY_VERSION;
version
});
+ if values.rkp_vm_marker {
+ configs |= DICE_ANDROID_CONFIG_RKP_VM_MARKER;
+ }
let values =
DiceAndroidConfigValues { configs, component_name, component_version, security_version };
diff --git a/libs/hyp/src/hypervisor.rs b/libs/hyp/src/hypervisor.rs
index 309f967..c53b886 100644
--- a/libs/hyp/src/hypervisor.rs
+++ b/libs/hyp/src/hypervisor.rs
@@ -24,7 +24,9 @@
use crate::error::{Error, Result};
use alloc::boxed::Box;
use common::Hypervisor;
-pub use common::{MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE};
+pub use common::{
+ DeviceAssigningHypervisor, MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE,
+};
pub use geniezone::GeniezoneError;
use geniezone::GeniezoneHypervisor;
use gunyah::GunyahHypervisor;
@@ -122,3 +124,8 @@
pub fn get_mem_sharer() -> Option<&'static dyn MemSharingHypervisor> {
get_hypervisor().as_mem_sharer()
}
+
+/// Gets the device assigning hypervisor singleton, if any.
+pub fn get_device_assigner() -> Option<&'static dyn DeviceAssigningHypervisor> {
+ get_hypervisor().as_device_assigner()
+}
diff --git a/libs/hyp/src/hypervisor/common.rs b/libs/hyp/src/hypervisor/common.rs
index 70fdd0a..eaac652 100644
--- a/libs/hyp/src/hypervisor/common.rs
+++ b/libs/hyp/src/hypervisor/common.rs
@@ -31,6 +31,11 @@
fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
None
}
+
+ /// Returns the hypervisor's device assigning implementation, if any.
+ fn as_device_assigner(&self) -> Option<&dyn DeviceAssigningHypervisor> {
+ None
+ }
}
pub trait MmioGuardedHypervisor {
@@ -73,3 +78,12 @@
/// Returns the memory protection granule size in bytes.
fn granule(&self) -> Result<usize>;
}
+
+/// Device assigning hypervisor
+pub trait DeviceAssigningHypervisor {
+ /// Returns MMIO token.
+ fn get_phys_mmio_token(&self, base_ipa: u64, size: u64) -> Result<u64>;
+
+ /// Returns DMA token as a tuple of (phys_iommu_id, phys_sid).
+ fn get_phys_iommu_token(&self, pviommu_id: u64, vsid: u64) -> Result<(u64, u64)>;
+}
diff --git a/libs/hyp/src/hypervisor/kvm.rs b/libs/hyp/src/hypervisor/kvm.rs
index 5835346..720318e 100644
--- a/libs/hyp/src/hypervisor/kvm.rs
+++ b/libs/hyp/src/hypervisor/kvm.rs
@@ -14,7 +14,9 @@
//! Wrappers around calls to the KVM hypervisor.
-use super::common::{Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor};
+use super::common::{
+ DeviceAssigningHypervisor, Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor,
+};
use crate::error::{Error, Result};
use crate::util::page_address;
use core::fmt::{self, Display, Formatter};
@@ -70,6 +72,9 @@
const VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID: u32 = 0xc6000007;
const VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID: u32 = 0xc6000008;
+const VENDOR_HYP_KVM_DEV_REQ_MMIO_FUNC_ID: u32 = 0xc6000012;
+const VENDOR_HYP_KVM_DEV_REQ_DMA_FUNC_ID: u32 = 0xc6000013;
+
pub(super) struct RegularKvmHypervisor;
impl RegularKvmHypervisor {
@@ -90,6 +95,10 @@
fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
Some(self)
}
+
+ fn as_device_assigner(&self) -> Option<&dyn DeviceAssigningHypervisor> {
+ Some(self)
+ }
}
impl MmioGuardedHypervisor for ProtectedKvmHypervisor {
@@ -153,6 +162,26 @@
}
}
+impl DeviceAssigningHypervisor for ProtectedKvmHypervisor {
+ fn get_phys_mmio_token(&self, base_ipa: u64, size: u64) -> Result<u64> {
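+        // Only the first two argument registers are used; the rest stay zero.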
+ let mut args = [0u64; 17];
+ args[0] = base_ipa;
+ args[1] = size;
+
+ let ret = checked_hvc64_expect_results(VENDOR_HYP_KVM_DEV_REQ_MMIO_FUNC_ID, args)?;
+ Ok(ret[0])
+ }
+
+ fn get_phys_iommu_token(&self, pviommu_id: u64, vsid: u64) -> Result<(u64, u64)> {
+ let mut args = [0u64; 17];
+ args[0] = pviommu_id;
+ args[1] = vsid;
+
+ let ret = checked_hvc64_expect_results(VENDOR_HYP_KVM_DEV_REQ_DMA_FUNC_ID, args)?;
+ Ok((ret[0], ret[1]))
+ }
+}
+
fn checked_hvc64_expect_zero(function: u32, args: [u64; 17]) -> Result<()> {
success_or_error_64(hvc64(function, args)[0]).map_err(|e| Error::KvmError(e, function))
}
@@ -160,3 +189,9 @@
fn checked_hvc64(function: u32, args: [u64; 17]) -> Result<u64> {
positive_or_error_64(hvc64(function, args)[0]).map_err(|e| Error::KvmError(e, function))
}
+
+fn checked_hvc64_expect_results(function: u32, args: [u64; 17]) -> Result<[u64; 17]> {
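+    // The first returned register holds the status; on success, return the remaining
+    // registers as the call results.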
+ let [ret, results @ ..] = hvc64(function, args);
+ success_or_error_64(ret).map_err(|e| Error::KvmError(e, function))?;
+ Ok(results)
+}
diff --git a/libs/hyp/src/lib.rs b/libs/hyp/src/lib.rs
index 486a181..6a23585 100644
--- a/libs/hyp/src/lib.rs
+++ b/libs/hyp/src/lib.rs
@@ -20,7 +20,10 @@
mod hypervisor;
mod util;
+pub use crate::hypervisor::DeviceAssigningHypervisor;
pub use error::{Error, Result};
-pub use hypervisor::{get_mem_sharer, get_mmio_guard, KvmError, MMIO_GUARD_GRANULE_SIZE};
+pub use hypervisor::{
+ get_device_assigner, get_mem_sharer, get_mmio_guard, KvmError, MMIO_GUARD_GRANULE_SIZE,
+};
use hypervisor::GeniezoneError;
diff --git a/libs/libfdt/Android.bp b/libs/libfdt/Android.bp
index b889ee5..ba9e971 100644
--- a/libs/libfdt/Android.bp
+++ b/libs/libfdt/Android.bp
@@ -37,6 +37,7 @@
"libcore.rust_sysroot",
],
rustlibs: [
+ "libcstr",
"liblibfdt_bindgen",
"libzerocopy_nostd",
],
@@ -61,28 +62,41 @@
],
prefer_rlib: true,
rustlibs: [
+ "libcstr",
"liblibfdt",
],
}
genrule {
name: "fdt_test_tree_one_memory_range_dtb",
- defaults: ["dts_to_dtb"],
- srcs: ["tests/data/test_tree_one_memory_range.dts"],
+ tools: ["dtc"],
+ srcs: [
+ "tests/data/test_tree_one_memory_range.dts",
+ "tests/data/test_tree_no_memory_node.dts",
+ ],
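+    // Only the first source is compiled; the extra .dts is listed so that it is
+    // available to dtc (e.g. for /include/ resolution).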
+ cmd: "$(location dtc) -I dts -O dtb $(location tests/data/test_tree_one_memory_range.dts) -o $(out)",
out: ["data/test_tree_one_memory_range.dtb"],
}
genrule {
name: "fdt_test_tree_multiple_memory_ranges_dtb",
- defaults: ["dts_to_dtb"],
- srcs: ["tests/data/test_tree_multiple_memory_ranges.dts"],
+ tools: ["dtc"],
+ srcs: [
+ "tests/data/test_tree_multiple_memory_ranges.dts",
+ "tests/data/test_tree_no_memory_node.dts",
+ ],
+ cmd: "$(location dtc) -I dts -O dtb $(location tests/data/test_tree_multiple_memory_ranges.dts) -o $(out)",
out: ["data/test_tree_multiple_memory_ranges.dtb"],
}
genrule {
name: "fdt_test_tree_empty_memory_range_dtb",
- defaults: ["dts_to_dtb"],
- srcs: ["tests/data/test_tree_empty_memory_range.dts"],
+ tools: ["dtc"],
+ srcs: [
+ "tests/data/test_tree_empty_memory_range.dts",
+ "tests/data/test_tree_no_memory_node.dts",
+ ],
+ cmd: "$(location dtc) -I dts -O dtb $(location tests/data/test_tree_empty_memory_range.dts) -o $(out)",
out: ["data/test_tree_empty_memory_range.dtb"],
}
diff --git a/libs/libfdt/src/iterators.rs b/libs/libfdt/src/iterators.rs
index 000f723..a524655 100644
--- a/libs/libfdt/src/iterators.rs
+++ b/libs/libfdt/src/iterators.rs
@@ -323,6 +323,29 @@
}
}
+/// Iterator over descendants
+#[derive(Debug)]
+pub struct DescendantsIterator<'a> {
+ node: Option<(FdtNode<'a>, usize)>,
+}
+
+impl<'a> DescendantsIterator<'a> {
+ pub(crate) fn new(node: &'a FdtNode) -> Self {
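+        // Depth 0 marks the starting node itself; next() only yields strict
+        // descendants, whose depths are relative to this node.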
+ Self { node: Some((*node, 0)) }
+ }
+}
+
+impl<'a> Iterator for DescendantsIterator<'a> {
+ type Item = (FdtNode<'a>, usize);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let (node, depth) = self.node?;
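+        // Advance in document order; stop on error or once the depth drops back to 0,
+        // i.e. when the traversal has left the starting node's subtree.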
+ self.node = node.next_node(depth).ok().flatten().filter(|(_, depth)| *depth > 0);
+
+ self.node
+ }
+}
+
/// Iterator over properties
#[derive(Debug)]
pub struct PropertyIterator<'a> {
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index b811730..7eb08b2 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -20,8 +20,8 @@
mod iterators;
pub use iterators::{
- AddressRange, CellIterator, CompatibleIterator, MemRegIterator, PropertyIterator,
- RangesIterator, Reg, RegIterator, SubnodeIterator,
+ AddressRange, CellIterator, CompatibleIterator, DescendantsIterator, MemRegIterator,
+ PropertyIterator, RangesIterator, Reg, RegIterator, SubnodeIterator,
};
use core::cmp::max;
@@ -31,15 +31,9 @@
use core::ops::Range;
use core::ptr;
use core::result;
+use cstr::cstr;
use zerocopy::AsBytes as _;
-// TODO(b/308694211): Use cstr!() from vmbase
-macro_rules! cstr {
- ($str:literal) => {{
- core::ffi::CStr::from_bytes_with_nul(concat!($str, "\0").as_bytes()).unwrap()
- }};
-}
-
/// Error type corresponding to libfdt error codes.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum FdtError {
@@ -492,6 +486,23 @@
Ok(fdt_err_or_option(ret)?.map(|offset| FdtNode { fdt: self.fdt, offset }))
}
+ /// Returns an iterator of descendants
+ pub fn descendants(&'a self) -> DescendantsIterator<'a> {
+ DescendantsIterator::new(self)
+ }
+
+ fn next_node(&self, depth: usize) -> Result<Option<(Self, usize)>> {
+ let mut next_depth: c_int = depth.try_into().unwrap();
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe {
+ libfdt_bindgen::fdt_next_node(self.fdt.as_ptr(), self.offset, &mut next_depth)
+ };
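+        // A negative depth means fdt_next_node() has moved above the node this
+        // traversal started from, so there is nothing left to visit.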
+ let Ok(next_depth) = usize::try_from(next_depth) else {
+ return Ok(None);
+ };
+ Ok(fdt_err_or_option(ret)?.map(|offset| (FdtNode { fdt: self.fdt, offset }, next_depth)))
+ }
+
/// Returns an iterator of properties
pub fn properties(&'a self) -> Result<PropertyIterator<'a>> {
PropertyIterator::new(self)
@@ -504,6 +515,44 @@
fdt_err_or_option(ret)?.map(|offset| FdtProperty::new(self.fdt, offset)).transpose()
}
+
+ /// Returns the phandle
+ pub fn get_phandle(&self) -> Result<Option<Phandle>> {
+        // This reimplements fdt_get_phandle() because that function doesn't return an error code.
+ if let Some(prop) = self.getprop_u32(cstr!("phandle"))? {
+ Ok(Some(prop.try_into()?))
+ } else if let Some(prop) = self.getprop_u32(cstr!("linux,phandle"))? {
+ Ok(Some(prop.try_into()?))
+ } else {
+ Ok(None)
+ }
+ }
+
+    /// Returns the subnode of the given name.
+ pub fn subnode(&self, name: &CStr) -> Result<Option<Self>> {
+ let offset = self.subnode_offset(name.to_bytes())?;
+ Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
+ }
+
+    /// Returns the subnode of the given name bytes. The name doesn't need to be nul-terminated.
+ pub fn subnode_with_name_bytes(&self, name: &[u8]) -> Result<Option<Self>> {
+ let offset = self.subnode_offset(name)?;
+ Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
+ }
+
+ fn subnode_offset(&self, name: &[u8]) -> Result<Option<c_int>> {
+ let namelen = name.len().try_into().unwrap();
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
+ let ret = unsafe {
+ libfdt_bindgen::fdt_subnode_offset_namelen(
+ self.fdt.as_ptr(),
+ self.offset,
+ name.as_ptr().cast::<_>(),
+ namelen,
+ )
+ };
+ fdt_err_or_option(ret)
+ }
}
impl<'a> PartialEq for FdtNode<'a> {
@@ -514,7 +563,7 @@
/// Phandle of a FDT node
#[repr(transparent)]
-#[derive(Debug, Copy, Clone, PartialEq)]
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct Phandle(u32);
impl Phandle {
@@ -728,24 +777,38 @@
fdt_err(ret)
}
- /// Returns the subnode of the given name with len.
- pub fn subnode_with_namelen(&'a mut self, name: &CStr, namelen: usize) -> Result<Option<Self>> {
- let offset = self.subnode_offset(&name.to_bytes()[..namelen])?;
- Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
+    /// Returns the first subnode of this node
+ pub fn first_subnode(&'a mut self) -> Result<Option<Self>> {
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe { libfdt_bindgen::fdt_first_subnode(self.fdt.as_ptr(), self.offset) };
+
+ Ok(fdt_err_or_option(ret)?.map(|offset| Self { fdt: self.fdt, offset }))
}
- fn subnode_offset(&self, name: &[u8]) -> Result<Option<c_int>> {
- let namelen = name.len().try_into().unwrap();
- // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
- let ret = unsafe {
- libfdt_bindgen::fdt_subnode_offset_namelen(
- self.fdt.as_ptr(),
- self.offset,
- name.as_ptr().cast::<_>(),
- namelen,
- )
- };
- fdt_err_or_option(ret)
+    /// Returns the next subnode that shares the same parent as this node
+ pub fn next_subnode(self) -> Result<Option<Self>> {
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe { libfdt_bindgen::fdt_next_subnode(self.fdt.as_ptr(), self.offset) };
+
+ Ok(fdt_err_or_option(ret)?.map(|offset| Self { fdt: self.fdt, offset }))
+ }
+
+ /// Deletes the current node and returns the next subnode
+ pub fn delete_and_next_subnode(mut self) -> Result<Option<Self>> {
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe { libfdt_bindgen::fdt_next_subnode(self.fdt.as_ptr(), self.offset) };
+
+ let next_offset = fdt_err_or_option(ret)?;
+
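+        // Guard against fdt_next_subnode() returning this node's own offset, which
+        // would mean returning a node that is about to be deleted.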
+ if Some(self.offset) == next_offset {
+ return Err(FdtError::Internal);
+ }
+
+        // SAFETY: nop_self() only touches the bytes of this node, its properties and subnodes, and
+        // doesn't alter any other part of the tree, so self.fdt and next_offset remain valid.
+ unsafe { self.nop_self()? };
+
+ Ok(next_offset.map(|offset| Self { fdt: self.fdt, offset }))
}
fn parent(&'a self) -> Result<FdtNode<'a>> {
@@ -1012,9 +1075,20 @@
/// Returns a node with the phandle
pub fn node_with_phandle(&self, phandle: Phandle) -> Result<Option<FdtNode>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let offset = self.node_offset_with_phandle(phandle)?;
+ Ok(offset.map(|offset| FdtNode { fdt: self, offset }))
+ }
+
+ /// Returns a mutable node with the phandle
+ pub fn node_mut_with_phandle(&mut self, phandle: Phandle) -> Result<Option<FdtNodeMut>> {
+ let offset = self.node_offset_with_phandle(phandle)?;
+ Ok(offset.map(|offset| FdtNodeMut { fdt: self, offset }))
+ }
+
+ fn node_offset_with_phandle(&self, phandle: Phandle) -> Result<Option<c_int>> {
+ // SAFETY: Accesses are constrained to the DT totalsize.
let ret = unsafe { libfdt_bindgen::fdt_node_offset_by_phandle(self.as_ptr(), phandle.0) };
- Ok(fdt_err_or_option(ret)?.map(|offset| FdtNode { fdt: self, offset }))
+ fdt_err_or_option(ret)
}
/// Returns the mutable root node of the tree.
diff --git a/libs/libfdt/tests/api_test.rs b/libs/libfdt/tests/api_test.rs
index bc306ad..e68557f 100644
--- a/libs/libfdt/tests/api_test.rs
+++ b/libs/libfdt/tests/api_test.rs
@@ -16,23 +16,13 @@
//! Integration tests of the library libfdt.
+use core::ffi::CStr;
+use cstr::cstr;
use libfdt::{Fdt, FdtError, FdtNodeMut, Phandle};
use std::ffi::CString;
use std::fs;
use std::ops::Range;
-// TODO(b/308694211): Use cstr!() from vmbase
-macro_rules! cstr {
- ($str:literal) => {{
- const S: &str = concat!($str, "\0");
- const C: &::core::ffi::CStr = match ::core::ffi::CStr::from_bytes_with_nul(S.as_bytes()) {
- Ok(v) => v,
- Err(_) => panic!("string contains interior NUL"),
- };
- C
- }};
-}
-
const TEST_TREE_WITH_ONE_MEMORY_RANGE_PATH: &str = "data/test_tree_one_memory_range.dtb";
const TEST_TREE_WITH_MULTIPLE_MEMORY_RANGES_PATH: &str =
"data/test_tree_multiple_memory_ranges.dtb";
@@ -106,11 +96,11 @@
let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
let fdt = Fdt::from_slice(&data).unwrap();
let root = fdt.root().unwrap();
- let expected = [cstr!("cpus"), cstr!("randomnode"), cstr!("chosen")];
+ let expected = [Ok(cstr!("cpus")), Ok(cstr!("randomnode")), Ok(cstr!("chosen"))];
- for (node, name) in root.subnodes().unwrap().zip(expected) {
- assert_eq!(node.name(), Ok(name));
- }
+ let root_subnodes = root.subnodes().unwrap();
+ let subnode_names: Vec<_> = root_subnodes.map(|node| node.name()).collect();
+ assert_eq!(subnode_names, expected);
}
#[test]
@@ -119,18 +109,19 @@
let fdt = Fdt::from_slice(&data).unwrap();
let root = fdt.root().unwrap();
let one_be = 0x1_u32.to_be_bytes();
- let expected = [
- (cstr!("model"), b"MyBoardName\0".as_ref()),
- (cstr!("compatible"), b"MyBoardName\0MyBoardFamilyName\0".as_ref()),
- (cstr!("#address-cells"), &one_be),
- (cstr!("#size-cells"), &one_be),
- (cstr!("empty_prop"), &[]),
+ type Result<T> = core::result::Result<T, FdtError>;
+ let expected: Vec<(Result<&CStr>, Result<&[u8]>)> = vec![
+ (Ok(cstr!("model")), Ok(b"MyBoardName\0".as_ref())),
+ (Ok(cstr!("compatible")), Ok(b"MyBoardName\0MyBoardFamilyName\0".as_ref())),
+ (Ok(cstr!("#address-cells")), Ok(&one_be)),
+ (Ok(cstr!("#size-cells")), Ok(&one_be)),
+ (Ok(cstr!("empty_prop")), Ok(&[])),
];
let properties = root.properties().unwrap();
- for (prop, (name, value)) in properties.zip(expected.into_iter()) {
- assert_eq!((prop.name(), prop.value()), (Ok(name), Ok(value)));
- }
+ let subnode_properties: Vec<_> = properties.map(|prop| (prop.name(), prop.value())).collect();
+
+ assert_eq!(subnode_properties, expected);
}
#[test]
@@ -138,12 +129,16 @@
let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
let fdt = Fdt::from_slice(&data).unwrap();
let node = fdt.node(cstr!("/cpus/PowerPC,970@1")).unwrap().unwrap();
- let expected = [cstr!(""), cstr!("cpus"), cstr!("PowerPC,970@1")];
+ let expected = vec![Ok(cstr!("")), Ok(cstr!("cpus")), Ok(cstr!("PowerPC,970@1"))];
- for (depth, name) in expected.into_iter().enumerate() {
- let supernode = node.supernode_at_depth(depth).unwrap();
- assert_eq!(supernode.name(), Ok(name));
+ let mut supernode_names = vec![];
+ let mut depth = 0;
+ while let Ok(supernode) = node.supernode_at_depth(depth) {
+ supernode_names.push(supernode.name());
+ depth += 1;
}
+
+ assert_eq!(supernode_names, expected);
}
#[test]
@@ -200,6 +195,40 @@
}
#[test]
+fn node_mut_with_phandle() {
+ let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut data).unwrap();
+
+ // Test linux,phandle
+ let phandle = Phandle::new(0xFF).unwrap();
+ let node: FdtNodeMut = fdt.node_mut_with_phandle(phandle).unwrap().unwrap();
+ assert_eq!(node.as_node().name(), Ok(cstr!("node_zz")));
+
+ // Test phandle
+ let phandle = Phandle::new(0x22).unwrap();
+ let node: FdtNodeMut = fdt.node_mut_with_phandle(phandle).unwrap().unwrap();
+ assert_eq!(node.as_node().name(), Ok(cstr!("node_abc")));
+}
+
+#[test]
+fn node_get_phandle() {
+ let data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_slice(&data).unwrap();
+
+ // Test linux,phandle
+ let node = fdt.node(cstr!("/node_z/node_zz")).unwrap().unwrap();
+ assert_eq!(node.get_phandle(), Ok(Phandle::new(0xFF)));
+
+ // Test phandle
+ let node = fdt.node(cstr!("/node_a/node_ab/node_abc")).unwrap().unwrap();
+ assert_eq!(node.get_phandle(), Ok(Phandle::new(0x22)));
+
+ // Test no phandle
+ let node = fdt.node(cstr!("/node_b")).unwrap().unwrap();
+ assert_eq!(node.get_phandle(), Ok(None));
+}
+
+#[test]
fn node_nop() {
let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
let fdt = Fdt::from_mut_slice(&mut data).unwrap();
@@ -233,14 +262,15 @@
let subnode_name = cstr!("123456789");
for len in 0..subnode_name.to_bytes().len() {
- let mut node = fdt.node_mut(node_path).unwrap().unwrap();
- assert!(node.subnode_with_namelen(subnode_name, len).unwrap().is_none());
+ let name = &subnode_name.to_bytes()[0..len];
+ let node = fdt.node(node_path).unwrap().unwrap();
+ assert_eq!(Ok(None), node.subnode_with_name_bytes(name));
let mut node = fdt.node_mut(node_path).unwrap().unwrap();
node.add_subnode_with_namelen(subnode_name, len).unwrap();
- let mut node = fdt.node_mut(node_path).unwrap().unwrap();
- assert!(node.subnode_with_namelen(subnode_name, len).unwrap().is_some());
+ let node = fdt.node(node_path).unwrap().unwrap();
+ assert_ne!(Ok(None), node.subnode_with_name_bytes(name));
}
let node_path = node_path.to_str().unwrap();
@@ -254,6 +284,48 @@
}
#[test]
+fn node_subnode() {
+ let data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_slice(&data).unwrap();
+
+ let name = cstr!("node_a");
+ let root = fdt.root().unwrap();
+ let node = root.subnode(name).unwrap();
+ assert_ne!(None, node);
+ let node = node.unwrap();
+
+ assert_eq!(Ok(name), node.name());
+}
+
+#[test]
+fn node_subnode_with_name_bytes() {
+ let data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_slice(&data).unwrap();
+
+ let name = b"node_aaaaa";
+ let root = fdt.root().unwrap();
+ let node = root.subnode_with_name_bytes(&name[0..6]).unwrap();
+ assert_ne!(None, node);
+ let node = node.unwrap();
+
+ assert_eq!(Ok(cstr!("node_a")), node.name());
+}
+
+#[test]
+fn node_subnode_borrow_checker() {
+ let data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_slice(&data).unwrap();
+
+ let name = cstr!("node_a");
+ let node = {
+ let root = fdt.root().unwrap();
+ root.subnode(name).unwrap().unwrap()
+ };
+
+ assert_eq!(Ok(name), node.name());
+}
+
+#[test]
fn fdt_symbols() {
let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
let fdt = Fdt::from_mut_slice(&mut data).unwrap();
@@ -279,3 +351,51 @@
// Just check whether borrow checker doesn't complain this.
memory.setprop_inplace(cstr!("device_type"), b"MEMORY\0").unwrap();
}
+
+#[test]
+fn node_descendants() {
+ let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut data).unwrap();
+
+ let node_z = fdt.node(cstr!("/node_z")).unwrap().unwrap();
+ let descendants: Vec<_> =
+ node_z.descendants().map(|(node, depth)| (node.name().unwrap(), depth)).collect();
+
+ assert_eq!(
+ descendants,
+ vec![
+ (cstr!("node_za"), 1),
+ (cstr!("node_zb"), 1),
+ (cstr!("node_zz"), 1),
+ (cstr!("node_zzz"), 2)
+ ]
+ );
+}
+
+#[test]
+fn node_mut_delete_and_next_subnode() {
+ let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut data).unwrap();
+
+ let mut root = fdt.root_mut().unwrap();
+ let mut subnode_iter = root.first_subnode().unwrap();
+
+ while let Some(subnode) = subnode_iter {
+ if subnode.as_node().name() == Ok(cstr!("node_z")) {
+ subnode_iter = subnode.delete_and_next_subnode().unwrap();
+ } else {
+ subnode_iter = subnode.next_subnode().unwrap();
+ }
+ }
+
+ let root = fdt.root().unwrap();
+ let expected_names = vec![
+ Ok(cstr!("node_a")),
+ Ok(cstr!("node_b")),
+ Ok(cstr!("node_c")),
+ Ok(cstr!("__symbols__")),
+ ];
+ let subnode_names: Vec<_> = root.subnodes().unwrap().map(|node| node.name()).collect();
+
+ assert_eq!(expected_names, subnode_names);
+}
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 42ff4b0..f98f3af 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -243,7 +243,35 @@
"echo ro.product.cpu.abi=arm64-v8a) > $(out)",
}
-logical_partition {
+// Need to keep microdroid_vendor for the release configurations that don't
+// have the RELEASE_AVF_ENABLE_VENDOR_MODULES build flag enabled.
+android_filesystem {
+ name: "microdroid_vendor",
+ partition_name: "vendor",
+ use_avb: true,
+ avb_private_key: ":microdroid_sign_key",
+ avb_algorithm: "SHA256_RSA4096",
+ avb_hash_algorithm: "sha256",
+ file_contexts: ":microdroid_vendor_file_contexts.gen",
+    // For deterministic output, use a fixed fake_timestamp and a hard-coded uuid.
+ fake_timestamp: "1611569676",
+ // python -c "import uuid; print(uuid.uuid5(uuid.NAMESPACE_URL, 'www.android.com/avf/microdroid/vendor'))"
+ uuid: "156d40d7-8d8e-5c99-8913-ec82de549a70",
+}
+
+soong_config_module_type {
+ name: "flag_aware_microdroid_super_partition",
+ module_type: "logical_partition",
+ config_namespace: "ANDROID",
+ bool_variables: [
+ "release_avf_enable_vendor_modules",
+ ],
+ properties: [
+ "default_group",
+ ],
+}
+
+flag_aware_microdroid_super_partition {
name: "microdroid_super",
sparse: true,
size: "auto",
@@ -253,6 +281,18 @@
filesystem: ":microdroid",
},
],
+ soong_config_variables: {
+ release_avf_enable_vendor_modules: {
+ conditions_default: {
+ default_group: [
+ {
+ name: "vendor_a",
+ filesystem: ":microdroid_vendor",
+ },
+ ],
+ },
+ },
+ },
}
android_filesystem {
@@ -330,13 +370,40 @@
srcs: [":avb_testkey_rsa4096"],
}
-vbmeta {
+soong_config_module_type {
+ name: "flag_aware_microdroid_vbmeta",
+ module_type: "vbmeta",
+ config_namespace: "ANDROID",
+ bool_variables: [
+ "release_avf_enable_vendor_modules",
+ ],
+ properties: [
+ "partitions",
+ ],
+}
+
+flag_aware_microdroid_vbmeta {
name: "microdroid_vbmeta",
partition_name: "vbmeta",
private_key: ":microdroid_sign_key",
partitions: [
"microdroid",
],
+ soong_config_variables: {
+ release_avf_enable_vendor_modules: {
+ partitions: ["microdroid_vendor"],
+ },
+ },
+    // TODO(b/312809093): Remove this hard-coded property once there is a
+    // long-term solution for the microdroid vendor partition SPL. The value is
+    // the minimum SPL that the microdroid vendor partition will have; it is
+    // needed to pass the 'IsStandaloneImageRollback' check.
+ avb_properties: [
+ {
+ key: "com.android.build.microdroid-vendor.security_patch",
+ value: "2023-12-05",
+ },
+ ],
}
prebuilt_etc {
@@ -372,11 +439,8 @@
// python -c "import hashlib; print(hashlib.sha256(b'initrd_normal').hexdigest())"
initrd_normal_salt = "8041a07d54ac82290f6d90bac1fa8d7fdbc4db974d101d60faf294749d1ebaf8"
-avb_gen_vbmeta_image {
- name: "microdroid_initrd_normal_hashdesc",
- src: ":microdroid_initrd_normal",
- partition_name: "initrd_normal",
- salt: initrd_normal_salt,
+avb_gen_vbmeta_image_defaults {
+ name: "microdroid_initrd_defaults",
enabled: false,
arch: {
// Microdroid kernel is only available in these architectures.
@@ -389,29 +453,38 @@
},
}
+avb_gen_vbmeta_image_defaults {
+ name: "microdroid_initrd_normal_defaults",
+ defaults: ["microdroid_initrd_defaults"],
+ partition_name: "initrd_normal",
+ salt: initrd_normal_salt,
+}
+
+avb_gen_vbmeta_image {
+ name: "microdroid_initrd_normal_hashdesc",
+ defaults: ["microdroid_initrd_normal_defaults"],
+ src: ":microdroid_initrd_normal",
+}
+
// python -c "import hashlib; print(hashlib.sha256(b'initrd_debug').hexdigest())"
initrd_debug_salt = "8ab9dc9cb7e6456700ff6ef18c6b4c3acc24c5fa5381b829563f8d7a415d869a"
-avb_gen_vbmeta_image {
- name: "microdroid_initrd_debug_hashdesc",
- src: ":microdroid_initrd_debuggable",
+avb_gen_vbmeta_image_defaults {
+ name: "microdroid_initrd_debug_defaults",
+ defaults: ["microdroid_initrd_defaults"],
partition_name: "initrd_debug",
salt: initrd_debug_salt,
- enabled: false,
- arch: {
- // Microdroid kernel is only available in these architectures.
- arm64: {
- enabled: true,
- },
- x86_64: {
- enabled: true,
- },
- },
+}
+
+avb_gen_vbmeta_image {
+ name: "microdroid_initrd_debug_hashdesc",
+ defaults: ["microdroid_initrd_debug_defaults"],
+ src: ":microdroid_initrd_debuggable",
}
soong_config_module_type {
- name: "flag_aware_avb_add_hash_footer",
- module_type: "avb_add_hash_footer",
+ name: "flag_aware_avb_add_hash_footer_defaults",
+ module_type: "avb_add_hash_footer_defaults",
config_namespace: "ANDROID",
bool_variables: [
"release_avf_enable_llpvm_changes",
@@ -422,28 +495,21 @@
],
}
-flag_aware_avb_add_hash_footer {
- name: "microdroid_kernel_signed",
+flag_aware_avb_add_hash_footer_defaults {
+ name: "microdroid_kernel_signed_defaults",
src: ":empty_file",
- filename: "microdroid_kernel",
partition_name: "boot",
private_key: ":microdroid_sign_key",
salt: bootloader_salt,
enabled: false,
arch: {
arm64: {
- src: ":microdroid_kernel_prebuilts-6.1-arm64",
enabled: true,
},
x86_64: {
- src: ":microdroid_kernel_prebuilts-6.1-x86_64",
enabled: true,
},
},
- include_descriptors_from_images: [
- ":microdroid_initrd_normal_hashdesc",
- ":microdroid_initrd_debug_hashdesc",
- ],
// Below are properties that are conditionally set depending on value of build flags.
soong_config_variables: {
release_avf_enable_llpvm_changes: {
@@ -458,6 +524,24 @@
},
}
+avb_add_hash_footer {
+ name: "microdroid_kernel_signed",
+ defaults: ["microdroid_kernel_signed_defaults"],
+ filename: "microdroid_kernel",
+ arch: {
+ arm64: {
+ src: ":microdroid_kernel_prebuilts-6.1-arm64",
+ },
+ x86_64: {
+ src: ":microdroid_kernel_prebuilts-6.1-x86_64",
+ },
+ },
+ include_descriptors_from_images: [
+ ":microdroid_initrd_normal_hashdesc",
+ ":microdroid_initrd_debug_hashdesc",
+ ],
+}
+
prebuilt_etc {
name: "microdroid_kernel",
src: ":empty_file",
@@ -471,3 +555,55 @@
},
},
}
+
+///////////////////////////////////////
+// GKI-android14-6.1 modules
+///////////////////////////////////////
+prebuilt_etc {
+ name: "microdroid_gki-android14-6.1.json",
+ src: "microdroid_gki-android14-6.1.json",
+}
+
+avb_add_hash_footer {
+ name: "microdroid_gki-android14-6.1_kernel_signed",
+ defaults: ["microdroid_kernel_signed_defaults"],
+ filename: "microdroid_gki-android14-6.1_kernel",
+ arch: {
+ arm64: {
+ src: ":microdroid_gki_kernel_prebuilts-6.1-arm64",
+ },
+ x86_64: {
+ src: ":microdroid_gki_kernel_prebuilts-6.1-x86_64",
+ },
+ },
+ include_descriptors_from_images: [
+ ":microdroid_gki-android14-6.1_initrd_normal_hashdesc",
+ ":microdroid_gki-android14-6.1_initrd_debug_hashdesc",
+ ],
+}
+
+prebuilt_etc {
+ name: "microdroid_gki-android14-6.1_kernel",
+ src: ":empty_file",
+ relative_install_path: "fs",
+ arch: {
+ arm64: {
+ src: ":microdroid_gki-android14-6.1_kernel_signed",
+ },
+ x86_64: {
+ src: ":microdroid_gki-android14-6.1_kernel_signed",
+ },
+ },
+}
+
+avb_gen_vbmeta_image {
+ name: "microdroid_gki-android14-6.1_initrd_normal_hashdesc",
+ defaults: ["microdroid_initrd_normal_defaults"],
+ src: ":microdroid_gki-android14-6.1_initrd_normal",
+}
+
+avb_gen_vbmeta_image {
+ name: "microdroid_gki-android14-6.1_initrd_debug_hashdesc",
+ defaults: ["microdroid_initrd_debug_defaults"],
+ src: ":microdroid_gki-android14-6.1_initrd_debuggable",
+}
diff --git a/microdroid/init.rc b/microdroid/init.rc
index f5f3f15..4cc0475 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -30,6 +30,11 @@
# We don't directly exec the binary to specify stdio_to_kmsg.
exec_start init_debug_policy
+ # Wait for ueventd to have finished cold boot.
+ # This is needed by prng-seeder (at least).
+ # (In Android this happens inside apexd-bootstrap.)
+ wait_for_prop ro.cold_boot_done true
+
on init
mkdir /mnt/apk 0755 root root
mkdir /mnt/extra-apk 0755 root root
diff --git a/microdroid/initrd/Android.bp b/microdroid/initrd/Android.bp
index de28d8a..ec971fa 100644
--- a/microdroid/initrd/Android.bp
+++ b/microdroid/initrd/Android.bp
@@ -40,6 +40,28 @@
cmd: "cat $(in) > $(out)",
}
+genrule {
+ name: "microdroid_gki-android14-6.1_initrd_gen_arm64",
+ srcs: [
+ ":microdroid_ramdisk",
+ ":microdroid_fstab_ramdisk",
+ ":microdroid_gki_modules-6.1-arm64",
+ ],
+ out: ["microdroid_initrd.img"],
+ cmd: "cat $(in) > $(out)",
+}
+
+genrule {
+ name: "microdroid_gki-android14-6.1_initrd_gen_x86_64",
+ srcs: [
+ ":microdroid_ramdisk",
+ ":microdroid_fstab_ramdisk",
+ ":microdroid_gki_modules-6.1-x86_64",
+ ],
+ out: ["microdroid_initrd.img"],
+ cmd: "cat $(in) > $(out)",
+}
+
// This contains vbmeta hashes & related (boot)configs which are passed to kernel/init
genrule {
name: "microdroid_vbmeta_bootconfig_gen",
@@ -74,6 +96,17 @@
}
genrule {
+ name: "microdroid_gki-android14-6.1_initrd_debuggable_arm64",
+ tools: ["initrd_bootconfig"],
+ srcs: [
+ ":microdroid_gki-android14-6.1_initrd_gen_arm64",
+ ":microdroid_bootconfig_debuggable_src",
+ ] + bootconfigs_arm64,
+ out: ["microdroid_gki-android14-6.1_initrd_debuggable_arm64"],
+ cmd: "$(location initrd_bootconfig) attach --output $(out) $(in)",
+}
+
+genrule {
name: "microdroid_initrd_debuggable_x86_64",
tools: ["initrd_bootconfig"],
srcs: [
@@ -85,6 +118,17 @@
}
genrule {
+ name: "microdroid_gki-android14-6.1_initrd_debuggable_x86_64",
+ tools: ["initrd_bootconfig"],
+ srcs: [
+ ":microdroid_gki-android14-6.1_initrd_gen_x86_64",
+ ":microdroid_bootconfig_debuggable_src",
+ ] + bootconfigs_x86_64,
+ out: ["microdroid_gki-android14-6.1_initrd_debuggable_x86_64"],
+ cmd: "$(location initrd_bootconfig) attach --output $(out) $(in)",
+}
+
+genrule {
name: "microdroid_initrd_normal_arm64",
tools: ["initrd_bootconfig"],
srcs: [
@@ -96,6 +140,17 @@
}
genrule {
+ name: "microdroid_gki-android14-6.1_initrd_normal_arm64",
+ tools: ["initrd_bootconfig"],
+ srcs: [
+ ":microdroid_gki-android14-6.1_initrd_gen_arm64",
+ ":microdroid_bootconfig_normal_src",
+ ] + bootconfigs_arm64,
+ out: ["microdroid_gki-android14-6.1_initrd_normal_arm64"],
+ cmd: "$(location initrd_bootconfig) attach --output $(out) $(in)",
+}
+
+genrule {
name: "microdroid_initrd_normal_x86_64",
tools: ["initrd_bootconfig"],
srcs: [
@@ -106,6 +161,17 @@
cmd: "$(location initrd_bootconfig) attach --output $(out) $(in)",
}
+genrule {
+ name: "microdroid_gki-android14-6.1_initrd_normal_x86_64",
+ tools: ["initrd_bootconfig"],
+ srcs: [
+ ":microdroid_gki-android14-6.1_initrd_gen_x86_64",
+ ":microdroid_bootconfig_normal_src",
+ ] + bootconfigs_x86_64,
+ out: ["microdroid_gki-android14-6.1_initrd_normal_x86_64"],
+ cmd: "$(location initrd_bootconfig) attach --output $(out) $(in)",
+}
+
prebuilt_etc {
name: "microdroid_initrd_debuggable",
// We don't have ramdisk for architectures other than x86_64 & arm64
@@ -122,6 +188,21 @@
}
prebuilt_etc {
+ name: "microdroid_gki-android14-6.1_initrd_debuggable",
+ // We don't have ramdisk for architectures other than x86_64 & arm64
+ src: ":empty_file",
+ arch: {
+ x86_64: {
+ src: ":microdroid_gki-android14-6.1_initrd_debuggable_x86_64",
+ },
+ arm64: {
+ src: ":microdroid_gki-android14-6.1_initrd_debuggable_arm64",
+ },
+ },
+ filename: "microdroid_gki-android14-6.1_initrd_debuggable.img",
+}
+
+prebuilt_etc {
name: "microdroid_initrd_normal",
// We don't have ramdisk for architectures other than x86_64 & arm64
src: ":empty_file",
@@ -135,3 +216,18 @@
},
filename: "microdroid_initrd_normal.img",
}
+
+prebuilt_etc {
+ name: "microdroid_gki-android14-6.1_initrd_normal",
+ // We don't have ramdisk for architectures other than x86_64 & arm64
+ src: ":empty_file",
+ arch: {
+ x86_64: {
+ src: ":microdroid_gki-android14-6.1_initrd_normal_x86_64",
+ },
+ arm64: {
+ src: ":microdroid_gki-android14-6.1_initrd_normal_arm64",
+ },
+ },
+ filename: "microdroid_gki-android14-6.1_initrd_normal.img",
+}
diff --git a/microdroid/microdroid_gki-android14-6.1.json b/microdroid/microdroid_gki-android14-6.1.json
new file mode 100644
index 0000000..9392fae
--- /dev/null
+++ b/microdroid/microdroid_gki-android14-6.1.json
@@ -0,0 +1,20 @@
+{
+ "kernel": "/apex/com.android.virt/etc/fs/microdroid_gki-android14-6.1_kernel",
+ "disks": [
+ {
+ "partitions": [
+ {
+ "label": "vbmeta_a",
+ "path": "/apex/com.android.virt/etc/fs/microdroid_vbmeta.img"
+ },
+ {
+ "label": "super",
+ "path": "/apex/com.android.virt/etc/fs/microdroid_super.img"
+ }
+ ],
+ "writable": false
+ }
+ ],
+ "memory_mib": 256,
+ "platform_version": "~1.0"
+}
diff --git a/microdroid/payload/metadata.proto b/microdroid/payload/metadata.proto
index 6b999af..b03d466 100644
--- a/microdroid/payload/metadata.proto
+++ b/microdroid/payload/metadata.proto
@@ -37,14 +37,18 @@
}
message ApexPayload {
+ // Next id: 9
+
// Required.
string name = 1;
string partition_name = 2;
// Optional.
- // When specified, apex payload should be verified with the public key and root digest.
+ // When specified, apex payload should be verified against these values.
bytes public_key = 3;
bytes root_digest = 4;
+ int64 manifest_version = 7;
+ string manifest_name = 8;
// Required.
// The timestamp in seconds when the APEX was last updated. This should match the value in
diff --git a/microdroid_manager/Android.bp b/microdroid_manager/Android.bp
index 93f49ef..cb3b2aa 100644
--- a/microdroid_manager/Android.bp
+++ b/microdroid_manager/Android.bp
@@ -5,7 +5,10 @@
rust_defaults {
name: "microdroid_manager_defaults",
crate_name: "microdroid_manager",
- defaults: ["avf_build_flags_rust"],
+ defaults: [
+ "avf_build_flags_rust",
+ "secretkeeper_use_latest_hal_aidl_rust",
+ ],
srcs: ["src/main.rs"],
edition: "2021",
prefer_rlib: true,
@@ -23,6 +26,7 @@
"libbinder_rs",
"libbyteorder",
"libcap_rust",
+ "libclient_vm_csr",
"libciborium",
"libcoset",
"libdiced_open_dice",
@@ -42,11 +46,12 @@
"libprotobuf",
"librpcbinder_rs",
"librustutils",
+ "libsecretkeeper_client",
+ "libsecretkeeper_comm_nostd",
"libscopeguard",
"libserde",
"libserde_cbor",
"libserde_json",
- "libservice_vm_comm",
"libthiserror",
"libuuid",
"libvsock",
@@ -72,7 +77,6 @@
defaults: ["microdroid_manager_defaults"],
test_suites: ["general-tests"],
rustlibs: [
- "libhwtrust",
"libtempfile",
],
multilib: {
diff --git a/microdroid_manager/aidl/android/system/virtualization/payload/IVmPayloadService.aidl b/microdroid_manager/aidl/android/system/virtualization/payload/IVmPayloadService.aidl
index 51796f1..4813b35 100644
--- a/microdroid_manager/aidl/android/system/virtualization/payload/IVmPayloadService.aidl
+++ b/microdroid_manager/aidl/android/system/virtualization/payload/IVmPayloadService.aidl
@@ -59,8 +59,8 @@
* Sequence of DER-encoded X.509 certificates that make up the attestation
* key's certificate chain.
*
- * The certificate chain starts with a root certificate and ends with a leaf
- * certificate covering the attested public key.
+ * The certificate chain starts with a leaf certificate covering the attested
+ * public key and ends with a root certificate.
*/
Certificate[] certificateChain;
}
diff --git a/microdroid_manager/src/dice.rs b/microdroid_manager/src/dice.rs
index 6b0775a..a8b88aa 100644
--- a/microdroid_manager/src/dice.rs
+++ b/microdroid_manager/src/dice.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use crate::dice_driver::DiceDriver;
-use crate::instance::ApkData;
+use crate::instance::{ApexData, ApkData};
use crate::{is_debuggable, MicrodroidData};
use anyhow::{bail, Context, Result};
use ciborium::{cbor, Value};
@@ -26,24 +26,23 @@
/// Perform an open DICE derivation for the payload.
pub fn dice_derivation(
dice: DiceDriver,
- verified_data: &MicrodroidData,
+ instance_data: &MicrodroidData,
payload_metadata: &PayloadMetadata,
) -> Result<OwnedDiceArtifacts> {
- let subcomponents = build_subcomponent_list(verified_data);
-
- let config_descriptor = format_payload_config_descriptor(payload_metadata, &subcomponents)
+ let subcomponents = build_subcomponent_list(instance_data);
+ let config_descriptor = format_payload_config_descriptor(payload_metadata, subcomponents)
.context("Building config descriptor")?;
// Calculate compound digests of code and authorities
let mut code_hash_ctx = Sha512::new();
let mut authority_hash_ctx = Sha512::new();
- code_hash_ctx.update(verified_data.apk_data.root_hash.as_ref());
- authority_hash_ctx.update(verified_data.apk_data.pubkey.as_ref());
- for extra_apk in &verified_data.extra_apks_data {
+ code_hash_ctx.update(instance_data.apk_data.root_hash.as_ref());
+ authority_hash_ctx.update(instance_data.apk_data.cert_hash.as_ref());
+ for extra_apk in &instance_data.extra_apks_data {
code_hash_ctx.update(extra_apk.root_hash.as_ref());
- authority_hash_ctx.update(extra_apk.pubkey.as_ref());
+ authority_hash_ctx.update(extra_apk.cert_hash.as_ref());
}
- for apex in &verified_data.apex_data {
+ for apex in &instance_data.apex_data {
code_hash_ctx.update(apex.root_digest.as_ref());
authority_hash_ctx.update(apex.public_key.as_ref());
}
@@ -54,55 +53,65 @@
let debuggable = is_debuggable()?;
// Send the details to diced
- let hidden = verified_data.salt.clone().try_into().unwrap();
+ let hidden = instance_data.salt.clone().try_into().unwrap();
dice.derive(code_hash, &config_descriptor, authority_hash, debuggable, hidden)
}
-struct Subcomponent<'a> {
+struct Subcomponent {
name: String,
version: u64,
- code_hash: &'a [u8],
- authority_hash: Box<[u8]>,
+ code_hash: Vec<u8>,
+ authority_hash: Vec<u8>,
}
-impl<'a> Subcomponent<'a> {
- fn to_value(&self) -> Result<Value> {
+impl Subcomponent {
+ fn into_value(self) -> Result<Value> {
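+        // Wrap the hashes in Value::Bytes so that they are encoded as CBOR byte
+        // strings (bstr) rather than as arrays of integers.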
Ok(cbor!({
1 => self.name,
2 => self.version,
- 3 => self.code_hash,
- 4 => self.authority_hash
+ 3 => Value::Bytes(self.code_hash),
+ 4 => Value::Bytes(self.authority_hash),
})?)
}
- fn for_apk(apk: &'a ApkData) -> Self {
+ fn for_apk(apk: &ApkData) -> Self {
Self {
name: format!("apk:{}", apk.package_name),
version: apk.version_code,
- code_hash: &apk.root_hash,
- authority_hash:
- // TODO(b/305925597): Hash the certificate not the pubkey
- Box::new(sha512(&apk.pubkey)),
+ code_hash: apk.root_hash.clone(),
+ authority_hash: apk.cert_hash.clone(),
+ }
+ }
+
+ fn for_apex(apex: &ApexData) -> Self {
+ // Note that this is only reachable if the dice_changes flag is on, in which case
+ // the manifest data will always be present.
+ Self {
+ name: format!("apex:{}", apex.manifest_name.as_ref().unwrap()),
+ version: apex.manifest_version.unwrap() as u64,
+ code_hash: apex.root_digest.clone(),
+ authority_hash: sha512(&apex.public_key).to_vec(),
}
}
}
-fn build_subcomponent_list(verified_data: &MicrodroidData) -> Vec<Subcomponent> {
+fn build_subcomponent_list(instance_data: &MicrodroidData) -> Vec<Subcomponent> {
if !cfg!(dice_changes) {
return vec![];
}
- once(&verified_data.apk_data)
- .chain(&verified_data.extra_apks_data)
- .map(Subcomponent::for_apk)
- .collect()
+ let apks = once(&instance_data.apk_data)
+ .chain(&instance_data.extra_apks_data)
+ .map(Subcomponent::for_apk);
+ let apexes = instance_data.apex_data.iter().map(Subcomponent::for_apex);
+ apks.chain(apexes).collect()
}
-// Returns a configuration descriptor of the given payload. See vm_config.cddl for a definition
+// Returns a configuration descriptor of the given payload. See vm_config.cddl for the definition
// of the format.
fn format_payload_config_descriptor(
payload: &PayloadMetadata,
- subcomponents: &[Subcomponent],
+ subcomponents: Vec<Subcomponent>,
) -> Result<Vec<u8>> {
let mut map = Vec::new();
map.push((cbor!(-70002)?, cbor!("Microdroid payload")?));
@@ -118,7 +127,7 @@
if !subcomponents.is_empty() {
let values =
- subcomponents.iter().map(Subcomponent::to_value).collect::<Result<Vec<_>>>()?;
+ subcomponents.into_iter().map(Subcomponent::into_value).collect::<Result<Vec<_>>>()?;
map.push((cbor!(-71002)?, cbor!(values)?));
}
@@ -130,7 +139,7 @@
use super::*;
use microdroid_metadata::PayloadConfig;
- const NO_SUBCOMPONENTS: [Subcomponent; 0] = [];
+ const NO_SUBCOMPONENTS: Vec<Subcomponent> = Vec::new();
fn assert_eq_bytes(expected: &[u8], actual: &[u8]) {
assert_eq!(
@@ -146,7 +155,7 @@
fn payload_metadata_with_path_formats_correctly() -> Result<()> {
let payload_metadata = PayloadMetadata::ConfigPath("/config_path".to_string());
let config_descriptor =
- format_payload_config_descriptor(&payload_metadata, &NO_SUBCOMPONENTS)?;
+ format_payload_config_descriptor(&payload_metadata, NO_SUBCOMPONENTS)?;
static EXPECTED_CONFIG_DESCRIPTOR: &[u8] = &[
0xa2, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72,
0x6f, 0x69, 0x64, 0x20, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x3a, 0x00, 0x01,
@@ -165,7 +174,7 @@
};
let payload_metadata = PayloadMetadata::Config(payload_config);
let config_descriptor =
- format_payload_config_descriptor(&payload_metadata, &NO_SUBCOMPONENTS)?;
+ format_payload_config_descriptor(&payload_metadata, NO_SUBCOMPONENTS)?;
static EXPECTED_CONFIG_DESCRIPTOR: &[u8] = &[
0xa2, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72,
0x6f, 0x69, 0x64, 0x20, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x3a, 0x00, 0x01,
@@ -179,31 +188,30 @@
#[test]
fn payload_metadata_with_subcomponents_formats_correctly() -> Result<()> {
let payload_metadata = PayloadMetadata::ConfigPath("/config_path".to_string());
- let subcomponents = [
+ let subcomponents = vec![
Subcomponent {
name: "apk1".to_string(),
version: 1,
- code_hash: &[42u8],
- authority_hash: Box::new([17u8]),
+ code_hash: vec![42, 43],
+ authority_hash: vec![17],
},
Subcomponent {
name: "apk2".to_string(),
version: 0x1000_0000_0001,
- code_hash: &[43u8],
- authority_hash: Box::new([19u8]),
+ code_hash: vec![43],
+ authority_hash: vec![19, 20],
},
];
- let config_descriptor =
- format_payload_config_descriptor(&payload_metadata, &subcomponents)?;
+ let config_descriptor = format_payload_config_descriptor(&payload_metadata, subcomponents)?;
// Verified using cbor.me.
static EXPECTED_CONFIG_DESCRIPTOR: &[u8] = &[
0xa3, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72,
0x6f, 0x69, 0x64, 0x20, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x3a, 0x00, 0x01,
0x15, 0x57, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x70, 0x61, 0x74,
0x68, 0x3a, 0x00, 0x01, 0x15, 0x59, 0x82, 0xa4, 0x01, 0x64, 0x61, 0x70, 0x6b, 0x31,
- 0x02, 0x01, 0x03, 0x81, 0x18, 0x2a, 0x04, 0x81, 0x11, 0xa4, 0x01, 0x64, 0x61, 0x70,
- 0x6b, 0x32, 0x02, 0x1b, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x81,
- 0x18, 0x2b, 0x04, 0x81, 0x13,
+ 0x02, 0x01, 0x03, 0x42, 0x2a, 0x2b, 0x04, 0x41, 0x11, 0xa4, 0x01, 0x64, 0x61, 0x70,
+ 0x6b, 0x32, 0x02, 0x1b, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x41,
+ 0x2b, 0x04, 0x42, 0x13, 0x14,
];
assert_eq_bytes(EXPECTED_CONFIG_DESCRIPTOR, &config_descriptor);
Ok(())
diff --git a/microdroid_manager/src/instance.rs b/microdroid_manager/src/instance.rs
index 6c9e245..7a9f0e0 100644
--- a/microdroid_manager/src/instance.rs
+++ b/microdroid_manager/src/instance.rs
@@ -287,23 +287,23 @@
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct ApkData {
- pub root_hash: Box<RootHash>,
- pub pubkey: Box<[u8]>,
+ pub root_hash: Vec<u8>,
+ pub cert_hash: Vec<u8>,
pub package_name: String,
pub version_code: u64,
}
impl ApkData {
pub fn root_hash_eq(&self, root_hash: &[u8]) -> bool {
- self.root_hash.as_ref() == root_hash
+ self.root_hash == root_hash
}
}
-pub type RootHash = [u8];
-
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct ApexData {
pub name: String,
+ pub manifest_name: Option<String>,
+ pub manifest_version: Option<i64>,
pub public_key: Vec<u8>,
pub root_digest: Vec<u8>,
pub last_update_seconds: u64,
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index 7ba54f8..c94a937 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -34,7 +34,7 @@
use crate::dice::dice_derivation;
use crate::dice_driver::DiceDriver;
-use crate::instance::{ApexData, InstanceDisk, MicrodroidData};
+use crate::instance::{InstanceDisk, MicrodroidData};
use crate::verify::verify_payload;
use crate::vm_payload_service::register_vm_payload_service;
use anyhow::{anyhow, bail, ensure, Context, Error, Result};
@@ -42,10 +42,10 @@
use keystore2_crypto::ZVec;
use libc::VMADDR_CID_HOST;
use log::{error, info};
-use microdroid_metadata::{write_metadata, PayloadMetadata};
+use microdroid_metadata::PayloadMetadata;
use microdroid_payload_config::{OsConfig, Task, TaskType, VmPayloadConfig};
use nix::sys::signal::Signal;
-use payload::{load_metadata, to_metadata};
+use payload::load_metadata;
use rpcbinder::RpcSession;
use rustutils::sockets::android_get_control_socket;
use rustutils::system_properties;
@@ -105,7 +105,6 @@
MicrodroidError::PayloadInvalidConfig(msg) => {
(ErrorCode::PAYLOAD_INVALID_CONFIG, msg.to_string())
}
-
// Connection failure won't be reported to VS; return the default value
MicrodroidError::FailedToConnectToVirtualizationService(msg) => {
(ErrorCode::UNKNOWN, msg.to_string())
@@ -143,15 +142,6 @@
Ok(())
}
-fn get_vms_rpc_binder() -> Result<Strong<dyn IVirtualMachineService>> {
- // The host is running a VirtualMachineService for this VM on a port equal
- // to the CID of this VM.
- let port = vsock::get_local_cid().context("Could not determine local CID")?;
- RpcSession::new()
- .setup_vsock_client(VMADDR_CID_HOST, port)
- .context("Could not connect to IVirtualMachineService")
-}
-
fn main() -> Result<()> {
// If debuggable, print full backtrace to console log with stdio_to_kmsg
if is_debuggable()? {
@@ -174,25 +164,6 @@
})
}
-/// Prepares a socket file descriptor for the vm payload service.
-///
-/// # Safety
-///
-/// The caller must ensure that this function is the only place that claims ownership
-/// of the file descriptor and it is called only once.
-unsafe fn prepare_vm_payload_service_socket() -> Result<OwnedFd> {
- let raw_fd = android_get_control_socket(VM_PAYLOAD_SERVICE_SOCKET_NAME)?;
-
- // Creating OwnedFd for stdio FDs is not safe.
- if [libc::STDIN_FILENO, libc::STDOUT_FILENO, libc::STDERR_FILENO].contains(&raw_fd) {
- bail!("File descriptor {raw_fd} is standard I/O descriptor");
- }
- // SAFETY: Initializing OwnedFd for a RawFd created by the init.
- // We checked that the integer value corresponds to a valid FD and that the caller
- // ensures that this is the only place to claim its ownership.
- Ok(unsafe { OwnedFd::from_raw_fd(raw_fd) })
-}
-
fn try_main() -> Result<()> {
android_logger::init_once(
android_logger::Config::default()
@@ -245,6 +216,170 @@
}
}
+fn try_run_payload(
+ service: &Strong<dyn IVirtualMachineService>,
+ vm_payload_service_fd: OwnedFd,
+) -> Result<i32> {
+ let metadata = load_metadata().context("Failed to load payload metadata")?;
+ let dice = DiceDriver::new(Path::new("/dev/open-dice0")).context("Failed to load DICE")?;
+
+ let mut instance = InstanceDisk::new().context("Failed to load instance.img")?;
+ let saved_data =
+ instance.read_microdroid_data(&dice).context("Failed to read identity data")?;
+
+ if is_strict_boot() {
+ // Provisioning must happen on the first boot and never again.
+ if is_new_instance() {
+ ensure!(
+ saved_data.is_none(),
+ MicrodroidError::PayloadInvalidConfig(
+ "Found instance data on first boot.".to_string()
+ )
+ );
+ } else {
+ ensure!(
+ saved_data.is_some(),
+ MicrodroidError::PayloadInvalidConfig("Instance data not found.".to_string())
+ );
+ };
+ }
+
+ // Verify the payload before using it.
+ let extracted_data = verify_payload(&metadata, saved_data.as_ref())
+ .context("Payload verification failed")
+ .map_err(|e| MicrodroidError::PayloadVerificationFailed(e.to_string()))?;
+
+ // In case identity is ignored (by debug policy), we should reuse existing payload data, even
+ // when the payload is changed. This is to keep the derived secret same as before.
+ let instance_data = if let Some(saved_data) = saved_data {
+ if !is_verified_boot() {
+ if saved_data != extracted_data {
+ info!("Detected an update of the payload, but continue (regarding debug policy)")
+ }
+ } else {
+ ensure!(
+ saved_data == extracted_data,
+ MicrodroidError::PayloadChanged(String::from(
+ "Detected an update of the payload which isn't supported yet."
+ ))
+ );
+ info!("Saved data is verified.");
+ }
+ saved_data
+ } else {
+ info!("Saving verified data.");
+ instance
+ .write_microdroid_data(&extracted_data, &dice)
+ .context("Failed to write identity data")?;
+ extracted_data
+ };
+
+ let payload_metadata = metadata.payload.ok_or_else(|| {
+ MicrodroidError::PayloadInvalidConfig("No payload config in metadata".to_string())
+ })?;
+
+    // To minimize the exposure to untrusted data, derive the DICE profile as early as possible.
+ info!("DICE derivation for payload");
+ let dice_artifacts = dice_derivation(dice, &instance_data, &payload_metadata)?;
+ let vm_secret =
+ VmSecret::new(dice_artifacts, service).context("Failed to create VM secrets")?;
+
+ if cfg!(dice_changes) {
+ // Now that the DICE derivation is done, it's ok to allow payload code to run.
+
+ // Start apexd to activate APEXes. This may allow code within them to run.
+ system_properties::write("ctl.start", "apexd-vm")?;
+ }
+
+ // Run encryptedstore binary to prepare the storage
+ let encryptedstore_child = if Path::new(ENCRYPTEDSTORE_BACKING_DEVICE).exists() {
+ info!("Preparing encryptedstore ...");
+ Some(prepare_encryptedstore(&vm_secret).context("encryptedstore run")?)
+ } else {
+ None
+ };
+
+ let mut zipfuse = Zipfuse::default();
+
+ // Before reading a file from the APK, start zipfuse
+ zipfuse.mount(
+ MountForExec::Allowed,
+ "fscontext=u:object_r:zipfusefs:s0,context=u:object_r:system_file:s0",
+ Path::new(verify::DM_MOUNTED_APK_PATH),
+ Path::new(VM_APK_CONTENTS_PATH),
+ "microdroid_manager.apk.mounted".to_owned(),
+ )?;
+
+ // Restricted APIs are only allowed to be used by platform or test components. Infer this from
+ // the use of a VM config file since those can only be used by platform and test components.
+ let allow_restricted_apis = match payload_metadata {
+ PayloadMetadata::ConfigPath(_) => true,
+ PayloadMetadata::Config(_) => false,
+ _ => false, // default is false for safety
+ };
+
+ let config = load_config(payload_metadata).context("Failed to load payload metadata")?;
+
+ let task = config
+ .task
+ .as_ref()
+ .ok_or_else(|| MicrodroidError::PayloadInvalidConfig("No task in VM config".to_string()))?;
+
+ ensure!(
+ config.extra_apks.len() == instance_data.extra_apks_data.len(),
+ "config expects {} extra apks, but found {}",
+ config.extra_apks.len(),
+ instance_data.extra_apks_data.len()
+ );
+ mount_extra_apks(&config, &mut zipfuse)?;
+
+ register_vm_payload_service(
+ allow_restricted_apis,
+ service.clone(),
+ vm_secret,
+ vm_payload_service_fd,
+ )?;
+
+ // Set export_tombstones if enabled
+ if should_export_tombstones(&config) {
+ // This property is read by tombstone_handler.
+ system_properties::write("microdroid_manager.export_tombstones.enabled", "1")
+ .context("set microdroid_manager.export_tombstones.enabled")?;
+ }
+
+    // Wait until apex config is done (e.g. linker configuration for apexes).
+ wait_for_property_true(APEX_CONFIG_DONE_PROP).context("Failed waiting for apex config done")?;
+
+    // Trigger init post-fs-data. This will start authfs if we ask it to.
+ if config.enable_authfs {
+ system_properties::write("microdroid_manager.authfs.enabled", "1")
+ .context("failed to write microdroid_manager.authfs.enabled")?;
+ }
+ system_properties::write("microdroid_manager.config_done", "1")
+ .context("failed to write microdroid_manager.config_done")?;
+
+ // Wait until zipfuse has mounted the APKs so we can access the payload
+ zipfuse.wait_until_done()?;
+
+ // Wait for encryptedstore to finish mounting the storage (if enabled) before setting
+    // microdroid_manager.init_done, because init stops ueventd after that and
+    // encryptedstore requires ueventd.
+ if let Some(mut child) = encryptedstore_child {
+ let exitcode = child.wait().context("Wait for encryptedstore child")?;
+ ensure!(exitcode.success(), "Unable to prepare encrypted storage. Exitcode={}", exitcode);
+ }
+
+ // Wait for init to have finished booting.
+ wait_for_property_true("dev.bootcomplete").context("failed waiting for dev.bootcomplete")?;
+
+ // And then tell it we're done so unnecessary services can be shut down.
+ system_properties::write("microdroid_manager.init_done", "1")
+ .context("set microdroid_manager.init_done")?;
+
+ info!("boot completed, time to run payload");
+ exec_task(task, service).context("Failed to run payload")
+}
+
fn post_payload_work() -> Result<()> {
// Sync the encrypted storage filesystem (flushes the filesystem caches).
if Path::new(ENCRYPTEDSTORE_BACKING_DEVICE).exists() {
@@ -270,6 +405,55 @@
Ok(())
}
+fn mount_extra_apks(config: &VmPayloadConfig, zipfuse: &mut Zipfuse) -> Result<()> {
+ // For now, only the number of APKs is important, as the mount point and dm-verity name are fixed
+ for i in 0..config.extra_apks.len() {
+ let mount_dir = format!("/mnt/extra-apk/{i}");
+ create_dir(Path::new(&mount_dir)).context("Failed to create mount dir for extra apks")?;
+
+ let mount_for_exec =
+ if cfg!(multi_tenant) { MountForExec::Allowed } else { MountForExec::Disallowed };
+ // These run asynchronously in parallel - we wait later for them to complete.
+ zipfuse.mount(
+ mount_for_exec,
+ "fscontext=u:object_r:zipfusefs:s0,context=u:object_r:extra_apk_file:s0",
+ Path::new(&format!("/dev/block/mapper/extra-apk-{i}")),
+ Path::new(&mount_dir),
+ format!("microdroid_manager.extra_apk.mounted.{i}"),
+ )?;
+ }
+
+ Ok(())
+}
+
+fn get_vms_rpc_binder() -> Result<Strong<dyn IVirtualMachineService>> {
+ // The host is running a VirtualMachineService for this VM on a port equal
+ // to the CID of this VM.
+ let port = vsock::get_local_cid().context("Could not determine local CID")?;
+ RpcSession::new()
+ .setup_vsock_client(VMADDR_CID_HOST, port)
+ .context("Could not connect to IVirtualMachineService")
+}
+
+/// Prepares a socket file descriptor for the vm payload service.
+///
+/// # Safety
+///
+/// The caller must ensure that this function is the only place that claims ownership
+/// of the file descriptor and it is called only once.
+unsafe fn prepare_vm_payload_service_socket() -> Result<OwnedFd> {
+ let raw_fd = android_get_control_socket(VM_PAYLOAD_SERVICE_SOCKET_NAME)?;
+
+ // Creating OwnedFd for stdio FDs is not safe.
+ if [libc::STDIN_FILENO, libc::STDOUT_FILENO, libc::STDERR_FILENO].contains(&raw_fd) {
+ bail!("File descriptor {raw_fd} is standard I/O descriptor");
+ }
+ // SAFETY: Initializing OwnedFd for a RawFd created by the init.
+ // We checked that the integer value corresponds to a valid FD and that the caller
+ // ensures that this is the only place to claim its ownership.
+ Ok(unsafe { OwnedFd::from_raw_fd(raw_fd) })
+}
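A brief usage sketch (not part of this change): the safety contract above is satisfied by claiming the socket exactly once, early in the process, before handing the OwnedFd on. The entry-point name `run_payload` below is a placeholder, not code from this patch:

    fn main() -> anyhow::Result<()> {
        // SAFETY: This is the only call site that claims the control socket, and main()
        // runs exactly once per process, so ownership of the fd is taken only here.
        let vm_payload_service_fd = unsafe { prepare_vm_payload_service_socket()? };
        let service = get_vms_rpc_binder()?;
        // `run_payload` stands in for whatever drives the payload with these handles.
        run_payload(&service, vm_payload_service_fd)
    }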
+
fn is_strict_boot() -> bool {
Path::new(AVF_STRICT_BOOT).exists()
}
@@ -310,153 +494,6 @@
Ok(Some(u32::from_be_bytes(log) == 1))
}
-fn try_run_payload(
- service: &Strong<dyn IVirtualMachineService>,
- vm_payload_service_fd: OwnedFd,
-) -> Result<i32> {
- let metadata = load_metadata().context("Failed to load payload metadata")?;
- let dice = DiceDriver::new(Path::new("/dev/open-dice0")).context("Failed to load DICE")?;
-
- let mut instance = InstanceDisk::new().context("Failed to load instance.img")?;
- let saved_data =
- instance.read_microdroid_data(&dice).context("Failed to read identity data")?;
-
- if is_strict_boot() {
- // Provisioning must happen on the first boot and never again.
- if is_new_instance() {
- ensure!(
- saved_data.is_none(),
- MicrodroidError::PayloadInvalidConfig(
- "Found instance data on first boot.".to_string()
- )
- );
- } else {
- ensure!(
- saved_data.is_some(),
- MicrodroidError::PayloadInvalidConfig("Instance data not found.".to_string())
- );
- };
- }
-
- // Verify the payload before using it.
- let verified_data = verify_payload(&metadata, saved_data.as_ref())
- .context("Payload verification failed")
- .map_err(|e| MicrodroidError::PayloadVerificationFailed(e.to_string()))?;
-
- // In case identity is ignored (by debug policy), we should reuse existing payload data, even
- // when the payload is changed. This is to keep the derived secret same as before.
- let verified_data = if let Some(saved_data) = saved_data {
- if !is_verified_boot() {
- if saved_data != verified_data {
- info!("Detected an update of the payload, but continue (regarding debug policy)")
- }
- } else {
- ensure!(
- saved_data == verified_data,
- MicrodroidError::PayloadChanged(String::from(
- "Detected an update of the payload which isn't supported yet."
- ))
- );
- info!("Saved data is verified.");
- }
- saved_data
- } else {
- info!("Saving verified data.");
- instance
- .write_microdroid_data(&verified_data, &dice)
- .context("Failed to write identity data")?;
- verified_data
- };
-
- let payload_metadata = metadata.payload.ok_or_else(|| {
- MicrodroidError::PayloadInvalidConfig("No payload config in metadata".to_string())
- })?;
-
- // To minimize the exposure to untrusted data, derive dice profile as soon as possible.
- info!("DICE derivation for payload");
- let dice_artifacts = dice_derivation(dice, &verified_data, &payload_metadata)?;
- let vm_secret = VmSecret::new(dice_artifacts).context("Failed to create VM secrets")?;
-
- // Run encryptedstore binary to prepare the storage
- let encryptedstore_child = if Path::new(ENCRYPTEDSTORE_BACKING_DEVICE).exists() {
- info!("Preparing encryptedstore ...");
- Some(prepare_encryptedstore(&vm_secret).context("encryptedstore run")?)
- } else {
- None
- };
-
- let mut zipfuse = Zipfuse::default();
-
- // Before reading a file from the APK, start zipfuse
- zipfuse.mount(
- MountForExec::Allowed,
- "fscontext=u:object_r:zipfusefs:s0,context=u:object_r:system_file:s0",
- Path::new(verify::DM_MOUNTED_APK_PATH),
- Path::new(VM_APK_CONTENTS_PATH),
- "microdroid_manager.apk.mounted".to_owned(),
- )?;
-
- // Restricted APIs are only allowed to be used by platform or test components. Infer this from
- // the use of a VM config file since those can only be used by platform and test components.
- let allow_restricted_apis = match payload_metadata {
- PayloadMetadata::ConfigPath(_) => true,
- PayloadMetadata::Config(_) => false,
- _ => false, // default is false for safety
- };
-
- let config = load_config(payload_metadata).context("Failed to load payload metadata")?;
-
- let task = config
- .task
- .as_ref()
- .ok_or_else(|| MicrodroidError::PayloadInvalidConfig("No task in VM config".to_string()))?;
-
- ensure!(
- config.extra_apks.len() == verified_data.extra_apks_data.len(),
- "config expects {} extra apks, but found {}",
- config.extra_apks.len(),
- verified_data.extra_apks_data.len()
- );
- mount_extra_apks(&config, &mut zipfuse)?;
-
- // Wait until apex config is done. (e.g. linker configuration for apexes)
- wait_for_apex_config_done()?;
-
- setup_config_sysprops(&config)?;
-
- // Set export_tombstones if enabled
- if should_export_tombstones(&config) {
- // This property is read by tombstone_handler.
- system_properties::write("microdroid_manager.export_tombstones.enabled", "1")
- .context("set microdroid_manager.export_tombstones.enabled")?;
- }
-
- // Wait until zipfuse has mounted the APKs so we can access the payload
- zipfuse.wait_until_done()?;
-
- register_vm_payload_service(
- allow_restricted_apis,
- service.clone(),
- vm_secret,
- vm_payload_service_fd,
- )?;
-
- // Wait for encryptedstore to finish mounting the storage (if enabled) before setting
- // microdroid_manager.init_done. Reason is init stops uneventd after that.
- // Encryptedstore, however requires ueventd
- if let Some(mut child) = encryptedstore_child {
- let exitcode = child.wait().context("Wait for encryptedstore child")?;
- ensure!(exitcode.success(), "Unable to prepare encrypted storage. Exitcode={}", exitcode);
- }
-
- wait_for_property_true("dev.bootcomplete").context("failed waiting for dev.bootcomplete")?;
- system_properties::write("microdroid_manager.init_done", "1")
- .context("set microdroid_manager.init_done")?;
-
- info!("boot completed, time to run payload");
- exec_task(task, service).context("Failed to run payload")
-}
-
enum MountForExec {
Allowed,
Disallowed,
@@ -504,65 +541,6 @@
}
}
-fn write_apex_payload_data(
- saved_data: Option<&MicrodroidData>,
- apex_data_from_payload: &[ApexData],
-) -> Result<()> {
- if let Some(saved_apex_data) = saved_data.map(|d| &d.apex_data) {
- // We don't support APEX updates. (assuming that update will change root digest)
- ensure!(
- saved_apex_data == apex_data_from_payload,
- MicrodroidError::PayloadChanged(String::from("APEXes have changed."))
- );
- let apex_metadata = to_metadata(apex_data_from_payload);
- // Pass metadata(with public keys and root digests) to apexd so that it uses the passed
- // metadata instead of the default one (/dev/block/by-name/payload-metadata)
- OpenOptions::new()
- .create_new(true)
- .write(true)
- .open("/apex/vm-payload-metadata")
- .context("Failed to open /apex/vm-payload-metadata")
- .and_then(|f| write_metadata(&apex_metadata, f))?;
- }
- Ok(())
-}
-
-fn mount_extra_apks(config: &VmPayloadConfig, zipfuse: &mut Zipfuse) -> Result<()> {
- // For now, only the number of apks is important, as the mount point and dm-verity name is fixed
- for i in 0..config.extra_apks.len() {
- let mount_dir = format!("/mnt/extra-apk/{i}");
- create_dir(Path::new(&mount_dir)).context("Failed to create mount dir for extra apks")?;
-
- let mount_for_exec =
- if cfg!(multi_tenant) { MountForExec::Allowed } else { MountForExec::Disallowed };
- // These run asynchronously in parallel - we wait later for them to complete.
- zipfuse.mount(
- mount_for_exec,
- "fscontext=u:object_r:zipfusefs:s0,context=u:object_r:extra_apk_file:s0",
- Path::new(&format!("/dev/block/mapper/extra-apk-{i}")),
- Path::new(&mount_dir),
- format!("microdroid_manager.extra_apk.mounted.{i}"),
- )?;
- }
-
- Ok(())
-}
-
-fn setup_config_sysprops(config: &VmPayloadConfig) -> Result<()> {
- if config.enable_authfs {
- system_properties::write("microdroid_manager.authfs.enabled", "1")
- .context("failed to write microdroid_manager.authfs.enabled")?;
- }
- system_properties::write("microdroid_manager.config_done", "1")
- .context("failed to write microdroid_manager.config_done")?;
- Ok(())
-}
-
-// Waits until linker config is generated
-fn wait_for_apex_config_done() -> Result<()> {
- wait_for_property_true(APEX_CONFIG_DONE_PROP).context("Failed waiting for apex config done")
-}
-
fn wait_for_property_true(property_name: &str) -> Result<()> {
let mut prop = PropertyWatcher::new(property_name)?;
loop {
diff --git a/microdroid_manager/src/payload.rs b/microdroid_manager/src/payload.rs
index a553ce4..98fe24b 100644
--- a/microdroid_manager/src/payload.rs
+++ b/microdroid_manager/src/payload.rs
@@ -17,8 +17,7 @@
use crate::instance::ApexData;
use crate::ioutil::wait_for_file;
use anyhow::Result;
-use apexutil::verify;
-use log::info;
+use log::{info, warn};
use microdroid_metadata::{read_metadata, ApexPayload, Metadata};
use std::time::Duration;
@@ -38,13 +37,19 @@
.apexes
.iter()
.map(|apex| {
- let name = apex.name.clone();
let apex_path = format!("/dev/block/by-name/{}", apex.partition_name);
- let result = verify(&apex_path)?;
+ let extracted = apexutil::verify(&apex_path)?;
+ if let Some(manifest_name) = &extracted.name {
+ if &apex.name != manifest_name {
+ warn!("Apex named {} is named {} in its manifest", apex.name, manifest_name);
+ }
+ };
Ok(ApexData {
- name,
- public_key: result.public_key,
- root_digest: result.root_digest,
+ name: apex.name.clone(),
+ manifest_name: extracted.name,
+ manifest_version: extracted.version,
+ public_key: extracted.public_key,
+ root_digest: extracted.root_digest,
last_update_seconds: apex.last_update_seconds,
is_factory: apex.is_factory,
})
@@ -61,6 +66,8 @@
name: data.name.clone(),
public_key: data.public_key.clone(),
root_digest: data.root_digest.clone(),
+ manifest_name: data.manifest_name.clone().unwrap_or_default(),
+ manifest_version: data.manifest_version.unwrap_or_default(),
last_update_seconds: data.last_update_seconds,
is_factory: data.is_factory,
..Default::default()
diff --git a/microdroid_manager/src/verify.rs b/microdroid_manager/src/verify.rs
index 22f3414..445c1ae 100644
--- a/microdroid_manager/src/verify.rs
+++ b/microdroid_manager/src/verify.rs
@@ -12,18 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use crate::instance::{ApkData, MicrodroidData, RootHash};
-use crate::payload::get_apex_data_from_payload;
-use crate::{is_strict_boot, is_verified_boot, write_apex_payload_data, MicrodroidError};
+use crate::instance::{ApexData, ApkData, MicrodroidData};
+use crate::payload::{get_apex_data_from_payload, to_metadata};
+use crate::{is_strict_boot, is_verified_boot, MicrodroidError};
use anyhow::{anyhow, ensure, Context, Result};
use apkmanifest::get_manifest_info;
-use apkverify::{get_public_key_der, verify, V4Signature};
+use apkverify::{extract_signed_data, verify, V4Signature};
use glob::glob;
use itertools::sorted;
use log::{info, warn};
-use microdroid_metadata::Metadata;
+use microdroid_metadata::{write_metadata, Metadata};
+use openssl::sha::sha512;
use rand::Fill;
use rustutils::system_properties;
+use std::fs::OpenOptions;
use std::path::Path;
use std::process::{Child, Command};
use std::str;
@@ -134,8 +136,10 @@
write_apex_payload_data(saved_data, &apex_data_from_payload)?;
}
- // Start apexd to activate APEXes
- system_properties::write("ctl.start", "apexd-vm")?;
+ if cfg!(not(dice_changes)) {
+ // Start apexd to activate APEXes
+ system_properties::write("ctl.start", "apexd-vm")?;
+ }
// TODO(inseob): add timeout
apkdmverity_child.wait()?;
@@ -187,10 +191,10 @@
fn get_data_from_apk(
apk_path: &str,
- root_hash: Box<RootHash>,
+ root_hash: Box<[u8]>,
root_hash_trustful: bool,
) -> Result<ApkData> {
- let pubkey = get_public_key_from_apk(apk_path, root_hash_trustful)?;
+ let cert_hash = get_cert_hash_from_apk(apk_path, root_hash_trustful)?.to_vec();
// Read package name etc from the APK manifest. In the unlikely event that they aren't present
// we use the default values. We simply put these values in the DICE node for the payload, and
// users of that can decide how to handle blank information - there's no reason for us
@@ -200,28 +204,52 @@
.unwrap_or_default();
Ok(ApkData {
- root_hash,
- pubkey,
+ root_hash: root_hash.into(),
+ cert_hash,
package_name: manifest_info.package,
version_code: manifest_info.version_code,
})
}
-fn get_apk_root_hash_from_idsig<P: AsRef<Path>>(idsig_path: P) -> Result<Box<RootHash>> {
+fn write_apex_payload_data(
+ saved_data: Option<&MicrodroidData>,
+ apex_data_from_payload: &[ApexData],
+) -> Result<()> {
+ if let Some(saved_apex_data) = saved_data.map(|d| &d.apex_data) {
+ // We don't support APEX updates. (assuming that an update will change the root digest)
+ ensure!(
+ saved_apex_data == apex_data_from_payload,
+ MicrodroidError::PayloadChanged(String::from("APEXes have changed."))
+ );
+ let apex_metadata = to_metadata(apex_data_from_payload);
+ // Pass metadata (with public keys and root digests) to apexd so that it uses the passed
+ // metadata instead of the default one (/dev/block/by-name/payload-metadata)
+ OpenOptions::new()
+ .create_new(true)
+ .write(true)
+ .open("/apex/vm-payload-metadata")
+ .context("Failed to open /apex/vm-payload-metadata")
+ .and_then(|f| write_metadata(&apex_metadata, f))?;
+ }
+ Ok(())
+}
+
+fn get_apk_root_hash_from_idsig<P: AsRef<Path>>(idsig_path: P) -> Result<Box<[u8]>> {
Ok(V4Signature::from_idsig_path(idsig_path)?.hashing_info.raw_root_hash)
}
-fn get_public_key_from_apk(apk: &str, root_hash_trustful: bool) -> Result<Box<[u8]>> {
+fn get_cert_hash_from_apk(apk: &str, root_hash_trustful: bool) -> Result<[u8; 64]> {
let current_sdk = get_current_sdk()?;
- if !root_hash_trustful {
+ let signed_data = if !root_hash_trustful {
verify(apk, current_sdk).context(MicrodroidError::PayloadVerificationFailed(format!(
"failed to verify {}",
apk
)))
} else {
- get_public_key_der(apk, current_sdk)
- }
+ extract_signed_data(apk, current_sdk)
+ }?;
+ Ok(sha512(signed_data.first_certificate_der()?))
}
fn get_current_sdk() -> Result<u32> {
@@ -234,7 +262,7 @@
apk: &'a str,
idsig: &'a str,
name: &'a str,
- saved_root_hash: Option<&'a RootHash>,
+ saved_root_hash: Option<&'a [u8]>,
}
fn run_apkdmverity(args: &[ApkDmverityArgument]) -> Result<Child> {
diff --git a/microdroid_manager/src/vm_config.cddl b/microdroid_manager/src/vm_config.cddl
index 052262d..8508e8f 100644
--- a/microdroid_manager/src/vm_config.cddl
+++ b/microdroid_manager/src/vm_config.cddl
@@ -11,6 +11,10 @@
; The configuration descriptor node for a Microdroid VM, with extensions to describe the contents
; of the VM payload.
+; The subcomponents describe the APKs and then the APEXes that are part of the VM. The main APK
+; is first, followed by any extra APKs in the order they are specified in the VM config.
+; The APEXes are listed in the order specified when the VM is created, which is normally alphabetic
+; order by name.
VmConfigDescriptor = {
-70002 : "Microdroid payload", ; Component name
(? -71000: tstr // ; Path to the payload config file
@@ -23,9 +27,30 @@
}
; Describes a unit of code (e.g. an APK or an APEX) present inside the VM.
+;
+; For an APK, the fields are as follows:
+; - Component name: The string "apk:" followed by the package name.
+; - Security version: The long version code from the APK manifest
+; (https://developer.android.com/reference/android/content/pm/PackageInfo#getLongVersionCode()).
+; - Code hash: This is the root hash of a Merkle tree computed over all bytes of the APK, as used
+; in the APK Signature Scheme v4 (https://source.android.com/docs/security/features/apksigning/v4)
+; with empty salt and using SHA-256 as the hash algorithm.
+; - Authority hash: The SHA-512 hash of the DER representation of the X.509 certificate for the
+; public key used to sign the APK.
+;
+; For an APEX, they are as follows:
+; - Component name: The string "apex:" followed by the APEX name as specified in the APEX Manifest
+; (see https://source.android.com/docs/core/ota/apex).
+; - Security version: The version number from the APEX Manifest.
+; - Code hash: The root hash of the apex_payload.img file within the APEX, taken from the first
+; hashtree descriptor in the VBMeta image
+; (see https://android.googlesource.com/platform/external/avb/+/master/README.md).
+; - Authority hash: The SHA-512 hash of the public key used to sign the file system image in the
+; APEX (as stored in the apex_pubkey file). The format is as described for AvbRSAPublicKeyHeader
+; in https://cs.android.com/android/platform/superproject/main/+/main:external/avb/libavb/avb_crypto.h.
SubcomponentDescriptor = {
1: tstr, ; Component name
2: uint, ; Security version
- ? 3: bstr, ; Code hash
+ 3: bstr, ; Code hash
4: bstr, ; Authority hash
}
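As a rough, non-authoritative sketch of the APK case described above (the package name, version and byte inputs are placeholders): the authority hash is computed the same way as get_cert_hash_from_apk() in microdroid_manager/src/verify.rs, and the descriptor is then the CBOR map the schema defines.

    use ciborium::Value;
    use openssl::sha::sha512;

    // Authority hash for an APK: SHA-512 over the DER-encoded signing certificate.
    fn apk_authority_hash(signing_cert_der: &[u8]) -> [u8; 64] {
        sha512(signing_cert_der)
    }

    // Assemble {1: component name, 2: security version, 3: code hash, 4: authority hash}.
    // `code_hash` would be the APK Signature Scheme v4 Merkle tree root hash.
    fn apk_subcomponent(package: &str, version: u64, code_hash: &[u8], cert_der: &[u8]) -> Value {
        Value::Map(vec![
            (Value::Integer(1.into()), Value::Text(format!("apk:{package}"))),
            (Value::Integer(2.into()), Value::Integer(version.into())),
            (Value::Integer(3.into()), Value::Bytes(code_hash.to_vec())),
            (Value::Integer(4.into()), Value::Bytes(apk_authority_hash(cert_der).to_vec())),
        ])
    }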
diff --git a/microdroid_manager/src/vm_payload_service.rs b/microdroid_manager/src/vm_payload_service.rs
index 0661314..d3346d8 100644
--- a/microdroid_manager/src/vm_payload_service.rs
+++ b/microdroid_manager/src/vm_payload_service.rs
@@ -22,31 +22,12 @@
use anyhow::{anyhow, Context, Result};
use avflog::LogResult;
use binder::{Interface, BinderFeatures, ExceptionCode, Strong, IntoBinderResult, Status};
-use diced_open_dice::{DiceArtifacts, derive_cdi_leaf_priv, PrivateKey, sign};
+use client_vm_csr::{generate_attestation_key_and_csr, ClientVmAttestationData};
+use diced_open_dice::DiceArtifacts;
use log::info;
use rpcbinder::RpcServer;
-
use crate::vm_secret::VmSecret;
-use coset::{
- iana, CborSerializable, CoseKey, CoseKeyBuilder, CoseSign, CoseSignBuilder, CoseSignature,
- CoseSignatureBuilder, HeaderBuilder,
-};
-use openssl::{
- bn::{BigNum, BigNumContext},
- ec::{EcGroup, EcKey, EcKeyRef},
- ecdsa::EcdsaSig,
- nid::Nid,
- pkey::Private,
- sha::sha256,
-};
-use service_vm_comm::{Csr, CsrPayload};
use std::os::unix::io::OwnedFd;
-use zeroize::Zeroizing;
-
-const ATTESTATION_KEY_NID: Nid = Nid::X9_62_PRIME256V1; // NIST P-256 curve
-const ATTESTATION_KEY_ALGO: iana::Algorithm = iana::Algorithm::ES256;
-const ATTESTATION_KEY_CURVE: iana::EllipticCurve = iana::EllipticCurve::P_256;
-const ATTESTATION_KEY_AFFINE_COORDINATE_SIZE: i32 = 32;
/// Implementation of `IVmPayloadService`.
struct VmPayloadService {
@@ -90,11 +71,21 @@
fn requestAttestation(&self, challenge: &[u8]) -> binder::Result<AttestationResult> {
self.check_restricted_apis_allowed()?;
- let (private_key, csr) = generate_attestation_key_and_csr(challenge, self.secret.dice())
+ let ClientVmAttestationData { private_key, csr } =
+ generate_attestation_key_and_csr(challenge, self.secret.dice())
+ .map_err(|e| {
+ Status::new_service_specific_error_str(
+ STATUS_FAILED_TO_PREPARE_CSR_AND_KEY,
+ Some(format!("Failed to prepare the CSR and key pair: {e:?}")),
+ )
+ })
+ .with_log()?;
+ let csr = csr
+ .into_cbor_vec()
.map_err(|e| {
Status::new_service_specific_error_str(
STATUS_FAILED_TO_PREPARE_CSR_AND_KEY,
- Some(format!("Failed to prepare the CSR and key pair: {e:?}")),
+ Some(format!("Failed to serialize CSR into CBOR: {e:?}")),
)
})
.with_log()?;
@@ -106,93 +97,6 @@
}
}
-fn generate_attestation_key_and_csr(
- challenge: &[u8],
- dice_artifacts: &dyn DiceArtifacts,
-) -> Result<(Zeroizing<Vec<u8>>, Vec<u8>)> {
- let group = EcGroup::from_curve_name(ATTESTATION_KEY_NID)?;
- let attestation_key = EcKey::generate(&group)?;
- let csr = build_csr(challenge, attestation_key.as_ref(), dice_artifacts)?;
-
- let csr = csr.into_cbor_vec().context("Failed to serialize CSR")?;
- let private_key = attestation_key.private_key_to_der()?;
- Ok((Zeroizing::new(private_key), csr))
-}
-
-fn build_csr(
- challenge: &[u8],
- attestation_key: &EcKeyRef<Private>,
- dice_artifacts: &dyn DiceArtifacts,
-) -> Result<Csr> {
- // Builds CSR Payload to be signed.
- let public_key =
- to_cose_public_key(attestation_key)?.to_vec().context("Failed to serialize public key")?;
- let csr_payload = CsrPayload { public_key, challenge: challenge.to_vec() };
- let csr_payload = csr_payload.into_cbor_vec()?;
-
- // Builds signed CSR Payload.
- let cdi_leaf_priv = derive_cdi_leaf_priv(dice_artifacts)?;
- let signed_csr_payload = build_signed_data(csr_payload, &cdi_leaf_priv, attestation_key)?
- .to_vec()
- .context("Failed to serialize signed CSR payload")?;
-
- // Builds CSR.
- let dice_cert_chain = dice_artifacts.bcc().ok_or(anyhow!("bcc is none"))?.to_vec();
- Ok(Csr { dice_cert_chain, signed_csr_payload })
-}
-
-fn build_signed_data(
- payload: Vec<u8>,
- cdi_leaf_priv: &PrivateKey,
- attestation_key: &EcKeyRef<Private>,
-) -> Result<CoseSign> {
- let cdi_leaf_sig_headers = build_signature_headers(iana::Algorithm::EdDSA);
- let attestation_key_sig_headers = build_signature_headers(ATTESTATION_KEY_ALGO);
- let aad = &[];
- let signed_data = CoseSignBuilder::new()
- .payload(payload)
- .try_add_created_signature(cdi_leaf_sig_headers, aad, |message| {
- sign(message, cdi_leaf_priv.as_array()).map(|v| v.to_vec())
- })?
- .try_add_created_signature(attestation_key_sig_headers, aad, |message| {
- ecdsa_sign(message, attestation_key)
- })?
- .build();
- Ok(signed_data)
-}
-
-/// Builds a signature with headers filled with the provided algorithm.
-/// The signature data will be filled later when building the signed data.
-fn build_signature_headers(alg: iana::Algorithm) -> CoseSignature {
- let protected = HeaderBuilder::new().algorithm(alg).build();
- CoseSignatureBuilder::new().protected(protected).build()
-}
-
-fn ecdsa_sign(message: &[u8], key: &EcKeyRef<Private>) -> Result<Vec<u8>> {
- let digest = sha256(message);
- // Passes the digest to `ECDSA_do_sign` as recommended in the spec:
- // https://commondatastorage.googleapis.com/chromium-boringssl-docs/ecdsa.h.html#ECDSA_do_sign
- let sig = EcdsaSig::sign::<Private>(&digest, key)?;
- Ok(sig.to_der()?)
-}
-
-fn get_affine_coordinates(key: &EcKeyRef<Private>) -> Result<(Vec<u8>, Vec<u8>)> {
- let mut ctx = BigNumContext::new()?;
- let mut x = BigNum::new()?;
- let mut y = BigNum::new()?;
- key.public_key().affine_coordinates_gfp(key.group(), &mut x, &mut y, &mut ctx)?;
- let x = x.to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?;
- let y = y.to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?;
- Ok((x, y))
-}
-
-fn to_cose_public_key(key: &EcKeyRef<Private>) -> Result<CoseKey> {
- let (x, y) = get_affine_coordinates(key)?;
- Ok(CoseKeyBuilder::new_ec2_pub_key(ATTESTATION_KEY_CURVE, x, y)
- .algorithm(ATTESTATION_KEY_ALGO)
- .build())
-}
-
impl Interface for VmPayloadService {}
impl VmPayloadService {
@@ -237,106 +141,3 @@
});
Ok(())
}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use anyhow::bail;
- use ciborium::Value;
- use coset::{iana::EnumI64, Label};
- use hwtrust::{dice, session::Session};
- use openssl::pkey::Public;
-
- /// The following data is generated randomly with urandom.
- const CHALLENGE: [u8; 16] = [
- 0xb3, 0x66, 0xfa, 0x72, 0x92, 0x32, 0x2c, 0xd4, 0x99, 0xcb, 0x00, 0x1f, 0x0e, 0xe0, 0xc7,
- 0x41,
- ];
-
- #[test]
- fn csr_and_private_key_have_correct_format() -> Result<()> {
- let dice_artifacts = diced_sample_inputs::make_sample_bcc_and_cdis()?;
-
- let (private_key, csr) = generate_attestation_key_and_csr(&CHALLENGE, &dice_artifacts)?;
- let ec_private_key = EcKey::private_key_from_der(&private_key)?;
- let csr = Csr::from_cbor_slice(&csr).unwrap();
- let cose_sign = CoseSign::from_slice(&csr.signed_csr_payload).unwrap();
- let aad = &[];
-
- // Checks CSR payload.
- let csr_payload =
- cose_sign.payload.as_ref().and_then(|v| CsrPayload::from_cbor_slice(v).ok()).unwrap();
- let public_key = to_cose_public_key(&ec_private_key)?.to_vec().unwrap();
- let expected_csr_payload = CsrPayload { challenge: CHALLENGE.to_vec(), public_key };
- assert_eq!(expected_csr_payload, csr_payload);
-
- // Checks the first signature is signed with CDI_Leaf_Priv.
- let session = Session::default();
- let chain = dice::Chain::from_cbor(&session, &csr.dice_cert_chain)?;
- let public_key = chain.leaf().subject_public_key();
- cose_sign
- .verify_signature(0, aad, |signature, message| public_key.verify(signature, message))?;
-
- // Checks the second signature is signed with attestation key.
- let attestation_public_key = CoseKey::from_slice(&csr_payload.public_key).unwrap();
- let ec_public_key = to_ec_public_key(&attestation_public_key)?;
- cose_sign.verify_signature(1, aad, |signature, message| {
- ecdsa_verify(signature, message, &ec_public_key)
- })?;
-
- // Verifies that private key and the public key form a valid key pair.
- let message = b"test message";
- let signature = ecdsa_sign(message, &ec_private_key)?;
- ecdsa_verify(&signature, message, &ec_public_key)?;
-
- Ok(())
- }
-
- fn ecdsa_verify(
- signature: &[u8],
- message: &[u8],
- ec_public_key: &EcKeyRef<Public>,
- ) -> Result<()> {
- let sig = EcdsaSig::from_der(signature)?;
- let digest = sha256(message);
- if sig.verify(&digest, ec_public_key)? {
- Ok(())
- } else {
- bail!("Signature does not match")
- }
- }
-
- fn to_ec_public_key(cose_key: &CoseKey) -> Result<EcKey<Public>> {
- check_ec_key_params(cose_key)?;
- let group = EcGroup::from_curve_name(ATTESTATION_KEY_NID)?;
- let x = get_label_value_as_bignum(cose_key, Label::Int(iana::Ec2KeyParameter::X.to_i64()))?;
- let y = get_label_value_as_bignum(cose_key, Label::Int(iana::Ec2KeyParameter::Y.to_i64()))?;
- let key = EcKey::from_public_key_affine_coordinates(&group, &x, &y)?;
- key.check_key()?;
- Ok(key)
- }
-
- fn check_ec_key_params(cose_key: &CoseKey) -> Result<()> {
- assert_eq!(coset::KeyType::Assigned(iana::KeyType::EC2), cose_key.kty);
- assert_eq!(Some(coset::Algorithm::Assigned(ATTESTATION_KEY_ALGO)), cose_key.alg);
- let crv = get_label_value(cose_key, Label::Int(iana::Ec2KeyParameter::Crv.to_i64()))?;
- assert_eq!(&Value::from(ATTESTATION_KEY_CURVE.to_i64()), crv);
- Ok(())
- }
-
- fn get_label_value_as_bignum(key: &CoseKey, label: Label) -> Result<BigNum> {
- get_label_value(key, label)?
- .as_bytes()
- .map(|v| BigNum::from_slice(&v[..]).unwrap())
- .ok_or_else(|| anyhow!("Value not a bstr."))
- }
-
- fn get_label_value(key: &CoseKey, label: Label) -> Result<&Value> {
- Ok(&key
- .params
- .iter()
- .find(|(k, _)| k == &label)
- .ok_or_else(|| anyhow!("Label {:?} not found", label))?
- .1)
- }
-}
diff --git a/microdroid_manager/src/vm_secret.rs b/microdroid_manager/src/vm_secret.rs
index d84c2e2..df5d318 100644
--- a/microdroid_manager/src/vm_secret.rs
+++ b/microdroid_manager/src/vm_secret.rs
@@ -14,18 +14,28 @@
//! Class for encapsulating & managing VM secrets.
-use anyhow::Result;
+use anyhow::{anyhow, ensure, Result};
+use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::IVirtualMachineService::IVirtualMachineService;
+use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper::ISecretkeeper::ISecretkeeper;
+use secretkeeper_comm::data_types::request::Request;
+use binder::{Strong};
+use coset::CborSerializable;
use diced_open_dice::{DiceArtifacts, OwnedDiceArtifacts};
use keystore2_crypto::ZVec;
use openssl::hkdf::hkdf;
use openssl::md::Md;
use openssl::sha;
+use secretkeeper_client::SkSession;
+use secretkeeper_comm::data_types::{Id, ID_SIZE, Secret, SECRET_SIZE};
+use secretkeeper_comm::data_types::response::Response;
+use secretkeeper_comm::data_types::packet::{ResponsePacket, ResponseType};
+use secretkeeper_comm::data_types::request_response_impl::{
+ StoreSecretRequest, GetSecretResponse, GetSecretRequest};
+use secretkeeper_comm::data_types::error::SecretkeeperError;
+use zeroize::Zeroizing;
const ENCRYPTEDSTORE_KEY_IDENTIFIER: &str = "encryptedstore_key";
-// Size of the secret stored in Secretkeeper.
-const SK_SECRET_SIZE: usize = 64;
-
// Generated using hexdump -vn32 -e'14/1 "0x%02X, " 1 "\n"' /dev/urandom
const SALT_ENCRYPTED_STORE: &[u8] = &[
0xFC, 0x1D, 0x35, 0x7B, 0x96, 0xF3, 0xEF, 0x17, 0x78, 0x7D, 0x70, 0xED, 0xEA, 0xFE, 0x1D, 0x6F,
@@ -36,6 +46,24 @@
0x55, 0xF8, 0x08, 0x23, 0x81, 0x5F, 0xF5, 0x16, 0x20, 0x3E, 0xBE, 0xBA, 0xB7, 0xA8, 0x43, 0x92,
];
+// TODO(b/291213394): Remove this once policy is generated from dice_chain
+const HYPOTHETICAL_DICE_POLICY: [u8; 43] = [
+ 0x83, 0x01, 0x81, 0x83, 0x01, 0x80, 0xA1, 0x01, 0x00, 0x82, 0x83, 0x01, 0x81, 0x01, 0x73, 0x74,
+ 0x65, 0x73, 0x74, 0x69, 0x6E, 0x67, 0x5F, 0x64, 0x69, 0x63, 0x65, 0x5F, 0x70, 0x6F, 0x6C, 0x69,
+ 0x63, 0x79, 0x83, 0x02, 0x82, 0x03, 0x18, 0x64, 0x19, 0xE9, 0x75,
+];
+// TODO(b/291213394): Differentiate the Id of nPVM based on 'salt'
+const ID_NP_VM: [u8; ID_SIZE] = [
+ 0xF1, 0xB2, 0xED, 0x3B, 0xD1, 0xBD, 0xF0, 0x7D, 0xE1, 0xF0, 0x01, 0xFC, 0x61, 0x71, 0xD3, 0x42,
+ 0xE5, 0x8A, 0xAF, 0x33, 0x6C, 0x11, 0xDC, 0xC8, 0x6F, 0xAE, 0x12, 0x5C, 0x26, 0x44, 0x6B, 0x86,
+ 0xCC, 0x24, 0xFD, 0xBF, 0x91, 0x4A, 0x54, 0x84, 0xF9, 0x01, 0x59, 0x25, 0x70, 0x89, 0x38, 0x8D,
+ 0x5E, 0xE6, 0x91, 0xDF, 0x68, 0x60, 0x69, 0x26, 0xBE, 0xFE, 0x79, 0x58, 0xF7, 0xEA, 0x81, 0x7D,
+];
+const SKP_SECRET_NP_VM: [u8; SECRET_SIZE] = [
+ 0xA9, 0x89, 0x97, 0xFE, 0xAE, 0x97, 0x55, 0x4B, 0x32, 0x35, 0xF0, 0xE8, 0x93, 0xDA, 0xEA, 0x24,
+ 0x06, 0xAC, 0x36, 0x8B, 0x3C, 0x95, 0x50, 0x16, 0x67, 0x71, 0x65, 0x26, 0xEB, 0xD0, 0xC3, 0x98,
+];
+
pub enum VmSecret {
// V2 secrets are derived from 2 independently secured secrets:
// 1. Secretkeeper protected secrets (skp secret).
@@ -54,15 +82,47 @@
V1 { dice: OwnedDiceArtifacts },
}
+fn get_id() -> [u8; ID_SIZE] {
+ if super::is_strict_boot() {
+ todo!("Id for protected VM is not implemented");
+ } else {
+ ID_NP_VM
+ }
+}
+
impl VmSecret {
- pub fn new(dice_artifacts: OwnedDiceArtifacts) -> Result<VmSecret> {
- if is_sk_supported() {
- // TODO(b/291213394): Change this to real Sk protected secret.
- let fake_skp_secret = ZVec::new(SK_SECRET_SIZE)?;
- return Ok(Self::V2 { dice: dice_artifacts, skp_secret: fake_skp_secret });
+ pub fn new(
+ dice_artifacts: OwnedDiceArtifacts,
+ vm_service: &Strong<dyn IVirtualMachineService>,
+ ) -> Result<VmSecret> {
+ ensure!(dice_artifacts.bcc().is_some(), "Dice chain missing");
+
+ if let Some(sk_service) = is_sk_supported(vm_service)? {
+ let id = get_id();
+ let mut skp_secret = Zeroizing::new([0u8; SECRET_SIZE]);
+ if super::is_strict_boot() {
+ if super::is_new_instance() {
+ *skp_secret = rand::random();
+ store_secret(sk_service.clone(), id, skp_secret.clone(), &dice_artifacts)?;
+ } else {
+ // Subsequent run of the pVM -> get the secret stored in Secretkeeper.
+ *skp_secret = get_secret(sk_service.clone(), id, &dice_artifacts)?;
+ }
+ } else {
+ // TODO(b/291213394): Non-protected VMs don't need to use Secretkeeper; remove this
+ // once we have sufficient testing on protected VMs.
+ store_secret(sk_service.clone(), id, SKP_SECRET_NP_VM.into(), &dice_artifacts)?;
+ *skp_secret = get_secret(sk_service.clone(), id, &dice_artifacts)?;
+ }
+ return Ok(Self::V2 {
+ dice: dice_artifacts,
+ skp_secret: ZVec::try_from(skp_secret.to_vec())?,
+ });
}
+ // Use V1 secrets if Secretkeeper is not supported.
Ok(Self::V1 { dice: dice_artifacts })
}
+
pub fn dice(&self) -> &OwnedDiceArtifacts {
match self {
Self::V2 { dice, .. } => dice,
@@ -94,13 +154,87 @@
}
}
-// Does the hardware support Secretkeeper.
-fn is_sk_supported() -> bool {
- if cfg!(llpvm_changes) {
- return false;
+fn store_secret(
+ secretkeeper: binder::Strong<dyn ISecretkeeper>,
+ id: [u8; ID_SIZE],
+ secret: Zeroizing<[u8; SECRET_SIZE]>,
+ _dice_chain: &OwnedDiceArtifacts,
+) -> Result<()> {
+ // Start a new secretkeeper session!
+ let session = SkSession::new(secretkeeper).map_err(anyhow_err)?;
+ let store_request = StoreSecretRequest {
+ id: Id(id),
+ secret: Secret(*secret),
+ // TODO(b/291233371): Construct policy out of dice_chain.
+ sealing_policy: HYPOTHETICAL_DICE_POLICY.to_vec(),
};
- // TODO(b/292209416): This value should be extracted from device tree.
- // Note: this does not affect the security of pVM. pvmfw & microdroid_manager continue to block
- // upgraded images. Setting this true is equivalent to including constant salt in vm secrets.
- true
+ log::info!("Secretkeeper operation: {:?}", store_request);
+
+ let store_request = store_request.serialize_to_packet().to_vec().map_err(anyhow_err)?;
+ let store_response = session.secret_management_request(&store_request).map_err(anyhow_err)?;
+ let store_response = ResponsePacket::from_slice(&store_response).map_err(anyhow_err)?;
+ let response_type = store_response.response_type().map_err(anyhow_err)?;
+ ensure!(
+ response_type == ResponseType::Success,
+ "Secretkeeper store failed with error: {:?}",
+ *SecretkeeperError::deserialize_from_packet(store_response).map_err(anyhow_err)?
+ );
+ Ok(())
+}
+
+fn get_secret(
+ secretkeeper: binder::Strong<dyn ISecretkeeper>,
+ id: [u8; ID_SIZE],
+ _dice_chain: &OwnedDiceArtifacts,
+) -> Result<[u8; SECRET_SIZE]> {
+ // Start a new secretkeeper session!
+ let session = SkSession::new(secretkeeper).map_err(anyhow_err)?;
+ let get_request = GetSecretRequest {
+ id: Id(id),
+ // TODO(b/291233371): Construct policy out of dice_chain.
+ updated_sealing_policy: None,
+ };
+ log::info!("Secretkeeper operation: {:?}", get_request);
+
+ let get_request = get_request.serialize_to_packet().to_vec().map_err(anyhow_err)?;
+ let get_response = session.secret_management_request(&get_request).map_err(anyhow_err)?;
+ let get_response = ResponsePacket::from_slice(&get_response).map_err(anyhow_err)?;
+ let response_type = get_response.response_type().map_err(anyhow_err)?;
+ ensure!(
+ response_type == ResponseType::Success,
+ "Secretkeeper get failed with error: {:?}",
+ *SecretkeeperError::deserialize_from_packet(get_response).map_err(anyhow_err)?
+ );
+ let get_response =
+ *GetSecretResponse::deserialize_from_packet(get_response).map_err(anyhow_err)?;
+ Ok(get_response.secret.0)
+}
+
+#[inline]
+fn anyhow_err<E: core::fmt::Debug>(err: E) -> anyhow::Error {
+ anyhow!("{:?}", err)
+}
+
+// Get the Secretkeeper connection if supported. The host can be consulted on whether the device
+// supports Secretkeeper, but that should be used with caution for protected VMs.
+fn is_sk_supported(
+ host: &Strong<dyn IVirtualMachineService>,
+) -> Result<Option<Strong<dyn ISecretkeeper>>> {
+ let sk = if cfg!(llpvm_changes) {
+ if super::is_strict_boot() {
+ // TODO: For protected VM check for Secretkeeper authentication data in device tree.
+ None
+ } else {
+ // For non-protected VM, believe what host claims.
+ host.getSecretkeeper()
+ // TODO rename this error!
+ .map_err(|e| {
+ super::MicrodroidError::FailedToConnectToVirtualizationService(e.to_string())
+ })?
+ }
+ } else {
+ // LLPVM flag is disabled
+ None
+ };
+ Ok(sk)
}
diff --git a/pvmfw/Android.bp b/pvmfw/Android.bp
index 946ed85..37d8ac9 100644
--- a/pvmfw/Android.bp
+++ b/pvmfw/Android.bp
@@ -16,6 +16,7 @@
"libbssl_ffi_nostd",
"libciborium_nostd",
"libciborium_io_nostd",
+ "libcstr",
"libdiced_open_dice_nostd",
"libfdtpci",
"libhyp",
@@ -25,6 +26,7 @@
"libpvmfw_avb_nostd",
"libpvmfw_embedded_key",
"libpvmfw_fdt_template",
+ "libservice_vm_version",
"libsmccc",
"libstatic_assertions",
"libtinyvec_nostd",
@@ -33,7 +35,6 @@
"libvmbase",
"libzerocopy_nostd",
"libzeroize_nostd",
- "libspin_nostd",
],
}
@@ -44,21 +45,71 @@
cmd: "touch $(out)",
}
-rust_test {
- name: "libpvmfw.bootargs.test",
- host_supported: true,
- // For now, only bootargs.rs is written to be conditionally compiled with std.
- srcs: ["src/bootargs.rs"],
+rust_defaults {
+ name: "libpvmfw.test.defaults",
defaults: ["avf_build_flags_rust"],
test_suites: ["general-tests"],
test_options: {
unit_test: true,
},
+ prefer_rlib: true,
+ rustlibs: [
+ "libcstr",
+ ],
+}
+
+rust_test {
+ name: "libpvmfw.bootargs.test",
+ host_supported: true,
+ // For now, only bootargs.rs is written to be conditionally compiled with std.
+ srcs: ["src/bootargs.rs"],
+ defaults: ["libpvmfw.test.defaults"],
rustlibs: [
"libzeroize",
],
}
+rust_test {
+ name: "libpvmfw.device_assignment.test",
+ srcs: ["src/device_assignment.rs"],
+ defaults: ["libpvmfw.test.defaults"],
+ rustlibs: [
+ "libhyp",
+ "liblibfdt",
+ "liblog_rust",
+ "libpvmfw_fdt_template",
+ ],
+ data: [
+ ":test_pvmfw_devices_vm_dtbo",
+ ":test_pvmfw_devices_vm_dtbo_without_symbols",
+ ":test_pvmfw_devices_with_rng",
+ ":test_pvmfw_devices_with_multiple_devices_iommus",
+ ":test_pvmfw_devices_with_iommu_sharing",
+ ":test_pvmfw_devices_with_iommu_id_conflict",
+ ":test_pvmfw_devices_without_device",
+ ":test_pvmfw_devices_without_iommus",
+ ],
+ // To use libpvmfw_fdt_template for testing
+ enabled: false,
+ target: {
+ android_arm64: {
+ enabled: true,
+ },
+ },
+}
+
+rust_test {
+ name: "libpvmfw.dice.test",
+ srcs: ["src/dice.rs"],
+ defaults: ["libpvmfw.test.defaults"],
+ rustlibs: [
+ "libcbor_util",
+ "libciborium",
+ "libdiced_open_dice_nostd",
+ "libpvmfw_avb_nostd",
+ ],
+}
+
genrule {
name: "test_pvmfw_devices_vm_dtbo",
defaults: ["dts_to_dtb"],
@@ -73,39 +124,52 @@
out: ["test_pvmfw_devices_vm_dtbo_without_symbols.dtbo"],
}
+genrule_defaults {
+ name: "test_device_assignment_dts_to_dtb",
+ defaults: ["dts_to_dtb"],
+ srcs: ["testdata/test_crosvm_dt_base.dtsi"],
+}
+
genrule {
name: "test_pvmfw_devices_with_rng",
- defaults: ["dts_to_dtb"],
+ defaults: ["test_device_assignment_dts_to_dtb"],
srcs: ["testdata/test_pvmfw_devices_with_rng.dts"],
out: ["test_pvmfw_devices_with_rng.dtb"],
}
-rust_test {
- name: "libpvmfw.device_assignment.test",
- srcs: ["src/device_assignment.rs"],
- defaults: ["avf_build_flags_rust"],
- test_suites: ["general-tests"],
- test_options: {
- unit_test: true,
- },
- prefer_rlib: true,
- rustlibs: [
- "liblibfdt",
- "liblog_rust",
- "libpvmfw_fdt_template",
- ],
- data: [
- ":test_pvmfw_devices_vm_dtbo",
- ":test_pvmfw_devices_vm_dtbo_without_symbols",
- ":test_pvmfw_devices_with_rng",
- ],
- // To use libpvmfw_fdt_template for testing
- enabled: false,
- target: {
- android_arm64: {
- enabled: true,
- },
- },
+genrule {
+ name: "test_pvmfw_devices_without_iommus",
+ defaults: ["test_device_assignment_dts_to_dtb"],
+ srcs: ["testdata/test_pvmfw_devices_without_iommus.dts"],
+ out: ["test_pvmfw_devices_without_iommus.dtb"],
+}
+
+genrule {
+ name: "test_pvmfw_devices_without_device",
+ defaults: ["test_device_assignment_dts_to_dtb"],
+ srcs: ["testdata/test_pvmfw_devices_without_device.dts"],
+ out: ["test_pvmfw_devices_without_device.dtb"],
+}
+
+genrule {
+ name: "test_pvmfw_devices_with_multiple_devices_iommus",
+ defaults: ["test_device_assignment_dts_to_dtb"],
+ srcs: ["testdata/test_pvmfw_devices_with_multiple_devices_iommus.dts"],
+ out: ["test_pvmfw_devices_with_multiple_devices_iommus.dtb"],
+}
+
+genrule {
+ name: "test_pvmfw_devices_with_iommu_sharing",
+ defaults: ["test_device_assignment_dts_to_dtb"],
+ srcs: ["testdata/test_pvmfw_devices_with_iommu_sharing.dts"],
+ out: ["test_pvmfw_devices_with_iommu_sharing.dtb"],
+}
+
+genrule {
+ name: "test_pvmfw_devices_with_iommu_id_conflict",
+ defaults: ["test_device_assignment_dts_to_dtb"],
+ srcs: ["testdata/test_pvmfw_devices_with_iommu_id_conflict.dts"],
+ out: ["test_pvmfw_devices_with_iommu_id_conflict.dtb"],
}
cc_binary {
@@ -224,7 +288,7 @@
srcs: [":pvmfw_platform.dts.preprocessed"],
out: ["lib.rs"],
tools: ["dtc"],
- cmd: "$(location dtc) -I dts -O dtb -o $(genDir)/compiled.dtbo $(in) && " +
+ cmd: "$(location dtc) -@ -I dts -O dtb -o $(genDir)/compiled.dtbo $(in) && " +
"(" +
" echo '#![no_std]';" +
" echo '#![allow(missing_docs)]';" +
diff --git a/pvmfw/TEST_MAPPING b/pvmfw/TEST_MAPPING
index f21318e..e948400 100644
--- a/pvmfw/TEST_MAPPING
+++ b/pvmfw/TEST_MAPPING
@@ -10,6 +10,9 @@
},
{
"name" : "libpvmfw.device_assignment.test"
+ },
+ {
+ "name" : "libpvmfw.dice.test"
}
]
}
diff --git a/pvmfw/avb/src/descriptor/collection.rs b/pvmfw/avb/src/descriptor/collection.rs
index f47bfbd..6784758 100644
--- a/pvmfw/avb/src/descriptor/collection.rs
+++ b/pvmfw/avb/src/descriptor/collection.rs
@@ -18,11 +18,11 @@
use super::hash::HashDescriptor;
use super::property::PropertyDescriptor;
use crate::partition::PartitionName;
-use crate::utils::{self, is_not_null, to_usize, usize_checked_add};
+use crate::utils::{to_usize, usize_checked_add};
use crate::PvmfwVerifyError;
+use avb::{IoError, IoResult, SlotVerifyError, SlotVerifyNoDataResult, VbmetaData};
use avb_bindgen::{
avb_descriptor_foreach, avb_descriptor_validate_and_byteswap, AvbDescriptor, AvbDescriptorTag,
- AvbVBMetaData,
};
use core::{ffi::c_void, mem::size_of, slice};
use tinyvec::ArrayVec;
@@ -36,24 +36,16 @@
}
impl<'a> Descriptors<'a> {
- /// Builds `Descriptors` from `AvbVBMetaData`.
- /// Returns an error if the given `AvbVBMetaData` contains non-hash descriptor, hash
+ /// Builds `Descriptors` from `VbmetaData`.
+ /// Returns an error if the given `VbmetaData` contains a non-hash descriptor, a hash
/// descriptor of unknown `PartitionName` or duplicated hash descriptors.
- ///
- /// # Safety
- ///
- /// Behavior is undefined if any of the following conditions are violated:
- /// * `vbmeta.vbmeta_data` must be non-null and points to a valid VBMeta.
- /// * `vbmeta.vbmeta_data` must be valid for reading `vbmeta.vbmeta_size` bytes.
- pub(crate) unsafe fn from_vbmeta(vbmeta: AvbVBMetaData) -> Result<Self, PvmfwVerifyError> {
- is_not_null(vbmeta.vbmeta_data).map_err(|_| avb::SlotVerifyError::Io)?;
- let mut res: Result<Self, avb::IoError> = Ok(Self::default());
- // SAFETY: It is safe as the raw pointer `vbmeta.vbmeta_data` is a non-null pointer and
- // points to a valid VBMeta structure.
+ pub(crate) fn from_vbmeta(vbmeta: &'a VbmetaData) -> Result<Self, PvmfwVerifyError> {
+ let mut res: IoResult<Self> = Ok(Self::default());
+ // SAFETY: It is safe as `vbmeta.data()` contains a valid VBMeta structure.
let output = unsafe {
avb_descriptor_foreach(
- vbmeta.vbmeta_data,
- vbmeta.vbmeta_size,
+ vbmeta.data().as_ptr(),
+ vbmeta.data().len(),
Some(check_and_save_descriptor),
&mut res as *mut _ as *mut c_void,
)
@@ -61,7 +53,7 @@
if output == res.is_ok() {
res.map_err(PvmfwVerifyError::InvalidDescriptors)
} else {
- Err(avb::SlotVerifyError::InvalidMetadata.into())
+ Err(SlotVerifyError::InvalidMetadata.into())
}
}
@@ -74,11 +66,11 @@
pub(crate) fn find_hash_descriptor(
&self,
partition_name: PartitionName,
- ) -> Result<&HashDescriptor, avb::SlotVerifyError> {
+ ) -> SlotVerifyNoDataResult<&HashDescriptor> {
self.hash_descriptors
.iter()
.find(|d| d.partition_name == partition_name)
- .ok_or(avb::SlotVerifyError::InvalidMetadata)
+ .ok_or(SlotVerifyError::InvalidMetadata)
}
pub(crate) fn has_property_descriptor(&self) -> bool {
@@ -89,27 +81,24 @@
self.prop_descriptor.as_ref().filter(|desc| desc.key == key).map(|desc| desc.value)
}
- fn push(&mut self, descriptor: Descriptor<'a>) -> utils::Result<()> {
+ fn push(&mut self, descriptor: Descriptor<'a>) -> IoResult<()> {
match descriptor {
Descriptor::Hash(d) => self.push_hash_descriptor(d),
Descriptor::Property(d) => self.push_property_descriptor(d),
}
}
- fn push_hash_descriptor(&mut self, descriptor: HashDescriptor<'a>) -> utils::Result<()> {
+ fn push_hash_descriptor(&mut self, descriptor: HashDescriptor<'a>) -> IoResult<()> {
if self.hash_descriptors.iter().any(|d| d.partition_name == descriptor.partition_name) {
- return Err(avb::IoError::Io);
+ return Err(IoError::Io);
}
self.hash_descriptors.push(descriptor);
Ok(())
}
- fn push_property_descriptor(
- &mut self,
- descriptor: PropertyDescriptor<'a>,
- ) -> utils::Result<()> {
+ fn push_property_descriptor(&mut self, descriptor: PropertyDescriptor<'a>) -> IoResult<()> {
if self.prop_descriptor.is_some() {
- return Err(avb::IoError::Io);
+ return Err(IoError::Io);
}
self.prop_descriptor.replace(descriptor);
Ok(())
@@ -120,8 +109,7 @@
///
/// Behavior is undefined if any of the following conditions are violated:
/// * The `descriptor` pointer must be non-null and points to a valid `AvbDescriptor` struct.
-/// * The `user_data` pointer must be non-null, points to a valid
-/// `Result<Descriptors, avb::IoError>`
+/// * The `user_data` pointer must be non-null, points to a valid `IoResult<Descriptors>`
/// struct and is initialized.
unsafe extern "C" fn check_and_save_descriptor(
descriptor: *const AvbDescriptor,
@@ -129,8 +117,7 @@
) -> bool {
// SAFETY: It is safe because the caller ensures that `user_data` points to a valid struct and
// is initialized.
- let Some(res) = (unsafe { (user_data as *mut Result<Descriptors, avb::IoError>).as_mut() })
- else {
+ let Some(res) = (unsafe { (user_data as *mut IoResult<Descriptors>).as_mut() }) else {
return false;
};
let Ok(descriptors) = res else {
@@ -154,7 +141,7 @@
unsafe fn try_check_and_save_descriptor(
descriptor: *const AvbDescriptor,
descriptors: &mut Descriptors,
-) -> utils::Result<()> {
+) -> IoResult<()> {
// SAFETY: It is safe because the caller ensures that `descriptor` is a non-null pointer
// pointing to a valid struct.
let descriptor = unsafe { Descriptor::from_descriptor_ptr(descriptor)? };
@@ -171,7 +158,7 @@
///
/// Behavior is undefined if any of the following conditions are violated:
/// * The `descriptor` pointer must be non-null and point to a valid `AvbDescriptor`.
- unsafe fn from_descriptor_ptr(descriptor: *const AvbDescriptor) -> utils::Result<Self> {
+ unsafe fn from_descriptor_ptr(descriptor: *const AvbDescriptor) -> IoResult<Self> {
let avb_descriptor =
// SAFETY: It is safe as the raw pointer `descriptor` is non-null and points to
// a valid `AvbDescriptor`.
@@ -197,7 +184,7 @@
unsafe { PropertyDescriptor::from_descriptor_ptr(descriptor, data)? };
Ok(Self::Property(descriptor))
}
- _ => Err(avb::IoError::NoSuchValue),
+ _ => Err(IoError::NoSuchValue),
}
}
}
diff --git a/pvmfw/avb/src/descriptor/common.rs b/pvmfw/avb/src/descriptor/common.rs
index 31ee0a5..6063a7c 100644
--- a/pvmfw/avb/src/descriptor/common.rs
+++ b/pvmfw/avb/src/descriptor/common.rs
@@ -14,7 +14,8 @@
//! Structs and functions used by all the descriptors.
-use crate::utils::{self, is_not_null};
+use crate::utils::is_not_null;
+use avb::{IoError, IoResult};
use core::mem::MaybeUninit;
/// # Safety
@@ -24,14 +25,14 @@
pub(super) unsafe fn get_valid_descriptor<T>(
descriptor_ptr: *const T,
descriptor_validate_and_byteswap: unsafe extern "C" fn(src: *const T, dest: *mut T) -> bool,
-) -> utils::Result<T> {
+) -> IoResult<T> {
is_not_null(descriptor_ptr)?;
// SAFETY: It is safe because the caller ensures that `descriptor_ptr` is a non-null pointer
// pointing to a valid struct.
let descriptor = unsafe {
let mut desc = MaybeUninit::uninit();
if !descriptor_validate_and_byteswap(descriptor_ptr, desc.as_mut_ptr()) {
- return Err(avb::IoError::Io);
+ return Err(IoError::Io);
}
desc.assume_init()
};
diff --git a/pvmfw/avb/src/descriptor/hash.rs b/pvmfw/avb/src/descriptor/hash.rs
index 089268f..35db66d 100644
--- a/pvmfw/avb/src/descriptor/hash.rs
+++ b/pvmfw/avb/src/descriptor/hash.rs
@@ -16,7 +16,8 @@
use super::common::get_valid_descriptor;
use crate::partition::PartitionName;
-use crate::utils::{self, to_usize, usize_checked_add};
+use crate::utils::{to_usize, usize_checked_add};
+use avb::{IoError, IoResult};
use avb_bindgen::{
avb_hash_descriptor_validate_and_byteswap, AvbDescriptor, AvbHashDescriptor,
AVB_SHA256_DIGEST_SIZE,
@@ -47,19 +48,19 @@
pub(super) unsafe fn from_descriptor_ptr(
descriptor: *const AvbDescriptor,
data: &'a [u8],
- ) -> utils::Result<Self> {
+ ) -> IoResult<Self> {
// SAFETY: It is safe as the raw pointer `descriptor` is non-null and points to
// a valid `AvbDescriptor`.
let h = unsafe { HashDescriptorHeader::from_descriptor_ptr(descriptor)? };
let partition_name = data
.get(h.partition_name_range()?)
- .ok_or(avb::IoError::RangeOutsidePartition)?
+ .ok_or(IoError::RangeOutsidePartition)?
.try_into()?;
let digest = data
.get(h.digest_range()?)
- .ok_or(avb::IoError::RangeOutsidePartition)?
+ .ok_or(IoError::RangeOutsidePartition)?
.try_into()
- .map_err(|_| avb::IoError::InvalidValueSize)?;
+ .map_err(|_| IoError::InvalidValueSize)?;
Ok(Self { partition_name, digest })
}
}
@@ -71,7 +72,7 @@
///
/// Behavior is undefined if any of the following conditions are violated:
/// * The `descriptor` pointer must be non-null and point to a valid `AvbDescriptor`.
- unsafe fn from_descriptor_ptr(descriptor: *const AvbDescriptor) -> utils::Result<Self> {
+ unsafe fn from_descriptor_ptr(descriptor: *const AvbDescriptor) -> IoResult<Self> {
// SAFETY: It is safe as the raw pointer `descriptor` is non-null and points to
// a valid `AvbDescriptor`.
unsafe {
@@ -83,16 +84,16 @@
}
}
- fn partition_name_end(&self) -> utils::Result<usize> {
+ fn partition_name_end(&self) -> IoResult<usize> {
usize_checked_add(size_of::<AvbHashDescriptor>(), to_usize(self.0.partition_name_len)?)
}
- fn partition_name_range(&self) -> utils::Result<Range<usize>> {
+ fn partition_name_range(&self) -> IoResult<Range<usize>> {
let start = size_of::<AvbHashDescriptor>();
Ok(start..(self.partition_name_end()?))
}
- fn digest_range(&self) -> utils::Result<Range<usize>> {
+ fn digest_range(&self) -> IoResult<Range<usize>> {
let start = usize_checked_add(self.partition_name_end()?, to_usize(self.0.salt_len)?)?;
let end = usize_checked_add(start, to_usize(self.0.digest_len)?)?;
Ok(start..end)
diff --git a/pvmfw/avb/src/descriptor/property.rs b/pvmfw/avb/src/descriptor/property.rs
index 336623a..8145d64 100644
--- a/pvmfw/avb/src/descriptor/property.rs
+++ b/pvmfw/avb/src/descriptor/property.rs
@@ -15,7 +15,8 @@
//! Structs and functions relating to the property descriptor.
use super::common::get_valid_descriptor;
-use crate::utils::{self, to_usize, usize_checked_add};
+use crate::utils::{to_usize, usize_checked_add};
+use avb::{IoError, IoResult};
use avb_bindgen::{
avb_property_descriptor_validate_and_byteswap, AvbDescriptor, AvbPropertyDescriptor,
};
@@ -34,7 +35,7 @@
pub(super) unsafe fn from_descriptor_ptr(
descriptor: *const AvbDescriptor,
data: &'a [u8],
- ) -> utils::Result<Self> {
+ ) -> IoResult<Self> {
// SAFETY: It is safe as the raw pointer `descriptor` is non-null and points to
// a valid `AvbDescriptor`.
let h = unsafe { PropertyDescriptorHeader::from_descriptor_ptr(descriptor)? };
@@ -43,12 +44,12 @@
Ok(Self { key, value })
}
- fn get_valid_slice(data: &[u8], start: usize, end: usize) -> utils::Result<&[u8]> {
+ fn get_valid_slice(data: &[u8], start: usize, end: usize) -> IoResult<&[u8]> {
const NUL_BYTE: u8 = b'\0';
match data.get(end) {
- Some(&NUL_BYTE) => data.get(start..end).ok_or(avb::IoError::RangeOutsidePartition),
- _ => Err(avb::IoError::NoSuchValue),
+ Some(&NUL_BYTE) => data.get(start..end).ok_or(IoError::RangeOutsidePartition),
+ _ => Err(IoError::NoSuchValue),
}
}
}
@@ -60,7 +61,7 @@
///
/// Behavior is undefined if any of the following conditions are violated:
/// * The `descriptor` pointer must be non-null and point to a valid `AvbDescriptor`.
- unsafe fn from_descriptor_ptr(descriptor: *const AvbDescriptor) -> utils::Result<Self> {
+ unsafe fn from_descriptor_ptr(descriptor: *const AvbDescriptor) -> IoResult<Self> {
// SAFETY: It is safe as the raw pointer `descriptor` is non-null and points to
// a valid `AvbDescriptor`.
unsafe {
@@ -76,16 +77,16 @@
size_of::<AvbPropertyDescriptor>()
}
- fn key_end(&self) -> utils::Result<usize> {
+ fn key_end(&self) -> IoResult<usize> {
usize_checked_add(self.key_start(), to_usize(self.0.key_num_bytes)?)
}
- fn value_start(&self) -> utils::Result<usize> {
+ fn value_start(&self) -> IoResult<usize> {
// There is a NUL byte between key and value.
usize_checked_add(self.key_end()?, 1)
}
- fn value_end(&self) -> utils::Result<usize> {
+ fn value_end(&self) -> IoResult<usize> {
usize_checked_add(self.value_start()?, to_usize(self.0.value_num_bytes)?)
}
}
diff --git a/pvmfw/avb/src/error.rs b/pvmfw/avb/src/error.rs
index af38c54..4e3f27e 100644
--- a/pvmfw/avb/src/error.rs
+++ b/pvmfw/avb/src/error.rs
@@ -15,25 +15,25 @@
//! This module contains the error thrown by the payload verification API
//! and other errors used in the library.
+use avb::{IoError, SlotVerifyError};
use core::fmt;
-/// Wrapper around `avb::SlotVerifyError` to add custom pvmfw errors.
+/// Wrapper around `SlotVerifyError` to add custom pvmfw errors.
/// It is the error thrown by the payload verification API `verify_payload()`.
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Debug, PartialEq, Eq)]
pub enum PvmfwVerifyError {
- /// Passthrough avb::SlotVerifyError.
- AvbError(avb::SlotVerifyError),
+ /// Passthrough `SlotVerifyError` with no `SlotVerifyData`.
+ AvbError(SlotVerifyError<'static>),
/// VBMeta has invalid descriptors.
- InvalidDescriptors(avb::IoError),
+ InvalidDescriptors(IoError),
/// Unknown vbmeta property.
UnknownVbmetaProperty,
}
-/// It's always possible to convert from an `avb::SlotVerifyError` since we are
-/// a superset.
-impl From<avb::SlotVerifyError> for PvmfwVerifyError {
- fn from(error: avb::SlotVerifyError) -> Self {
- Self::AvbError(error)
+impl From<SlotVerifyError<'_>> for PvmfwVerifyError {
+ fn from(error: SlotVerifyError) -> Self {
+ // We don't use verification data on failure, so drop it to get a `'static` lifetime.
+ Self::AvbError(error.without_verify_data())
}
}
diff --git a/pvmfw/avb/src/ops.rs b/pvmfw/avb/src/ops.rs
index c7b8b01..9711f72 100644
--- a/pvmfw/avb/src/ops.rs
+++ b/pvmfw/avb/src/ops.rs
@@ -12,22 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//! Structs and functions relating to `AvbOps`.
+//! Structs and functions relating to AVB callback operations.
use crate::partition::PartitionName;
-use crate::utils::{self, as_ref, is_not_null, to_nonnull, write};
-use avb::internal::{result_to_io_enum, slot_verify_enum_to_result};
-use avb_bindgen::{
- avb_slot_verify, avb_slot_verify_data_free, AvbHashtreeErrorMode, AvbIOResult, AvbOps,
- AvbPartitionData, AvbSlotVerifyData, AvbSlotVerifyFlags, AvbVBMetaData,
+use avb::{
+ slot_verify, HashtreeErrorMode, IoError, IoResult, PublicKeyForPartitionInfo, SlotVerifyData,
+ SlotVerifyFlags, SlotVerifyResult,
};
-use core::{
- ffi::{c_char, c_void, CStr},
- mem::MaybeUninit,
- ptr, slice,
-};
-
-const NULL_BYTE: &[u8] = b"\0";
+use core::ffi::CStr;
pub(crate) struct Payload<'a> {
kernel: &'a [u8],
@@ -35,15 +27,6 @@
trusted_public_key: &'a [u8],
}
-impl<'a> AsRef<Payload<'a>> for AvbOps {
- fn as_ref(&self) -> &Payload<'a> {
- let payload = self.user_data as *const Payload;
- // SAFETY: It is safe to cast the `AvbOps.user_data` to Payload as we have saved a
- // pointer to a valid value of Payload in user_data when creating AvbOps.
- unsafe { &*payload }
- }
-}
-
impl<'a> Payload<'a> {
pub(crate) fn new(
kernel: &'a [u8],
@@ -53,148 +36,116 @@
Self { kernel, initrd, trusted_public_key }
}
- fn get_partition(&self, partition_name: *const c_char) -> Result<&[u8], avb::IoError> {
+ fn get_partition(&self, partition_name: &CStr) -> IoResult<&[u8]> {
match partition_name.try_into()? {
PartitionName::Kernel => Ok(self.kernel),
PartitionName::InitrdNormal | PartitionName::InitrdDebug => {
- self.initrd.ok_or(avb::IoError::NoSuchPartition)
+ self.initrd.ok_or(IoError::NoSuchPartition)
}
}
}
}
-/// `Ops` wraps the class `AvbOps` in libavb. It provides pvmfw customized
-/// operations used in the verification.
-pub(crate) struct Ops(AvbOps);
-
-impl<'a> From<&mut Payload<'a>> for Ops {
- fn from(payload: &mut Payload<'a>) -> Self {
- let avb_ops = AvbOps {
- user_data: payload as *mut _ as *mut c_void,
- ab_ops: ptr::null_mut(),
- atx_ops: ptr::null_mut(),
- read_from_partition: Some(read_from_partition),
- get_preloaded_partition: Some(get_preloaded_partition),
- write_to_partition: None,
- validate_vbmeta_public_key: Some(validate_vbmeta_public_key),
- read_rollback_index: Some(read_rollback_index),
- write_rollback_index: None,
- read_is_device_unlocked: Some(read_is_device_unlocked),
- get_unique_guid_for_partition: Some(get_unique_guid_for_partition),
- get_size_of_partition: Some(get_size_of_partition),
- read_persistent_value: None,
- write_persistent_value: None,
- validate_public_key_for_partition: None,
- };
- Self(avb_ops)
- }
+/// Pvmfw customized operations used in the verification.
+pub(crate) struct Ops<'a> {
+ payload: &'a Payload<'a>,
}
-impl Ops {
+impl<'a> Ops<'a> {
+ pub(crate) fn new(payload: &'a Payload<'a>) -> Self {
+ Self { payload }
+ }
+
pub(crate) fn verify_partition(
&mut self,
partition_name: &CStr,
- ) -> Result<AvbSlotVerifyDataWrap, avb::SlotVerifyError> {
- let requested_partitions = [partition_name.as_ptr(), ptr::null()];
- let ab_suffix = CStr::from_bytes_with_nul(NULL_BYTE).unwrap();
- let mut out_data = MaybeUninit::uninit();
- // SAFETY: It is safe to call `avb_slot_verify()` as the pointer arguments (`ops`,
- // `requested_partitions` and `ab_suffix`) passed to the method are all valid and
- // initialized.
- let result = unsafe {
- avb_slot_verify(
- &mut self.0,
- requested_partitions.as_ptr(),
- ab_suffix.as_ptr(),
- AvbSlotVerifyFlags::AVB_SLOT_VERIFY_FLAGS_NONE,
- AvbHashtreeErrorMode::AVB_HASHTREE_ERROR_MODE_RESTART_AND_INVALIDATE,
- out_data.as_mut_ptr(),
- )
- };
- slot_verify_enum_to_result(result)?;
- // SAFETY: This is safe because `out_data` has been properly initialized after
- // calling `avb_slot_verify` and it returns OK.
- let out_data = unsafe { out_data.assume_init() };
- out_data.try_into()
+ ) -> SlotVerifyResult<SlotVerifyData> {
+ slot_verify(
+ self,
+ &[partition_name],
+ None, // No partition slot suffix.
+ SlotVerifyFlags::AVB_SLOT_VERIFY_FLAGS_NONE,
+ HashtreeErrorMode::AVB_HASHTREE_ERROR_MODE_RESTART_AND_INVALIDATE,
+ )
}
}
-extern "C" fn read_is_device_unlocked(
- _ops: *mut AvbOps,
- out_is_unlocked: *mut bool,
-) -> AvbIOResult {
- result_to_io_enum(write(out_is_unlocked, false))
+impl<'a> avb::Ops for Ops<'a> {
+ fn read_from_partition(
+ &mut self,
+ partition: &CStr,
+ offset: i64,
+ buffer: &mut [u8],
+ ) -> IoResult<usize> {
+ let partition = self.payload.get_partition(partition)?;
+ copy_data_to_dst(partition, offset, buffer)?;
+ Ok(buffer.len())
+ }
+
+ fn get_preloaded_partition(&mut self, partition: &CStr) -> IoResult<&[u8]> {
+ self.payload.get_partition(partition)
+ }
+
+ fn validate_vbmeta_public_key(
+ &mut self,
+ public_key: &[u8],
+ _public_key_metadata: Option<&[u8]>,
+ ) -> IoResult<bool> {
+ // The public key metadata is not used when we build the VBMeta.
+ Ok(self.payload.trusted_public_key == public_key)
+ }
+
+ fn read_rollback_index(&mut self, _rollback_index_location: usize) -> IoResult<u64> {
+        // TODO(291213394): Refine this comment once the capability for rollback protection is
+        // defined. pvmfw does not compare the stored rollback index with the image's rollback
+        // index for anti-rollback protection. Hence, we return 0 so that any rollback index
+        // (including the default of 0) is never smaller than it and the rollback index check passes.
+ Ok(0)
+ }
+
+ fn write_rollback_index(
+ &mut self,
+ _rollback_index_location: usize,
+ _index: u64,
+ ) -> IoResult<()> {
+ Err(IoError::NotImplemented)
+ }
+
+ fn read_is_device_unlocked(&mut self) -> IoResult<bool> {
+ Ok(false)
+ }
+
+ fn get_size_of_partition(&mut self, partition: &CStr) -> IoResult<u64> {
+ let partition = self.payload.get_partition(partition)?;
+ u64::try_from(partition.len()).map_err(|_| IoError::InvalidValueSize)
+ }
+
+ fn read_persistent_value(&mut self, _name: &CStr, _value: &mut [u8]) -> IoResult<usize> {
+ Err(IoError::NotImplemented)
+ }
+
+ fn write_persistent_value(&mut self, _name: &CStr, _value: &[u8]) -> IoResult<()> {
+ Err(IoError::NotImplemented)
+ }
+
+ fn erase_persistent_value(&mut self, _name: &CStr) -> IoResult<()> {
+ Err(IoError::NotImplemented)
+ }
+
+ fn validate_public_key_for_partition(
+ &mut self,
+ _partition: &CStr,
+ _public_key: &[u8],
+ _public_key_metadata: Option<&[u8]>,
+ ) -> IoResult<PublicKeyForPartitionInfo> {
+ Err(IoError::NotImplemented)
+ }
}
-extern "C" fn get_preloaded_partition(
- ops: *mut AvbOps,
- partition: *const c_char,
- num_bytes: usize,
- out_pointer: *mut *mut u8,
- out_num_bytes_preloaded: *mut usize,
-) -> AvbIOResult {
- result_to_io_enum(try_get_preloaded_partition(
- ops,
- partition,
- num_bytes,
- out_pointer,
- out_num_bytes_preloaded,
- ))
-}
-
-fn try_get_preloaded_partition(
- ops: *mut AvbOps,
- partition: *const c_char,
- num_bytes: usize,
- out_pointer: *mut *mut u8,
- out_num_bytes_preloaded: *mut usize,
-) -> utils::Result<()> {
- let ops = as_ref(ops)?;
- let partition = ops.as_ref().get_partition(partition)?;
- write(out_pointer, partition.as_ptr() as *mut u8)?;
- write(out_num_bytes_preloaded, partition.len().min(num_bytes))
-}
-
-extern "C" fn read_from_partition(
- ops: *mut AvbOps,
- partition: *const c_char,
- offset: i64,
- num_bytes: usize,
- buffer: *mut c_void,
- out_num_read: *mut usize,
-) -> AvbIOResult {
- result_to_io_enum(try_read_from_partition(
- ops,
- partition,
- offset,
- num_bytes,
- buffer,
- out_num_read,
- ))
-}
-
-fn try_read_from_partition(
- ops: *mut AvbOps,
- partition: *const c_char,
- offset: i64,
- num_bytes: usize,
- buffer: *mut c_void,
- out_num_read: *mut usize,
-) -> utils::Result<()> {
- let ops = as_ref(ops)?;
- let partition = ops.as_ref().get_partition(partition)?;
- let buffer = to_nonnull(buffer)?;
- // SAFETY: It is safe to copy the requested number of bytes to `buffer` as `buffer`
- // is created to point to the `num_bytes` of bytes in memory.
- let buffer_slice = unsafe { slice::from_raw_parts_mut(buffer.as_ptr() as *mut u8, num_bytes) };
- copy_data_to_dst(partition, offset, buffer_slice)?;
- write(out_num_read, buffer_slice.len())
-}
-
-fn copy_data_to_dst(src: &[u8], offset: i64, dst: &mut [u8]) -> utils::Result<()> {
- let start = to_copy_start(offset, src.len()).ok_or(avb::IoError::InvalidValueSize)?;
- let end = start.checked_add(dst.len()).ok_or(avb::IoError::InvalidValueSize)?;
- dst.copy_from_slice(src.get(start..end).ok_or(avb::IoError::RangeOutsidePartition)?);
+fn copy_data_to_dst(src: &[u8], offset: i64, dst: &mut [u8]) -> IoResult<()> {
+ let start = to_copy_start(offset, src.len()).ok_or(IoError::InvalidValueSize)?;
+ let end = start.checked_add(dst.len()).ok_or(IoError::InvalidValueSize)?;
+ dst.copy_from_slice(src.get(start..end).ok_or(IoError::RangeOutsidePartition)?);
Ok(())
}
@@ -203,143 +154,3 @@
.ok()
.or_else(|| isize::try_from(offset).ok().and_then(|v| len.checked_add_signed(v)))
}
-
-extern "C" fn get_size_of_partition(
- ops: *mut AvbOps,
- partition: *const c_char,
- out_size_num_bytes: *mut u64,
-) -> AvbIOResult {
- result_to_io_enum(try_get_size_of_partition(ops, partition, out_size_num_bytes))
-}
-
-fn try_get_size_of_partition(
- ops: *mut AvbOps,
- partition: *const c_char,
- out_size_num_bytes: *mut u64,
-) -> utils::Result<()> {
- let ops = as_ref(ops)?;
- let partition = ops.as_ref().get_partition(partition)?;
- let partition_size =
- u64::try_from(partition.len()).map_err(|_| avb::IoError::InvalidValueSize)?;
- write(out_size_num_bytes, partition_size)
-}
-
-extern "C" fn read_rollback_index(
- _ops: *mut AvbOps,
- _rollback_index_location: usize,
- out_rollback_index: *mut u64,
-) -> AvbIOResult {
- // This method is used by `avb_slot_verify()` to read the stored_rollback_index at
- // rollback_index_location.
-
- // TODO(291213394) : Refine this comment once capability for rollback protection is defined.
- // pvmfw does not compare stored_rollback_index with rollback_index for Antirollback protection
- // Hence, we set `out_rollback_index` to 0 to ensure that the
- // rollback_index (including default: 0) is never smaller than it,
- // thus the rollback index check will pass.
- result_to_io_enum(write(out_rollback_index, 0))
-}
-
-extern "C" fn get_unique_guid_for_partition(
- _ops: *mut AvbOps,
- _partition: *const c_char,
- _guid_buf: *mut c_char,
- _guid_buf_size: usize,
-) -> AvbIOResult {
- // TODO(b/256148034): Check if it's possible to throw an error here instead of having
- // an empty method.
- // This method is required by `avb_slot_verify()`.
- AvbIOResult::AVB_IO_RESULT_OK
-}
-
-extern "C" fn validate_vbmeta_public_key(
- ops: *mut AvbOps,
- public_key_data: *const u8,
- public_key_length: usize,
- public_key_metadata: *const u8,
- public_key_metadata_length: usize,
- out_is_trusted: *mut bool,
-) -> AvbIOResult {
- result_to_io_enum(try_validate_vbmeta_public_key(
- ops,
- public_key_data,
- public_key_length,
- public_key_metadata,
- public_key_metadata_length,
- out_is_trusted,
- ))
-}
-
-fn try_validate_vbmeta_public_key(
- ops: *mut AvbOps,
- public_key_data: *const u8,
- public_key_length: usize,
- _public_key_metadata: *const u8,
- _public_key_metadata_length: usize,
- out_is_trusted: *mut bool,
-) -> utils::Result<()> {
- // The public key metadata is not used when we build the VBMeta.
- is_not_null(public_key_data)?;
- // SAFETY: It is safe to create a slice with the given pointer and length as
- // `public_key_data` is a valid pointer and it points to an array of length
- // `public_key_length`.
- let public_key = unsafe { slice::from_raw_parts(public_key_data, public_key_length) };
- let ops = as_ref(ops)?;
- let trusted_public_key = ops.as_ref().trusted_public_key;
- write(out_is_trusted, public_key == trusted_public_key)
-}
-
-pub(crate) struct AvbSlotVerifyDataWrap(*mut AvbSlotVerifyData);
-
-impl TryFrom<*mut AvbSlotVerifyData> for AvbSlotVerifyDataWrap {
- type Error = avb::SlotVerifyError;
-
- fn try_from(data: *mut AvbSlotVerifyData) -> Result<Self, Self::Error> {
- is_not_null(data).map_err(|_| avb::SlotVerifyError::Io)?;
- Ok(Self(data))
- }
-}
-
-impl Drop for AvbSlotVerifyDataWrap {
- fn drop(&mut self) {
- // SAFETY: This is safe because `self.0` is checked nonnull when the
- // instance is created. We can free this pointer when the instance is
- // no longer needed.
- unsafe {
- avb_slot_verify_data_free(self.0);
- }
- }
-}
-
-impl AsRef<AvbSlotVerifyData> for AvbSlotVerifyDataWrap {
- fn as_ref(&self) -> &AvbSlotVerifyData {
- // This is safe because `self.0` is checked nonnull when the instance is created.
- as_ref(self.0).unwrap()
- }
-}
-
-impl AvbSlotVerifyDataWrap {
- pub(crate) fn vbmeta_images(&self) -> Result<&[AvbVBMetaData], avb::SlotVerifyError> {
- let data = self.as_ref();
- is_not_null(data.vbmeta_images).map_err(|_| avb::SlotVerifyError::Io)?;
- let vbmeta_images =
- // SAFETY: It is safe as the raw pointer `data.vbmeta_images` is a nonnull pointer.
- unsafe { slice::from_raw_parts(data.vbmeta_images, data.num_vbmeta_images) };
- Ok(vbmeta_images)
- }
-
- pub(crate) fn loaded_partitions(&self) -> Result<&[AvbPartitionData], avb::SlotVerifyError> {
- let data = self.as_ref();
- is_not_null(data.loaded_partitions).map_err(|_| avb::SlotVerifyError::Io)?;
- let loaded_partitions =
- // SAFETY: It is safe as the raw pointer `data.loaded_partitions` is a nonnull pointer and
- // is guaranteed by libavb to point to a valid `AvbPartitionData` array as part of the
- // `AvbSlotVerifyData` struct.
- unsafe { slice::from_raw_parts(data.loaded_partitions, data.num_loaded_partitions) };
- Ok(loaded_partitions)
- }
-
- pub(crate) fn rollback_indexes(&self) -> &[u64] {
- &self.as_ref().rollback_indexes
- }
-}
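For context (not part of the patch): a minimal sketch of how the refactored, trait-based flow above is driven. The `kernel`, `initrd` and `trusted_public_key` bindings are illustrative; the real call sites are in verify.rs further below.

    // Build the in-memory payload and wrap it in the pvmfw `Ops`, which now
    // implements the safe `avb::Ops` trait instead of registering raw C callbacks.
    let payload = Payload::new(kernel, initrd, trusted_public_key);
    let mut ops = Ops::new(&payload);
    // libavb calls back into the trait methods (read_from_partition, etc.) and,
    // on success, returns owned verification data for the requested partition.
    let verify_data = ops.verify_partition(PartitionName::Kernel.as_cstr())?;
    let vbmeta_images = verify_data.vbmeta_data();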
diff --git a/pvmfw/avb/src/partition.rs b/pvmfw/avb/src/partition.rs
index ca450c9..c05a0ac 100644
--- a/pvmfw/avb/src/partition.rs
+++ b/pvmfw/avb/src/partition.rs
@@ -14,8 +14,8 @@
//! Struct and functions relating to well-known partition names.
-use crate::utils::is_not_null;
-use core::ffi::{c_char, CStr};
+use avb::IoError;
+use core::ffi::CStr;
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub(crate) enum PartitionName {
@@ -29,9 +29,9 @@
impl PartitionName {
pub(crate) const NUM_OF_KNOWN_PARTITIONS: usize = 3;
- const KERNEL_PARTITION_NAME: &[u8] = b"boot\0";
- const INITRD_NORMAL_PARTITION_NAME: &[u8] = b"initrd_normal\0";
- const INITRD_DEBUG_PARTITION_NAME: &[u8] = b"initrd_debug\0";
+ const KERNEL_PARTITION_NAME: &'static [u8] = b"boot\0";
+ const INITRD_NORMAL_PARTITION_NAME: &'static [u8] = b"initrd_normal\0";
+ const INITRD_DEBUG_PARTITION_NAME: &'static [u8] = b"initrd_debug\0";
pub(crate) fn as_cstr(&self) -> &CStr {
CStr::from_bytes_with_nul(self.as_bytes()).unwrap()
@@ -51,39 +51,28 @@
}
}
-impl TryFrom<*const c_char> for PartitionName {
- type Error = avb::IoError;
-
- fn try_from(partition_name: *const c_char) -> Result<Self, Self::Error> {
- is_not_null(partition_name)?;
- // SAFETY: It is safe as the raw pointer `partition_name` is a nonnull pointer.
- let partition_name = unsafe { CStr::from_ptr(partition_name) };
- partition_name.try_into()
- }
-}
-
impl TryFrom<&CStr> for PartitionName {
- type Error = avb::IoError;
+ type Error = IoError;
fn try_from(partition_name: &CStr) -> Result<Self, Self::Error> {
match partition_name.to_bytes_with_nul() {
Self::KERNEL_PARTITION_NAME => Ok(Self::Kernel),
Self::INITRD_NORMAL_PARTITION_NAME => Ok(Self::InitrdNormal),
Self::INITRD_DEBUG_PARTITION_NAME => Ok(Self::InitrdDebug),
- _ => Err(avb::IoError::NoSuchPartition),
+ _ => Err(IoError::NoSuchPartition),
}
}
}
impl TryFrom<&[u8]> for PartitionName {
- type Error = avb::IoError;
+ type Error = IoError;
fn try_from(non_null_terminated_name: &[u8]) -> Result<Self, Self::Error> {
match non_null_terminated_name {
x if x == Self::Kernel.as_non_null_terminated_bytes() => Ok(Self::Kernel),
x if x == Self::InitrdNormal.as_non_null_terminated_bytes() => Ok(Self::InitrdNormal),
x if x == Self::InitrdDebug.as_non_null_terminated_bytes() => Ok(Self::InitrdDebug),
- _ => Err(avb::IoError::NoSuchPartition),
+ _ => Err(IoError::NoSuchPartition),
}
}
}
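A hedged, test-style usage sketch of the `TryFrom<&CStr>` conversion above (assumes a `cstr!` macro is in scope, as elsewhere in this crate):

    assert_eq!(PartitionName::try_from(cstr!("boot")), Ok(PartitionName::Kernel));
    assert_eq!(PartitionName::try_from(cstr!("unknown")), Err(IoError::NoSuchPartition));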
diff --git a/pvmfw/avb/src/utils.rs b/pvmfw/avb/src/utils.rs
index f4f15e1..b4f099b 100644
--- a/pvmfw/avb/src/utils.rs
+++ b/pvmfw/avb/src/utils.rs
@@ -14,42 +14,20 @@
//! Common utility functions.
-use core::ptr::NonNull;
-use core::result;
+use avb::{IoError, IoResult};
-pub(crate) type Result<T> = result::Result<T, avb::IoError>;
-
-pub(crate) fn write<T>(ptr: *mut T, value: T) -> Result<()> {
- let ptr = to_nonnull(ptr)?;
- // SAFETY: It is safe as the raw pointer `ptr` is a non-null pointer.
- unsafe {
- *ptr.as_ptr() = value;
- }
- Ok(())
-}
-
-pub(crate) fn as_ref<'a, T>(ptr: *mut T) -> Result<&'a T> {
- let ptr = to_nonnull(ptr)?;
- // SAFETY: It is safe as the raw pointer `ptr` is a non-null pointer.
- unsafe { Ok(ptr.as_ref()) }
-}
-
-pub(crate) fn to_nonnull<T>(ptr: *mut T) -> Result<NonNull<T>> {
- NonNull::new(ptr).ok_or(avb::IoError::NoSuchValue)
-}
-
-pub(crate) fn is_not_null<T>(ptr: *const T) -> Result<()> {
+pub(crate) fn is_not_null<T>(ptr: *const T) -> IoResult<()> {
if ptr.is_null() {
- Err(avb::IoError::NoSuchValue)
+ Err(IoError::NoSuchValue)
} else {
Ok(())
}
}
-pub(crate) fn to_usize<T: TryInto<usize>>(num: T) -> Result<usize> {
- num.try_into().map_err(|_| avb::IoError::InvalidValueSize)
+pub(crate) fn to_usize<T: TryInto<usize>>(num: T) -> IoResult<usize> {
+ num.try_into().map_err(|_| IoError::InvalidValueSize)
}
-pub(crate) fn usize_checked_add(x: usize, y: usize) -> Result<usize> {
- x.checked_add(y).ok_or(avb::IoError::InvalidValueSize)
+pub(crate) fn usize_checked_add(x: usize, y: usize) -> IoResult<usize> {
+ x.checked_add(y).ok_or(IoError::InvalidValueSize)
}
diff --git a/pvmfw/avb/src/verify.rs b/pvmfw/avb/src/verify.rs
index 492d387..a85dbbb 100644
--- a/pvmfw/avb/src/verify.rs
+++ b/pvmfw/avb/src/verify.rs
@@ -20,10 +20,9 @@
use crate::PvmfwVerifyError;
use alloc::vec;
use alloc::vec::Vec;
-use avb_bindgen::{AvbPartitionData, AvbVBMetaData};
-use core::ffi::c_char;
+use avb::{PartitionData, SlotVerifyError, SlotVerifyNoDataResult, VbmetaData};
-// We use this for the rollback_index field if AvbSlotVerifyDataWrap has empty rollback_indexes
+// We use this for the rollback_index field if `SlotVerifyData` has empty rollback_indexes.
const DEFAULT_ROLLBACK_INDEX: u64 = 0;
/// Verified data returned when the payload verification succeeds.
@@ -69,9 +68,9 @@
}
impl Capability {
- const KEY: &[u8] = b"com.android.virt.cap";
- const REMOTE_ATTEST: &[u8] = b"remote_attest";
- const SECRETKEEPER_PROTECTION: &[u8] = b"secretkeeper_protection";
+ const KEY: &'static [u8] = b"com.android.virt.cap";
+ const REMOTE_ATTEST: &'static [u8] = b"remote_attest";
+ const SECRETKEEPER_PROTECTION: &'static [u8] = b"secretkeeper_protection";
const SEPARATOR: u8 = b'|';
fn get_capabilities(property_value: &[u8]) -> Result<Vec<Self>, PvmfwVerifyError> {
@@ -84,7 +83,7 @@
_ => return Err(PvmfwVerifyError::UnknownVbmetaProperty),
};
if res.contains(&cap) {
- return Err(avb::SlotVerifyError::InvalidMetadata.into());
+ return Err(SlotVerifyError::InvalidMetadata.into());
}
res.push(cap);
}
@@ -92,55 +91,51 @@
}
}
-fn verify_only_one_vbmeta_exists(
- vbmeta_images: &[AvbVBMetaData],
-) -> Result<(), avb::SlotVerifyError> {
- if vbmeta_images.len() == 1 {
+fn verify_only_one_vbmeta_exists(vbmeta_data: &[VbmetaData]) -> SlotVerifyNoDataResult<()> {
+ if vbmeta_data.len() == 1 {
Ok(())
} else {
- Err(avb::SlotVerifyError::InvalidMetadata)
+ Err(SlotVerifyError::InvalidMetadata)
}
}
-fn verify_vbmeta_is_from_kernel_partition(
- vbmeta_image: &AvbVBMetaData,
-) -> Result<(), avb::SlotVerifyError> {
- match (vbmeta_image.partition_name as *const c_char).try_into() {
+fn verify_vbmeta_is_from_kernel_partition(vbmeta_image: &VbmetaData) -> SlotVerifyNoDataResult<()> {
+ match vbmeta_image.partition_name().try_into() {
Ok(PartitionName::Kernel) => Ok(()),
- _ => Err(avb::SlotVerifyError::InvalidMetadata),
+ _ => Err(SlotVerifyError::InvalidMetadata),
}
}
fn verify_vbmeta_has_only_one_hash_descriptor(
descriptors: &Descriptors,
-) -> Result<(), avb::SlotVerifyError> {
+) -> SlotVerifyNoDataResult<()> {
if descriptors.num_hash_descriptor() == 1 {
Ok(())
} else {
- Err(avb::SlotVerifyError::InvalidMetadata)
+ Err(SlotVerifyError::InvalidMetadata)
}
}
fn verify_loaded_partition_has_expected_length(
- loaded_partitions: &[AvbPartitionData],
+ loaded_partitions: &[PartitionData],
partition_name: PartitionName,
expected_len: usize,
-) -> Result<(), avb::SlotVerifyError> {
+) -> SlotVerifyNoDataResult<()> {
if loaded_partitions.len() != 1 {
// Only one partition should be loaded in each verify result.
- return Err(avb::SlotVerifyError::Io);
+ return Err(SlotVerifyError::Io);
}
- let loaded_partition = loaded_partitions[0];
- if !PartitionName::try_from(loaded_partition.partition_name as *const c_char)
+ let loaded_partition = &loaded_partitions[0];
+ if !PartitionName::try_from(loaded_partition.partition_name())
.map_or(false, |p| p == partition_name)
{
// Only the requested partition should be loaded.
- return Err(avb::SlotVerifyError::Io);
+ return Err(SlotVerifyError::Io);
}
- if loaded_partition.data_size == expected_len {
+ if loaded_partition.data().len() == expected_len {
Ok(())
} else {
- Err(avb::SlotVerifyError::Verification)
+ Err(SlotVerifyError::Verification(None))
}
}
@@ -158,28 +153,40 @@
.and_then(Capability::get_capabilities)
}
+/// Verifies the given initrd partition and checks that the loaded contents have the expected length.
+fn verify_initrd(
+ ops: &mut Ops,
+ partition_name: PartitionName,
+ expected_initrd: &[u8],
+) -> SlotVerifyNoDataResult<()> {
+ let result =
+ ops.verify_partition(partition_name.as_cstr()).map_err(|e| e.without_verify_data())?;
+ verify_loaded_partition_has_expected_length(
+ result.partition_data(),
+ partition_name,
+ expected_initrd.len(),
+ )
+}
+
/// Verifies the payload (signed kernel + initrd) against the trusted public key.
pub fn verify_payload<'a>(
kernel: &[u8],
initrd: Option<&[u8]>,
trusted_public_key: &'a [u8],
) -> Result<VerifiedBootData<'a>, PvmfwVerifyError> {
- let mut payload = Payload::new(kernel, initrd, trusted_public_key);
- let mut ops = Ops::from(&mut payload);
+ let payload = Payload::new(kernel, initrd, trusted_public_key);
+ let mut ops = Ops::new(&payload);
let kernel_verify_result = ops.verify_partition(PartitionName::Kernel.as_cstr())?;
- let vbmeta_images = kernel_verify_result.vbmeta_images()?;
+ let vbmeta_images = kernel_verify_result.vbmeta_data();
// TODO(b/302093437): Use explicit rollback_index_location instead of default
// location (first element).
let rollback_index =
*kernel_verify_result.rollback_indexes().first().unwrap_or(&DEFAULT_ROLLBACK_INDEX);
verify_only_one_vbmeta_exists(vbmeta_images)?;
- let vbmeta_image = vbmeta_images[0];
- verify_vbmeta_is_from_kernel_partition(&vbmeta_image)?;
- // SAFETY: It is safe because the `vbmeta_image` is collected from `AvbSlotVerifyData`,
- // which is returned by `avb_slot_verify()` when the verification succeeds. It is
- // guaranteed by libavb to be non-null and to point to a valid VBMeta structure.
- let descriptors = unsafe { Descriptors::from_vbmeta(vbmeta_image)? };
+ let vbmeta_image = &vbmeta_images[0];
+ verify_vbmeta_is_from_kernel_partition(vbmeta_image)?;
+ let descriptors = Descriptors::from_vbmeta(vbmeta_image)?;
let capabilities = verify_property_and_get_capabilities(&descriptors)?;
let kernel_descriptor = descriptors.find_hash_descriptor(PartitionName::Kernel)?;
@@ -196,20 +203,15 @@
}
let initrd = initrd.unwrap();
- let (debug_level, initrd_verify_result, initrd_partition_name) =
- if let Ok(result) = ops.verify_partition(PartitionName::InitrdNormal.as_cstr()) {
- (DebugLevel::None, result, PartitionName::InitrdNormal)
- } else if let Ok(result) = ops.verify_partition(PartitionName::InitrdDebug.as_cstr()) {
- (DebugLevel::Full, result, PartitionName::InitrdDebug)
+ let mut initrd_ops = Ops::new(&payload);
+ let (debug_level, initrd_partition_name) =
+ if verify_initrd(&mut initrd_ops, PartitionName::InitrdNormal, initrd).is_ok() {
+ (DebugLevel::None, PartitionName::InitrdNormal)
+ } else if verify_initrd(&mut initrd_ops, PartitionName::InitrdDebug, initrd).is_ok() {
+ (DebugLevel::Full, PartitionName::InitrdDebug)
} else {
- return Err(avb::SlotVerifyError::Verification.into());
+ return Err(SlotVerifyError::Verification(None).into());
};
- let loaded_partitions = initrd_verify_result.loaded_partitions()?;
- verify_loaded_partition_has_expected_length(
- loaded_partitions,
- initrd_partition_name,
- initrd.len(),
- )?;
let initrd_descriptor = descriptors.find_hash_descriptor(initrd_partition_name)?;
Ok(VerifiedBootData {
debug_level,
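A hedged sketch of calling the public entry point, in the style of the tests that follow (the `kernel`/`initrd`/public-key buffers are placeholders):

    let data = verify_payload(&kernel, Some(&initrd), &trusted_public_key)
        .map_err(|e| anyhow!("verification failed: {e:?}"))?;
    // `data` carries, among other things, the debug level (normal vs. debuggable initrd)
    // and the rollback index read from the verified vbmeta.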
diff --git a/pvmfw/avb/tests/api_test.rs b/pvmfw/avb/tests/api_test.rs
index 6344433..6dc5a0a 100644
--- a/pvmfw/avb/tests/api_test.rs
+++ b/pvmfw/avb/tests/api_test.rs
@@ -17,6 +17,7 @@
mod utils;
use anyhow::{anyhow, Result};
+use avb::{IoError, SlotVerifyError};
use avb_bindgen::{AvbFooter, AvbVBMetaImageHeader};
use pvmfw_avb::{verify_payload, Capability, DebugLevel, PvmfwVerifyError, VerifiedBootData};
use std::{fs, mem::size_of, ptr};
@@ -87,7 +88,7 @@
&fs::read(TEST_IMG_WITH_NON_INITRD_HASHDESC_PATH)?,
/* initrd= */ None,
&load_trusted_public_key()?,
- PvmfwVerifyError::InvalidDescriptors(avb::IoError::NoSuchPartition),
+ PvmfwVerifyError::InvalidDescriptors(IoError::NoSuchPartition),
)
}
@@ -97,7 +98,7 @@
&fs::read(TEST_IMG_WITH_INITRD_AND_NON_INITRD_DESC_PATH)?,
&load_latest_initrd_normal()?,
&load_trusted_public_key()?,
- PvmfwVerifyError::InvalidDescriptors(avb::IoError::NoSuchPartition),
+ PvmfwVerifyError::InvalidDescriptors(IoError::NoSuchPartition),
)
}
@@ -141,7 +142,7 @@
&fs::read(TEST_IMG_WITH_MULTIPLE_PROPS_PATH)?,
/* initrd= */ None,
&load_trusted_public_key()?,
- PvmfwVerifyError::InvalidDescriptors(avb::IoError::Io),
+ PvmfwVerifyError::InvalidDescriptors(IoError::Io),
)
}
@@ -151,7 +152,7 @@
&fs::read(TEST_IMG_WITH_DUPLICATED_CAP_PATH)?,
/* initrd= */ None,
&load_trusted_public_key()?,
- avb::SlotVerifyError::InvalidMetadata.into(),
+ SlotVerifyError::InvalidMetadata.into(),
)
}
@@ -171,7 +172,7 @@
&load_latest_signed_kernel()?,
/* initrd= */ None,
&load_trusted_public_key()?,
- avb::SlotVerifyError::InvalidMetadata.into(),
+ SlotVerifyError::InvalidMetadata.into(),
)
}
@@ -181,7 +182,7 @@
&load_latest_signed_kernel()?,
&load_latest_initrd_normal()?,
/* trusted_public_key= */ &[0u8; 0],
- avb::SlotVerifyError::PublicKeyRejected.into(),
+ SlotVerifyError::PublicKeyRejected.into(),
)
}
@@ -191,7 +192,7 @@
&load_latest_signed_kernel()?,
&load_latest_initrd_normal()?,
/* trusted_public_key= */ &[0u8; 512],
- avb::SlotVerifyError::PublicKeyRejected.into(),
+ SlotVerifyError::PublicKeyRejected.into(),
)
}
@@ -201,7 +202,7 @@
&load_latest_signed_kernel()?,
&load_latest_initrd_normal()?,
&fs::read(PUBLIC_KEY_RSA2048_PATH)?,
- avb::SlotVerifyError::PublicKeyRejected.into(),
+ SlotVerifyError::PublicKeyRejected.into(),
)
}
@@ -211,7 +212,7 @@
&load_latest_signed_kernel()?,
/* initrd= */ &fs::read(UNSIGNED_TEST_IMG_PATH)?,
&load_trusted_public_key()?,
- avb::SlotVerifyError::Verification.into(),
+ SlotVerifyError::Verification(None).into(),
)
}
@@ -221,7 +222,7 @@
&fs::read(UNSIGNED_TEST_IMG_PATH)?,
&load_latest_initrd_normal()?,
&load_trusted_public_key()?,
- avb::SlotVerifyError::Io.into(),
+ SlotVerifyError::Io.into(),
)
}
@@ -234,7 +235,7 @@
&kernel,
&load_latest_initrd_normal()?,
&load_trusted_public_key()?,
- avb::SlotVerifyError::Verification.into(),
+ SlotVerifyError::Verification(None).into(),
)
}
@@ -272,7 +273,7 @@
&kernel,
&load_latest_initrd_normal()?,
&load_trusted_public_key()?,
- avb::SlotVerifyError::Io.into(),
+ SlotVerifyError::Io.into(),
)?;
}
Ok(())
@@ -288,7 +289,7 @@
&kernel,
&load_latest_initrd_normal()?,
&load_trusted_public_key()?,
- avb::SlotVerifyError::InvalidMetadata.into(),
+ SlotVerifyError::InvalidMetadata.into(),
)
}
@@ -301,7 +302,7 @@
&load_latest_signed_kernel()?,
&initrd,
&load_trusted_public_key()?,
- avb::SlotVerifyError::Verification.into(),
+ SlotVerifyError::Verification(None).into(),
)
}
@@ -317,7 +318,7 @@
&kernel,
&load_latest_initrd_normal()?,
&load_trusted_public_key()?,
- avb::SlotVerifyError::InvalidMetadata.into(),
+ SlotVerifyError::InvalidMetadata.into(),
)
}
@@ -340,13 +341,13 @@
&kernel,
&load_latest_initrd_normal()?,
&empty_public_key,
- avb::SlotVerifyError::Verification.into(),
+ SlotVerifyError::Verification(None).into(),
)?;
assert_payload_verification_with_initrd_fails(
&kernel,
&load_latest_initrd_normal()?,
&load_trusted_public_key()?,
- avb::SlotVerifyError::Verification.into(),
+ SlotVerifyError::Verification(None).into(),
)
}
@@ -384,7 +385,7 @@
&kernel,
&load_latest_initrd_normal()?,
&load_trusted_public_key()?,
- avb::SlotVerifyError::Verification.into(),
+ SlotVerifyError::Verification(None).into(),
)
}
diff --git a/pvmfw/platform.dts b/pvmfw/platform.dts
index cb8e30d..9abc123 100644
--- a/pvmfw/platform.dts
+++ b/pvmfw/platform.dts
@@ -261,4 +261,64 @@
clock-frequency = <10>;
timeout-sec = <8>;
};
+
+ pviommu_0: pviommu0 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_1: pviommu1 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_2: pviommu2 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_3: pviommu3 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_4: pviommu4 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_5: pviommu5 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_6: pviommu6 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_7: pviommu7 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_8: pviommu8 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_9: pviommu9 {
+ compatible = "pkvm,pviommu";
+ id = <PLACEHOLDER>;
+ #iommu-cells = <1>;
+ };
};
diff --git a/pvmfw/src/bootargs.rs b/pvmfw/src/bootargs.rs
index a089a67..aacd8e0 100644
--- a/pvmfw/src/bootargs.rs
+++ b/pvmfw/src/bootargs.rs
@@ -108,19 +108,7 @@
#[cfg(test)]
mod tests {
use super::*;
-
- // TODO(b/308694211): Use cstr!() from vmbase
- macro_rules! cstr {
- ($str:literal) => {{
- const S: &str = concat!($str, "\0");
- const C: &::core::ffi::CStr = match ::core::ffi::CStr::from_bytes_with_nul(S.as_bytes())
- {
- Ok(v) => v,
- Err(_) => panic!("string contains interior NUL"),
- };
- C
- }};
- }
+ use cstr::cstr;
fn check(raw: &CStr, expected: Result<&[(&str, Option<&str>)], ()>) {
let actual = BootArgsIterator::new(raw);
diff --git a/pvmfw/src/config.rs b/pvmfw/src/config.rs
index 7023b95..3f78a88 100644
--- a/pvmfw/src/config.rs
+++ b/pvmfw/src/config.rs
@@ -19,11 +19,10 @@
use core::num::NonZeroUsize;
use core::ops::Range;
use core::result;
-use core::slice;
use log::{info, warn};
use static_assertions::const_assert_eq;
use vmbase::util::RangeExt;
-use zerocopy::{FromBytes, FromZeroes, LayoutVerified};
+use zerocopy::{FromBytes, FromZeroes};
/// Configuration data header.
#[repr(C, packed)]
@@ -129,6 +128,15 @@
impl Entry {
const COUNT: usize = Self::_VARIANT_COUNT as usize;
+
+ const ALL_ENTRIES: [Entry; Self::COUNT] = [Self::Bcc, Self::DebugPolicy, Self::VmDtbo];
+}
+
+#[derive(Default)]
+pub struct Entries<'a> {
+ pub bcc: &'a mut [u8],
+ pub debug_policy: Option<&'a [u8]>,
+ pub vm_dtbo: Option<&'a mut [u8]>,
}
#[repr(packed)]
@@ -196,7 +204,7 @@
}
let (header, rest) =
- LayoutVerified::<_, Header>::new_from_prefix(bytes).ok_or(Error::HeaderMisaligned)?;
+ zerocopy::Ref::<_, Header>::new_from_prefix(bytes).ok_or(Error::HeaderMisaligned)?;
let header = header.into_ref();
if header.magic != Header::MAGIC {
@@ -223,7 +231,7 @@
};
let (header_entries, body) =
- LayoutVerified::<_, [HeaderEntry]>::new_slice_from_prefix(rest, header.entry_count()?)
+ zerocopy::Ref::<_, [HeaderEntry]>::new_slice_from_prefix(rest, header.entry_count()?)
.ok_or(Error::BufferTooSmall)?;
// Validate that we won't get an invalid alignment in the following due to padding to u64.
@@ -233,7 +241,7 @@
let limits = header.body_lowest_bound()?..total_size;
let mut ranges: [Option<NonEmptyRange>; Entry::COUNT] = [None; Entry::COUNT];
let mut last_end = 0;
- for entry in [Entry::Bcc, Entry::DebugPolicy, Entry::VmDtbo] {
+ for entry in Entry::ALL_ENTRIES {
let Some(header_entry) = header_entries.get(entry as usize) else { continue };
let entry_offset = header_entry.offset.try_into().unwrap();
let entry_size = header_entry.size.try_into().unwrap();
@@ -259,35 +267,31 @@
Ok(Self { body, ranges })
}
- /// Get slice containing the platform BCC.
- pub fn get_entries(&mut self) -> (&mut [u8], Option<&mut [u8]>, Option<&mut [u8]>) {
- // This assumes that the blobs are in-order w.r.t. the entries.
- let bcc_range = self.get_entry_range(Entry::Bcc);
- let dp_range = self.get_entry_range(Entry::DebugPolicy);
- let vm_dtbo_range = self.get_entry_range(Entry::VmDtbo);
- // TODO(b/291191157): Provision device assignment with this.
- if let Some(vm_dtbo_range) = vm_dtbo_range {
- info!("Found VM DTBO at {:?}", vm_dtbo_range);
+ /// Locate the various config entries.
+ pub fn get_entries(self) -> Entries<'a> {
+        // We require the blobs to be in the same order as the `Entry` enum (this is checked in
+        // `new` above), so we can just work through the body range and split off the parts we
+        // are interested in.
+ let mut offset = 0;
+ let mut body = self.body;
+
+ let mut entries: [Option<&mut [u8]>; Entry::COUNT] = Default::default();
+ for (i, range) in self.ranges.iter().enumerate() {
+ if let Some(range) = range {
+ body = &mut body[range.start - offset..];
+ let (chunk, rest) = body.split_at_mut(range.len());
+ offset = range.end();
+ body = rest;
+ entries[i] = Some(chunk);
+ }
}
+ let [bcc, debug_policy, vm_dtbo] = entries;
- // SAFETY: When instantiate, ranges are validated to be in the body range without
- // overlapping.
- unsafe {
- let ptr = self.body.as_mut_ptr() as usize;
- (
- Self::from_raw_range_mut(ptr, bcc_range.unwrap()),
- dp_range.map(|dp_range| Self::from_raw_range_mut(ptr, dp_range)),
- vm_dtbo_range.map(|vm_dtbo_range| Self::from_raw_range_mut(ptr, vm_dtbo_range)),
- )
- }
- }
+ // The platform BCC has always been required.
+ let bcc = bcc.unwrap();
- fn get_entry_range(&self, entry: Entry) -> Option<NonEmptyRange> {
- self.ranges[entry as usize]
- }
-
- unsafe fn from_raw_range_mut(ptr: usize, range: NonEmptyRange) -> &'a mut [u8] {
- // SAFETY: The caller must ensure that the range is valid from ptr.
- unsafe { slice::from_raw_parts_mut((ptr + range.start) as *mut u8, range.end()) }
+        // We have no reason to mutate it, so drop the `mut`.
+ let debug_policy = debug_policy.map(|x| &*x);
+ Entries { bcc, debug_policy, vm_dtbo }
}
}
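A brief usage sketch of the new `Entries` struct above (the `config` binding is illustrative):

    // `get_entries()` now consumes the config and hands out disjoint sub-slices.
    let Entries { bcc, debug_policy, vm_dtbo } = config.get_entries();
    // `bcc` is always present; `debug_policy` is read-only; `vm_dtbo` stays mutable
    // so it can later be filtered and patched for device assignment.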
diff --git a/pvmfw/src/crypto.rs b/pvmfw/src/crypto.rs
deleted file mode 100644
index 2b3d921..0000000
--- a/pvmfw/src/crypto.rs
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2023, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Wrapper around BoringSSL/OpenSSL symbols.
-
-use core::convert::AsRef;
-use core::ffi::{c_char, c_int, CStr};
-use core::fmt;
-use core::mem::MaybeUninit;
-use core::num::NonZeroU32;
-use core::ptr;
-
-use bssl_ffi::CRYPTO_library_init;
-use bssl_ffi::ERR_get_error_line;
-use bssl_ffi::ERR_lib_error_string;
-use bssl_ffi::ERR_reason_error_string;
-use bssl_ffi::EVP_AEAD_CTX_aead;
-use bssl_ffi::EVP_AEAD_CTX_init;
-use bssl_ffi::EVP_AEAD_CTX_open;
-use bssl_ffi::EVP_AEAD_CTX_seal;
-use bssl_ffi::EVP_AEAD_max_overhead;
-use bssl_ffi::EVP_aead_aes_256_gcm_randnonce;
-use bssl_ffi::EVP_AEAD;
-use bssl_ffi::EVP_AEAD_CTX;
-use vmbase::cstr;
-
-#[derive(Debug)]
-pub struct Error {
- packed: NonZeroU32,
- file: Option<&'static CStr>,
- line: c_int,
-}
-
-impl Error {
- fn get() -> Option<Self> {
- let mut file = ptr::null();
- let mut line = 0;
- // SAFETY: The function writes to the provided pointers, which are valid because they come
- // from references. It doesn't retain them after it returns.
- let packed = unsafe { ERR_get_error_line(&mut file, &mut line) };
-
- let packed = packed.try_into().ok()?;
- // SAFETY: Any non-NULL result is expected to point to a global const C string.
- let file = unsafe { as_static_cstr(file) };
-
- Some(Self { packed, file, line })
- }
-
- fn packed_value(&self) -> u32 {
- self.packed.get()
- }
-
- fn library_name(&self) -> Option<&'static CStr> {
- // SAFETY: Call to a pure function.
- let name = unsafe { ERR_lib_error_string(self.packed_value()) };
- // SAFETY: Any non-NULL result is expected to point to a global const C string.
- unsafe { as_static_cstr(name) }
- }
-
- fn reason(&self) -> Option<&'static CStr> {
- // SAFETY: Call to a pure function.
- let reason = unsafe { ERR_reason_error_string(self.packed_value()) };
- // SAFETY: Any non-NULL result is expected to point to a global const C string.
- unsafe { as_static_cstr(reason) }
- }
-}
-
-impl fmt::Display for Error {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let packed = self.packed_value();
- let library = self.library_name().unwrap_or(cstr!("{unknown library}")).to_str().unwrap();
- let reason = self.reason().unwrap_or(cstr!("{unknown reason}")).to_str().unwrap();
- let file = self.file.unwrap_or(cstr!("??")).to_str().unwrap();
- let line = self.line;
-
- write!(f, "{file}:{line}: {library}: {reason} ({packed:#x})")
- }
-}
-
-#[derive(Copy, Clone)]
-pub struct ErrorIterator {}
-
-impl Iterator for ErrorIterator {
- type Item = Error;
-
- fn next(&mut self) -> Option<Self::Item> {
- Self::Item::get()
- }
-}
-
-pub type Result<T> = core::result::Result<T, ErrorIterator>;
-
-#[repr(transparent)]
-pub struct Aead(EVP_AEAD);
-
-impl Aead {
- pub fn aes_256_gcm_randnonce() -> Option<&'static Self> {
- // SAFETY: Returned pointer is checked below.
- let aead = unsafe { EVP_aead_aes_256_gcm_randnonce() };
- if aead.is_null() {
- None
- } else {
- // SAFETY: We assume that the non-NULL value points to a valid and static EVP_AEAD.
- Some(unsafe { &*(aead as *const _) })
- }
- }
-
- pub fn max_overhead(&self) -> usize {
- // SAFETY: Function should only read from self.
- unsafe { EVP_AEAD_max_overhead(self.as_ref() as *const _) }
- }
-}
-
-#[repr(transparent)]
-pub struct AeadCtx(EVP_AEAD_CTX);
-
-impl AeadCtx {
- pub fn new_aes_256_gcm_randnonce(key: &[u8]) -> Result<Self> {
- let aead = Aead::aes_256_gcm_randnonce().unwrap();
-
- Self::new(aead, key)
- }
-
- fn new(aead: &'static Aead, key: &[u8]) -> Result<Self> {
- const DEFAULT_TAG_LENGTH: usize = 0;
- let engine = ptr::null_mut(); // Use default implementation.
- let mut ctx = MaybeUninit::zeroed();
- // SAFETY: Initialize the EVP_AEAD_CTX with const pointers to the AEAD and key.
- let result = unsafe {
- EVP_AEAD_CTX_init(
- ctx.as_mut_ptr(),
- aead.as_ref() as *const _,
- key.as_ptr(),
- key.len(),
- DEFAULT_TAG_LENGTH,
- engine,
- )
- };
-
- if result == 1 {
- // SAFETY: We assume that the non-NULL value points to a valid and static EVP_AEAD.
- Ok(Self(unsafe { ctx.assume_init() }))
- } else {
- Err(ErrorIterator {})
- }
- }
-
- pub fn aead(&self) -> Option<&'static Aead> {
- // SAFETY: The function should only read from self.
- let aead = unsafe { EVP_AEAD_CTX_aead(self.as_ref() as *const _) };
- if aead.is_null() {
- None
- } else {
- // SAFETY: We assume that the non-NULL value points to a valid and static EVP_AEAD.
- Some(unsafe { &*(aead as *const _) })
- }
- }
-
- pub fn open<'b>(&self, out: &'b mut [u8], data: &[u8]) -> Result<&'b mut [u8]> {
- let nonce = ptr::null_mut();
- let nonce_len = 0;
- let ad = ptr::null_mut();
- let ad_len = 0;
- let mut out_len = MaybeUninit::uninit();
- // SAFETY: The function should only read from self and write to out (at most the provided
- // number of bytes) and out_len while reading from data (at most the provided number of
- // bytes), ignoring any NULL input.
- let result = unsafe {
- EVP_AEAD_CTX_open(
- self.as_ref() as *const _,
- out.as_mut_ptr(),
- out_len.as_mut_ptr(),
- out.len(),
- nonce,
- nonce_len,
- data.as_ptr(),
- data.len(),
- ad,
- ad_len,
- )
- };
-
- if result == 1 {
- // SAFETY: Any value written to out_len could be a valid usize. The value itself is
- // validated as being a proper slice length by panicking in the following indexing
- // otherwise.
- let out_len = unsafe { out_len.assume_init() };
- Ok(&mut out[..out_len])
- } else {
- Err(ErrorIterator {})
- }
- }
-
- pub fn seal<'b>(&self, out: &'b mut [u8], data: &[u8]) -> Result<&'b mut [u8]> {
- let nonce = ptr::null_mut();
- let nonce_len = 0;
- let ad = ptr::null_mut();
- let ad_len = 0;
- let mut out_len = MaybeUninit::uninit();
- // SAFETY: The function should only read from self and write to out (at most the provided
- // number of bytes) while reading from data (at most the provided number of bytes),
- // ignoring any NULL input.
- let result = unsafe {
- EVP_AEAD_CTX_seal(
- self.as_ref() as *const _,
- out.as_mut_ptr(),
- out_len.as_mut_ptr(),
- out.len(),
- nonce,
- nonce_len,
- data.as_ptr(),
- data.len(),
- ad,
- ad_len,
- )
- };
-
- if result == 1 {
- // SAFETY: Any value written to out_len could be a valid usize. The value itself is
- // validated as being a proper slice length by panicking in the following indexing
- // otherwise.
- let out_len = unsafe { out_len.assume_init() };
- Ok(&mut out[..out_len])
- } else {
- Err(ErrorIterator {})
- }
- }
-}
-
-/// Cast a C string pointer to a static non-mutable reference.
-///
-/// # Safety
-///
-/// The caller needs to ensure that the pointer is null or points to a valid C string and that the
-/// C lifetime of the string is compatible with a static Rust lifetime.
-unsafe fn as_static_cstr(p: *const c_char) -> Option<&'static CStr> {
- if p.is_null() {
- None
- } else {
- // Safety: Safe given the requirements of this function.
- Some(unsafe { CStr::from_ptr(p) })
- }
-}
-
-impl AsRef<EVP_AEAD> for Aead {
- fn as_ref(&self) -> &EVP_AEAD {
- &self.0
- }
-}
-
-impl AsRef<EVP_AEAD_CTX> for AeadCtx {
- fn as_ref(&self) -> &EVP_AEAD_CTX {
- &self.0
- }
-}
-
-pub fn init() {
- // SAFETY: Configures the internal state of the library - may be called multiple times.
- unsafe { CRYPTO_library_init() }
-}
diff --git a/pvmfw/src/device_assignment.rs b/pvmfw/src/device_assignment.rs
index 7eae09f..1b0d8cf 100644
--- a/pvmfw/src/device_assignment.rs
+++ b/pvmfw/src/device_assignment.rs
@@ -19,6 +19,7 @@
#[cfg(test)]
extern crate alloc;
+use alloc::collections::{BTreeMap, BTreeSet};
use alloc::ffi::CString;
use alloc::fmt;
use alloc::vec;
@@ -26,7 +27,9 @@
use core::ffi::CStr;
use core::iter::Iterator;
use core::mem;
-use libfdt::{Fdt, FdtError, FdtNode};
+use hyp::DeviceAssigningHypervisor;
+use libfdt::{Fdt, FdtError, FdtNode, Phandle, Reg};
+use log::error;
// TODO(b/308694211): Use cstr! from vmbase instead.
macro_rules! cstr {
@@ -46,14 +49,26 @@
/// Errors in device assignment.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum DeviceAssignmentError {
- // Invalid VM DTBO
+ /// Invalid VM DTBO
InvalidDtbo,
/// Invalid __symbols__
InvalidSymbols,
+ /// Invalid <reg>
+ InvalidReg,
/// Invalid <interrupts>
InvalidInterrupts,
+ /// Invalid <iommus>
+ InvalidIommus,
+ /// Invalid pvIOMMU node
+ InvalidPvIommu,
+    /// Too many pvIOMMU nodes
+ TooManyPvIommu,
+ /// Duplicated pvIOMMU IDs exist
+ DuplicatedPvIommuIds,
/// Unsupported overlay target syntax. Only supports <target-path> with full path.
UnsupportedOverlayTarget,
+ /// Internal error
+ Internal,
/// Unexpected error from libfdt
UnexpectedFdtError(FdtError),
}
@@ -72,10 +87,21 @@
f,
"Invalid property in /__symbols__. Must point to valid assignable device node."
),
+ Self::InvalidReg => write!(f, "Invalid <reg>"),
Self::InvalidInterrupts => write!(f, "Invalid <interrupts>"),
+ Self::InvalidIommus => write!(f, "Invalid <iommus>"),
+ Self::InvalidPvIommu => write!(f, "Invalid pvIOMMU node"),
+ Self::TooManyPvIommu => write!(
+ f,
+ "Too many pvIOMMU node. Insufficient pre-populated pvIOMMUs in platform DT"
+ ),
+ Self::DuplicatedPvIommuIds => {
+ write!(f, "Duplicated pvIOMMU IDs exist. IDs must unique")
+ }
Self::UnsupportedOverlayTarget => {
write!(f, "Unsupported overlay target. Only supports 'target-path = \"/\"'")
}
+ Self::Internal => write!(f, "Internal error"),
Self::UnexpectedFdtError(e) => write!(f, "Unexpected Error from libfdt: {e}"),
}
}
@@ -157,6 +183,15 @@
}
}
+fn is_overlayable_node(dtbo_path: &CStr) -> bool {
+ dtbo_path
+ .to_bytes()
+ .split(|char| *char == b'/')
+ .filter(|&component| !component.is_empty())
+ .nth(1)
+ .map_or(false, |name| name == b"__overlay__")
+}
+
impl AsRef<Fdt> for VmDtbo {
fn as_ref(&self) -> &Fdt {
&self.0
@@ -169,6 +204,60 @@
}
}
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
+struct PvIommu {
+ // ID from pvIOMMU node
+ id: u32,
+}
+
+impl PvIommu {
+ fn parse(node: &FdtNode) -> Result<Self> {
+ let iommu_cells = node
+ .getprop_u32(cstr!("#iommu-cells"))?
+ .ok_or(DeviceAssignmentError::InvalidPvIommu)?;
+        // Ensures that `#iommu-cells == 1`, meaning that each `<iommus>` entry contains a
+        // (pvIOMMU ID, vSID) pair.
+ if iommu_cells != 1 {
+ return Err(DeviceAssignmentError::InvalidPvIommu);
+ }
+ let id = node.getprop_u32(cstr!("id"))?.ok_or(DeviceAssignmentError::InvalidPvIommu)?;
+ Ok(Self { id })
+ }
+}
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
+struct Vsid(u32);
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+struct DeviceReg {
+ addr: u64,
+ size: u64,
+}
+
+impl TryFrom<Reg<u64>> for DeviceReg {
+ type Error = DeviceAssignmentError;
+
+ fn try_from(reg: Reg<u64>) -> Result<Self> {
+ Ok(Self { addr: reg.addr, size: reg.size.ok_or(DeviceAssignmentError::InvalidReg)? })
+ }
+}
+
+fn parse_node_reg(node: &FdtNode) -> Result<Vec<DeviceReg>> {
+ node.reg()?
+ .ok_or(DeviceAssignmentError::InvalidReg)?
+ .map(DeviceReg::try_from)
+ .collect::<Result<Vec<_>>>()
+}
+
+fn to_be_bytes(reg: &[DeviceReg]) -> Vec<u8> {
+ let mut reg_cells = vec![];
+ for x in reg {
+ reg_cells.extend_from_slice(&x.addr.to_be_bytes());
+ reg_cells.extend_from_slice(&x.size.to_be_bytes());
+ }
+ reg_cells
+}
+
/// Assigned device information parsed from crosvm DT.
/// Keeps everything in the owned data because underlying FDT will be reused for platform DT.
#[derive(Debug, Eq, PartialEq)]
@@ -178,12 +267,30 @@
// DTBO node path of the assigned device (e.g. "/fragment@rng/__overlay__/rng")
dtbo_node_path: CString,
// <reg> property from the crosvm DT
- reg: Vec<u8>,
+ reg: Vec<DeviceReg>,
// <interrupts> property from the crosvm DT
interrupts: Vec<u8>,
+ // Parsed <iommus> property from the crosvm DT. Tuple of PvIommu and vSID.
+ iommus: Vec<(PvIommu, Vsid)>,
}
impl AssignedDeviceInfo {
+ fn parse_reg(
+ node: &FdtNode,
+ hypervisor: &dyn DeviceAssigningHypervisor,
+ ) -> Result<Vec<DeviceReg>> {
+ let device_reg = parse_node_reg(node)?;
+        // TODO(b/277993056): Validate the result back against the physical reg
+ for reg in &device_reg {
+ hypervisor.get_phys_mmio_token(reg.addr, reg.size).map_err(|e| {
+ let name = node.name();
+ error!("Failed to validate device <reg>, error={e:?}, name={name:?}, reg={reg:?}");
+ DeviceAssignmentError::InvalidReg
+ })?;
+ }
+ Ok(device_reg)
+ }
+
fn parse_interrupts(node: &FdtNode) -> Result<Vec<u8>> {
// Validation: Validate if interrupts cell numbers are multiple of #interrupt-cells.
// We can't know how many interrupts would exist.
@@ -199,70 +306,151 @@
Ok(node.getprop(cstr!("interrupts")).unwrap().unwrap().into())
}
- // TODO(b/277993056): Read and validate iommu
- fn parse(fdt: &Fdt, vm_dtbo: &VmDtbo, dtbo_node_path: &CStr) -> Result<Option<Self>> {
+    // TODO(b/277993056): Also validate /__local_fixups__ to ensure that <iommus> has a phandle.
+ fn parse_iommus(
+ node: &FdtNode,
+ pviommus: &BTreeMap<Phandle, PvIommu>,
+ hypervisor: &dyn DeviceAssigningHypervisor,
+ ) -> Result<Vec<(PvIommu, Vsid)>> {
+ let mut iommus = vec![];
+ let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? else {
+ return Ok(iommus);
+ };
+ while let Some(cell) = cells.next() {
+ // Parse pvIOMMU ID
+ let phandle = Phandle::try_from(cell).or(Err(DeviceAssignmentError::InvalidIommus))?;
+ let pviommu = pviommus.get(&phandle).ok_or(DeviceAssignmentError::InvalidIommus)?;
+
+ // Parse vSID
+ let Some(cell) = cells.next() else {
+ return Err(DeviceAssignmentError::InvalidIommus);
+ };
+ let vsid = Vsid(cell);
+
+            // TODO(b/277993056): Validate the result back against the phys IOMMU ID and SID.
+ hypervisor
+ .get_phys_iommu_token(pviommu.id.into(), vsid.0.into())
+ .map_err(|e| {
+ let name = node.name().unwrap_or_default();
+ error!("Failed to validate device <iommus>, error={e:?}, name={name:?}, pviommu={pviommu:?}, vsid={:?}", vsid.0);
+ DeviceAssignmentError::InvalidIommus
+ })?;
+
+ iommus.push((*pviommu, vsid));
+ }
+ Ok(iommus)
+ }
+
+ fn parse(
+ fdt: &Fdt,
+ vm_dtbo: &VmDtbo,
+ dtbo_node_path: &CStr,
+ pviommus: &BTreeMap<Phandle, PvIommu>,
+ hypervisor: &dyn DeviceAssigningHypervisor,
+ ) -> Result<Option<Self>> {
let node_path = vm_dtbo.locate_overlay_target_path(dtbo_node_path)?;
let Some(node) = fdt.node(&node_path)? else { return Ok(None) };
- // TODO(b/277993056): Validate reg with HVC, and keep reg with FdtNode::reg()
- let reg = node.getprop(cstr!("reg")).unwrap().unwrap();
-
+ let reg = Self::parse_reg(&node, hypervisor)?;
let interrupts = Self::parse_interrupts(&node)?;
-
- Ok(Some(Self {
- node_path,
- dtbo_node_path: dtbo_node_path.into(),
- reg: reg.to_vec(),
- interrupts: interrupts.to_vec(),
- }))
+ let iommus = Self::parse_iommus(&node, pviommus, hypervisor)?;
+ Ok(Some(Self { node_path, dtbo_node_path: dtbo_node_path.into(), reg, interrupts, iommus }))
}
- fn patch(&self, fdt: &mut Fdt) -> Result<()> {
+ fn patch(&self, fdt: &mut Fdt, pviommu_phandles: &BTreeMap<PvIommu, Phandle>) -> Result<()> {
let mut dst = fdt.node_mut(&self.node_path)?.unwrap();
- dst.setprop(cstr!("reg"), &self.reg)?;
+ dst.setprop(cstr!("reg"), &to_be_bytes(&self.reg))?;
dst.setprop(cstr!("interrupts"), &self.interrupts)?;
- // TODO(b/277993056): Read and patch iommu
+ let mut iommus = Vec::with_capacity(8 * self.iommus.len());
+ for (pviommu, vsid) in &self.iommus {
+ let phandle = pviommu_phandles.get(pviommu).unwrap();
+ iommus.extend_from_slice(&u32::from(*phandle).to_be_bytes());
+ iommus.extend_from_slice(&vsid.0.to_be_bytes());
+ }
+ dst.setprop(cstr!("iommus"), &iommus)?;
+
Ok(())
}
}
#[derive(Debug, Default, Eq, PartialEq)]
pub struct DeviceAssignmentInfo {
+ pviommus: BTreeSet<PvIommu>,
assigned_devices: Vec<AssignedDeviceInfo>,
filtered_dtbo_paths: Vec<CString>,
}
impl DeviceAssignmentInfo {
+ const PVIOMMU_COMPATIBLE: &CStr = cstr!("pkvm,pviommu");
+
+    /// Parses pvIOMMUs in the fdt
+    // Note: This will validate the pvIOMMU IDs' uniqueness, even when unassigned.
+ fn parse_pviommus(fdt: &Fdt) -> Result<BTreeMap<Phandle, PvIommu>> {
+ let mut pviommus = BTreeMap::new();
+ for compatible in fdt.compatible_nodes(Self::PVIOMMU_COMPATIBLE)? {
+ let Some(phandle) = compatible.get_phandle()? else {
+ continue; // Skips unreachable pvIOMMU node
+ };
+ let pviommu = PvIommu::parse(&compatible)?;
+ if pviommus.insert(phandle, pviommu).is_some() {
+ return Err(FdtError::BadPhandle.into());
+ }
+ }
+ Ok(pviommus)
+ }
+
/// Parses fdt and vm_dtbo, and creates new DeviceAssignmentInfo
// TODO(b/277993056): Parse __local_fixups__
// TODO(b/277993056): Parse __fixups__
- pub fn parse(fdt: &Fdt, vm_dtbo: &VmDtbo) -> Result<Option<Self>> {
+ pub fn parse(
+ fdt: &Fdt,
+ vm_dtbo: &VmDtbo,
+ hypervisor: &dyn DeviceAssigningHypervisor,
+ ) -> Result<Option<Self>> {
let Some(symbols_node) = vm_dtbo.as_ref().symbols()? else {
// /__symbols__ should contain all assignable devices.
// If empty, then nothing can be assigned.
return Ok(None);
};
+ let pviommus = Self::parse_pviommus(fdt)?;
+ let unique_pviommus: BTreeSet<_> = pviommus.values().cloned().collect();
+ if pviommus.len() != unique_pviommus.len() {
+ return Err(DeviceAssignmentError::DuplicatedPvIommuIds);
+ }
+
let mut assigned_devices = vec![];
let mut filtered_dtbo_paths = vec![];
for symbol_prop in symbols_node.properties()? {
let symbol_prop_value = symbol_prop.value()?;
let dtbo_node_path = CStr::from_bytes_with_nul(symbol_prop_value)
.or(Err(DeviceAssignmentError::InvalidSymbols))?;
- let assigned_device = AssignedDeviceInfo::parse(fdt, vm_dtbo, dtbo_node_path)?;
+ if !is_overlayable_node(dtbo_node_path) {
+ continue;
+ }
+ let assigned_device =
+ AssignedDeviceInfo::parse(fdt, vm_dtbo, dtbo_node_path, &pviommus, hypervisor)?;
if let Some(assigned_device) = assigned_device {
assigned_devices.push(assigned_device);
} else {
filtered_dtbo_paths.push(dtbo_node_path.into());
}
}
- filtered_dtbo_paths.push(CString::new("/__symbols__").unwrap());
-
if assigned_devices.is_empty() {
return Ok(None);
}
- Ok(Some(Self { assigned_devices, filtered_dtbo_paths }))
+
+        // Clean up any nodes that wouldn't be overlaid but may contain references to filtered nodes.
+ // Otherwise, `fdt_apply_overlay()` would fail because of missing phandle reference.
+ filtered_dtbo_paths.push(CString::new("/__symbols__").unwrap());
+ // TODO(b/277993056): Also filter other unused nodes/props in __local_fixups__
+ filtered_dtbo_paths.push(CString::new("/__local_fixups__/host").unwrap());
+
+ // Note: Any node without __overlay__ will be ignored by fdt_apply_overlay,
+        // so it doesn't need to be filtered.
+
+ Ok(Some(Self { pviommus: unique_pviommus, assigned_devices, filtered_dtbo_paths }))
}
/// Filters VM DTBO to only contain necessary information for booting pVM
@@ -281,25 +469,39 @@
node.nop()?;
}
- // Filters pvmfw-specific properties in assigned device node.
- const FILTERED_VM_DTBO_PROP: [&CStr; 3] = [
- cstr!("android,pvmfw,phy-reg"),
- cstr!("android,pvmfw,phy-iommu"),
- cstr!("android,pvmfw,phy-sid"),
- ];
- for assigned_device in &self.assigned_devices {
- let mut node = vm_dtbo.node_mut(&assigned_device.dtbo_node_path).unwrap().unwrap();
- for prop in FILTERED_VM_DTBO_PROP {
- node.nop_property(prop)?;
- }
- }
Ok(())
}
- pub fn patch(&self, fdt: &mut Fdt) -> Result<()> {
- for device in &self.assigned_devices {
- device.patch(fdt)?
+ fn patch_pviommus(&self, fdt: &mut Fdt) -> Result<BTreeMap<PvIommu, Phandle>> {
+ let mut compatible = fdt.root_mut()?.next_compatible(Self::PVIOMMU_COMPATIBLE)?;
+ let mut pviommu_phandles = BTreeMap::new();
+
+ for pviommu in &self.pviommus {
+ let mut node = compatible.ok_or(DeviceAssignmentError::TooManyPvIommu)?;
+ let phandle = node.as_node().get_phandle()?.ok_or(DeviceAssignmentError::Internal)?;
+ node.setprop_inplace(cstr!("id"), &pviommu.id.to_be_bytes())?;
+ if pviommu_phandles.insert(*pviommu, phandle).is_some() {
+ return Err(DeviceAssignmentError::Internal);
+ }
+ compatible = node.next_compatible(Self::PVIOMMU_COMPATIBLE)?;
}
+
+ // Filters pre-populated but unassigned pvIOMMUs.
+ while let Some(filtered_pviommu) = compatible {
+ compatible = filtered_pviommu.delete_and_next_compatible(Self::PVIOMMU_COMPATIBLE)?;
+ }
+
+ Ok(pviommu_phandles)
+ }
+
+ pub fn patch(&self, fdt: &mut Fdt) -> Result<()> {
+ let pviommu_phandles = self.patch_pviommus(fdt)?;
+
+ // Patches assigned devices
+ for device in &self.assigned_devices {
+ device.patch(fdt, &pviommu_phandles)?;
+ }
+
Ok(())
}
}
@@ -307,12 +509,97 @@
#[cfg(test)]
mod tests {
use super::*;
+ use alloc::collections::{BTreeMap, BTreeSet};
use std::fs;
const VM_DTBO_FILE_PATH: &str = "test_pvmfw_devices_vm_dtbo.dtbo";
const VM_DTBO_WITHOUT_SYMBOLS_FILE_PATH: &str =
"test_pvmfw_devices_vm_dtbo_without_symbols.dtbo";
+ const FDT_WITHOUT_IOMMUS_FILE_PATH: &str = "test_pvmfw_devices_without_iommus.dtb";
+ const FDT_WITHOUT_DEVICE_FILE_PATH: &str = "test_pvmfw_devices_without_device.dtb";
const FDT_FILE_PATH: &str = "test_pvmfw_devices_with_rng.dtb";
+ const FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH: &str =
+ "test_pvmfw_devices_with_multiple_devices_iommus.dtb";
+ const FDT_WITH_IOMMU_SHARING: &str = "test_pvmfw_devices_with_iommu_sharing.dtb";
+ const FDT_WITH_IOMMU_ID_CONFLICT: &str = "test_pvmfw_devices_with_iommu_id_conflict.dtb";
+
+ #[derive(Debug, Default)]
+ struct MockHypervisor {
+ mmio_tokens: BTreeMap<(u64, u64), u64>,
+ iommu_tokens: BTreeMap<(u64, u64), (u64, u64)>,
+ }
+
+ impl DeviceAssigningHypervisor for MockHypervisor {
+ fn get_phys_mmio_token(&self, base_ipa: u64, size: u64) -> hyp::Result<u64> {
+ Ok(*self.mmio_tokens.get(&(base_ipa, size)).ok_or(hyp::Error::KvmError(
+ hyp::KvmError::InvalidParameter,
+ 0xc6000012, /* VENDOR_HYP_KVM_DEV_REQ_MMIO_FUNC_ID */
+ ))?)
+ }
+
+ fn get_phys_iommu_token(&self, pviommu_id: u64, vsid: u64) -> hyp::Result<(u64, u64)> {
+ Ok(*self.iommu_tokens.get(&(pviommu_id, vsid)).ok_or(hyp::Error::KvmError(
+ hyp::KvmError::InvalidParameter,
+ 0xc6000013, /* VENDOR_HYP_KVM_DEV_REQ_DMA_FUNC_ID */
+ ))?)
+ }
+ }
+
+ #[derive(Debug, Eq, PartialEq)]
+ struct AssignedDeviceNode {
+ path: CString,
+ reg: Vec<u8>,
+ interrupts: Vec<u8>,
+ iommus: Vec<u32>, // pvIOMMU id and vSID
+ }
+
+ impl AssignedDeviceNode {
+ fn parse(fdt: &Fdt, path: &CStr) -> Result<Self> {
+ let Some(node) = fdt.node(path)? else {
+ return Err(FdtError::NotFound.into());
+ };
+
+ let reg = node.getprop(cstr!("reg"))?.ok_or(DeviceAssignmentError::InvalidReg)?;
+ let interrupts = node
+ .getprop(cstr!("interrupts"))?
+ .ok_or(DeviceAssignmentError::InvalidInterrupts)?;
+ let mut iommus = vec![];
+ if let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? {
+ while let Some(pviommu_id) = cells.next() {
+ // pvIOMMU id
+ let phandle = Phandle::try_from(pviommu_id)?;
+ let pviommu = fdt
+ .node_with_phandle(phandle)?
+ .ok_or(DeviceAssignmentError::InvalidIommus)?;
+ let compatible = pviommu.getprop_str(cstr!("compatible"));
+ if compatible != Ok(Some(cstr!("pkvm,pviommu"))) {
+ return Err(DeviceAssignmentError::InvalidIommus);
+ }
+ let id = pviommu
+ .getprop_u32(cstr!("id"))?
+ .ok_or(DeviceAssignmentError::InvalidIommus)?;
+ iommus.push(id);
+
+ // vSID
+ let Some(vsid) = cells.next() else {
+ return Err(DeviceAssignmentError::InvalidIommus);
+ };
+ iommus.push(vsid);
+ }
+ }
+ Ok(Self { path: path.into(), reg: reg.into(), interrupts: interrupts.into(), iommus })
+ }
+ }
+
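+ /// Collects the sorted ids of all `pkvm,pviommu` nodes remaining in the FDT.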
+ fn collect_pviommus(fdt: &Fdt) -> Result<Vec<u32>> {
+ let mut pviommus = BTreeSet::new();
+ for pviommu in fdt.compatible_nodes(cstr!("pkvm,pviommu"))? {
+ if let Ok(Some(id)) = pviommu.getprop_u32(cstr!("id")) {
+ pviommus.insert(id);
+ }
+ }
+ Ok(pviommus.iter().cloned().collect())
+ }
fn into_fdt_prop(native_bytes: Vec<u32>) -> Vec<u8> {
let mut v = Vec::with_capacity(native_bytes.len() * 4);
@@ -322,6 +609,12 @@
v
}
+ impl From<[u64; 2]> for DeviceReg {
+ fn from(fdt_cells: [u64; 2]) -> Self {
+ DeviceReg { addr: fdt_cells[0], size: fdt_cells[1] }
+ }
+ }
+
#[test]
fn device_info_new_without_symbols() {
let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
@@ -329,48 +622,83 @@
let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
- let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo).unwrap();
+ let hypervisor: MockHypervisor = Default::default();
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap();
assert_eq!(device_info, None);
}
#[test]
+ fn device_info_new_without_device() {
+ let mut fdt_data = fs::read(FDT_WITHOUT_DEVICE_FILE_PATH).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+
+ let hypervisor: MockHypervisor = Default::default();
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap();
+ assert_eq!(device_info, None);
+ }
+
+ #[test]
+ fn device_info_assigned_info_without_iommus() {
+ let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
+ iommu_tokens: BTreeMap::new(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
+
+ let expected = [AssignedDeviceInfo {
+ node_path: CString::new("/bus0/backlight").unwrap(),
+ dtbo_node_path: cstr!("/fragment@backlight/__overlay__/bus0/backlight").into(),
+ reg: vec![[0x9, 0xFF].into()],
+ interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
+ iommus: vec![],
+ }];
+
+ assert_eq!(device_info.assigned_devices, expected);
+ }
+
+ #[test]
fn device_info_assigned_info() {
let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
- let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo).unwrap().unwrap();
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
+ iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
let expected = [AssignedDeviceInfo {
node_path: CString::new("/rng").unwrap(),
dtbo_node_path: cstr!("/fragment@rng/__overlay__/rng").into(),
- reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
+ reg: vec![[0x9, 0xFF].into()],
interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
+ iommus: vec![(PvIommu { id: 0x4 }, Vsid(0xFF0))],
}];
assert_eq!(device_info.assigned_devices, expected);
}
#[test]
- fn device_info_new_without_assigned_devices() {
- let mut fdt_data: Vec<u8> = pvmfw_fdt_template::RAW.into();
- let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
- let fdt = Fdt::from_mut_slice(fdt_data.as_mut_slice()).unwrap();
- let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
-
- let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo).unwrap();
- assert_eq!(device_info, None);
- }
-
- #[test]
fn device_info_filter() {
let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
- let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo).unwrap().unwrap();
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
+ iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
device_info.filter(vm_dtbo).unwrap();
let vm_dtbo = vm_dtbo.as_mut();
@@ -381,37 +709,256 @@
let light = vm_dtbo.node(cstr!("/fragment@rng/__overlay__/light")).unwrap();
assert_eq!(light, None);
+ let led = vm_dtbo.node(cstr!("/fragment@led/__overlay__/led")).unwrap();
+ assert_eq!(led, None);
+
+ let backlight =
+ vm_dtbo.node(cstr!("/fragment@backlight/__overlay__/bus0/backlight")).unwrap();
+ assert_eq!(backlight, None);
+
let symbols_node = vm_dtbo.symbols().unwrap();
assert_eq!(symbols_node, None);
}
#[test]
fn device_info_patch() {
- let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
+ let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();
- let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo).unwrap().unwrap();
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
+ iommu_tokens: BTreeMap::new(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
device_info.filter(vm_dtbo).unwrap();
// SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
unsafe {
platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
}
+ device_info.patch(platform_dt).unwrap();
- let rng_node = platform_dt.node(cstr!("/rng")).unwrap().unwrap();
- let expected: Vec<(&CStr, Vec<u8>)> = vec![
- (cstr!("android,rng,ignore-gctrl-reset"), Vec::<u8>::new()),
- (cstr!("compatible"), b"android,rng\0".to_vec()),
- (cstr!("reg"), into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF])),
- (cstr!("interrupts"), into_fdt_prop(vec![0x0, 0xF, 0x4])),
+ let rng_node = platform_dt.node(cstr!("/bus0/backlight")).unwrap().unwrap();
+ let phandle = rng_node.getprop_u32(cstr!("phandle")).unwrap();
+ assert_ne!(None, phandle);
+
+ // Note: Intentionally not using AssignedDeviceNode for matching all props.
+ type FdtResult<T> = libfdt::Result<T>;
+ let expected: Vec<(FdtResult<&CStr>, FdtResult<Vec<u8>>)> = vec![
+ (Ok(cstr!("android,backlight,ignore-gctrl-reset")), Ok(Vec::new())),
+ (Ok(cstr!("compatible")), Ok(Vec::from(*b"android,backlight\0"))),
+ (Ok(cstr!("interrupts")), Ok(into_fdt_prop(vec![0x0, 0xF, 0x4]))),
+ (Ok(cstr!("iommus")), Ok(Vec::new())),
+ (Ok(cstr!("phandle")), Ok(into_fdt_prop(vec![phandle.unwrap()]))),
+ (Ok(cstr!("reg")), Ok(into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]))),
];
- for (prop, (prop_name, prop_value)) in rng_node.properties().unwrap().zip(expected) {
- assert_eq!((prop.name(), prop.value()), (Ok(prop_name), Ok(prop_value.as_slice())));
+ let mut properties: Vec<_> = rng_node
+ .properties()
+ .unwrap()
+ .map(|prop| (prop.name(), prop.value().map(|x| x.into())))
+ .collect();
+ properties.sort_by(|a, b| {
+ let lhs = a.0.unwrap_or_default();
+ let rhs = b.0.unwrap_or_default();
+ lhs.partial_cmp(rhs).unwrap()
+ });
+
+ assert_eq!(properties, expected);
+ }
+
+ #[test]
+ fn device_info_overlay_iommu() {
+ let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+ let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
+ platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
+ let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
+ platform_dt.unpack().unwrap();
+
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
+ iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
+ device_info.filter(vm_dtbo).unwrap();
+
+ // SAFETY: The damaged VM DTBO won't be used after this unsafe block.
+ unsafe {
+ platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
}
+ device_info.patch(platform_dt).unwrap();
+
+ let expected = AssignedDeviceNode {
+ path: CString::new("/rng").unwrap(),
+ reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
+ interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
+ iommus: vec![0x4, 0xFF0],
+ };
+
+ let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
+ assert_eq!(node, Ok(expected));
+
+ let pviommus = collect_pviommus(platform_dt);
+ assert_eq!(pviommus, Ok(vec![0x4]));
+ }
+
+ #[test]
+ fn device_info_multiple_devices_iommus() {
+ let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+ let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
+ platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
+ let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
+ platform_dt.unpack().unwrap();
+
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [
+ ((0x9, 0xFF), 0x12F00000),
+ ((0x100, 0x1000), 0xF00000),
+ ((0x200, 0x1000), 0xF10000),
+ ]
+ .into(),
+ iommu_tokens: [
+ ((0x4, 0xFF0), (0x12E40000, 3)),
+ ((0x40, 0xFFA), (0x40000, 0x4)),
+ ((0x50, 0xFFB), (0x50000, 0x5)),
+ ]
+ .into(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
+ device_info.filter(vm_dtbo).unwrap();
+
+ // SAFETY: The damaged VM DTBO won't be used after this unsafe block.
+ unsafe {
+ platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
+ }
+ device_info.patch(platform_dt).unwrap();
+
+ let expected_devices = [
+ AssignedDeviceNode {
+ path: CString::new("/rng").unwrap(),
+ reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
+ interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
+ iommus: vec![0x4, 0xFF0],
+ },
+ AssignedDeviceNode {
+ path: CString::new("/light").unwrap(),
+ reg: into_fdt_prop(vec![0x0, 0x100, 0x0, 0x1000, 0x0, 0x200, 0x0, 0x1000]),
+ interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
+ iommus: vec![0x40, 0xFFA, 0x50, 0xFFB],
+ },
+ ];
+
+ for expected in expected_devices {
+ let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
+ assert_eq!(node, Ok(expected));
+ }
+ let pviommus = collect_pviommus(platform_dt);
+ assert_eq!(pviommus, Ok(vec![0x4, 0x40, 0x50]));
+ }
+
+ #[test]
+ fn device_info_iommu_sharing() {
+ let mut fdt_data = fs::read(FDT_WITH_IOMMU_SHARING).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+ let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
+ platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
+ let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
+ platform_dt.unpack().unwrap();
+
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x12F00000), ((0x100, 0x9), 0x12000000)].into(),
+ iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 3))].into(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
+ device_info.filter(vm_dtbo).unwrap();
+
+ // SAFETY: The damaged VM DTBO won't be used after this unsafe block.
+ unsafe {
+ platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
+ }
+ device_info.patch(platform_dt).unwrap();
+
+ let expected_devices = [
+ AssignedDeviceNode {
+ path: CString::new("/rng").unwrap(),
+ reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
+ interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
+ iommus: vec![0x4, 0xFF0],
+ },
+ AssignedDeviceNode {
+ path: CString::new("/led").unwrap(),
+ reg: into_fdt_prop(vec![0x0, 0x100, 0x0, 0x9]),
+ interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
+ iommus: vec![0x4, 0xFF0],
+ },
+ ];
+
+ for expected in expected_devices {
+ let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
+ assert_eq!(node, Ok(expected));
+ }
+
+ let pviommus = collect_pviommus(platform_dt);
+ assert_eq!(pviommus, Ok(vec![0x4]));
+ }
+
+ #[test]
+ fn device_info_iommu_id_conflict() {
+ let mut fdt_data = fs::read(FDT_WITH_IOMMU_ID_CONFLICT).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
+ iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
+
+ assert_eq!(device_info, Err(DeviceAssignmentError::DuplicatedPvIommuIds));
+ }
+
+ #[test]
+ fn device_info_invalid_reg() {
+ let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+
+ let hypervisor = MockHypervisor {
+ mmio_tokens: BTreeMap::new(),
+ iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
+
+ assert_eq!(device_info, Err(DeviceAssignmentError::InvalidReg));
+ }
+
+ #[test]
+ fn device_info_invalid_iommus() {
+ let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
+ iommu_tokens: BTreeMap::new(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
+
+ assert_eq!(device_info, Err(DeviceAssignmentError::InvalidIommus));
}
}
diff --git a/pvmfw/src/dice.rs b/pvmfw/src/dice.rs
index cc31f34..99bf589 100644
--- a/pvmfw/src/dice.rs
+++ b/pvmfw/src/dice.rs
@@ -14,16 +14,13 @@
//! Support for DICE derivation and BCC generation.
-use core::ffi::c_void;
use core::mem::size_of;
-use core::slice;
+use cstr::cstr;
use diced_open_dice::{
bcc_format_config_descriptor, bcc_handover_main_flow, hash, Config, DiceConfigValues, DiceMode,
Hash, InputValues, HIDDEN_SIZE,
};
-use pvmfw_avb::{DebugLevel, Digest, VerifiedBootData};
-use vmbase::cstr;
-use vmbase::memory::flushed_zeroize;
+use pvmfw_avb::{Capability, DebugLevel, Digest, VerifiedBootData};
fn to_dice_mode(debug_level: DebugLevel) -> DiceMode {
match debug_level {
@@ -46,6 +43,7 @@
pub auth_hash: Hash,
pub mode: DiceMode,
pub security_version: u64,
+ pub rkp_vm_marker: bool,
}
impl PartialInputs {
@@ -55,8 +53,9 @@
let mode = to_dice_mode(data.debug_level);
// We use rollback_index from vbmeta as the security_version field in dice certificate.
let security_version = data.rollback_index;
+ let rkp_vm_marker = data.has_capability(Capability::RemoteAttest);
- Ok(Self { code_hash, auth_hash, mode, security_version })
+ Ok(Self { code_hash, auth_hash, mode, security_version, rkp_vm_marker })
}
pub fn write_next_bcc(
@@ -66,15 +65,7 @@
next_bcc: &mut [u8],
) -> diced_open_dice::Result<()> {
let mut config_descriptor_buffer = [0; 128];
- let config_values = DiceConfigValues {
- component_name: Some(cstr!("vm_entry")),
- security_version: if cfg!(llpvm_changes) { Some(self.security_version) } else { None },
- ..Default::default()
- };
-
- let config_descriptor_size =
- bcc_format_config_descriptor(&config_values, &mut config_descriptor_buffer)?;
- let config = &config_descriptor_buffer[..config_descriptor_size];
+ let config = self.generate_config_descriptor(&mut config_descriptor_buffer)?;
let dice_inputs = InputValues::new(
self.code_hash,
@@ -86,17 +77,138 @@
let _ = bcc_handover_main_flow(current_bcc_handover, &dice_inputs, next_bcc)?;
Ok(())
}
+
+ fn generate_config_descriptor<'a>(
+ &self,
+ config_descriptor_buffer: &'a mut [u8],
+ ) -> diced_open_dice::Result<&'a [u8]> {
+ let config_values = DiceConfigValues {
+ component_name: Some(cstr!("vm_entry")),
+ security_version: if cfg!(dice_changes) { Some(self.security_version) } else { None },
+ rkp_vm_marker: self.rkp_vm_marker,
+ ..Default::default()
+ };
+ let config_descriptor_size =
+ bcc_format_config_descriptor(&config_values, config_descriptor_buffer)?;
+ let config = &config_descriptor_buffer[..config_descriptor_size];
+ Ok(config)
+ }
}
/// Flushes data caches over the provided address range.
///
/// # Safety
///
-/// The provided address and size must be to a valid address range (typically on the stack, .bss,
-/// .data, or provided BCC).
+/// The provided address and size must be to an address range that is valid for read and write
+/// (typically on the stack, .bss, .data, or provided BCC) from a single allocation
+/// (e.g. stack array).
#[no_mangle]
-unsafe extern "C" fn DiceClearMemory(_ctx: *mut c_void, size: usize, addr: *mut c_void) {
- // SAFETY: We must trust that the slice will be valid arrays/variables on the C code stack.
+#[cfg(not(test))]
+unsafe extern "C" fn DiceClearMemory(
+ _ctx: *mut core::ffi::c_void,
+ size: usize,
+ addr: *mut core::ffi::c_void,
+) {
+ use core::slice;
+ use vmbase::memory::flushed_zeroize;
+
+ // SAFETY: We require our caller to provide a valid range within a single object. The open-dice
+ // library always calls this on individual stack-allocated arrays, which ensures that.
let region = unsafe { slice::from_raw_parts_mut(addr as *mut u8, size) };
flushed_zeroize(region)
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use ciborium::Value;
+ use std::collections::HashMap;
+ use std::vec;
+
+ const COMPONENT_NAME_KEY: i64 = -70002;
+ const COMPONENT_VERSION_KEY: i64 = -70003;
+ const RESETTABLE_KEY: i64 = -70004;
+ const SECURITY_VERSION_KEY: i64 = -70005;
+ const RKP_VM_MARKER_KEY: i64 = -70006;
+
+ const BASE_VB_DATA: VerifiedBootData = VerifiedBootData {
+ debug_level: DebugLevel::None,
+ kernel_digest: [1u8; size_of::<Digest>()],
+ initrd_digest: Some([2u8; size_of::<Digest>()]),
+ public_key: b"public key",
+ capabilities: vec![],
+ rollback_index: 42,
+ };
+
+ #[test]
+ fn base_data_conversion() {
+ let vb_data = BASE_VB_DATA;
+ let inputs = PartialInputs::new(&vb_data).unwrap();
+
+ assert_eq!(inputs.mode, DiceMode::kDiceModeNormal);
+ assert_eq!(inputs.security_version, 42);
+ assert!(!inputs.rkp_vm_marker);
+
+ // TODO(b/313608219): Consider checks for code_hash and possibly auth_hash.
+ }
+
+ #[test]
+ fn debuggable_conversion() {
+ let vb_data = VerifiedBootData { debug_level: DebugLevel::Full, ..BASE_VB_DATA };
+ let inputs = PartialInputs::new(&vb_data).unwrap();
+
+ assert_eq!(inputs.mode, DiceMode::kDiceModeDebug);
+ }
+
+ #[test]
+ fn rkp_vm_conversion() {
+ let vb_data =
+ VerifiedBootData { capabilities: vec![Capability::RemoteAttest], ..BASE_VB_DATA };
+ let inputs = PartialInputs::new(&vb_data).unwrap();
+
+ assert!(inputs.rkp_vm_marker);
+ }
+
+ #[test]
+ fn base_config_descriptor() {
+ let vb_data = BASE_VB_DATA;
+ let inputs = PartialInputs::new(&vb_data).unwrap();
+ let config_map = decode_config_descriptor(&inputs);
+
+ assert_eq!(config_map.get(&COMPONENT_NAME_KEY).unwrap().as_text().unwrap(), "vm_entry");
+ assert_eq!(config_map.get(&COMPONENT_VERSION_KEY), None);
+ assert_eq!(config_map.get(&RESETTABLE_KEY), None);
+ if cfg!(dice_changes) {
+ assert_eq!(
+ config_map.get(&SECURITY_VERSION_KEY).unwrap().as_integer().unwrap(),
+ 42.into()
+ );
+ } else {
+ assert_eq!(config_map.get(&SECURITY_VERSION_KEY), None);
+ }
+ assert_eq!(config_map.get(&RKP_VM_MARKER_KEY), None);
+ }
+
+ #[test]
+ fn config_descriptor_with_rkp_vm() {
+ let vb_data =
+ VerifiedBootData { capabilities: vec![Capability::RemoteAttest], ..BASE_VB_DATA };
+ let inputs = PartialInputs::new(&vb_data).unwrap();
+ let config_map = decode_config_descriptor(&inputs);
+
+ assert!(config_map.get(&RKP_VM_MARKER_KEY).unwrap().is_null());
+ }
+
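+ /// Decodes the generated config descriptor CBOR map into label -> Value entries.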
+ fn decode_config_descriptor(inputs: &PartialInputs) -> HashMap<i64, Value> {
+ let mut buffer = [0; 128];
+ let config_descriptor = inputs.generate_config_descriptor(&mut buffer).unwrap();
+
+ let cbor_map =
+ cbor_util::deserialize::<Value>(config_descriptor).unwrap().into_map().unwrap();
+
+ cbor_map
+ .into_iter()
+ .map(|(k, v)| ((k.into_integer().unwrap().try_into().unwrap()), v))
+ .collect()
+ }
+}
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index ed73bc9..2475f32 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -15,9 +15,9 @@
//! Low-level entry and exit points of pvmfw.
use crate::config;
-use crate::crypto;
use crate::fdt;
use crate::memory;
+use bssl_ffi::CRYPTO_library_init;
use core::arch::asm;
use core::mem::{drop, size_of};
use core::num::NonZeroUsize;
@@ -196,7 +196,12 @@
// - only access non-pvmfw memory once (and while) it has been mapped
log::set_max_level(LevelFilter::Info);
- crypto::init();
+ // TODO(https://crbug.com/boringssl/35): Remove this init when BoringSSL can handle this
+ // internally.
+ // SAFETY: Configures the internal state of the library - may be called multiple times.
+ unsafe {
+ CRYPTO_library_init();
+ }
let page_table = memory::init_page_table().map_err(|e| {
error!("Failed to set up the dynamic page tables: {e}");
@@ -207,12 +212,12 @@
// then remapped by `init_page_table()`.
let appended_data = unsafe { get_appended_data_slice() };
- let mut appended = AppendedPayload::new(appended_data).ok_or_else(|| {
+ let appended = AppendedPayload::new(appended_data).ok_or_else(|| {
error!("No valid configuration found");
RebootReason::InvalidConfig
})?;
- let (bcc_slice, debug_policy, vm_dtbo) = appended.get_entries();
+ let config_entries = appended.get_entries();
// Up to this point, we were using the built-in static (from .rodata) page tables.
MEMORY.lock().replace(MemoryTracker::new(
@@ -222,13 +227,19 @@
Some(memory::appended_payload_range()),
));
- let slices = MemorySlices::new(fdt, payload, payload_size, vm_dtbo)?;
+ let slices = MemorySlices::new(fdt, payload, payload_size, config_entries.vm_dtbo)?;
// This wrapper allows main() to be blissfully ignorant of platform details.
- let next_bcc = crate::main(slices.fdt, slices.kernel, slices.ramdisk, bcc_slice, debug_policy)?;
+ let next_bcc = crate::main(
+ slices.fdt,
+ slices.kernel,
+ slices.ramdisk,
+ config_entries.bcc,
+ config_entries.debug_policy,
+ )?;
// Writable-dirty regions will be flushed when MemoryTracker is dropped.
- bcc_slice.zeroize();
+ config_entries.bcc.zeroize();
info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
MEMORY.lock().as_mut().unwrap().mmio_unmap_all().map_err(|e| {
@@ -432,10 +443,10 @@
}
}
- fn get_entries(&mut self) -> (&mut [u8], Option<&mut [u8]>, Option<&mut [u8]>) {
+ fn get_entries(self) -> config::Entries<'a> {
match self {
- Self::Config(ref mut cfg) => cfg.get_entries(),
- Self::LegacyBcc(ref mut bcc) => (bcc, None, None),
+ Self::Config(cfg) => cfg.get_entries(),
+ Self::LegacyBcc(bcc) => config::Entries { bcc, ..Default::default() },
}
}
}
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index 7655614..2a6819b 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -15,8 +15,7 @@
//! High-level FDT functions.
use crate::bootargs::BootArgsIterator;
-use crate::device_assignment::DeviceAssignmentInfo;
-use crate::device_assignment::VmDtbo;
+use crate::device_assignment::{DeviceAssignmentInfo, VmDtbo};
use crate::helpers::GUEST_PAGE_SIZE;
use crate::Box;
use crate::RebootReason;
@@ -28,6 +27,7 @@
use core::fmt;
use core::mem::size_of;
use core::ops::Range;
+use cstr::cstr;
use fdtpci::PciMemoryFlags;
use fdtpci::PciRangeType;
use libfdt::AddressRange;
@@ -41,7 +41,6 @@
use log::info;
use log::warn;
use tinyvec::ArrayVec;
-use vmbase::cstr;
use vmbase::fdt::SwiotlbInfo;
use vmbase::layout::{crosvm::MEM_START, MAX_VIRT_ADDR};
use vmbase::memory::SIZE_4KB;
@@ -201,6 +200,30 @@
Ok(())
}
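+/// Returns the vendor_hashtree_descriptor_root_digest property of the /avf node, if present.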
+fn read_vendor_hashtree_descriptor_root_digest_from(fdt: &Fdt) -> libfdt::Result<Option<Vec<u8>>> {
+ if let Some(avf_node) = fdt.node(cstr!("/avf"))? {
+ if let Some(vendor_hashtree_descriptor_root_digest) =
+ avf_node.getprop(cstr!("vendor_hashtree_descriptor_root_digest"))?
+ {
+ return Ok(Some(vendor_hashtree_descriptor_root_digest.to_vec()));
+ }
+ }
+ Ok(None)
+}
+
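+/// Adds an /avf node to the FDT and stores the given digest under
+/// vendor_hashtree_descriptor_root_digest.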
+fn patch_vendor_hashtree_descriptor_root_digest(
+ fdt: &mut Fdt,
+ vendor_hashtree_descriptor_root_digest: &[u8],
+) -> libfdt::Result<()> {
+ let mut root_node = fdt.root_mut()?;
+ let mut avf_node = root_node.add_subnode(cstr!("/avf"))?;
+ avf_node.setprop(
+ cstr!("vendor_hashtree_descriptor_root_digest"),
+ vendor_hashtree_descriptor_root_digest,
+ )?;
+ Ok(())
+}
+
#[derive(Debug)]
struct PciInfo {
ranges: [PciAddrRange; 2],
@@ -593,6 +616,7 @@
serial_info: SerialInfo,
pub swiotlb_info: SwiotlbInfo,
device_assignment: Option<DeviceAssignmentInfo>,
+ vendor_hashtree_descriptor_root_digest: Option<Vec<u8>>,
}
impl DeviceTreeInfo {
@@ -627,6 +651,11 @@
RebootReason::InvalidFdt
})?;
+ fdt.unpack().map_err(|e| {
+ error!("Failed to unpack DT for patching: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
if let Some(device_assignment_info) = &info.device_assignment {
let vm_dtbo = vm_dtbo.unwrap();
device_assignment_info.filter(vm_dtbo).map_err(|e| {
@@ -646,6 +675,11 @@
patch_device_tree(fdt, &info)?;
+ fdt.pack().map_err(|e| {
+ error!("Failed to unpack DT after patching: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
Ok(info)
}
@@ -694,13 +728,36 @@
validate_swiotlb_info(&swiotlb_info, &memory_range)?;
let device_assignment = match vm_dtbo {
- Some(vm_dtbo) => DeviceAssignmentInfo::parse(fdt, vm_dtbo).map_err(|e| {
- error!("Failed to parse device assignment from DT and VM DTBO: {e}");
- RebootReason::InvalidFdt
- })?,
+ Some(vm_dtbo) => {
+ if let Some(hypervisor) = hyp::get_device_assigner() {
+ DeviceAssignmentInfo::parse(fdt, vm_dtbo, hypervisor).map_err(|e| {
+ error!("Failed to parse device assignment from DT and VM DTBO: {e}");
+ RebootReason::InvalidFdt
+ })?
+ } else {
+ warn!(
+ "Device assignment is ignored because device assigning hypervisor is missing"
+ );
+ None
+ }
+ }
None => None,
};
+ // TODO(b/285854379): This is a temporary solution to enable the microdroid
+ // vendor partition for non-protected VMs as well. When a DT path containing
+ // vendor_hashtree_descriptor_root_digest is passed via fstab, the init stage
+ // checks whether vendor_hashtree_descriptor_root_digest exists, regardless of
+ // the protection level. Propagating the digest here prevents a fatal error in
+ // the init stage for protected VMs. However, this data cannot be trusted until
+ // the root digest of the vendor hashtree descriptor is validated as coming
+ // from the ABL.
+ let vendor_hashtree_descriptor_root_digest =
+ read_vendor_hashtree_descriptor_root_digest_from(fdt).map_err(|e| {
+ error!("Failed to read vendor_hashtree_descriptor_root_digest from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
Ok(DeviceTreeInfo {
kernel_range,
initrd_range,
@@ -711,15 +768,11 @@
serial_info,
swiotlb_info,
device_assignment,
+ vendor_hashtree_descriptor_root_digest,
})
}
fn patch_device_tree(fdt: &mut Fdt, info: &DeviceTreeInfo) -> Result<(), RebootReason> {
- fdt.unpack().map_err(|e| {
- error!("Failed to unpack DT for patching: {e}");
- RebootReason::InvalidFdt
- })?;
-
if let Some(initrd_range) = &info.initrd_range {
patch_initrd_range(fdt, initrd_range).map_err(|e| {
error!("Failed to patch initrd range to DT: {e}");
@@ -768,11 +821,15 @@
RebootReason::InvalidFdt
})?;
}
-
- fdt.pack().map_err(|e| {
- error!("Failed to pack DT after patching: {e}");
- RebootReason::InvalidFdt
- })?;
+ if let Some(vendor_hashtree_descriptor_root_digest) =
+ &info.vendor_hashtree_descriptor_root_digest
+ {
+ patch_vendor_hashtree_descriptor_root_digest(fdt, vendor_hashtree_descriptor_root_digest)
+ .map_err(|e| {
+ error!("Failed to patch vendor_hashtree_descriptor_root_digest to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ }
Ok(())
}
@@ -783,7 +840,7 @@
bcc: &[u8],
new_instance: bool,
strict_boot: bool,
- debug_policy: Option<&mut [u8]>,
+ debug_policy: Option<&[u8]>,
debuggable: bool,
kaslr_seed: u64,
) -> libfdt::Result<()> {
diff --git a/pvmfw/src/instance.rs b/pvmfw/src/instance.rs
index f2cd6a3..e98f663 100644
--- a/pvmfw/src/instance.rs
+++ b/pvmfw/src/instance.rs
@@ -14,13 +14,11 @@
//! Support for reading and writing to the instance.img.
-use crate::crypto;
-use crate::crypto::AeadCtx;
use crate::dice::PartialInputs;
use crate::gpt;
use crate::gpt::Partition;
use crate::gpt::Partitions;
-use bssl_avf::{self, hkdf, Digester};
+use bssl_avf::{self, hkdf, Aead, AeadContext, Digester};
use core::fmt;
use core::mem::size_of;
use diced_open_dice::DiceMode;
@@ -40,12 +38,8 @@
pub enum Error {
/// Unexpected I/O error while accessing the underlying disk.
FailedIo(gpt::Error),
- /// Failed to decrypt the entry.
- FailedOpen(crypto::ErrorIterator),
/// Failed to generate a random salt to be stored.
FailedSaltGeneration(rand::Error),
- /// Failed to encrypt the entry.
- FailedSeal(crypto::ErrorIterator),
/// Impossible to create a new instance.img entry.
InstanceImageFull,
/// Badly formatted instance.img header block.
@@ -72,21 +66,7 @@
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::FailedIo(e) => write!(f, "Failed I/O to disk: {e}"),
- Self::FailedOpen(e_iter) => {
- writeln!(f, "Failed to open the instance.img partition:")?;
- for e in *e_iter {
- writeln!(f, "\t{e}")?;
- }
- Ok(())
- }
Self::FailedSaltGeneration(e) => write!(f, "Failed to generate salt: {e}"),
- Self::FailedSeal(e_iter) => {
- writeln!(f, "Failed to seal the instance.img partition:")?;
- for e in *e_iter {
- writeln!(f, "\t{e}")?;
- }
- Ok(())
- }
Self::InstanceImageFull => write!(f, "Failed to obtain a free instance.img partition"),
Self::InvalidInstanceImageHeader => write!(f, "instance.img header is invalid"),
Self::MissingInstanceImage => write!(f, "Failed to find the instance.img partition"),
@@ -124,6 +104,13 @@
trace!("Found pvmfw instance.img entry: {entry:?}");
let key = hkdf::<32>(secret, /* salt= */ &[], b"vm-instance", Digester::sha512())?;
+ let tag_len = None;
+ let aead_ctx = AeadContext::new(Aead::aes_256_gcm_randnonce(), key.as_slice(), tag_len)?;
+ let ad = &[];
+ // The nonce is generated internally for `aes_256_gcm_randnonce`, so no additional
+ // nonce is required.
+ let nonce = &[];
+
let mut blk = [0; BLK_SIZE];
match entry {
PvmfwEntry::Existing { header_index, payload_size } => {
@@ -136,11 +123,20 @@
let payload = &blk[..payload_size];
let mut entry = [0; size_of::<EntryBody>()];
- let aead =
- AeadCtx::new_aes_256_gcm_randnonce(key.as_slice()).map_err(Error::FailedOpen)?;
- let decrypted = aead.open(&mut entry, payload).map_err(Error::FailedOpen)?;
+ let decrypted = aead_ctx.open(payload, nonce, ad, &mut entry)?;
let body = EntryBody::read_from(decrypted).unwrap();
+ if dice_inputs.rkp_vm_marker {
+ // The RKP VM is allowed to run if it has passed the verified boot check and
+ // contains the expected version in its AVB footer.
+ // The comparison below with the previous boot information is skipped to enable the
+ // simultaneous update of the pvmfw and RKP VM.
+ // For instance, when both the pvmfw and RKP VM are updated, the code hash of the
+ // RKP VM will differ from the one stored in the instance image. In this case, the
+ // RKP VM is still allowed to run.
+ // This ensures that the updated RKP VM will retain the same CDIs in the next stage.
+ return Ok((false, body.salt));
+ }
if body.code_hash != dice_inputs.code_hash {
Err(Error::RecordedCodeHashMismatch)
} else if body.auth_hash != dice_inputs.auth_hash {
@@ -155,12 +151,10 @@
let salt = rand::random_array().map_err(Error::FailedSaltGeneration)?;
let body = EntryBody::new(dice_inputs, &salt);
- let aead =
- AeadCtx::new_aes_256_gcm_randnonce(key.as_slice()).map_err(Error::FailedSeal)?;
// We currently only support single-blk entries.
let plaintext = body.as_bytes();
- assert!(plaintext.len() + aead.aead().unwrap().max_overhead() < blk.len());
- let encrypted = aead.seal(&mut blk, plaintext).map_err(Error::FailedSeal)?;
+ assert!(plaintext.len() + aead_ctx.aead().max_overhead() < blk.len());
+ let encrypted = aead_ctx.seal(plaintext, nonce, ad, &mut blk)?;
let payload_size = encrypted.len();
let payload_index = header_index + 1;
instance_img.write_block(payload_index, &blk).map_err(Error::FailedIo)?;
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index 8aa5274..f80bae1 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -22,7 +22,6 @@
mod bcc;
mod bootargs;
mod config;
-mod crypto;
mod device_assignment;
mod dice;
mod entry;
@@ -63,7 +62,7 @@
signed_kernel: &[u8],
ramdisk: Option<&[u8]>,
current_bcc_handover: &[u8],
- mut debug_policy: Option<&mut [u8]>,
+ mut debug_policy: Option<&[u8]>,
) -> Result<Range<usize>, RebootReason> {
info!("pVM firmware");
debug!("FDT: {:?}", fdt.as_ptr());
@@ -115,6 +114,17 @@
if verified_boot_data.has_capability(Capability::RemoteAttest) {
info!("Service VM capable of remote attestation detected");
+ if service_vm_version::VERSION != verified_boot_data.rollback_index {
+ // For RKP VM, we only boot if the version in the AVB footer of its kernel matches
+ // the one embedded in pvmfw at build time.
+ // This prevents pvmfw from booting a rolled-back RKP VM.
+ error!(
+ "Service VM version mismatch: expected {}, found {}",
+ service_vm_version::VERSION,
+ verified_boot_data.rollback_index
+ );
+ return Err(RebootReason::InvalidPayload);
+ }
}
if verified_boot_data.has_capability(Capability::SecretkeeperProtection) {
diff --git a/pvmfw/testdata/test_crosvm_dt_base.dtsi b/pvmfw/testdata/test_crosvm_dt_base.dtsi
new file mode 100644
index 0000000..0c1a311
--- /dev/null
+++ b/pvmfw/testdata/test_crosvm_dt_base.dtsi
@@ -0,0 +1,152 @@
+/dts-v1/;
+/plugin/;
+
+// This file was generated manually by removing unassigned pvIOMMU nodes
+// from a patched platform.dts.
+
+/ {
+ interrupt-parent = <0x01>;
+ compatible = "linux,dummy-virt";
+ #address-cells = <0x02>;
+ #size-cells = <0x02>;
+
+ chosen {
+ bootargs = "panic=-1 crashkernel=31M";
+ linux,initrd-end = <0x811d6cb8>;
+ linux,initrd-start = <0x81000000>;
+ stdout-path = "/uart@3f8";
+ linux,pci-probe-only = <0x01>;
+ kaslr-seed = <0x00 0x00>;
+ avf,strict-boot;
+ avf,new-instance;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00 0x80000000 0x00 0x10000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <0x02>;
+ #size-cells = <0x02>;
+ ranges;
+
+ restricted_dma_reserved {
+ compatible = "restricted-dma-pool";
+ size = <0x00 0xe00000>;
+ alignment = <0x00 0x1000>;
+ phandle = <0x02>;
+ };
+
+ dice {
+ compatible = "google,open-dice";
+ no-map;
+ reg = <0x00 0x7fe25000 0x00 0x1000>;
+ };
+ };
+
+ cpus {
+ #address-cells = <0x01>;
+ #size-cells = <0x00>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <0x00>;
+ };
+ };
+
+ intc {
+ compatible = "arm,gic-v3";
+ #address-cells = <0x02>;
+ #size-cells = <0x02>;
+ #interrupt-cells = <0x03>;
+ interrupt-controller;
+ reg = <0x00 0x3fff0000 0x00 0x10000 0x00 0x3ffd0000 0x00 0x20000>;
+ phandle = <0x01>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ always-on;
+ interrupts = <0x01 0x0d 0x108 0x01 0x0e 0x108 0x01 0x0b 0x108 0x01 0x0a 0x108>;
+ };
+
+ uart@2e8 {
+ compatible = "ns16550a";
+ reg = <0x00 0x2e8 0x00 0x08>;
+ clock-frequency = <0x1c2000>;
+ interrupts = <0x00 0x02 0x01>;
+ };
+
+ uart@2f8 {
+ compatible = "ns16550a";
+ reg = <0x00 0x2f8 0x00 0x08>;
+ clock-frequency = <0x1c2000>;
+ interrupts = <0x00 0x02 0x01>;
+ };
+
+ uart@3e8 {
+ compatible = "ns16550a";
+ reg = <0x00 0x3e8 0x00 0x08>;
+ clock-frequency = <0x1c2000>;
+ interrupts = <0x00 0x00 0x01>;
+ };
+
+ uart@3f8 {
+ compatible = "ns16550a";
+ reg = <0x00 0x3f8 0x00 0x08>;
+ clock-frequency = <0x1c2000>;
+ interrupts = <0x00 0x00 0x01>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "hvc";
+ };
+
+ pci {
+ compatible = "pci-host-cam-generic";
+ device_type = "pci";
+ #address-cells = <0x03>;
+ #size-cells = <0x02>;
+ #interrupt-cells = <0x01>;
+ dma-coherent;
+ memory-region = <0x02>;
+ ranges = <0x3000000 0x00 0x2000000 0x00 0x2000000 0x00 0x2000000 0x3000000 0x00 0x90800000 0x00 0x90800000 0xff 0x6f800000>;
+ bus-range = <0x00 0x00>;
+ reg = <0x00 0x10000 0x00 0x1000000>;
+ interrupt-map = <0x800 0x00 0x00 0x01 0x01 0x00 0x00 0x00 0x04 0x04 0x1000 0x00 0x00 0x01 0x01 0x00 0x00 0x00 0x05 0x04 0x1800 0x00 0x00 0x01 0x01 0x00 0x00 0x00 0x06 0x04 0x2000 0x00 0x00 0x01 0x01 0x00 0x00 0x00 0x07 0x04 0x2800 0x00 0x00 0x01 0x01 0x00 0x00 0x00 0x08 0x04 0x3000 0x00 0x00 0x01 0x01 0x00 0x00 0x00 0x09 0x04 0x3800 0x00 0x00 0x01 0x01 0x00 0x00 0x00 0x0a 0x04 0x4000 0x00 0x00 0x01 0x01 0x00 0x00 0x00 0x0b 0x04 0x4800 0x00 0x00 0x01 0x01 0x00 0x00 0x00 0x0c 0x04>;
+ interrupt-map-mask = <0xf800 0x00 0x00 0x07 0xf800 0x00 0x00 0x07 0xf800 0x00 0x00 0x07 0xf800 0x00 0x00 0x07 0xf800 0x00 0x00 0x07 0xf800 0x00 0x00 0x07 0xf800 0x00 0x00 0x07 0xf800 0x00 0x00 0x07 0xf800 0x00 0x00 0x07>;
+ };
+
+ pclk@3M {
+ compatible = "fixed-clock";
+ clock-frequency = <0x2fefd8>;
+ #clock-cells = <0x00>;
+ phandle = <0x03>;
+ };
+
+ rtc@2000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x41030>;
+ reg = <0x00 0x2000 0x00 0x1000>;
+ interrupts = <0x00 0x01 0x04>;
+ clock-names = "apb_pclk";
+ clocks = <0x03>;
+ };
+
+ vmwdt@3000 {
+ compatible = "qemu,vcpu-stall-detector";
+ reg = <0x00 0x3000 0x00 0x1000>;
+ clock-frequency = <0x0a>;
+ timeout-sec = <0x08>;
+ };
+
+ __symbols__ {
+ swiotlb = "/reserved-memory/restricted_dma_reserved";
+ intc = "/intc";
+ clk = "/pclk@3M";
+ };
+};
diff --git a/pvmfw/testdata/test_pvmfw_devices_vm_dtbo.dts b/pvmfw/testdata/test_pvmfw_devices_vm_dtbo.dts
index e85b55b..91693f7 100644
--- a/pvmfw/testdata/test_pvmfw_devices_vm_dtbo.dts
+++ b/pvmfw/testdata/test_pvmfw_devices_vm_dtbo.dts
@@ -1,32 +1,118 @@
/dts-v1/;
-/plugin/;
/ {
- fragment@rng {
- target-path = "/";
- __overlay__ {
- rng {
- compatible = "android,rng";
- android,rng,ignore-gctrl-reset;
- android,pvmfw,phy-reg = <0x0 0x12F00000 0x1000>;
- android,pvmfw,phy-iommu = <0x0 0x12E40000>;
- android,pvmfw,phy-sid = <3>;
- };
- };
- };
-
- fragment@sensor {
- target-path = "/";
- __overlay__ {
- light {
- compatible = "android,light";
- version = <0x1 0x2>;
- };
- };
- };
-
- __symbols__ {
- rng = "/fragment@rng/__overlay__/rng";
- sensor = "/fragment@sensor/__overlay__/light";
- };
+ host {
+ #address-cells = <0x2>;
+ #size-cells = <0x1>;
+ rng {
+ reg = <0x0 0x12f00000 0x1000>;
+ iommus = <0x1 0x3>;
+ android,pvmfw,target = <0x2>;
+ };
+ light {
+ reg = <0x0 0x00f00000 0x1000>, <0x0 0x00f10000 0x1000>;
+ iommus = <0x3 0x4>, <0x4 0x5>;
+ android,pvmfw,target = <0x5>;
+ };
+ led {
+ reg = <0x0 0x12000000 0x1000>;
+ iommus = <0x1 0x3>;
+ android,pvmfw,target = <0x6>;
+ };
+ bus0 {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ backlight {
+ reg = <0x300 0x100>;
+ android,pvmfw,target = <0x7>;
+ };
+ };
+ iommu0 {
+ #iommu-cells = <0x1>;
+ android,pvmfw,token = <0x0 0x12e40000>;
+ phandle = <0x1>;
+ };
+ iommu1 {
+ #iommu-cells = <0x1>;
+ android,pvmfw,token = <0x0 0x40000>;
+ phandle = <0x3>;
+ };
+ iommu2 {
+ #iommu-cells = <0x1>;
+ android,pvmfw,token = <0x0 0x50000>;
+ phandle = <0x4>;
+ };
+ };
+ fragment@rng {
+ target-path = "/";
+ __overlay__ {
+ rng {
+ compatible = "android,rng";
+ android,rng,ignore-gctrl-reset;
+ phandle = <0x2>;
+ };
+ };
+ };
+ fragment@sensor {
+ target-path = "/";
+ __overlay__ {
+ light {
+ compatible = "android,light";
+ version = <0x1 0x2>;
+ phandle = <0x5>;
+ };
+ };
+ };
+ fragment@led {
+ target-path = "/";
+ __overlay__ {
+ led {
+ compatible = "android,led";
+ prop = <0x555>;
+ phandle = <0x6>;
+ };
+ };
+ };
+ fragment@backlight {
+ target-path = "/";
+ __overlay__ {
+ bus0 {
+ backlight {
+ compatible = "android,backlight";
+ android,backlight,ignore-gctrl-reset;
+ phandle = <0x7>;
+ };
+ };
+ };
+ };
+ __symbols__ {
+ iommu0 = "/host/iommu0";
+ iommu1 = "/host/iommu1";
+ iommu2 = "/host/iommu2";
+ rng = "/fragment@rng/__overlay__/rng";
+ light = "/fragment@sensor/__overlay__/light";
+ led = "/fragment@led/__overlay__/led";
+ backlight = "/fragment@backlight/__overlay__/bus0/backlight";
+ };
+ __local_fixups__ {
+ host {
+ rng {
+ iommus = <0x0>;
+ android,pvmfw,target = <0x0>;
+ };
+ light {
+ iommus = <0x0 0x8>;
+ android,pvmfw,target = <0x0>;
+ };
+ led {
+ iommus = <0x0>;
+ android,pvmfw,target = <0x0>;
+ };
+ bus0 {
+ backlight {
+ android,pvmfw,target = <0x0>;
+ };
+ };
+ };
+ };
};
diff --git a/pvmfw/testdata/test_pvmfw_devices_vm_dtbo_without_symbols.dts b/pvmfw/testdata/test_pvmfw_devices_vm_dtbo_without_symbols.dts
index 08444ac..2bc8081 100644
--- a/pvmfw/testdata/test_pvmfw_devices_vm_dtbo_without_symbols.dts
+++ b/pvmfw/testdata/test_pvmfw_devices_vm_dtbo_without_symbols.dts
@@ -1,27 +1,114 @@
/dts-v1/;
-/plugin/;
/ {
- fragment@rng {
- target-path = "/";
- __overlay__ {
- rng {
- compatible = "android,rng";
- android,rng,ignore-gctrl-reset;
- android,pvmfw,phy-reg = <0x0 0x12F00000 0x1000>;
- android,pvmfw,phy-iommu = <0x0 0x12E40000>;
- android,pvmfw,phy-sid = <3>;
- };
- };
- };
-
- fragment@sensor {
- target-path = "/";
- __overlay__ {
- light {
- compatible = "android,light";
- version = <0x1 0x2>;
- };
- };
- };
+ host {
+ #address-cells = <0x2>;
+ #size-cells = <0x1>;
+ rng {
+ reg = <0x0 0x12f00000 0x1000>;
+ iommus = <0x1 0x3>;
+ android,pvmfw,target = <0x2>;
+ };
+ light {
+ reg = <0x0 0x00f00000 0x1000>, <0x0 0x00f10000 0x1000>;
+ iommus = <0x3 0x4>, <0x4 0x5>;
+ android,pvmfw,target = <0x5>;
+ };
+ led {
+ reg = <0x0 0x12000000 0x1000>;
+ iommus = <0x1 0x3>;
+ android,pvmfw,target = <0x6>;
+ };
+ bus0 {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ backlight {
+ reg = <0x300 0x100>;
+ android,pvmfw,target = <0x7>;
+ };
+ };
+ iommu0 {
+ #iommu-cells = <0x1>;
+ android,pvmfw,token = <0x0 0x12e40000>;
+ phandle = <0x1>;
+ };
+ iommu1 {
+ #iommu-cells = <0x1>;
+ android,pvmfw,token = <0x0 0x40000>;
+ phandle = <0x3>;
+ };
+ iommu2 {
+ #iommu-cells = <0x1>;
+ android,pvmfw,token = <0x0 0x50000>;
+ phandle = <0x4>;
+ };
+ };
+ fragment@rng {
+ target-path = "/";
+ __overlay__ {
+ rng {
+ compatible = "android,rng";
+ android,rng,ignore-gctrl-reset;
+ phandle = <0x2>;
+ };
+ };
+ };
+ fragment@sensor {
+ target-path = "/";
+ __overlay__ {
+ light {
+ compatible = "android,light";
+ version = <0x1 0x2>;
+ phandle = <0x5>;
+ };
+ };
+ };
+ fragment@led {
+ target-path = "/";
+ __overlay__ {
+ led {
+ compatible = "android,led";
+ prop = <0x555>;
+ phandle = <0x6>;
+ };
+ };
+ };
+ fragment@backlight {
+ target-path = "/";
+ __overlay__ {
+ bus0 {
+ backlight {
+ compatible = "android,backlight";
+ android,backlight,ignore-gctrl-reset;
+ phandle = <0x7>;
+ };
+ };
+ };
+ };
+ __symbols__ {
+ iommu0 = "/host/iommu0";
+ iommu1 = "/host/iommu1";
+ iommu2 = "/host/iommu2";
+ };
+ __local_fixups__ {
+ host {
+ rng {
+ iommus = <0x0>;
+ android,pvmfw,target = <0x0>;
+ };
+ light {
+ iommus = <0x0 0x8>;
+ android,pvmfw,target = <0x0>;
+ };
+ led {
+ iommus = <0x0>;
+ android,pvmfw,target = <0x0>;
+ };
+ bus0 {
+ backlight {
+ android,pvmfw,target = <0x0>;
+ };
+ };
+ };
+ };
};
diff --git a/pvmfw/testdata/test_pvmfw_devices_with_iommu_id_conflict.dts b/pvmfw/testdata/test_pvmfw_devices_with_iommu_id_conflict.dts
new file mode 100644
index 0000000..a9e30be
--- /dev/null
+++ b/pvmfw/testdata/test_pvmfw_devices_with_iommu_id_conflict.dts
@@ -0,0 +1,46 @@
+/dts-v1/;
+/plugin/;
+
+/include/ "test_crosvm_dt_base.dtsi"
+
+/ {
+ rng@90000000 {
+ compatible = "android,rng";
+ reg = <0x0 0x9 0x0 0xFF>;
+ interrupts = <0x0 0xF 0x4>;
+ google,eh,ignore-gctrl-reset;
+ status = "okay";
+ iommus = <&pviommu_0 0x0>, <&pviommu_1 0x1>;
+ };
+
+ pviommu_0: pviommu0 {
+ compatible = "pkvm,pviommu";
+ id = <0x4>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_1: pviommu1 {
+ compatible = "pkvm,pviommu";
+ id = <0x9>;
+ #iommu-cells = <1>;
+ };
+
+ light@70000000 {
+ compatible = "android,light";
+ reg = <0x0 0x100 0x0 0x100>, <0x0 0x200 0x0 0x100>;
+ interrupts = <0x0 0xF 0x5>;
+ iommus = <&pviommu_a 0xA>, <&pviommu_b 0xB>;
+ };
+
+ pviommu_a: pviommua {
+ compatible = "pkvm,pviommu";
+ id = <0x40>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_b: pviommub {
+ compatible = "pkvm,pviommu";
+ id = <0x9>;
+ #iommu-cells = <1>;
+ };
+};
diff --git a/pvmfw/testdata/test_pvmfw_devices_with_iommu_sharing.dts b/pvmfw/testdata/test_pvmfw_devices_with_iommu_sharing.dts
new file mode 100644
index 0000000..78ff868
--- /dev/null
+++ b/pvmfw/testdata/test_pvmfw_devices_with_iommu_sharing.dts
@@ -0,0 +1,28 @@
+/dts-v1/;
+/plugin/;
+
+/include/ "test_crosvm_dt_base.dtsi"
+
+/ {
+ rng@90000000 {
+ compatible = "android,rng";
+ reg = <0x0 0x9 0x0 0xFF>;
+ interrupts = <0x0 0xF 0x4>;
+ google,eh,ignore-gctrl-reset;
+ status = "okay";
+ iommus = <&pviommu_0 0xFF0>;
+ };
+
+ led@70000000 {
+ compatible = "android,led";
+ reg = <0x0 0x100 0x0 0x9>;
+ interrupts = <0x0 0xF 0x5>;
+ iommus = <&pviommu_0 0xFF0>;
+ };
+
+ pviommu_0: pviommu0 {
+ compatible = "pkvm,pviommu";
+ id = <0x4>;
+ #iommu-cells = <1>;
+ };
+};
diff --git a/pvmfw/testdata/test_pvmfw_devices_with_multiple_devices_iommus.dts b/pvmfw/testdata/test_pvmfw_devices_with_multiple_devices_iommus.dts
new file mode 100644
index 0000000..ca7e7f3
--- /dev/null
+++ b/pvmfw/testdata/test_pvmfw_devices_with_multiple_devices_iommus.dts
@@ -0,0 +1,39 @@
+/dts-v1/;
+/plugin/;
+
+/include/ "test_crosvm_dt_base.dtsi"
+/ {
+ rng@90000000 {
+ compatible = "android,rng";
+ reg = <0x0 0x9 0x0 0xFF>;
+ interrupts = <0x0 0xF 0x4>;
+ google,eh,ignore-gctrl-reset;
+ status = "okay";
+ iommus = <&pviommu_0 0xFF0>;
+ };
+
+ pviommu_0: pviommu0 {
+ compatible = "pkvm,pviommu";
+ id = <0x4>;
+ #iommu-cells = <1>;
+ };
+
+ light@70000000 {
+ compatible = "android,light";
+ reg = <0x0 0x100 0x0 0x1000>, <0x0 0x200 0x0 0x1000>;
+ interrupts = <0x0 0xF 0x5>;
+ iommus = <&pviommu_a 0xFFA>, <&pviommu_b 0xFFB>;
+ };
+
+ pviommu_a: pviommua {
+ compatible = "pkvm,pviommu";
+ id = <0x40>;
+ #iommu-cells = <1>;
+ };
+
+ pviommu_b: pviommub {
+ compatible = "pkvm,pviommu";
+ id = <0x50>;
+ #iommu-cells = <1>;
+ };
+};
diff --git a/pvmfw/testdata/test_pvmfw_devices_with_rng.dts b/pvmfw/testdata/test_pvmfw_devices_with_rng.dts
index f24fd65..a987098 100644
--- a/pvmfw/testdata/test_pvmfw_devices_with_rng.dts
+++ b/pvmfw/testdata/test_pvmfw_devices_with_rng.dts
@@ -1,52 +1,21 @@
/dts-v1/;
/plugin/;
+/include/ "test_crosvm_dt_base.dtsi"
+
/ {
- chosen {
- stdout-path = "/uart@3f8";
- linux,pci-probe-only = <1>;
- };
-
- memory {
- device_type = "memory";
- reg = <0x00 0x80000000 0xFFFFFFFF>;
- };
-
- reserved-memory {
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- swiotlb: restricted_dma_reserved {
- compatible = "restricted-dma-pool";
- reg = <0xFFFFFFFF>;
- size = <0xFFFFFFFF>;
- alignment = <0xFFFFFFFF>;
- };
-
- dice {
- compatible = "google,open-dice";
- no-map;
- reg = <0xFFFFFFFF>;
- };
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
- cpu@0 {
- device_type = "cpu";
- };
- cpu@1 {
- device_type = "cpu";
- reg = <0x00 0x80000000 0xFFFFFFFF>;
- };
- };
-
rng@90000000 {
compatible = "android,rng";
reg = <0x0 0x9 0x0 0xFF>;
interrupts = <0x0 0xF 0x4>;
google,eh,ignore-gctrl-reset;
status = "okay";
+ iommus = <&pviommu_0 0xFF0>;
+ };
+
+ pviommu_0: pviommu0 {
+ compatible = "pkvm,pviommu";
+ id = <0x4>;
+ #iommu-cells = <1>;
};
};
diff --git a/pvmfw/testdata/test_pvmfw_devices_without_device.dts b/pvmfw/testdata/test_pvmfw_devices_without_device.dts
new file mode 100644
index 0000000..ee0be3a
--- /dev/null
+++ b/pvmfw/testdata/test_pvmfw_devices_without_device.dts
@@ -0,0 +1,7 @@
+/dts-v1/;
+/plugin/;
+
+/include/ "test_crosvm_dt_base.dtsi"
+
+/ {
+};
diff --git a/pvmfw/testdata/test_pvmfw_devices_without_iommus.dts b/pvmfw/testdata/test_pvmfw_devices_without_iommus.dts
new file mode 100644
index 0000000..1a12c87
--- /dev/null
+++ b/pvmfw/testdata/test_pvmfw_devices_without_iommus.dts
@@ -0,0 +1,19 @@
+/dts-v1/;
+/plugin/;
+
+/include/ "test_crosvm_dt_base.dtsi"
+
+/ {
+ bus0 {
+ #address-cells = <0x2>;
+ #size-cells = <0x2>;
+
+ backlight@90000000 {
+ compatible = "android,backlight";
+ reg = <0x0 0x9 0x0 0xFF>;
+ interrupts = <0x0 0xF 0x4>;
+ google,eh,ignore-gctrl-reset;
+ status = "okay";
+ };
+ };
+};
diff --git a/rialto/Android.bp b/rialto/Android.bp
index 326f6fc..5e7fe1f 100644
--- a/rialto/Android.bp
+++ b/rialto/Android.bp
@@ -13,13 +13,14 @@
"libbssl_ffi_nostd",
"libciborium_io_nostd",
"libciborium_nostd",
+ "libcstr",
"libdiced_open_dice_nostd",
- "libdiced_sample_inputs_nostd",
"libhyp",
"libfdtpci",
"liblibfdt",
"liblog_rust_nostd",
"libservice_vm_comm_nostd",
+ "libservice_vm_fake_chain_nostd",
"libservice_vm_requests_nostd",
"libtinyvec_nostd",
"libvirtio_drivers",
@@ -62,6 +63,28 @@
srcs: [":avb_testkey_rsa4096"],
}
+// Both SERVICE_VM_VERSION and SERVICE_VM_VERSION_STRING should represent the
+// same version number for the service VM.
+SERVICE_VM_VERSION = 1
+SERVICE_VM_VERSION_STRING = "1"
+
+genrule {
+ name: "service_vm_version_rs",
+ out: ["lib.rs"],
+ cmd: "(" +
+ " echo '#![no_std]';" +
+ " echo '#![allow(missing_docs)]';" +
+ " echo 'pub const VERSION: u64 = " + SERVICE_VM_VERSION_STRING + ";'" +
+ ") > $(out)",
+}
+
+rust_library_rlib {
+ name: "libservice_vm_version",
+ crate_name: "service_vm_version",
+ defaults: ["vmbase_rlib_defaults"],
+ srcs: [":service_vm_version_rs"],
+}
+
avb_add_hash_footer {
name: "rialto_signed",
src: ":empty_file",
@@ -69,6 +92,7 @@
partition_name: "boot",
private_key: ":rialto_sign_key",
salt: rialto_salt,
+ rollback_index: SERVICE_VM_VERSION,
props: [
{
name: "com.android.virt.cap",
@@ -108,15 +132,21 @@
"android.system.virtualizationservice-rust",
"libandroid_logger",
"libanyhow",
+ "libbssl_avf_nostd",
"libciborium",
+ "libclient_vm_csr",
+ "libcoset",
"liblibc",
"liblog_rust",
"libservice_vm_comm",
+ "libservice_vm_fake_chain",
"libservice_vm_manager",
"libvmclient",
+ "libx509_parser",
],
data: [
":rialto_unsigned",
+ ":test_rkp_cert_chain",
],
test_suites: ["general-tests"],
enabled: false,
diff --git a/rialto/src/fdt.rs b/rialto/src/fdt.rs
index 8bb40c3..09cdd36 100644
--- a/rialto/src/fdt.rs
+++ b/rialto/src/fdt.rs
@@ -15,8 +15,8 @@
//! High-level FDT functions.
use core::ops::Range;
+use cstr::cstr;
use libfdt::{Fdt, FdtError};
-use vmbase::cstr;
/// Reads the DICE data range from the given `fdt`.
pub fn read_dice_range_from(fdt: &Fdt) -> libfdt::Result<Range<usize>> {
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index d9cffe0..1215021 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -37,7 +37,8 @@
use hyp::{get_mem_sharer, get_mmio_guard};
use libfdt::FdtError;
use log::{debug, error, info};
-use service_vm_comm::{ServiceVmRequest, VmType};
+use service_vm_comm::{RequestProcessingError, Response, ServiceVmRequest, VmType};
+use service_vm_fake_chain::service_vm;
use service_vm_requests::process_request;
use virtio_drivers::{
device::socket::{VsockAddr, VMADDR_CID_HOST},
@@ -163,9 +164,7 @@
}
// Currently, a sample DICE data is used for non-protected VMs, as these VMs only run
// in tests at the moment.
- // If we intend to run non-protected rialto in production, we should retrieve real
- // DICE chain data instead.
- VmType::NonProtectedVm => Box::new(diced_sample_inputs::make_sample_bcc_and_cdis()?),
+ VmType::NonProtectedVm => Box::new(service_vm::fake_service_vm_dice_artifacts()?),
};
let pci_info = PciInfo::from_fdt(fdt)?;
@@ -178,7 +177,15 @@
let mut vsock_stream = VsockStream::new(socket_device, host_addr())?;
while let ServiceVmRequest::Process(req) = vsock_stream.read_request()? {
- let response = process_request(req, bcc_handover.as_ref());
+ let mut response = process_request(req, bcc_handover.as_ref());
+ // TODO(b/185878400): We don't want to issue a certificate to a pVM while the client VM
+ // attestation flow is unfinished. The following code should be removed once the
+ // verification is complete.
+ if vm_type() == VmType::ProtectedVm
+ && matches!(response, Response::RequestClientVmAttestation(_))
+ {
+ response = Response::Err(RequestProcessingError::OperationUnimplemented);
+ }
vsock_stream.write_response(&response)?;
vsock_stream.flush()?;
}
diff --git a/rialto/tests/test.rs b/rialto/tests/test.rs
index e13b7a1..02a5a28 100644
--- a/rialto/tests/test.rs
+++ b/rialto/tests/test.rs
@@ -22,20 +22,35 @@
binder::{ParcelFileDescriptor, ProcessState},
};
use anyhow::{bail, Context, Result};
+use bssl_avf::{sha256, EcKey, PKey};
use ciborium::value::Value;
+use client_vm_csr::generate_attestation_key_and_csr;
+use coset::{CborSerializable, CoseMac0, CoseSign};
use log::info;
use service_vm_comm::{
- EcdsaP256KeyPair, GenerateCertificateRequestParams, Request, Response, VmType,
+ ClientVmAttestationParams, Csr, CsrPayload, EcdsaP256KeyPair, GenerateCertificateRequestParams,
+ Request, RequestProcessingError, Response, VmType,
+};
+use service_vm_fake_chain::client_vm::{
+ fake_client_vm_dice_artifacts, fake_sub_components, SubComponent,
};
use service_vm_manager::ServiceVm;
+use std::fs;
use std::fs::File;
use std::io;
use std::panic;
use std::path::PathBuf;
use vmclient::VmInstance;
+use x509_parser::{
+ certificate::X509Certificate,
+ der_parser::{ber::BerObject, der::parse_der, oid, oid::Oid},
+ prelude::FromDer,
+ x509::{AlgorithmIdentifier, SubjectPublicKeyInfo, X509Version},
+};
const UNSIGNED_RIALTO_PATH: &str = "/data/local/tmp/rialto_test/arm64/rialto_unsigned.bin";
const INSTANCE_IMG_PATH: &str = "/data/local/tmp/rialto_test/arm64/instance.img";
+const TEST_CERT_CHAIN_PATH: &str = "testdata/rkp_cert_chain.der";
#[test]
fn process_requests_in_protected_vm() -> Result<()> {
@@ -51,8 +66,9 @@
let mut vm = start_service_vm(vm_type)?;
check_processing_reverse_request(&mut vm)?;
- let maced_public_key = check_processing_generating_key_pair_request(&mut vm)?;
- check_processing_generating_certificate_request(&mut vm, maced_public_key)?;
+ let key_pair = check_processing_generating_key_pair_request(&mut vm)?;
+ check_processing_generating_certificate_request(&mut vm, &key_pair.maced_public_key)?;
+ check_attestation_request(&mut vm, &key_pair, vm_type)?;
Ok(())
}
@@ -68,17 +84,17 @@
Ok(())
}
-fn check_processing_generating_key_pair_request(vm: &mut ServiceVm) -> Result<Vec<u8>> {
+fn check_processing_generating_key_pair_request(vm: &mut ServiceVm) -> Result<EcdsaP256KeyPair> {
let request = Request::GenerateEcdsaP256KeyPair;
let response = vm.process_request(request)?;
info!("Received response: {response:?}.");
match response {
- Response::GenerateEcdsaP256KeyPair(EcdsaP256KeyPair { maced_public_key, key_blob }) => {
- assert_array_has_nonzero(&maced_public_key);
- assert_array_has_nonzero(&key_blob);
- Ok(maced_public_key)
+ Response::GenerateEcdsaP256KeyPair(key_pair) => {
+ assert_array_has_nonzero(&key_pair.maced_public_key);
+ assert_array_has_nonzero(&key_pair.key_blob);
+ Ok(key_pair)
}
_ => bail!("Incorrect response type: {response:?}"),
}
@@ -90,10 +106,10 @@
fn check_processing_generating_certificate_request(
vm: &mut ServiceVm,
- maced_public_key: Vec<u8>,
+ maced_public_key: &[u8],
) -> Result<()> {
let params = GenerateCertificateRequestParams {
- keys_to_sign: vec![maced_public_key],
+ keys_to_sign: vec![maced_public_key.to_vec()],
challenge: vec![],
};
let request = Request::GenerateCertificateRequest(params);
@@ -107,6 +123,144 @@
}
}
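+/// Exercises Request::RequestClientVmAttestation with a fake client VM CSR and a test RKP
+/// certificate, then checks the expected outcome for the given VM type.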
+fn check_attestation_request(
+ vm: &mut ServiceVm,
+ remotely_provisioned_key_pair: &EcdsaP256KeyPair,
+ vm_type: VmType,
+) -> Result<()> {
+ /// The following data was generated randomly with urandom.
+ const CHALLENGE: [u8; 16] = [
+ 0x7d, 0x86, 0x58, 0x79, 0x3a, 0x09, 0xdf, 0x1c, 0xa5, 0x80, 0x80, 0x15, 0x2b, 0x13, 0x17,
+ 0x5c,
+ ];
+ let dice_artifacts = fake_client_vm_dice_artifacts()?;
+ let attestation_data = generate_attestation_key_and_csr(&CHALLENGE, &dice_artifacts)?;
+ let cert_chain = fs::read(TEST_CERT_CHAIN_PATH)?;
+ let (remaining, cert) = X509Certificate::from_der(&cert_chain)?;
+
+ // Builds the mock parameters for the client VM attestation.
+ // The `csr` and `remotely_provisioned_key_blob` parameters are generated with the same
+ // libraries as in production.
+ // The `remotely_provisioned_cert` parameter is an RKP certificate extracted from a test
+ // certificate chain retrieved from RKPD.
+ let params = ClientVmAttestationParams {
+ csr: attestation_data.csr.clone().into_cbor_vec()?,
+ remotely_provisioned_key_blob: remotely_provisioned_key_pair.key_blob.to_vec(),
+ remotely_provisioned_cert: cert_chain[..(cert_chain.len() - remaining.len())].to_vec(),
+ };
+ let request = Request::RequestClientVmAttestation(params);
+
+ let response = vm.process_request(request)?;
+ info!("Received response: {response:?}.");
+
+ match response {
+ Response::RequestClientVmAttestation(certificate) => {
+ // The end-to-end test for non-protected VM attestation works because both the service
+ // VM and the client VM use the same fake DICE chain.
+ assert_eq!(vm_type, VmType::NonProtectedVm);
+ check_certificate_for_client_vm(
+ &certificate,
+ &remotely_provisioned_key_pair.maced_public_key,
+ &attestation_data.csr,
+ &cert,
+ )?;
+ Ok(())
+ }
+ Response::Err(RequestProcessingError::InvalidDiceChain) => {
+ // The end-to-end test for protected VM attestation doesn't work because the service VM
+ // compares the fake DICE chain in the CSR with the real DICE chain.
+ // We cannot generate a valid DICE chain with the same payloads up to pvmfw.
+ assert_eq!(vm_type, VmType::ProtectedVm);
+ Ok(())
+ }
+ _ => bail!("Incorrect response type: {response:?}"),
+ }
+}
+
+fn check_vm_components(vm_components: &[BerObject]) -> Result<()> {
+ let expected_components = fake_sub_components();
+ assert_eq!(expected_components.len(), vm_components.len());
+ for i in 0..expected_components.len() {
+ check_vm_component(&vm_components[i], &expected_components[i])?;
+ }
+ Ok(())
+}
+
+fn check_vm_component(vm_component: &BerObject, expected_component: &SubComponent) -> Result<()> {
+ let vm_component = vm_component.as_sequence()?;
+ assert_eq!(4, vm_component.len());
+ assert_eq!(expected_component.name, vm_component[0].as_str()?);
+ assert_eq!(expected_component.version, vm_component[1].as_u64()?);
+ assert_eq!(expected_component.code_hash, vm_component[2].as_slice()?);
+ assert_eq!(expected_component.authority_hash, vm_component[3].as_slice()?);
+ Ok(())
+}
+
+fn check_certificate_for_client_vm(
+ certificate: &[u8],
+ maced_public_key: &[u8],
+ csr: &Csr,
+ parent_certificate: &X509Certificate,
+) -> Result<()> {
+ let cose_mac = CoseMac0::from_slice(maced_public_key)?;
+ let authority_public_key =
+ EcKey::from_cose_public_key_slice(&cose_mac.payload.unwrap()).unwrap();
+ let (remaining, cert) = X509Certificate::from_der(certificate)?;
+ assert!(remaining.is_empty());
+
+ // Checks the certificate signature against the authority public key.
+ const ECDSA_WITH_SHA_256: Oid<'static> = oid!(1.2.840 .10045 .4 .3 .2);
+ let expected_algorithm =
+ AlgorithmIdentifier { algorithm: ECDSA_WITH_SHA_256, parameters: None };
+ assert_eq!(expected_algorithm, cert.signature_algorithm);
+ let digest = sha256(cert.tbs_certificate.as_ref()).unwrap();
+ authority_public_key
+ .ecdsa_verify(cert.signature_value.as_ref(), &digest)
+ .expect("Failed to verify the certificate signature with the authority public key");
+
+ // Checks that the certificate's subject public key is equal to the key in the CSR.
+ let cose_sign = CoseSign::from_slice(&csr.signed_csr_payload)?;
+ let csr_payload =
+ cose_sign.payload.as_ref().and_then(|v| CsrPayload::from_cbor_slice(v).ok()).unwrap();
+ let subject_public_key = EcKey::from_cose_public_key_slice(&csr_payload.public_key).unwrap();
+ let expected_spki_data =
+ PKey::try_from(subject_public_key).unwrap().subject_public_key_info().unwrap();
+ let (remaining, expected_spki) = SubjectPublicKeyInfo::from_der(&expected_spki_data)?;
+ assert!(remaining.is_empty());
+ assert_eq!(&expected_spki, cert.public_key());
+
+ // Checks the certificate extension.
+ const ATTESTATION_EXTENSION_OID: Oid<'static> = oid!(1.3.6 .1 .4 .1 .11129 .2 .1 .29 .1);
+ let extensions = cert.extensions();
+ assert_eq!(1, extensions.len());
+ let extension = &extensions[0];
+ assert_eq!(ATTESTATION_EXTENSION_OID, extension.oid);
+ assert!(!extension.critical);
+ let (remaining, extension) = parse_der(extension.value)?;
+ assert!(remaining.is_empty());
+ let attestation_ext = extension.as_sequence()?;
+ assert_eq!(3, attestation_ext.len());
+ assert_eq!(csr_payload.challenge, attestation_ext[0].as_slice()?);
+ let is_vm_secure = attestation_ext[1].as_bool()?;
+ assert!(
+ !is_vm_secure,
+ "The VM shouldn't be secure as the last payload added in the test is in Debug mode"
+ );
+ let vm_components = attestation_ext[2].as_sequence()?;
+ check_vm_components(vm_components)?;
+
+ // Checks other fields on the certificate.
+ assert_eq!(X509Version::V3, cert.version());
+ assert_eq!(parent_certificate.validity(), cert.validity());
+ assert_eq!(
+ String::from("CN=Android Protected Virtual Machine Key"),
+ cert.subject().to_string()
+ );
+ assert_eq!(parent_certificate.subject(), cert.issuer());
+
+ Ok(())
+}
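+
+// Informal sketch of the attestation extension layout checked above, inferred from the
+// assertions in this test (the ASN.1 names are illustrative, not normative):
+//
+//   AttestationExtension ::= SEQUENCE {
+//       attestationChallenge   OCTET STRING,
+//       isVmSecure             BOOLEAN,
+//       vmComponents           SEQUENCE OF VmComponent
+//   }
+//
+//   VmComponent ::= SEQUENCE {
+//       name            UTF8String,
+//       version         INTEGER,
+//       codeHash        OCTET STRING,
+//       authorityHash   OCTET STRING
+//   }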
+
/// TODO(b/300625792): Check the CSR with libhwtrust once the CSR is complete.
fn check_csr(csr: Vec<u8>) -> Result<()> {
let mut reader = io::Cursor::new(csr);
diff --git a/secretkeeper/dice_policy/Android.bp b/secretkeeper/dice_policy/Android.bp
deleted file mode 100644
index a7ac5b9..0000000
--- a/secretkeeper/dice_policy/Android.bp
+++ /dev/null
@@ -1,35 +0,0 @@
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-rust_defaults {
- name: "libdice_policy.defaults",
- crate_name: "dice_policy",
- defaults: ["avf_build_flags_rust"],
- srcs: ["src/lib.rs"],
- edition: "2021",
- prefer_rlib: true,
- rustlibs: [
- "libanyhow",
- "libciborium",
- "libcoset",
- ],
-}
-
-rust_library {
- name: "libdice_policy",
- defaults: ["libdice_policy.defaults"],
-}
-
-rust_test {
- name: "libdice_policy.test",
- defaults: [
- "libdice_policy.defaults",
- "rdroidtest.defaults",
- ],
- test_suites: ["general-tests"],
- rustlibs: [
- "librustutils",
- "libscopeguard",
- ],
-}
diff --git a/secretkeeper/dice_policy/src/lib.rs b/secretkeeper/dice_policy/src/lib.rs
deleted file mode 100644
index 327b8a4..0000000
--- a/secretkeeper/dice_policy/src/lib.rs
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Copyright (C) 2023 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//! A “DICE policy” is a format for setting constraints on a DICE chain. A DICE chain policy
-//! verifier takes a policy and a DICE chain, and returns a boolean indicating whether the
-//! DICE chain meets the constraints set out on a policy.
-//!
-//! This forms the foundation of Dice Policy aware Authentication (DPA-Auth), where the server
-//! authenticates a client by comparing its dice chain against a set policy.
-//!
-//! Another use is "sealing", where clients can use an appropriately constructed dice policy to
-//! seal a secret. Unsealing is only permitted if dice chain of the component requesting unsealing
-//! complies with the policy.
-//!
-//! A typical policy will assert things like:
-//! # DK_pub must have this value
-//! # The DICE chain must be exactly five certificates long
-//! # authorityHash in the third certificate must have this value
-//! securityVersion in the fourth certificate must be an integer greater than 8
-//!
-//! These constraints used to express policy are (for now) limited to following 2 types:
-//! 1. Exact Match: useful for enforcing rules like authority hash should be exactly equal.
-//! 2. Greater than or equal to: Useful for setting policies that seal
-//! Anti-rollback protected entities (should be accessible to versions >= present).
-//!
-//! Dice Policy CDDL:
-//!
-//! dicePolicy = [
-//! 1, ; dice policy version
-//! + nodeConstraintList ; for each entry in dice chain
-//! ]
-//!
-//! nodeConstraintList = [
-//! * nodeConstraint
-//! ]
-//!
-//! ; We may add a hashConstraint item later
-//! nodeConstraint = exactMatchConstraint / geConstraint
-//!
-//! exactMatchConstraint = [1, keySpec, value]
-//! geConstraint = [2, keySpec, int]
-//!
-//! keySpec = [value+]
-//!
-//! value = bool / int / tstr / bstr
-
-use anyhow::{anyhow, bail, Context, Result};
-use ciborium::Value;
-use coset::{AsCborValue, CoseSign1};
-use std::borrow::Cow;
-
-const DICE_POLICY_VERSION: u64 = 1;
-
-/// Constraint Types supported in Dice policy.
-#[non_exhaustive]
-#[derive(Clone, Copy, Debug, PartialEq)]
-pub enum ConstraintType {
- /// Enforce exact match criteria, indicating the policy should match
- /// if the dice chain has exact same specified values.
- ExactMatch = 1,
- /// Enforce Greater than or equal to criteria. When applied on security_version, this
- /// can be useful to set policy that matches dice chains with same or upgraded images.
- GreaterOrEqual = 2,
-}
-
-/// ConstraintSpec is used to specify which constraint type to apply and
-/// on which all entries in a dice node.
-/// See documentation of `from_dice_chain()` for examples.
-pub struct ConstraintSpec {
- constraint_type: ConstraintType,
- // path is essentially a list of label/int.
- // It identifies which entry (in a dice node) to be applying constraints on.
- path: Vec<i64>,
-}
-
-impl ConstraintSpec {
- /// Construct the ConstraintSpec.
- pub fn new(constraint_type: ConstraintType, path: Vec<i64>) -> Result<Self> {
- Ok(ConstraintSpec { constraint_type, path })
- }
-}
-
-// TODO(b/291238565): Restrict (nested_)key & value type to (bool/int/tstr/bstr).
-// and maybe convert it into struct.
-/// Each constraint (on a dice node) is a tuple: (ConstraintType, constraint_path, value)
-#[derive(Debug, PartialEq)]
-struct Constraint(u16, Vec<i64>, Value);
-
-/// List of all constraints on a dice node.
-#[derive(Debug, PartialEq)]
-struct NodeConstraints(Box<[Constraint]>);
-
-/// Module for working with dice policy.
-#[derive(Debug, PartialEq)]
-pub struct DicePolicy {
- version: u64,
- node_constraints_list: Box<[NodeConstraints]>, // Constraint on each entry in dice chain.
-}
-
-impl DicePolicy {
- /// Construct a dice policy from a given dice chain.
- /// This can be used by clients to construct a policy to seal secrets.
- /// Constraints on all but first dice node is applied using constraint_spec argument.
- /// For the first node (which is a ROT key), the constraint is ExactMatch of the whole node.
- ///
- /// # Arguments
- /// `dice_chain`: The serialized CBOR encoded Dice chain, adhering to Android Profile for DICE.
- /// https://pigweed.googlesource.com/open-dice/+/refs/heads/main/docs/android.md
- ///
- /// `constraint_spec`: List of constraints to be applied on dice node.
- /// Each constraint is a ConstraintSpec object.
- ///
- /// Note: Dice node is treated as a nested map (& so the lookup is done in that fashion).
- ///
- /// Examples of constraint_spec:
- /// 1. For exact_match on auth_hash & greater_or_equal on security_version
- /// constraint_spec =[
- /// (ConstraintType::ExactMatch, vec![AUTHORITY_HASH]),
- /// (ConstraintType::GreaterOrEqual, vec![CONFIG_DESC, COMPONENT_NAME]),
- /// ];
- ///
- /// 2. For hypothetical (and highly simplified) dice chain:
- /// [ROT_KEY, [{1 : 'a', 2 : {200 : 5, 201 : 'b'}}]]
- /// The following can be used
- /// constraint_spec =[
- /// ConstraintSpec(ConstraintType::ExactMatch, vec![1]), // exact_matches value 'a'
- /// ConstraintSpec(ConstraintType::GreaterOrEqual, vec![2, 200]),// matches any value >= 5
- /// ];
- pub fn from_dice_chain(dice_chain: &[u8], constraint_spec: &[ConstraintSpec]) -> Result<Self> {
- // TODO(b/298217847): Check if the given dice chain adheres to Explicit-key DiceCertChain
- // format and if not, convert it before policy construction.
- let dice_chain = value_from_bytes(dice_chain).context("Unable to decode top-level CBOR")?;
- let dice_chain = match dice_chain {
- Value::Array(array) if array.len() >= 2 => array,
- _ => bail!("Expected an array of at least length 2, found: {:?}", dice_chain),
- };
- let mut constraints_list: Vec<NodeConstraints> = Vec::with_capacity(dice_chain.len());
- let mut it = dice_chain.into_iter();
-
- constraints_list.push(NodeConstraints(Box::new([Constraint(
- ConstraintType::ExactMatch as u16,
- Vec::new(),
- it.next().unwrap(),
- )])));
-
- for (n, value) in it.enumerate() {
- let entry = cbor_value_from_cose_sign(value)
- .with_context(|| format!("Unable to get Cose payload at: {}", n))?;
- constraints_list.push(payload_to_constraints(entry, constraint_spec)?);
- }
-
- Ok(DicePolicy {
- version: DICE_POLICY_VERSION,
- node_constraints_list: constraints_list.into_boxed_slice(),
- })
- }
-}
-
-// Take the payload of a dice node & construct the constraints on it.
-fn payload_to_constraints(
- payload: Value,
- constraint_spec: &[ConstraintSpec],
-) -> Result<NodeConstraints> {
- let mut node_constraints: Vec<Constraint> = Vec::with_capacity(constraint_spec.len());
- for constraint_item in constraint_spec {
- let constraint_path = constraint_item.path.to_vec();
- if constraint_path.is_empty() {
- bail!("Expected non-empty key spec");
- }
- let val = lookup_value_in_nested_map(&payload, &constraint_path)
- .context(format!("Value not found for constraint_path {:?}", constraint_path))?;
- let constraint = Constraint(constraint_item.constraint_type as u16, constraint_path, val);
- node_constraints.push(constraint);
- }
- Ok(NodeConstraints(node_constraints.into_boxed_slice()))
-}
-
-// Lookup value corresponding to constraint path in nested map.
-// This function recursively calls itself.
-// The depth of recursion is limited by the size of constraint_path.
-fn lookup_value_in_nested_map(cbor_map: &Value, constraint_path: &[i64]) -> Result<Value> {
- if constraint_path.is_empty() {
- return Ok(cbor_map.clone());
- }
- let explicit_map = get_map_from_value(cbor_map)?;
- let val = lookup_value_in_map(&explicit_map, constraint_path[0])
- .ok_or(anyhow!("Value not found for constraint key: {:?}", constraint_path[0]))?;
- lookup_value_in_nested_map(val, &constraint_path[1..])
-}
-
-fn get_map_from_value(cbor_map: &Value) -> Result<Cow<Vec<(Value, Value)>>> {
- match cbor_map {
- Value::Bytes(b) => value_from_bytes(b)?
- .into_map()
- .map(Cow::Owned)
- .map_err(|e| anyhow!("Expected a cbor map: {:?}", e)),
- Value::Map(map) => Ok(Cow::Borrowed(map)),
- _ => bail!("/Expected a cbor map {:?}", cbor_map),
- }
-}
-
-fn lookup_value_in_map(map: &[(Value, Value)], key: i64) -> Option<&Value> {
- let key = Value::Integer(key.into());
- for (k, v) in map.iter() {
- if k == &key {
- return Some(v);
- }
- }
- None
-}
-
-/// Extract the payload from the COSE Sign
-fn cbor_value_from_cose_sign(cbor: Value) -> Result<Value> {
- let sign1 =
- CoseSign1::from_cbor_value(cbor).map_err(|e| anyhow!("Error extracting CoseKey: {}", e))?;
- match sign1.payload {
- None => bail!("Missing payload"),
- Some(payload) => Ok(value_from_bytes(&payload)?),
- }
-}
-
-/// Decodes the provided binary CBOR-encoded value and returns a
-/// ciborium::Value struct wrapped in Result.
-fn value_from_bytes(mut bytes: &[u8]) -> Result<Value> {
- let value = ciborium::de::from_reader(&mut bytes)?;
- // Ciborium tries to read one Value, & doesn't care if there is trailing data after it. We do.
- if !bytes.is_empty() {
- bail!("Unexpected trailing data while converting to CBOR value");
- }
- Ok(value)
-}
-
-#[cfg(test)]
-rdroidtest::test_main!();
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use ciborium::cbor;
- use coset::{CoseKey, Header, ProtectedHeader};
- use rdroidtest::test;
-
- const AUTHORITY_HASH: i64 = -4670549;
- const CONFIG_DESC: i64 = -4670548;
- const COMPONENT_NAME: i64 = -70002;
- const KEY_MODE: i64 = -4670551;
-
- // Helper struct to encapsulate artifacts that are useful for unit tests.
- struct TestArtifacts {
- // A dice chain.
- input_dice: Vec<u8>,
- // A list of ConstraintSpec that can be applied on the input_dice to get a dice policy.
- constraint_spec: Vec<ConstraintSpec>,
- // The expected dice policy if above constraint_spec is applied to input_dice.
- expected_dice_policy: DicePolicy,
- }
-
- impl TestArtifacts {
- // Get an example instance of TestArtifacts. This uses a hard coded, hypothetical
- // chain of certificates & a list of constraint_spec on this.
- fn get_example() -> Self {
- const EXAMPLE_NUM: i64 = 59765;
- const EXAMPLE_STRING: &str = "testing_dice_policy";
-
- let rot_key = CoseKey::default().to_cbor_value().unwrap();
- let nested_payload = cbor!({
- 100 => EXAMPLE_NUM
- })
- .unwrap();
- let payload = cbor!({
- 1 => EXAMPLE_STRING,
- 2 => "some_other_example_string",
- 3 => Value::Bytes(value_to_bytes(&nested_payload).unwrap()),
- })
- .unwrap();
- let payload = value_to_bytes(&payload).unwrap();
- let dice_node = CoseSign1 {
- protected: ProtectedHeader::default(),
- unprotected: Header::default(),
- payload: Some(payload),
- signature: b"ddef".to_vec(),
- }
- .to_cbor_value()
- .unwrap();
- let input_dice = Value::Array([rot_key.clone(), dice_node].to_vec());
-
- let input_dice = value_to_bytes(&input_dice).unwrap();
-
- // Now construct constraint_spec on the input dice, note this will use the keys
- // which are also hardcoded within the get_dice_chain_helper.
-
- let constraint_spec = vec![
- ConstraintSpec::new(ConstraintType::ExactMatch, vec![1]).unwrap(),
- // Notice how key "2" is (deliberately) absent in ConstraintSpec
- // so policy should not constraint it.
- ConstraintSpec::new(ConstraintType::GreaterOrEqual, vec![3, 100]).unwrap(),
- ];
- let expected_dice_policy = DicePolicy {
- version: 1,
- node_constraints_list: Box::new([
- NodeConstraints(Box::new([Constraint(
- ConstraintType::ExactMatch as u16,
- vec![],
- rot_key.clone(),
- )])),
- NodeConstraints(Box::new([
- Constraint(
- ConstraintType::ExactMatch as u16,
- vec![1],
- Value::Text(EXAMPLE_STRING.to_string()),
- ),
- Constraint(
- ConstraintType::GreaterOrEqual as u16,
- vec![3, 100],
- Value::from(EXAMPLE_NUM),
- ),
- ])),
- ]),
- };
- Self { input_dice, constraint_spec, expected_dice_policy }
- }
- }
-
- test!(policy_structure_check);
- fn policy_structure_check() {
- let example = TestArtifacts::get_example();
- let policy =
- DicePolicy::from_dice_chain(&example.input_dice, &example.constraint_spec).unwrap();
-
- // Assert policy is exactly as expected!
- assert_eq!(policy, example.expected_dice_policy);
- }
-
- test!(policy_dice_size_is_same);
- fn policy_dice_size_is_same() {
- // This is the number of certs in compos bcc (including the first ROT)
- // To analyze a bcc use hwtrust tool from /tools/security/remote_provisioning/hwtrust
- // `hwtrust --verbose dice-chain [path]/composbcc`
- let compos_dice_chain_size: usize = 5;
- let input_dice = include_bytes!("../testdata/composbcc");
- let constraint_spec = [
- ConstraintSpec::new(ConstraintType::ExactMatch, vec![AUTHORITY_HASH]).unwrap(),
- ConstraintSpec::new(ConstraintType::ExactMatch, vec![KEY_MODE]).unwrap(),
- ConstraintSpec::new(ConstraintType::GreaterOrEqual, vec![CONFIG_DESC, COMPONENT_NAME])
- .unwrap(),
- ];
- let policy = DicePolicy::from_dice_chain(input_dice, &constraint_spec).unwrap();
- assert_eq!(policy.node_constraints_list.len(), compos_dice_chain_size);
- }
-
- /// Encodes a ciborium::Value into bytes.
- fn value_to_bytes(value: &Value) -> Result<Vec<u8>> {
- let mut bytes: Vec<u8> = Vec::new();
- ciborium::ser::into_writer(&value, &mut bytes)?;
- Ok(bytes)
- }
-}
diff --git a/secretkeeper/dice_policy/testdata/composbcc b/secretkeeper/dice_policy/testdata/composbcc
deleted file mode 100644
index fb3e006..0000000
--- a/secretkeeper/dice_policy/testdata/composbcc
+++ /dev/null
Binary files differ
diff --git a/service_vm/client_vm_csr/Android.bp b/service_vm/client_vm_csr/Android.bp
new file mode 100644
index 0000000..8d738d8
--- /dev/null
+++ b/service_vm/client_vm_csr/Android.bp
@@ -0,0 +1,37 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+ name: "libclient_vm_csr_defaults",
+ crate_name: "client_vm_csr",
+ srcs: ["src/lib.rs"],
+ rustlibs: [
+ "libanyhow",
+ "libcoset",
+ "libdiced_open_dice",
+ "libopenssl",
+ "libservice_vm_comm",
+ "libzeroize",
+ ],
+}
+
+rust_library {
+ name: "libclient_vm_csr",
+ defaults: ["libclient_vm_csr_defaults"],
+ prefer_rlib: true,
+ apex_available: [
+ "com.android.virt",
+ ],
+}
+
+rust_test {
+ name: "libclient_vm_csr.test",
+ defaults: ["libclient_vm_csr_defaults"],
+ test_suites: ["general-tests"],
+ rustlibs: [
+ "libciborium",
+ "libdiced_sample_inputs",
+ "libhwtrust",
+ ],
+}
diff --git a/service_vm/client_vm_csr/TEST_MAPPING b/service_vm/client_vm_csr/TEST_MAPPING
new file mode 100644
index 0000000..5bc06c0
--- /dev/null
+++ b/service_vm/client_vm_csr/TEST_MAPPING
@@ -0,0 +1,9 @@
+// When adding or removing tests here, don't forget to amend _all_modules list in
+// wireless/android/busytown/ath_config/configs/prod/avf/tests.gcl
+{
+ "avf-presubmit" : [
+ {
+ "name" : "libclient_vm_csr.test"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/service_vm/client_vm_csr/src/lib.rs b/service_vm/client_vm_csr/src/lib.rs
new file mode 100644
index 0000000..512ecaf
--- /dev/null
+++ b/service_vm/client_vm_csr/src/lib.rs
@@ -0,0 +1,242 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Generates the attestation key and CSR for the client VM in remote
+//! attestation.
+
+use anyhow::{anyhow, Context, Result};
+use coset::{
+ iana, CborSerializable, CoseKey, CoseKeyBuilder, CoseSign, CoseSignBuilder, CoseSignature,
+ CoseSignatureBuilder, HeaderBuilder,
+};
+use diced_open_dice::{derive_cdi_leaf_priv, sign, DiceArtifacts, PrivateKey};
+use openssl::{
+ bn::{BigNum, BigNumContext},
+ ec::{EcGroup, EcKey, EcKeyRef},
+ ecdsa::EcdsaSig,
+ nid::Nid,
+ pkey::Private,
+ sha::sha256,
+};
+use service_vm_comm::{Csr, CsrPayload};
+use zeroize::Zeroizing;
+
+/// Key parameters for the attestation key.
+///
+/// See service_vm/comm/client_vm_csr.cddl for more information about the attestation key.
+const ATTESTATION_KEY_NID: Nid = Nid::X9_62_PRIME256V1; // NIST P-256 curve
+const ATTESTATION_KEY_ALGO: iana::Algorithm = iana::Algorithm::ES256;
+const ATTESTATION_KEY_CURVE: iana::EllipticCurve = iana::EllipticCurve::P_256;
+const ATTESTATION_KEY_AFFINE_COORDINATE_SIZE: i32 = 32;
+
+/// Represents the output of generating the attestation key and CSR for the client VM.
+pub struct ClientVmAttestationData {
+ /// DER-encoded ECPrivateKey to be attested.
+ pub private_key: Zeroizing<Vec<u8>>,
+
+ /// CSR containing client VM information and the public key corresponding to the
+ /// private key to be attested.
+ pub csr: Csr,
+}
+
+/// Generates the attestation key and CSR including the public key to be attested for the
+/// client VM in remote attestation.
+pub fn generate_attestation_key_and_csr(
+ challenge: &[u8],
+ dice_artifacts: &dyn DiceArtifacts,
+) -> Result<ClientVmAttestationData> {
+ let group = EcGroup::from_curve_name(ATTESTATION_KEY_NID)?;
+ let attestation_key = EcKey::generate(&group)?;
+
+ let csr = build_csr(challenge, attestation_key.as_ref(), dice_artifacts)?;
+ let private_key = attestation_key.private_key_to_der()?;
+ Ok(ClientVmAttestationData { private_key: Zeroizing::new(private_key), csr })
+}
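+
+// Usage sketch (illustrative, mirroring the end-to-end test in rialto): the client VM
+// generates the key pair and CSR with its own DICE artifacts and a server-provided
+// challenge, serializes the CSR with `into_cbor_vec()`, and keeps `private_key` for use
+// with the certificate issued by the service VM.
+//
+//     let data = generate_attestation_key_and_csr(&challenge, &dice_artifacts)?;
+//     let csr_bytes = data.csr.clone().into_cbor_vec()?;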
+
+fn build_csr(
+ challenge: &[u8],
+ attestation_key: &EcKeyRef<Private>,
+ dice_artifacts: &dyn DiceArtifacts,
+) -> Result<Csr> {
+ // Builds CSR Payload to be signed.
+ let public_key =
+ to_cose_public_key(attestation_key)?.to_vec().context("Failed to serialize public key")?;
+ let csr_payload = CsrPayload { public_key, challenge: challenge.to_vec() };
+ let csr_payload = csr_payload.into_cbor_vec()?;
+
+ // Builds signed CSR Payload.
+ let cdi_leaf_priv = derive_cdi_leaf_priv(dice_artifacts)?;
+ let signed_csr_payload = build_signed_data(csr_payload, &cdi_leaf_priv, attestation_key)?
+ .to_vec()
+ .context("Failed to serialize signed CSR payload")?;
+
+ // Builds CSR.
+ let dice_cert_chain = dice_artifacts.bcc().ok_or(anyhow!("bcc is none"))?.to_vec();
+ Ok(Csr { dice_cert_chain, signed_csr_payload })
+}
+
+fn build_signed_data(
+ payload: Vec<u8>,
+ cdi_leaf_priv: &PrivateKey,
+ attestation_key: &EcKeyRef<Private>,
+) -> Result<CoseSign> {
+ let cdi_leaf_sig_headers = build_signature_headers(iana::Algorithm::EdDSA);
+ let attestation_key_sig_headers = build_signature_headers(ATTESTATION_KEY_ALGO);
+ let aad = &[];
+ let signed_data = CoseSignBuilder::new()
+ .payload(payload)
+ .try_add_created_signature(cdi_leaf_sig_headers, aad, |message| {
+ sign(message, cdi_leaf_priv.as_array()).map(|v| v.to_vec())
+ })?
+ .try_add_created_signature(attestation_key_sig_headers, aad, |message| {
+ ecdsa_sign(message, attestation_key)
+ })?
+ .build();
+ Ok(signed_data)
+}
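+
+// Note on signature order: as in client_vm_csr.cddl, the first COSE signature added above
+// is made with CDI_Leaf_Priv (EdDSA) and the second with the attestation key (ES256); the
+// verification in the tests below relies on this order.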
+
+/// Builds a signature with headers filled with the provided algorithm.
+/// The signature data will be filled later when building the signed data.
+fn build_signature_headers(alg: iana::Algorithm) -> CoseSignature {
+ let protected = HeaderBuilder::new().algorithm(alg).build();
+ CoseSignatureBuilder::new().protected(protected).build()
+}
+
+fn ecdsa_sign(message: &[u8], key: &EcKeyRef<Private>) -> Result<Vec<u8>> {
+ let digest = sha256(message);
+ // Passes the digest to `ECDSA_do_sign` as recommended in the spec:
+ // https://commondatastorage.googleapis.com/chromium-boringssl-docs/ecdsa.h.html#ECDSA_do_sign
+ let sig = EcdsaSig::sign::<Private>(&digest, key)?;
+ Ok(sig.to_der()?)
+}
+
+fn get_affine_coordinates(key: &EcKeyRef<Private>) -> Result<(Vec<u8>, Vec<u8>)> {
+ let mut ctx = BigNumContext::new()?;
+ let mut x = BigNum::new()?;
+ let mut y = BigNum::new()?;
+ key.public_key().affine_coordinates_gfp(key.group(), &mut x, &mut y, &mut ctx)?;
+ let x = x.to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?;
+ let y = y.to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?;
+ Ok((x, y))
+}
+
+fn to_cose_public_key(key: &EcKeyRef<Private>) -> Result<CoseKey> {
+ let (x, y) = get_affine_coordinates(key)?;
+ Ok(CoseKeyBuilder::new_ec2_pub_key(ATTESTATION_KEY_CURVE, x, y)
+ .algorithm(ATTESTATION_KEY_ALGO)
+ .build())
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use anyhow::bail;
+ use ciborium::Value;
+ use coset::{iana::EnumI64, Label};
+ use hwtrust::{dice, session::Session};
+ use openssl::pkey::Public;
+
+ /// The following data was generated randomly with urandom.
+ const CHALLENGE: [u8; 16] = [
+ 0xb3, 0x66, 0xfa, 0x72, 0x92, 0x32, 0x2c, 0xd4, 0x99, 0xcb, 0x00, 0x1f, 0x0e, 0xe0, 0xc7,
+ 0x41,
+ ];
+
+ #[test]
+ fn csr_and_private_key_have_correct_format() -> Result<()> {
+ let dice_artifacts = diced_sample_inputs::make_sample_bcc_and_cdis()?;
+
+ let ClientVmAttestationData { private_key, csr } =
+ generate_attestation_key_and_csr(&CHALLENGE, &dice_artifacts)?;
+ let ec_private_key = EcKey::private_key_from_der(&private_key)?;
+ let cose_sign = CoseSign::from_slice(&csr.signed_csr_payload).unwrap();
+ let aad = &[];
+
+ // Checks CSR payload.
+ let csr_payload =
+ cose_sign.payload.as_ref().and_then(|v| CsrPayload::from_cbor_slice(v).ok()).unwrap();
+ let public_key = to_cose_public_key(&ec_private_key)?.to_vec().unwrap();
+ let expected_csr_payload = CsrPayload { challenge: CHALLENGE.to_vec(), public_key };
+ assert_eq!(expected_csr_payload, csr_payload);
+
+ // Checks the first signature is signed with CDI_Leaf_Priv.
+ let session = Session::default();
+ let chain = dice::Chain::from_cbor(&session, &csr.dice_cert_chain)?;
+ let public_key = chain.leaf().subject_public_key();
+ cose_sign
+ .verify_signature(0, aad, |signature, message| public_key.verify(signature, message))?;
+
+ // Checks the second signature is signed with attestation key.
+ let attestation_public_key = CoseKey::from_slice(&csr_payload.public_key).unwrap();
+ let ec_public_key = to_ec_public_key(&attestation_public_key)?;
+ cose_sign.verify_signature(1, aad, |signature, message| {
+ ecdsa_verify(signature, message, &ec_public_key)
+ })?;
+
+ // Verifies that private key and the public key form a valid key pair.
+ let message = b"test message";
+ let signature = ecdsa_sign(message, &ec_private_key)?;
+ ecdsa_verify(&signature, message, &ec_public_key)?;
+
+ Ok(())
+ }
+
+ fn ecdsa_verify(
+ signature: &[u8],
+ message: &[u8],
+ ec_public_key: &EcKeyRef<Public>,
+ ) -> Result<()> {
+ let sig = EcdsaSig::from_der(signature)?;
+ let digest = sha256(message);
+ if sig.verify(&digest, ec_public_key)? {
+ Ok(())
+ } else {
+ bail!("Signature does not match")
+ }
+ }
+
+ fn to_ec_public_key(cose_key: &CoseKey) -> Result<EcKey<Public>> {
+ check_ec_key_params(cose_key)?;
+ let group = EcGroup::from_curve_name(ATTESTATION_KEY_NID)?;
+ let x = get_label_value_as_bignum(cose_key, Label::Int(iana::Ec2KeyParameter::X.to_i64()))?;
+ let y = get_label_value_as_bignum(cose_key, Label::Int(iana::Ec2KeyParameter::Y.to_i64()))?;
+ let key = EcKey::from_public_key_affine_coordinates(&group, &x, &y)?;
+ key.check_key()?;
+ Ok(key)
+ }
+
+ fn check_ec_key_params(cose_key: &CoseKey) -> Result<()> {
+ assert_eq!(coset::KeyType::Assigned(iana::KeyType::EC2), cose_key.kty);
+ assert_eq!(Some(coset::Algorithm::Assigned(ATTESTATION_KEY_ALGO)), cose_key.alg);
+ let crv = get_label_value(cose_key, Label::Int(iana::Ec2KeyParameter::Crv.to_i64()))?;
+ assert_eq!(&Value::from(ATTESTATION_KEY_CURVE.to_i64()), crv);
+ Ok(())
+ }
+
+ fn get_label_value_as_bignum(key: &CoseKey, label: Label) -> Result<BigNum> {
+ get_label_value(key, label)?
+ .as_bytes()
+ .map(|v| BigNum::from_slice(&v[..]).unwrap())
+ .ok_or_else(|| anyhow!("Value not a bstr."))
+ }
+
+ fn get_label_value(key: &CoseKey, label: Label) -> Result<&Value> {
+ Ok(&key
+ .params
+ .iter()
+ .find(|(k, _)| k == &label)
+ .ok_or_else(|| anyhow!("Label {:?} not found", label))?
+ .1)
+ }
+}
diff --git a/service_vm/comm/Android.bp b/service_vm/comm/Android.bp
index 6e05587..bf923a4 100644
--- a/service_vm/comm/Android.bp
+++ b/service_vm/comm/Android.bp
@@ -23,7 +23,9 @@
rustlibs: [
"libbssl_avf_error_nostd",
"libciborium_nostd",
+ "libcbor_util_nostd",
"libcoset_nostd",
+ "libder_nostd",
"liblog_rust_nostd",
"libserde_nostd",
],
@@ -35,6 +37,7 @@
rustlibs: [
"libbssl_avf_error",
"libciborium",
+ "libcbor_util",
"libcoset",
"liblog_rust",
"libserde",
diff --git a/service_vm/comm/src/client_vm_csr.cddl b/service_vm/comm/src/client_vm_csr.cddl
new file mode 100644
index 0000000..bbc709a
--- /dev/null
+++ b/service_vm/comm/src/client_vm_csr.cddl
@@ -0,0 +1,62 @@
+; CDDL for the CSR sent from the client VM to the RKP VM for pVM remote attestation.
+
+Csr = [
+ DiceCertChain, ; The DICE chain containing measurement of the client VM. See
+ ; keymint/generateCertificateRequestV2.cddl for the DiceCertChain
+ ; definition.
+ SignedData,
+]
+
+; COSE_Sign [RFC9052 s4.1]
+SignedData = [
+ protected: {}, ; The signing algorithms are specified in each signature
+ ; separately.
+ unprotected: {},
+ payload: bstr .cbor CsrPayload,
+ Signatures,
+]
+
+CsrPayload = [ ; CBOR Array defining the payload for CSR
+ challenge: bstr .size (0..64), ; The challenge is provided by the client server.
+ ; It will be included in the certificate chain in the
+ ; attestation result, serving as proof of the freshness
+ ; of the result.
+ PublicKey, ; COSE_Key encoded EC P-256 public key [ RFC9053 s7.1.1 ]
+ ; to be attested. See keymint/PublicKey.cddl for the
+ ; definition, the test flag `-70000` is never used.
+]
+
+Signatures = [
+ dice_cdi_leaf_signature: COSE_Signature_Dice_Cdi_Leaf,
+ attestation_key_signature: COSE_Signature_Attestation_Key,
+]
+
+; COSE_Signature [RFC9052 s4.1]
+COSE_Signature_Dice_Cdi_Leaf = [
+ protected: bstr .cbor { 1: AlgorithmEdDSA },
+ unprotected: {},
+ signature: bstr, ; Ed25519(CDI_Leaf_Priv, SigStruct)
+]
+
+; COSE_Signature [RFC9052 s4.1]
+COSE_Signature_Attestation_Key = [
+ protected: bstr .cbor { 1: AlgorithmES256 },
+ unprotected: {},
+ signature: bstr, ; ECDSA(PrivateKey, SigStruct)
+]
+
+; Sig_structure for SignedData [ RFC9052 s4.4 ]
+SigStruct = {
+ context: "Signature",
+ external_aad: bstr .size 0,
+ payload: bstr .cbor CsrPayload,
+}
+
+; ASN.1 DER-encoded EC P-256 ECPrivateKey [ RFC 5915 s3 ]:
+; ECPrivateKey ::= SEQUENCE {
+; version INTEGER { ecPrivkeyVer1(1) } (ecPrivkeyVer1),
+; privateKey OCTET STRING,
+; parameters [0] ECParameters {{ NamedCurve }} OPTIONAL,
+; publicKey [1] BIT STRING OPTIONAL
+;}
+PrivateKey = bstr
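+
+; Illustrative (non-normative) Csr instance in CBOR diagnostic notation, with byte strings
+; abbreviated. COSE algorithm labels: -8 = EdDSA, -7 = ES256.
+;
+;   [
+;     DiceCertChain,
+;     [
+;       {}, {},                          ; protected / unprotected headers
+;       << [challenge, PublicKey] >>,    ; payload: CsrPayload
+;       [
+;         [<< {1: -8} >>, {}, h'...'],   ; COSE_Signature_Dice_Cdi_Leaf
+;         [<< {1: -7} >>, {}, h'...']    ; COSE_Signature_Attestation_Key
+;       ]
+;     ]
+;   ]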
diff --git a/service_vm/comm/src/csr.rs b/service_vm/comm/src/csr.rs
index 5e1cbad..a87d28f 100644
--- a/service_vm/comm/src/csr.rs
+++ b/service_vm/comm/src/csr.rs
@@ -17,10 +17,13 @@
use alloc::vec;
use alloc::vec::Vec;
+use cbor_util::{cbor_value_type, value_to_bytes};
use ciborium::Value;
use coset::{self, CborSerializable, CoseError};
/// Represents a CSR sent from the client VM to the service VM for attestation.
+///
+/// See client_vm_csr.cddl for the definition of the CSR.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Csr {
/// The DICE certificate chain of the client VM.
@@ -52,8 +55,8 @@
return Err(CoseError::UnexpectedItem("array", "array with 2 items"));
}
Ok(Self {
- signed_csr_payload: try_as_bytes(arr.remove(1))?,
- dice_cert_chain: try_as_bytes(arr.remove(0))?,
+ signed_csr_payload: value_to_bytes(arr.remove(1), "signed_csr_payload")?,
+ dice_cert_chain: value_to_bytes(arr.remove(0), "dice_cert_chain")?,
})
}
}
@@ -91,31 +94,8 @@
return Err(CoseError::UnexpectedItem("array", "array with 2 items"));
}
Ok(Self {
- challenge: try_as_bytes(arr.remove(1))?,
- public_key: try_as_bytes(arr.remove(0))?,
+ challenge: value_to_bytes(arr.remove(1), "challenge")?,
+ public_key: value_to_bytes(arr.remove(0), "public_key")?,
})
}
}
-
-fn try_as_bytes(v: Value) -> coset::Result<Vec<u8>> {
- if let Value::Bytes(data) = v {
- Ok(data)
- } else {
- Err(CoseError::UnexpectedItem(cbor_value_type(&v), "bytes"))
- }
-}
-
-fn cbor_value_type(v: &Value) -> &'static str {
- match v {
- Value::Integer(_) => "int",
- Value::Bytes(_) => "bstr",
- Value::Float(_) => "float",
- Value::Text(_) => "tstr",
- Value::Bool(_) => "bool",
- Value::Null => "nul",
- Value::Tag(_, _) => "tag",
- Value::Array(_) => "array",
- Value::Map(_) => "map",
- _ => "other",
- }
-}
diff --git a/service_vm/comm/src/lib.rs b/service_vm/comm/src/lib.rs
index 0818f24..bb85a26 100644
--- a/service_vm/comm/src/lib.rs
+++ b/service_vm/comm/src/lib.rs
@@ -25,7 +25,7 @@
pub use csr::{Csr, CsrPayload};
pub use message::{
- EcdsaP256KeyPair, GenerateCertificateRequestParams, Request, RequestProcessingError, Response,
- ServiceVmRequest,
+ ClientVmAttestationParams, EcdsaP256KeyPair, GenerateCertificateRequestParams, Request,
+ RequestProcessingError, Response, ServiceVmRequest,
};
pub use vsock::VmType;
diff --git a/service_vm/comm/src/message.rs b/service_vm/comm/src/message.rs
index f8d7420..80a9608 100644
--- a/service_vm/comm/src/message.rs
+++ b/service_vm/comm/src/message.rs
@@ -50,6 +50,29 @@
/// Creates a certificate signing request to be sent to the
/// provisioning server.
GenerateCertificateRequest(GenerateCertificateRequestParams),
+
+ /// Requests the service VM to attest the client VM and issue a certificate
+ /// if the attestation succeeds.
+ RequestClientVmAttestation(ClientVmAttestationParams),
+}
+
+/// Represents the params passed to `Request::RequestClientVmAttestation`.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ClientVmAttestationParams {
+ /// The CBOR-encoded CSR signed by the CDI_Leaf_Priv of the client VM's DICE chain
+ /// and the private key to be attested.
+ /// See client_vm_csr.cddl for the definition of the CSR.
+ pub csr: Vec<u8>,
+
+ /// The key blob retrieved from RKPD by virtualizationservice.
+ pub remotely_provisioned_key_blob: Vec<u8>,
+
+ /// The leaf certificate of the certificate chain retrieved from RKPD by
+ /// virtualizationservice.
+ ///
+ /// This certificate is a DER-encoded X.509 certificate that includes the remotely
+ /// provisioned public key.
+ pub remotely_provisioned_cert: Vec<u8>,
}
/// Represents a response to a request sent to the service VM.
@@ -66,6 +89,11 @@
/// Returns a CBOR Certificate Signing Request (Csr) serialized into a byte array.
GenerateCertificateRequest(Vec<u8>),
+ /// Returns a certificate covering the public key to be attested in the provided CSR.
+ /// The certificate is signed by the remotely provisioned private key and also
+ /// includes an extension that describes the attested client VM.
+ RequestClientVmAttestation(Vec<u8>),
+
/// Encountered an error during the request processing.
Err(RequestProcessingError),
}
@@ -93,6 +121,18 @@
/// The DICE chain of the service VM is missing.
MissingDiceChain,
+
+ /// Failed to decrypt the remotely provisioned key blob.
+ FailedToDecryptKeyBlob,
+
+ /// The requested operation has not been implemented.
+ OperationUnimplemented,
+
+ /// An error happened during the DER encoding/decoding.
+ DerError,
+
+ /// The DICE chain from the client VM is invalid.
+ InvalidDiceChain,
}
impl fmt::Display for RequestProcessingError {
@@ -109,6 +149,18 @@
write!(f, "An error happened when serializing to/from a CBOR Value.")
}
Self::MissingDiceChain => write!(f, "The DICE chain of the service VM is missing"),
+ Self::FailedToDecryptKeyBlob => {
+ write!(f, "Failed to decrypt the remotely provisioned key blob")
+ }
+ Self::OperationUnimplemented => {
+ write!(f, "The requested operation has not been implemented")
+ }
+ Self::DerError => {
+ write!(f, "An error happened during the DER encoding/decoding")
+ }
+ Self::InvalidDiceChain => {
+ write!(f, "The DICE chain from the client VM is invalid")
+ }
}
}
}
@@ -133,6 +185,14 @@
}
}
+#[cfg(not(feature = "std"))]
+impl From<der::Error> for RequestProcessingError {
+ fn from(e: der::Error) -> Self {
+ error!("DER encoding/decoding error: {e}");
+ Self::DerError
+ }
+}
+
/// Represents the params passed to GenerateCertificateRequest
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct GenerateCertificateRequestParams {
diff --git a/service_vm/comm/src/vsock.rs b/service_vm/comm/src/vsock.rs
index aa7166d..7f7cf25 100644
--- a/service_vm/comm/src/vsock.rs
+++ b/service_vm/comm/src/vsock.rs
@@ -18,7 +18,7 @@
const NON_PROTECTED_VM_PORT: u32 = 5680;
/// VM Type.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VmType {
/// Protected VM.
ProtectedVm,
diff --git a/service_vm/fake_chain/Android.bp b/service_vm/fake_chain/Android.bp
new file mode 100644
index 0000000..2bc7b4e
--- /dev/null
+++ b/service_vm/fake_chain/Android.bp
@@ -0,0 +1,58 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+ name: "libservice_vm_fake_chain_defaults",
+ crate_name: "service_vm_fake_chain",
+ defaults: ["avf_build_flags_rust"],
+ srcs: ["src/lib.rs"],
+ visibility: [
+ "//packages/modules/Virtualization/rialto:__subpackages__",
+ ],
+ prefer_rlib: true,
+ rustlibs: [
+ "libcstr",
+ ],
+}
+
+rust_library {
+ name: "libservice_vm_fake_chain",
+ defaults: ["libservice_vm_fake_chain_defaults"],
+ features: [
+ "std",
+ ],
+ rustlibs: [
+ "libciborium",
+ "libcoset",
+ "libdiced_open_dice",
+ "liblog_rust",
+ "libmicrodroid_kernel_hashes",
+ ],
+}
+
+rust_library_rlib {
+ name: "libservice_vm_fake_chain_nostd",
+ defaults: ["libservice_vm_fake_chain_defaults"],
+ rustlibs: [
+ "libciborium_nostd",
+ "libcoset_nostd",
+ "libdiced_open_dice_nostd",
+ "liblog_rust_nostd",
+ ],
+
+}
diff --git a/service_vm/fake_chain/src/client_vm.rs b/service_vm/fake_chain/src/client_vm.rs
new file mode 100644
index 0000000..44ea898
--- /dev/null
+++ b/service_vm/fake_chain/src/client_vm.rs
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Provides functions to build fake DICE artifacts for client VM in tests.
+
+use crate::service_vm;
+use alloc::vec;
+use alloc::vec::Vec;
+use ciborium::{cbor, value::Value};
+use core::result;
+use coset::CborSerializable;
+use cstr::cstr;
+use diced_open_dice::{
+ hash, retry_bcc_format_config_descriptor, retry_bcc_main_flow, Config, DiceArtifacts,
+ DiceConfigValues, DiceError, DiceMode, InputValues, OwnedDiceArtifacts, Result, HASH_SIZE,
+ HIDDEN_SIZE,
+};
+use log::error;
+use microdroid_kernel_hashes::{INITRD_DEBUG_HASH, KERNEL_HASH};
+
+type CborResult<T> = result::Result<T, ciborium::value::Error>;
+
+/// All the following data are generated with urandom.
+const CODE_HASH_PAYLOAD: [u8; HASH_SIZE] = [
+ 0x08, 0x78, 0xc2, 0x5b, 0xe7, 0xea, 0x3d, 0x62, 0x70, 0x22, 0xd9, 0x1c, 0x4f, 0x3c, 0x2e, 0x2f,
+ 0x0f, 0x97, 0xa4, 0x6f, 0x6d, 0xd5, 0xe6, 0x4a, 0x6d, 0xbe, 0x34, 0x2e, 0x56, 0x04, 0xaf, 0xef,
+ 0x74, 0x3f, 0xec, 0xb8, 0x44, 0x11, 0xf4, 0x2f, 0x05, 0xb2, 0x06, 0xa3, 0x0e, 0x75, 0xb7, 0x40,
+ 0x9a, 0x4c, 0x58, 0xab, 0x96, 0xe7, 0x07, 0x97, 0x07, 0x86, 0x5c, 0xa1, 0x42, 0x12, 0xf0, 0x34,
+];
+const AUTHORITY_HASH_PAYLOAD: [u8; HASH_SIZE] = [
+ 0xc7, 0x97, 0x5b, 0xa9, 0x9e, 0xbf, 0x0b, 0xeb, 0xe7, 0x7f, 0x69, 0x8f, 0x8e, 0xcf, 0x04, 0x7d,
+ 0x2c, 0x0f, 0x4d, 0xbe, 0xcb, 0xf5, 0xf1, 0x4c, 0x1d, 0x1c, 0xb7, 0x44, 0xdf, 0xf8, 0x40, 0x90,
+ 0x09, 0x65, 0xab, 0x01, 0x34, 0x3e, 0xc2, 0xc4, 0xf7, 0xa2, 0x3a, 0x5c, 0x4e, 0x76, 0x4f, 0x42,
+ 0xa8, 0x6c, 0xc9, 0xf1, 0x7b, 0x12, 0x80, 0xa4, 0xef, 0xa2, 0x4d, 0x72, 0xa1, 0x21, 0xe2, 0x47,
+];
+const APK1_CODE_HASH: &[u8] = &[
+ 0x41, 0x92, 0x0d, 0xd0, 0xf5, 0x60, 0xe3, 0x69, 0x26, 0x7f, 0xb8, 0xbc, 0x12, 0x3a, 0xd1, 0x95,
+ 0x1d, 0xb8, 0x9a, 0x9c, 0x3a, 0x3f, 0x01, 0xbf, 0xa8, 0xd9, 0x6d, 0xe9, 0x90, 0x30, 0x1d, 0x0b,
+];
+const APK1_AUTHORITY_HASH: &[u8] = &[
+ 0xe3, 0xd9, 0x1c, 0xf5, 0x6f, 0xee, 0x73, 0x40, 0x3d, 0x95, 0x59, 0x67, 0xea, 0x5d, 0x01, 0xfd,
+ 0x25, 0x9d, 0x5c, 0x88, 0x94, 0x3a, 0xc6, 0xd7, 0xa9, 0xdc, 0x4c, 0x60, 0x81, 0xbe, 0x2b, 0x74,
+];
+const APEX1_CODE_HASH: &[u8] = &[
+ 0x52, 0x93, 0x2b, 0xb0, 0x8d, 0xec, 0xdf, 0x54, 0x1f, 0x5c, 0x10, 0x9d, 0x17, 0xce, 0x7f, 0xac,
+ 0xb0, 0x2b, 0xe2, 0x99, 0x05, 0x7d, 0xa3, 0x9b, 0xa6, 0x3e, 0xf9, 0x99, 0xa2, 0xea, 0xd4, 0xd9,
+];
+const APEX1_AUTHORITY_HASH: &[u8] = &[
+ 0xd1, 0xfc, 0x3d, 0x5f, 0xa0, 0x5f, 0x02, 0xd0, 0x83, 0x9b, 0x0e, 0x32, 0xc2, 0x27, 0x09, 0x12,
+ 0xcc, 0xfc, 0x42, 0xf6, 0x0d, 0xf4, 0x7d, 0xc8, 0x80, 0x1a, 0x64, 0x25, 0xa7, 0xfa, 0x4a, 0x37,
+];
+
+#[allow(missing_docs)]
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct SubComponent {
+ pub name: String,
+ pub version: u64,
+ pub code_hash: Vec<u8>,
+ pub authority_hash: Vec<u8>,
+}
+
+impl SubComponent {
+ fn to_value(&self) -> CborResult<Value> {
+ Ok(cbor!({
+ 1 => self.name,
+ 2 => self.version,
+ 3 => Value::Bytes(self.code_hash.clone()),
+ 4 => Value::Bytes(self.authority_hash.clone()),
+ })?)
+ }
+}
+
+/// Generates fake DICE artifacts for client VM with a DICE chain up to the certificate
+/// describing the Microdroid payload.
+///
+/// The fake DICE chain has the following nodes:
+/// Root public key -> pvmfw certificate -> Microdroid kernel certificate
+/// -> Microdroid payload certificate
+pub fn fake_client_vm_dice_artifacts() -> Result<OwnedDiceArtifacts> {
+ // Client VM DICE chain has the same prefix as the service VM DICE chain up to
+ // the pvmfw entry.
+ let (cdi_values, dice_chain) = service_vm::fake_dice_artifacts_up_to_pvmfw()?;
+
+ // Adds an entry describing the Microdroid kernel.
+ let config_values = DiceConfigValues {
+ component_name: Some(cstr!("vm_entry")),
+ component_version: Some(12),
+ resettable: true,
+ ..Default::default()
+ };
+ let config_descriptor = retry_bcc_format_config_descriptor(&config_values)?;
+ // The Microdroid kernel is signed with the same key as the one used for the service VM,
+ // so the authority hash is the same.
+ let authority_hash = service_vm::AUTHORITY_HASH_SERVICE_VM;
+ let input_values = InputValues::new(
+ kernel_code_hash()?,
+ Config::Descriptor(config_descriptor.as_slice()),
+ authority_hash,
+ DiceMode::kDiceModeDebug,
+ [0; HIDDEN_SIZE], // No hidden.
+ );
+ let dice_artifacts = retry_bcc_main_flow(
+ &cdi_values.cdi_attest,
+ &cdi_values.cdi_seal,
+ &dice_chain,
+ &input_values,
+ )
+ .map_err(|e| {
+ error!("Failed to run the Microdroid kernel BCC main flow: {e}");
+ e
+ })?;
+
+ // Adds an entry describing the Microdroid payload.
+ let config_descriptor = fake_microdroid_payload_config_descriptor().map_err(|e| {
+ error!("Failed to generate config descriptor for Microdroid: {e}");
+ DiceError::InvalidInput
+ })?;
+ let input_values = InputValues::new(
+ CODE_HASH_PAYLOAD,
+ Config::Descriptor(config_descriptor.as_slice()),
+ AUTHORITY_HASH_PAYLOAD,
+ DiceMode::kDiceModeDebug,
+ [0u8; HIDDEN_SIZE], // hidden
+ );
+ retry_bcc_main_flow(
+ dice_artifacts.cdi_attest(),
+ dice_artifacts.cdi_seal(),
+ dice_artifacts.bcc().unwrap(),
+ &input_values,
+ )
+ .map_err(|e| {
+ error!("Failed to run the Microdroid payload BCC main flow: {e}");
+ e
+ })
+}
+
+fn fake_microdroid_payload_config_descriptor() -> CborResult<Vec<u8>> {
+ let mut map = Vec::new();
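+ // Config descriptor keys as used here (labels inferred from the values pushed below):
+ // -70002 = component name, -71000 = path to the payload config, -71002 = subcomponents.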
+ map.push((cbor!(-70002)?, cbor!("Microdroid payload")?));
+ map.push((cbor!(-71000)?, cbor!("/config_path")?));
+ let components =
+ fake_sub_components().iter().map(|c| c.to_value()).collect::<CborResult<_>>()?;
+ map.push((cbor!(-71002)?, Value::Array(components)));
+ Ok(Value::Map(map).to_vec().unwrap())
+}
+
+/// Generates a list of fake subcomponents as the Microdroid payload.
+pub fn fake_sub_components() -> Vec<SubComponent> {
+ vec![
+ SubComponent {
+ name: "apk:com.android.apk.apk1".to_string(),
+ version: 1,
+ code_hash: APK1_CODE_HASH.to_vec(),
+ authority_hash: APK1_AUTHORITY_HASH.to_vec(),
+ },
+ SubComponent {
+ name: "apex:com.android.apex.apex1".to_string(),
+ version: 1,
+ code_hash: APEX1_CODE_HASH.to_vec(),
+ authority_hash: APEX1_AUTHORITY_HASH.to_vec(),
+ },
+ ]
+}
+
+fn kernel_code_hash() -> Result<[u8; HASH_SIZE]> {
+ let code_hash = [KERNEL_HASH, INITRD_DEBUG_HASH].concat();
+ hash(&code_hash)
+}
diff --git a/service_vm/fake_chain/src/lib.rs b/service_vm/fake_chain/src/lib.rs
new file mode 100644
index 0000000..a5ab828
--- /dev/null
+++ b/service_vm/fake_chain/src/lib.rs
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Provides functions to build fake DICE chains for non-protected rialto and for tests.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+
+// `client_vm` builds DICE artifacts related to Microdroid, which is not relevant
+// to the nostd build used in rialto.
+#[cfg(feature = "std")]
+pub mod client_vm;
+pub mod service_vm;
diff --git a/service_vm/fake_chain/src/service_vm.rs b/service_vm/fake_chain/src/service_vm.rs
new file mode 100644
index 0000000..9bd831d
--- /dev/null
+++ b/service_vm/fake_chain/src/service_vm.rs
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Provides functions to build fake DICE artifacts for non-protected rialto used in
+//! end-to-end tests.
+
+use alloc::vec;
+use alloc::vec::Vec;
+use ciborium::value::Value;
+use coset::{
+ iana::{self, EnumI64},
+ Algorithm, AsCborValue, CborSerializable, CoseKey, KeyOperation, KeyType, Label,
+};
+use cstr::cstr;
+use diced_open_dice::{
+ derive_cdi_private_key_seed, keypair_from_seed, retry_bcc_format_config_descriptor,
+ retry_bcc_main_flow, retry_dice_main_flow, CdiValues, Config, DiceConfigValues, DiceError,
+ DiceMode, InputValues, OwnedDiceArtifacts, Result, CDI_SIZE, HASH_SIZE, HIDDEN_SIZE,
+};
+use log::error;
+
+/// All the following data are generated with urandom.
+const UDS: [u8; CDI_SIZE] = [
+ 0x1d, 0xa5, 0xea, 0x90, 0x47, 0xfc, 0xb5, 0xf6, 0x47, 0x12, 0xd3, 0x65, 0x9c, 0xf2, 0x00, 0xe0,
+ 0x06, 0xf7, 0xe8, 0x9e, 0x2f, 0xd0, 0x94, 0x7f, 0xc9, 0x9a, 0x9d, 0x40, 0xf7, 0xce, 0x13, 0x21,
+];
+const CODE_HASH_PVMFW: [u8; HASH_SIZE] = [
+ 0x16, 0x48, 0xf2, 0x55, 0x53, 0x23, 0xdd, 0x15, 0x2e, 0x83, 0x38, 0xc3, 0x64, 0x38, 0x63, 0x26,
+ 0x0f, 0xcf, 0x5b, 0xd1, 0x3a, 0xd3, 0x40, 0x3e, 0x23, 0xf8, 0x34, 0x4c, 0x6d, 0xa2, 0xbe, 0x25,
+ 0x1c, 0xb0, 0x29, 0xe8, 0xc3, 0xfb, 0xb8, 0x80, 0xdc, 0xb1, 0xd2, 0xb3, 0x91, 0x4d, 0xd3, 0xfb,
+ 0x01, 0x0f, 0xe4, 0xe9, 0x46, 0xa2, 0xc0, 0x26, 0x57, 0x5a, 0xba, 0x30, 0xf7, 0x15, 0x98, 0x14,
+];
+const AUTHORITY_HASH_PVMFW: [u8; HASH_SIZE] = [
+ 0xf9, 0x00, 0x9d, 0xc2, 0x59, 0x09, 0xe0, 0xb6, 0x98, 0xbd, 0xe3, 0x97, 0x4a, 0xcb, 0x3c, 0xe7,
+ 0x6b, 0x24, 0xc3, 0xe4, 0x98, 0xdd, 0xa9, 0x6a, 0x41, 0x59, 0x15, 0xb1, 0x23, 0xe6, 0xc8, 0xdf,
+ 0xfb, 0x52, 0xb4, 0x52, 0xc1, 0xb9, 0x61, 0xdd, 0xbc, 0x5b, 0x37, 0x0e, 0x12, 0x12, 0xb2, 0xfd,
+ 0xc1, 0x09, 0xb0, 0xcf, 0x33, 0x81, 0x4c, 0xc6, 0x29, 0x1b, 0x99, 0xea, 0xae, 0xfd, 0xaa, 0x0d,
+];
+const HIDDEN_PVMFW: [u8; HIDDEN_SIZE] = [
+ 0xa2, 0x01, 0xd0, 0xc0, 0xaa, 0x75, 0x3c, 0x06, 0x43, 0x98, 0x6c, 0xc3, 0x5a, 0xb5, 0x5f, 0x1f,
+ 0x0f, 0x92, 0x44, 0x3b, 0x0e, 0xd4, 0x29, 0x75, 0xe3, 0xdb, 0x36, 0xda, 0xc8, 0x07, 0x97, 0x4d,
+ 0xff, 0xbc, 0x6a, 0xa4, 0x8a, 0xef, 0xc4, 0x7f, 0xf8, 0x61, 0x7d, 0x51, 0x4d, 0x2f, 0xdf, 0x7e,
+ 0x8c, 0x3d, 0xa3, 0xfc, 0x63, 0xd4, 0xd4, 0x74, 0x8a, 0xc4, 0x14, 0x45, 0x83, 0x6b, 0x12, 0x7e,
+];
+const CODE_HASH_SERVICE_VM: [u8; HASH_SIZE] = [
+ 0xa4, 0x0c, 0xcb, 0xc1, 0xbf, 0xfa, 0xcc, 0xfd, 0xeb, 0xf4, 0xfc, 0x43, 0x83, 0x7f, 0x46, 0x8d,
+ 0xd8, 0xd8, 0x14, 0xc1, 0x96, 0x14, 0x1f, 0x6e, 0xb3, 0xa0, 0xd9, 0x56, 0xb3, 0xbf, 0x2f, 0xfa,
+ 0x88, 0x70, 0x11, 0x07, 0x39, 0xa4, 0xd2, 0xa9, 0x6b, 0x18, 0x28, 0xe8, 0x29, 0x20, 0x49, 0x0f,
+ 0xbb, 0x8d, 0x08, 0x8c, 0xc6, 0x54, 0xe9, 0x71, 0xd2, 0x7e, 0xa4, 0xfe, 0x58, 0x7f, 0xd3, 0xc7,
+];
+pub(crate) const AUTHORITY_HASH_SERVICE_VM: [u8; HASH_SIZE] = [
+ 0xb2, 0x69, 0x05, 0x48, 0x56, 0xb5, 0xfa, 0x55, 0x6f, 0xac, 0x56, 0xd9, 0x02, 0x35, 0x2b, 0xaa,
+ 0x4c, 0xba, 0x28, 0xdd, 0x82, 0x3a, 0x86, 0xf5, 0xd4, 0xc2, 0xf1, 0xf9, 0x35, 0x7d, 0xe4, 0x43,
+ 0x13, 0xbf, 0xfe, 0xd3, 0x36, 0xd8, 0x1c, 0x12, 0x78, 0x5c, 0x9c, 0x3e, 0xf6, 0x66, 0xef, 0xab,
+ 0x3d, 0x0f, 0x89, 0xa4, 0x6f, 0xc9, 0x72, 0xee, 0x73, 0x43, 0x02, 0x8a, 0xef, 0xbc, 0x05, 0x98,
+];
+const HIDDEN_SERVICE_VM: [u8; HIDDEN_SIZE] = [
+ 0x5b, 0x3f, 0xc9, 0x6b, 0xe3, 0x95, 0x59, 0x40, 0x5e, 0x64, 0xe5, 0x64, 0x3f, 0xfd, 0x21, 0x09,
+ 0x9d, 0xf3, 0xcd, 0xc7, 0xa4, 0x2a, 0xe2, 0x97, 0xdd, 0xe2, 0x4f, 0xb0, 0x7d, 0x7e, 0xf5, 0x8e,
+ 0xd6, 0x4d, 0x84, 0x25, 0x54, 0x41, 0x3f, 0x8f, 0x78, 0x64, 0x1a, 0x51, 0x27, 0x9d, 0x55, 0x8a,
+ 0xe9, 0x90, 0x35, 0xab, 0x39, 0x80, 0x4b, 0x94, 0x40, 0x84, 0xa2, 0xfd, 0x73, 0xeb, 0x35, 0x7a,
+];
+
+fn ed25519_public_key_to_cbor_value(public_key: &[u8]) -> Result<Value> {
+ let key = CoseKey {
+ kty: KeyType::Assigned(iana::KeyType::OKP),
+ alg: Some(Algorithm::Assigned(iana::Algorithm::EdDSA)),
+ key_ops: vec![KeyOperation::Assigned(iana::KeyOperation::Verify)].into_iter().collect(),
+ params: vec![
+ (
+ Label::Int(iana::Ec2KeyParameter::Crv.to_i64()),
+ iana::EllipticCurve::Ed25519.to_i64().into(),
+ ),
+ (Label::Int(iana::Ec2KeyParameter::X.to_i64()), Value::Bytes(public_key.to_vec())),
+ ],
+ ..Default::default()
+ };
+ key.to_cbor_value().map_err(|e| {
+ error!("Failed to serialize the key to CBOR data: {e}");
+ DiceError::InvalidInput
+ })
+}
+
+/// Generates fake DICE artifacts with a DICE chain up to the certificate describing pvmfw.
+///
+/// The fake DICE chain has the following nodes:
+/// Root public key -> pvmfw certificate
+pub(crate) fn fake_dice_artifacts_up_to_pvmfw() -> Result<(CdiValues, Vec<u8>)> {
+ let private_key_seed = derive_cdi_private_key_seed(&UDS).map_err(|e| {
+ error!("Failed to derive private key seed: {e}");
+ e
+ })?;
+
+ // Gets the root public key of the DICE chain.
+ let (public_key, _) = keypair_from_seed(private_key_seed.as_array()).map_err(|e| {
+ error!("Failed to generate key pair: {e}");
+ e
+ })?;
+ let ed25519_public_key_value = ed25519_public_key_to_cbor_value(&public_key)?;
+
+ // Gets the pvmfw certificate to act as the root certificate of the DICE chain.
+ let config_values = DiceConfigValues {
+ component_name: Some(cstr!("Protected VM firmware")),
+ component_version: Some(1),
+ resettable: true,
+ ..Default::default()
+ };
+ let config_descriptor = retry_bcc_format_config_descriptor(&config_values)?;
+ let input_values = InputValues::new(
+ CODE_HASH_PVMFW,
+ Config::Descriptor(config_descriptor.as_slice()),
+ AUTHORITY_HASH_PVMFW,
+ DiceMode::kDiceModeDebug,
+ HIDDEN_PVMFW,
+ );
+ let (cdi_values, cert) = retry_dice_main_flow(&UDS, &UDS, &input_values).map_err(|e| {
+ error!("Failed to run first main flow: {e}");
+ e
+ })?;
+ let dice_chain = Value::Array(vec![
+ ed25519_public_key_value,
+ Value::from_slice(&cert).map_err(|e| {
+ error!("Deserialize root DICE certificate failed: {e}");
+ DiceError::InvalidInput
+ })?,
+ ]);
+ let dice_chain = dice_chain.to_vec().map_err(|e| {
+ error!("Failed to serialize the DICE chain to CBOR data: {e}");
+ DiceError::InvalidInput
+ })?;
+ Ok((cdi_values, dice_chain))
+}
+
+/// Generates fake DICE artifacts for service VM with a DICE chain up to the certificate
+/// describing service VM.
+///
+/// The fake DICE chain has the following nodes:
+/// Root public key -> pvmfw certificate -> service VM certificate
+///
+/// The fake DICE chain is solely used in non-protected rialto for testing
+/// purposes.
+pub fn fake_service_vm_dice_artifacts() -> Result<OwnedDiceArtifacts> {
+ let (cdi_values, dice_chain) = fake_dice_artifacts_up_to_pvmfw()?;
+ let config_values = DiceConfigValues {
+ component_name: Some(cstr!("vm_entry")),
+ component_version: Some(12),
+ resettable: true,
+ ..Default::default()
+ };
+ let config_descriptor = retry_bcc_format_config_descriptor(&config_values)?;
+ let input_values = InputValues::new(
+ CODE_HASH_SERVICE_VM,
+ Config::Descriptor(config_descriptor.as_slice()),
+ AUTHORITY_HASH_SERVICE_VM,
+ DiceMode::kDiceModeDebug,
+ HIDDEN_SERVICE_VM,
+ );
+ retry_bcc_main_flow(&cdi_values.cdi_attest, &cdi_values.cdi_seal, &dice_chain, &input_values)
+ .map_err(|e| {
+ error!("Failed to run the service VM BCC main flow: {e}");
+ e
+ })
+}
diff --git a/service_vm/kernel/Android.bp b/service_vm/kernel/Android.bp
new file mode 100644
index 0000000..79158e6
--- /dev/null
+++ b/service_vm/kernel/Android.bp
@@ -0,0 +1,31 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+python_binary_host {
+ name: "extract_microdroid_kernel_hashes",
+ srcs: ["extract_microdroid_kernel_hashes.py"],
+}
+
+genrule {
+ name: "microdroid_kernel_hashes_rs",
+ srcs: [":microdroid_kernel"],
+ out: ["lib.rs"],
+ tools: [
+ "extract_microdroid_kernel_hashes",
+ "avbtool",
+ ],
+ cmd: "$(location extract_microdroid_kernel_hashes) $(location avbtool) $(in) > $(out)",
+}
+
+rust_library_rlib {
+ name: "libmicrodroid_kernel_hashes",
+ srcs: [":microdroid_kernel_hashes_rs"],
+ crate_name: "microdroid_kernel_hashes",
+ prefer_rlib: true,
+ no_stdlibs: true,
+ stdlibs: [
+ "libcompiler_builtins.rust_sysroot",
+ "libcore.rust_sysroot",
+ ],
+}
diff --git a/service_vm/kernel/extract_microdroid_kernel_hashes.py b/service_vm/kernel/extract_microdroid_kernel_hashes.py
new file mode 100644
index 0000000..148e8be
--- /dev/null
+++ b/service_vm/kernel/extract_microdroid_kernel_hashes.py
@@ -0,0 +1,73 @@
+"""Extracts the following hashes from the AVB footer of Microdroid's kernel:
+
+- kernel hash
+- initrd_normal hash
+- initrd_debug hash
+
+The hashes are written to stdout as a Rust file.
+
+On unsupported architectures such as x86, where the kernel is just an empty file,
+the output Rust file keeps the same hash constant fields for compatibility
+reasons, but all of them are empty.
+"""
+
+import sys
+import subprocess
+from typing import Dict
+
+PARTITION_NAME_BOOT = 'boot'
+PARTITION_NAME_INITRD_NORMAL = 'initrd_normal'
+PARTITION_NAME_INITRD_DEBUG = 'initrd_debug'
+
+def main(args):
+ """Main function."""
+ avbtool = args[0]
+ kernel_image_path = args[1]
+ hashes = collect_hashes(avbtool, kernel_image_path)
+
+ print("//! This file is generated by extract_microdroid_kernel_hashes.py.")
+ print("//! It contains the hashes of the kernel and initrds.\n")
+ print("#![no_std]\n#![allow(missing_docs)]\n")
+
+ # Microdroid's kernel is just an empty file on unsupported architectures
+ # such as x86; in that case the hashes should be empty.
+ if hashes.keys() != {PARTITION_NAME_BOOT,
+ PARTITION_NAME_INITRD_NORMAL,
+ PARTITION_NAME_INITRD_DEBUG}:
+ print("/// The kernel is empty, no hashes are available.")
+ hashes[PARTITION_NAME_BOOT] = ""
+ hashes[PARTITION_NAME_INITRD_NORMAL] = ""
+ hashes[PARTITION_NAME_INITRD_DEBUG] = ""
+
+ print("pub const KERNEL_HASH: &[u8] = &["
+ f"{format_hex_string(hashes[PARTITION_NAME_BOOT])}];\n")
+ print("pub const INITRD_NORMAL_HASH: &[u8] = &["
+ f"{format_hex_string(hashes[PARTITION_NAME_INITRD_NORMAL])}];\n")
+ print("pub const INITRD_DEBUG_HASH: &[u8] = &["
+ f"{format_hex_string(hashes[PARTITION_NAME_INITRD_DEBUG])}];")
+
+def collect_hashes(avbtool: str, kernel_image_path: str) -> Dict[str, str]:
+ """Collects the hashes from the AVB footer of the kernel image."""
+ hashes = {}
+ with subprocess.Popen(
+ [avbtool, 'print_partition_digests', '--image', kernel_image_path],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
+ stdout, _ = proc.communicate()
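+ # avbtool prints one "<partition_name>: <hex_digest>" pair per line; strip spaces and split.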
+ for line in stdout.decode("utf-8").split("\n"):
+ line = line.replace(" ", "").split(":")
+ if len(line) == 2:
+ partition_name, hash_ = line
+ hashes[partition_name] = hash_
+ return hashes
+
+def format_hex_string(hex_string: str) -> str:
+ """Formats a hex string into a Rust array."""
+ assert len(hex_string) % 2 == 0, \
+ "Hex string must have even length: " + hex_string
+ return ", ".join(["\n0x" + hex_string[i:i+2] if i % 32 == 0
+ else "0x" + hex_string[i:i+2]
+ for i in range(0, len(hex_string), 2)])
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/service_vm/requests/Android.bp b/service_vm/requests/Android.bp
index ecede8b..57da012 100644
--- a/service_vm/requests/Android.bp
+++ b/service_vm/requests/Android.bp
@@ -21,10 +21,14 @@
"libcbor_util_nostd",
"libciborium_nostd",
"libcoset_nostd",
+ "libder_nostd",
"libdiced_open_dice_nostd",
"liblog_rust_nostd",
+ "libmicrodroid_kernel_hashes",
"libserde_nostd",
"libservice_vm_comm_nostd",
+ "libspki_nostd",
+ "libx509_cert_nostd",
"libzeroize_nostd",
],
}
diff --git a/service_vm/requests/src/api.rs b/service_vm/requests/src/api.rs
index eae0370..315d2af 100644
--- a/service_vm/requests/src/api.rs
+++ b/service_vm/requests/src/api.rs
@@ -14,6 +14,7 @@
//! This module contains the main API for the request processing module.
+use crate::client_vm;
use crate::rkp;
use alloc::vec::Vec;
use diced_open_dice::DiceArtifacts;
@@ -31,6 +32,8 @@
rkp::generate_certificate_request(p, dice_artifacts)
.map_or_else(Response::Err, Response::GenerateCertificateRequest)
}
+ Request::RequestClientVmAttestation(p) => client_vm::request_attestation(p, dice_artifacts)
+ .map_or_else(Response::Err, Response::RequestClientVmAttestation),
}
}
diff --git a/service_vm/requests/src/cert.rs b/service_vm/requests/src/cert.rs
new file mode 100644
index 0000000..91281e7
--- /dev/null
+++ b/service_vm/requests/src/cert.rs
@@ -0,0 +1,172 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Generation of certificates and attestation extensions.
+
+use crate::dice::SubComponent;
+use alloc::vec;
+use alloc::vec::Vec;
+use der::{
+ asn1::{BitString, ObjectIdentifier, OctetString, Utf8StringRef},
+ oid::AssociatedOid,
+ Decode, Sequence,
+};
+use spki::{AlgorithmIdentifier, SubjectPublicKeyInfo};
+use x509_cert::{
+ certificate::{Certificate, TbsCertificate, Version},
+ ext::Extension,
+ name::Name,
+ serial_number::SerialNumber,
+ time::Validity,
+};
+
+/// OID value for ECDSA with SHA-256, see RFC 5912 s6.
+const ECDSA_WITH_SHA_256: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.2.840.10045.4.3.2");
+
+/// OID value for the protected VM remote attestation extension.
+///
+/// This OID value was added at cl/584542390.
+const AVF_ATTESTATION_EXTENSION_V1: ObjectIdentifier =
+ ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11129.2.1.29.1");
+
+/// Attestation extension contents
+///
+/// ```asn1
+/// AttestationDescription ::= SEQUENCE {
+/// attestationChallenge OCTET_STRING,
+/// isVmSecure BOOLEAN,
+/// vmComponents SEQUENCE OF VmComponent,
+/// }
+/// ```
+#[derive(Debug, Clone, Sequence)]
+pub(crate) struct AttestationExtension<'a> {
+ #[asn1(type = "OCTET STRING")]
+ attestation_challenge: &'a [u8],
+ /// Indicates whether the VM is operating under a secure configuration.
+ is_vm_secure: bool,
+ vm_components: Vec<VmComponent<'a>>,
+}
+
+impl<'a> AssociatedOid for AttestationExtension<'a> {
+ const OID: ObjectIdentifier = AVF_ATTESTATION_EXTENSION_V1;
+}
+
+impl<'a> AttestationExtension<'a> {
+ pub(crate) fn new(
+ attestation_challenge: &'a [u8],
+ is_vm_secure: bool,
+ vm_components: Vec<VmComponent<'a>>,
+ ) -> Self {
+ Self { attestation_challenge, is_vm_secure, vm_components }
+ }
+}
+
+/// VM component information
+///
+/// ```asn1
+/// VmComponent ::= SEQUENCE {
+/// name UTF8String,
+/// securityVersion INTEGER,
+/// codeHash OCTET STRING,
+/// authorityHash OCTET STRING,
+/// }
+/// ```
+#[derive(Debug, Clone, Sequence)]
+pub(crate) struct VmComponent<'a> {
+ name: Utf8StringRef<'a>,
+ version: u64,
+ #[asn1(type = "OCTET STRING")]
+ code_hash: &'a [u8],
+ #[asn1(type = "OCTET STRING")]
+ authority_hash: &'a [u8],
+}
+
+impl<'a> VmComponent<'a> {
+ pub(crate) fn new(sub_component: &'a SubComponent) -> der::Result<Self> {
+ Ok(Self {
+ name: Utf8StringRef::new(&sub_component.name)?,
+ version: sub_component.version,
+ code_hash: &sub_component.code_hash,
+ authority_hash: &sub_component.authority_hash,
+ })
+ }
+}
+
+/// Builds an X.509 `Certificate` as defined in RFC 5280 Section 4.1:
+///
+/// ```asn1
+/// Certificate ::= SEQUENCE {
+/// tbsCertificate TBSCertificate,
+/// signatureAlgorithm AlgorithmIdentifier,
+/// signature BIT STRING
+/// }
+/// ```
+pub(crate) fn build_certificate(
+ tbs_cert: TbsCertificate,
+ signature: &[u8],
+) -> der::Result<Certificate> {
+ Ok(Certificate {
+ signature_algorithm: tbs_cert.signature.clone(),
+ tbs_certificate: tbs_cert,
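+ // Zero unused bits: the signature occupies a whole number of bytes.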
+ signature: BitString::new(0, signature)?,
+ })
+}
+
+/// Builds an X.509 `TbsCertificate` as defined in RFC 5280 Section 4.1:
+///
+/// ```asn1
+/// TBSCertificate ::= SEQUENCE {
+/// version [0] EXPLICIT Version DEFAULT v1,
+/// serialNumber CertificateSerialNumber,
+/// signature AlgorithmIdentifier,
+/// issuer Name,
+/// validity Validity,
+/// subject Name,
+/// subjectPublicKeyInfo SubjectPublicKeyInfo,
+/// issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL,
+/// -- If present, version MUST be v2 or v3
+/// subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL,
+/// -- If present, version MUST be v2 or v3
+/// extensions [3] Extensions OPTIONAL
+/// -- If present, version MUST be v3 --
+/// }
+/// ```
+pub(crate) fn build_tbs_certificate(
+ serial_number: &[u8],
+ issuer: Name,
+ subject: Name,
+ validity: Validity,
+ subject_public_key_info: &[u8],
+ attestation_ext: &[u8],
+) -> der::Result<TbsCertificate> {
+ let signature = AlgorithmIdentifier { oid: ECDSA_WITH_SHA_256, parameters: None };
+ let subject_public_key_info = SubjectPublicKeyInfo::from_der(subject_public_key_info)?;
+ let extensions = vec![Extension {
+ extn_id: AttestationExtension::OID,
+ critical: false,
+ extn_value: OctetString::new(attestation_ext)?,
+ }];
+ Ok(TbsCertificate {
+ version: Version::V3,
+ serial_number: SerialNumber::new(serial_number)?,
+ signature,
+ issuer,
+ validity,
+ subject,
+ subject_public_key_info,
+ issuer_unique_id: None,
+ subject_unique_id: None,
+ extensions: Some(extensions),
+ })
+}
diff --git a/service_vm/requests/src/client_vm.rs b/service_vm/requests/src/client_vm.rs
new file mode 100644
index 0000000..5b1bf6c
--- /dev/null
+++ b/service_vm/requests/src/client_vm.rs
@@ -0,0 +1,196 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module contains functions related to the attestation of the
+//! client VM.
+
+use crate::cert;
+use crate::dice::{
+ validate_client_vm_dice_chain_prefix_match, ClientVmDiceChain, DiceChainEntryPayload,
+};
+use crate::keyblob::decrypt_private_key;
+use alloc::vec::Vec;
+use bssl_avf::{rand_bytes, sha256, Digester, EcKey, PKey};
+use cbor_util::value_to_array;
+use ciborium::value::Value;
+use core::result;
+use coset::{AsCborValue, CborSerializable, CoseSign, CoseSign1};
+use der::{Decode, Encode};
+use diced_open_dice::{DiceArtifacts, HASH_SIZE};
+use log::error;
+use microdroid_kernel_hashes::{INITRD_DEBUG_HASH, INITRD_NORMAL_HASH, KERNEL_HASH};
+use service_vm_comm::{ClientVmAttestationParams, Csr, CsrPayload, RequestProcessingError};
+use x509_cert::{certificate::Certificate, name::Name};
+
+type Result<T> = result::Result<T, RequestProcessingError>;
+
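+// The signed CSR payload carries two signatures: the first made with the client VM's DICE CDI
+// leaf key and the second with the attestation key being certified.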
+const DICE_CDI_LEAF_SIGNATURE_INDEX: usize = 0;
+const ATTESTATION_KEY_SIGNATURE_INDEX: usize = 1;
+
+pub(super) fn request_attestation(
+ params: ClientVmAttestationParams,
+ dice_artifacts: &dyn DiceArtifacts,
+) -> Result<Vec<u8>> {
+ let csr = Csr::from_cbor_slice(¶ms.csr)?;
+ let cose_sign = CoseSign::from_slice(&csr.signed_csr_payload)?;
+ let csr_payload = cose_sign.payload.as_ref().ok_or_else(|| {
+ error!("No CsrPayload found in the CSR");
+ RequestProcessingError::InternalError
+ })?;
+ let csr_payload = CsrPayload::from_cbor_slice(csr_payload)?;
+
+ // Validates the prefix of the Client VM DICE chain in the CSR.
+ let service_vm_dice_chain =
+ dice_artifacts.bcc().ok_or(RequestProcessingError::MissingDiceChain)?;
+ let service_vm_dice_chain =
+ value_to_array(Value::from_slice(service_vm_dice_chain)?, "service_vm_dice_chain")?;
+ let client_vm_dice_chain =
+ value_to_array(Value::from_slice(&csr.dice_cert_chain)?, "client_vm_dice_chain")?;
+ validate_client_vm_dice_chain_prefix_match(&client_vm_dice_chain, &service_vm_dice_chain)?;
+ // Validates the signatures in the Client VM DICE chain and extracts the partially decoded
+ // DiceChainEntryPayloads.
+ let client_vm_dice_chain =
+ ClientVmDiceChain::validate_signatures_and_parse_dice_chain(client_vm_dice_chain)?;
+
+ // The last entry in the service VM DICE chain describes the service VM, which should
+ // be signed with the same key as the kernel image.
+ let service_vm_entry = service_vm_dice_chain.last().unwrap();
+ validate_kernel_authority_hash(client_vm_dice_chain.microdroid_kernel(), service_vm_entry)?;
+ validate_kernel_code_hash(&client_vm_dice_chain)?;
+
+ // AAD is empty as defined in service_vm/comm/client_vm_csr.cddl.
+ let aad = &[];
+
+ // Verifies the first signature with the leaf private key in the DICE chain.
+ cose_sign.verify_signature(DICE_CDI_LEAF_SIGNATURE_INDEX, aad, |signature, message| {
+ client_vm_dice_chain.microdroid_payload().subject_public_key.verify(signature, message)
+ })?;
+
+ // Verifies the second signature with the public key in the CSR payload.
+ let ec_public_key = EcKey::from_cose_public_key_slice(&csr_payload.public_key)?;
+ cose_sign.verify_signature(ATTESTATION_KEY_SIGNATURE_INDEX, aad, |signature, message| {
+ ecdsa_verify(&ec_public_key, signature, message)
+ })?;
+ let subject_public_key_info = PKey::try_from(ec_public_key)?.subject_public_key_info()?;
+
+ // Builds the TBSCertificate.
+ // The serial number can be up to 20 bytes according to RFC5280 s4.1.2.2.
+ // In this case, a serial number with a length of 20 bytes is used to ensure that each
+ // certificate signed by the RKP VM has a unique serial number.
+ let mut serial_number = [0u8; 20];
+ rand_bytes(&mut serial_number)?;
+ let subject = Name::encode_from_string("CN=Android Protected Virtual Machine Key")?;
+ let rkp_cert = Certificate::from_der(¶ms.remotely_provisioned_cert)?;
+ let vm_components =
+ if let Some(components) = client_vm_dice_chain.microdroid_payload_components() {
+ components.iter().map(cert::VmComponent::new).collect::<der::Result<Vec<_>>>()?
+ } else {
+ Vec::new()
+ };
+ let attestation_ext = cert::AttestationExtension::new(
+ &csr_payload.challenge,
+ client_vm_dice_chain.all_entries_are_secure(),
+ vm_components,
+ )
+ .to_der()?;
+ let tbs_cert = cert::build_tbs_certificate(
+ &serial_number,
+ rkp_cert.tbs_certificate.subject,
+ Name::from_der(&subject)?,
+ rkp_cert.tbs_certificate.validity,
+ &subject_public_key_info,
+ &attestation_ext,
+ )?;
+
+ // Signs the TBSCertificate and builds the Certificate.
+ // The two private key structs below will be zeroed out on drop.
+ let private_key =
+ decrypt_private_key(¶ms.remotely_provisioned_key_blob, dice_artifacts.cdi_seal())
+ .map_err(|e| {
+ error!("Failed to decrypt the remotely provisioned key blob: {e}");
+ RequestProcessingError::FailedToDecryptKeyBlob
+ })?;
+ let ec_private_key = EcKey::from_ec_private_key(private_key.as_slice())?;
+ let signature = ecdsa_sign(&ec_private_key, &tbs_cert.to_der()?)?;
+ let certificate = cert::build_certificate(tbs_cert, &signature)?;
+ Ok(certificate.to_der()?)
+}
+
+fn ecdsa_verify(key: &EcKey, signature: &[u8], message: &[u8]) -> bssl_avf::Result<()> {
+ // The message was signed with ECDSA with curve P-256 and SHA-256 at the signature generation.
+ let digest = sha256(message)?;
+ key.ecdsa_verify(signature, &digest)
+}
+
+fn ecdsa_sign(key: &EcKey, message: &[u8]) -> bssl_avf::Result<Vec<u8>> {
+ let digest = sha256(message)?;
+ key.ecdsa_sign(&digest)
+}
+
+/// Validates that the authority hash of the Microdroid kernel in the Client VM DICE chain
+/// matches the authority hash of the service VM entry in the service VM DICE chain, because
+/// the Microdroid kernel is signed with the same key as the one used for the service VM.
+fn validate_kernel_authority_hash(
+ kernel: &DiceChainEntryPayload,
+ service_vm_entry: &Value,
+) -> Result<()> {
+ if expected_kernel_authority_hash(service_vm_entry)? == kernel.authority_hash {
+ Ok(())
+ } else {
+ error!("The authority hash of the Microdroid kernel does not match the expected value");
+ Err(RequestProcessingError::InvalidDiceChain)
+ }
+}
+
+/// Validates that the kernel code hash in the Client VM DICE chain matches the code hashes
+/// embedded during the build time.
+fn validate_kernel_code_hash(dice_chain: &ClientVmDiceChain) -> Result<()> {
+ let kernel = dice_chain.microdroid_kernel();
+ if expected_kernel_code_hash_normal()? == kernel.code_hash {
+ return Ok(());
+ }
+ if expected_kernel_code_hash_debug()? == kernel.code_hash {
+ if dice_chain.all_entries_are_secure() {
+ error!("The Microdroid kernel has debug initrd but the DICE chain is secure");
+ return Err(RequestProcessingError::InvalidDiceChain);
+ }
+ return Ok(());
+ }
+ error!("The kernel code hash in the Client VM DICE chain does not match any expected values");
+ Err(RequestProcessingError::InvalidDiceChain)
+}
+
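+// The expected code hash of the Microdroid kernel entry is the SHA-512 digest of the kernel hash
+// concatenated with the corresponding initrd hash (normal or debug).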
+fn expected_kernel_code_hash_normal() -> bssl_avf::Result<Vec<u8>> {
+ let mut code_hash = [0u8; 64];
+ code_hash[0..32].copy_from_slice(KERNEL_HASH);
+ code_hash[32..].copy_from_slice(INITRD_NORMAL_HASH);
+ Digester::sha512().digest(&code_hash)
+}
+
+fn expected_kernel_code_hash_debug() -> bssl_avf::Result<Vec<u8>> {
+ let mut code_hash = [0u8; 64];
+ code_hash[0..32].copy_from_slice(KERNEL_HASH);
+ code_hash[32..].copy_from_slice(INITRD_DEBUG_HASH);
+ Digester::sha512().digest(&code_hash)
+}
+
+fn expected_kernel_authority_hash(service_vm_entry: &Value) -> Result<[u8; HASH_SIZE]> {
+ let cose_sign1 = CoseSign1::from_cbor_value(service_vm_entry.clone())?;
+ let payload = cose_sign1.payload.ok_or_else(|| {
+ error!("No payload found in the service VM DICE chain entry");
+ RequestProcessingError::InternalError
+ })?;
+ let service_vm = DiceChainEntryPayload::from_slice(&payload)?;
+ Ok(service_vm.authority_hash)
+}
diff --git a/service_vm/requests/src/dice.rs b/service_vm/requests/src/dice.rs
new file mode 100644
index 0000000..657e482
--- /dev/null
+++ b/service_vm/requests/src/dice.rs
@@ -0,0 +1,545 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module contains functions related to DICE.
+
+use alloc::string::String;
+use alloc::vec::Vec;
+use bssl_avf::{ed25519_verify, Digester, EcKey};
+use cbor_util::{
+ cbor_value_type, get_label_value, get_label_value_as_bytes, value_to_array,
+ value_to_byte_array, value_to_bytes, value_to_map, value_to_num, value_to_text,
+};
+use ciborium::value::Value;
+use core::cell::OnceCell;
+use core::result;
+use coset::{
+ self,
+ iana::{self, EnumI64},
+ Algorithm, AsCborValue, CborSerializable, CoseError, CoseKey, CoseSign1, KeyOperation, KeyType,
+ Label,
+};
+use diced_open_dice::{DiceMode, HASH_SIZE};
+use log::error;
+use service_vm_comm::RequestProcessingError;
+
+type Result<T> = result::Result<T, RequestProcessingError>;
+
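+// Integer labels of the CBOR map keys in DiceChainEntryPayload and its configuration descriptor.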
+const CODE_HASH: i64 = -4670545;
+const CONFIG_DESC: i64 = -4670548;
+const AUTHORITY_HASH: i64 = -4670549;
+const MODE: i64 = -4670551;
+const SUBJECT_PUBLIC_KEY: i64 = -4670552;
+
+const CONFIG_DESC_COMPONENT_NAME: i64 = -70002;
+const CONFIG_DESC_SUB_COMPONENTS: i64 = -71002;
+
+const SUB_COMPONENT_NAME: i64 = 1;
+const SUB_COMPONENT_VERSION: i64 = 2;
+const SUB_COMPONENT_CODE_HASH: i64 = 3;
+const SUB_COMPONENT_AUTHORITY_HASH: i64 = 4;
+
+const MICRODROID_KERNEL_COMPONENT_NAME: &str = "vm_entry";
+const MICRODROID_PAYLOAD_COMPONENT_NAME: &str = "Microdroid payload";
+
+/// Represents a partially decoded `DiceCertChain` from the client VM.
+/// The whole chain is defined as following:
+///
+/// DiceCertChain = [
+/// PubKeyEd25519 / PubKeyECDSA256 / PubKeyECDSA384, ; UDS_Pub
+/// + DiceChainEntry, ; First CDI_Certificate -> Last CDI_Certificate
+/// ]
+#[derive(Debug, Clone)]
+pub(crate) struct ClientVmDiceChain {
+ payloads: Vec<DiceChainEntryPayload>,
+}
+
+impl ClientVmDiceChain {
+ /// Validates the signatures of the entries in the `client_vm_dice_chain` as following:
+ ///
+ /// - The first entry of the `client_vm_dice_chain` must be signed with the root public key.
+ /// - After the first entry, each entry of the `client_vm_dice_chain` must be signed with the
+ /// subject public key of the previous entry.
+ ///
+ /// Returns a partially decoded client VM's DICE chain if the verification succeeds.
+ pub(crate) fn validate_signatures_and_parse_dice_chain(
+ mut client_vm_dice_chain: Vec<Value>,
+ ) -> Result<Self> {
+ let root_public_key =
+ CoseKey::from_cbor_value(client_vm_dice_chain.remove(0))?.try_into()?;
+
+ let mut payloads = Vec::with_capacity(client_vm_dice_chain.len());
+ let mut previous_public_key = &root_public_key;
+ for (i, value) in client_vm_dice_chain.into_iter().enumerate() {
+ let payload = DiceChainEntryPayload::validate_cose_signature_and_extract_payload(
+ value,
+ previous_public_key,
+ )
+ .map_err(|e| {
+ error!("Failed to verify the DICE chain entry {}: {:?}", i, e);
+ e
+ })?;
+ payloads.push(payload);
+ previous_public_key = &payloads.last().unwrap().subject_public_key;
+ }
+ // After successfully calling `validate_client_vm_dice_chain_prefix_match`, we can be
+ // certain that the client VM's DICE chain must contain at least three entries that
+ // describe:
+ // - pvmfw
+ // - Microdroid kernel
+ // - Apk/Apexes
+ assert!(
+ payloads.len() >= 3,
+ "The client VM DICE chain must contain at least three DiceChainEntryPayloads"
+ );
+ let chain = Self { payloads };
+ chain.validate_microdroid_components_names()?;
+ Ok(chain)
+ }
+
+ fn validate_microdroid_components_names(&self) -> Result<()> {
+ let microdroid_kernel_name = &self.microdroid_kernel().config_descriptor.component_name;
+ if MICRODROID_KERNEL_COMPONENT_NAME != microdroid_kernel_name {
+ error!(
+ "The second to last entry in the client VM DICE chain must describe the \
+ Microdroid kernel. Got {}",
+ microdroid_kernel_name
+ );
+ return Err(RequestProcessingError::InvalidDiceChain);
+ }
+ let microdroid_payload_name = &self.microdroid_payload().config_descriptor.component_name;
+ if MICRODROID_PAYLOAD_COMPONENT_NAME != microdroid_payload_name {
+ error!(
+ "The last entry in the client VM DICE chain must describe the Microdroid \
+ payload. Got {}",
+ microdroid_payload_name
+ );
+ return Err(RequestProcessingError::InvalidDiceChain);
+ }
+ Ok(())
+ }
+
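+ /// Returns the payload of the second-to-last entry, which describes the Microdroid kernel.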
+ pub(crate) fn microdroid_kernel(&self) -> &DiceChainEntryPayload {
+ &self.payloads[self.payloads.len() - 2]
+ }
+
+ pub(crate) fn microdroid_payload(&self) -> &DiceChainEntryPayload {
+ &self.payloads[self.payloads.len() - 1]
+ }
+
+ pub(crate) fn microdroid_payload_components(&self) -> Option<&Vec<SubComponent>> {
+ self.microdroid_payload().config_descriptor.sub_components.as_ref()
+ }
+
+ /// Returns true if all payloads in the DICE chain are in normal mode.
+ pub(crate) fn all_entries_are_secure(&self) -> bool {
+ self.payloads.iter().all(|p| p.mode == DiceMode::kDiceModeNormal)
+ }
+}
+
+/// Validates that the `client_vm_dice_chain` matches the `service_vm_dice_chain` up to the pvmfw
+/// entry.
+///
+/// Returns `Ok(())` if the verification succeeds.
+pub(crate) fn validate_client_vm_dice_chain_prefix_match(
+ client_vm_dice_chain: &[Value],
+ service_vm_dice_chain: &[Value],
+) -> Result<()> {
+ if service_vm_dice_chain.len() < 3 {
+ // The service VM's DICE chain must contain the root key and at least two other entries
+ // that describe:
+ // - pvmfw
+ // - Service VM kernel
+ error!("The service VM DICE chain must contain at least three entries");
+ return Err(RequestProcessingError::InternalError);
+ }
+ // Ignores the last entry, which describes the service VM.
+ let entries_up_to_pvmfw = &service_vm_dice_chain[0..(service_vm_dice_chain.len() - 1)];
+ if entries_up_to_pvmfw.len() + 2 != client_vm_dice_chain.len() {
+ // Client VM DICE chain = entries_up_to_pvmfw
+ // + Microdroid kernel entry (added in pvmfw)
+ // + Apk/Apexes entry (added in microdroid)
+ error!("The client VM's DICE chain must contain exactly two extra entries");
+ return Err(RequestProcessingError::InvalidDiceChain);
+ }
+ if entries_up_to_pvmfw != &client_vm_dice_chain[0..entries_up_to_pvmfw.len()] {
+ error!(
+ "The client VM's DICE chain does not match service VM's DICE chain up to \
+ the pvmfw entry"
+ );
+ return Err(RequestProcessingError::InvalidDiceChain);
+ }
+ Ok(())
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct PublicKey(CoseKey);
+
+impl TryFrom<CoseKey> for PublicKey {
+ type Error = RequestProcessingError;
+
+ fn try_from(key: CoseKey) -> Result<Self> {
+ if !key.key_ops.contains(&KeyOperation::Assigned(iana::KeyOperation::Verify)) {
+ error!("Public key does not support verification");
+ return Err(RequestProcessingError::InvalidDiceChain);
+ }
+ Ok(Self(key))
+ }
+}
+
+impl PublicKey {
+ /// Verifies the signature of the provided message with the public key.
+ ///
+ /// This function supports the following key/algorithm types as specified in
+ /// hardware/interfaces/security/rkp/aidl/android/hardware/security/keymint/
+ /// generateCertificateRequestV2.cddl:
+ ///
+ /// PubKeyEd25519 / PubKeyECDSA256 / PubKeyECDSA384
+ pub(crate) fn verify(&self, signature: &[u8], message: &[u8]) -> Result<()> {
+ match &self.0.kty {
+ KeyType::Assigned(iana::KeyType::EC2) => {
+ let public_key = EcKey::from_cose_public_key(&self.0)?;
+ let Some(Algorithm::Assigned(alg)) = self.0.alg else {
+ error!("Invalid algorithm in COSE key {:?}", self.0.alg);
+ return Err(RequestProcessingError::InvalidDiceChain);
+ };
+ let digester = match alg {
+ iana::Algorithm::ES256 => Digester::sha256(),
+ iana::Algorithm::ES384 => Digester::sha384(),
+ _ => {
+ error!("Unsupported algorithm in EC2 key: {:?}", alg);
+ return Err(RequestProcessingError::InvalidDiceChain);
+ }
+ };
+ let digest = digester.digest(message)?;
+ Ok(public_key.ecdsa_verify(signature, &digest)?)
+ }
+ KeyType::Assigned(iana::KeyType::OKP) => {
+ let curve_type =
+ get_label_value(&self.0, Label::Int(iana::OkpKeyParameter::Crv.to_i64()))?;
+ if curve_type != &Value::from(iana::EllipticCurve::Ed25519.to_i64()) {
+ error!("Unsupported curve type in OKP COSE key: {:?}", curve_type);
+ return Err(RequestProcessingError::OperationUnimplemented);
+ }
+ let x = get_label_value_as_bytes(
+ &self.0,
+ Label::Int(iana::OkpKeyParameter::X.to_i64()),
+ )?;
+ let public_key = x.try_into().map_err(|_| {
+ error!("Invalid ED25519 public key size: {}", x.len());
+ RequestProcessingError::InvalidDiceChain
+ })?;
+ let signature = signature.try_into().map_err(|_| {
+ error!("Invalid ED25519 signature size: {}", signature.len());
+ RequestProcessingError::InvalidDiceChain
+ })?;
+ Ok(ed25519_verify(message, signature, public_key)?)
+ }
+ kty => {
+ error!("Unsupported key type in COSE key: {:?}", kty);
+ Err(RequestProcessingError::OperationUnimplemented)
+ }
+ }
+ }
+}
+
+/// Represents a partially decoded `DiceChainEntryPayload`. The whole payload is defined in:
+///
+/// hardware/interfaces/security/rkp/aidl/android/hardware/security/keymint/
+/// generateCertificateRequestV2.cddl
+#[derive(Debug, Clone)]
+pub(crate) struct DiceChainEntryPayload {
+ pub(crate) subject_public_key: PublicKey,
+ mode: DiceMode,
+ pub(crate) code_hash: [u8; HASH_SIZE],
+ pub(crate) authority_hash: [u8; HASH_SIZE],
+ config_descriptor: ConfigDescriptor,
+}
+
+impl DiceChainEntryPayload {
+ /// Validates the signature of the provided CBOR value with the provided public key and
+ /// extracts payload from the value.
+ fn validate_cose_signature_and_extract_payload(
+ value: Value,
+ authority_public_key: &PublicKey,
+ ) -> Result<Self> {
+ let cose_sign1 = CoseSign1::from_cbor_value(value)?;
+ let aad = &[]; // AAD is not used in DICE chain entry.
+ cose_sign1.verify_signature(aad, |signature, message| {
+ authority_public_key.verify(signature, message)
+ })?;
+
+ let payload = cose_sign1.payload.ok_or_else(|| {
+ error!("No payload found in the DICE chain entry");
+ RequestProcessingError::InvalidDiceChain
+ })?;
+ Self::from_slice(&payload)
+ }
+
+ pub(crate) fn from_slice(data: &[u8]) -> Result<Self> {
+ let entries = value_to_map(Value::from_slice(data)?, "DiceChainEntryPayload")?;
+ let mut builder = PayloadBuilder::default();
+ for (key, value) in entries.into_iter() {
+ let key: i64 = value_to_num(key, "DiceChainEntryPayload key")?;
+ match key {
+ SUBJECT_PUBLIC_KEY => {
+ let subject_public_key = value_to_bytes(value, "subject_public_key")?;
+ let subject_public_key =
+ CoseKey::from_slice(&subject_public_key)?.try_into()?;
+ builder.subject_public_key(subject_public_key)?;
+ }
+ MODE => builder.mode(to_mode(value)?)?,
+ CODE_HASH => {
+ let code_hash = value_to_byte_array(value, "DiceChainEntryPayload code_hash")?;
+ builder.code_hash(code_hash)?;
+ }
+ AUTHORITY_HASH => {
+ let authority_hash =
+ value_to_byte_array(value, "DiceChainEntryPayload authority_hash")?;
+ builder.authority_hash(authority_hash)?;
+ }
+ CONFIG_DESC => {
+ let config_descriptor = value_to_bytes(value, "config_descriptor")?;
+ let config_descriptor = ConfigDescriptor::from_slice(&config_descriptor)?;
+ builder.config_descriptor(config_descriptor)?;
+ }
+ _ => {}
+ }
+ }
+ builder.build()
+ }
+}
+/// Represents a partially decoded `ConfigurationDescriptor`.
+///
+/// The whole `ConfigurationDescriptor` is defined in:
+///
+/// hardware/interfaces/security/rkp/aidl/android/hardware/security/keymint/
+/// generateCertificateRequestV2.cddl
+#[derive(Debug, Clone)]
+pub(crate) struct ConfigDescriptor {
+ component_name: String,
+ sub_components: Option<Vec<SubComponent>>,
+}
+
+impl ConfigDescriptor {
+ fn from_slice(data: &[u8]) -> Result<Self> {
+ let value = Value::from_slice(data)?;
+ let entries = value_to_map(value, "ConfigDescriptor")?;
+ let mut builder = ConfigDescriptorBuilder::default();
+ for (key, value) in entries.into_iter() {
+ let key: i64 = value_to_num(key, "ConfigDescriptor key")?;
+ match key {
+ CONFIG_DESC_COMPONENT_NAME => {
+ let name = value_to_text(value, "ConfigDescriptor component_name")?;
+ builder.component_name(name)?;
+ }
+ CONFIG_DESC_SUB_COMPONENTS => {
+ let sub_components = value_to_array(value, "ConfigDescriptor sub_components")?;
+ let sub_components = sub_components
+ .into_iter()
+ .map(SubComponent::try_from)
+ .collect::<Result<Vec<_>>>()?;
+ builder.sub_components(sub_components)?
+ }
+ _ => {}
+ }
+ }
+ builder.build()
+ }
+}
+
+#[derive(Debug, Clone, Default)]
+struct ConfigDescriptorBuilder {
+ component_name: OnceCell<String>,
+ sub_components: OnceCell<Vec<SubComponent>>,
+}
+
+impl ConfigDescriptorBuilder {
+ fn component_name(&mut self, component_name: String) -> Result<()> {
+ set_once(&self.component_name, component_name, "ConfigDescriptor component_name")
+ }
+
+ fn sub_components(&mut self, sub_components: Vec<SubComponent>) -> Result<()> {
+ set_once(&self.sub_components, sub_components, "ConfigDescriptor sub_components")
+ }
+
+ fn build(mut self) -> Result<ConfigDescriptor> {
+ let component_name =
+ take_value(&mut self.component_name, "ConfigDescriptor component_name")?;
+ let sub_components = self.sub_components.take();
+ Ok(ConfigDescriptor { component_name, sub_components })
+ }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct SubComponent {
+ pub(crate) name: String,
+ pub(crate) version: u64,
+ pub(crate) code_hash: Vec<u8>,
+ pub(crate) authority_hash: Vec<u8>,
+}
+
+impl TryFrom<Value> for SubComponent {
+ type Error = RequestProcessingError;
+
+ fn try_from(value: Value) -> Result<Self> {
+ let entries = value_to_map(value, "SubComponent")?;
+ let mut builder = SubComponentBuilder::default();
+ for (key, value) in entries.into_iter() {
+ let key: i64 = value_to_num(key, "SubComponent key")?;
+ match key {
+ SUB_COMPONENT_NAME => {
+ builder.name(value_to_text(value, "SubComponent component_name")?)?
+ }
+ SUB_COMPONENT_VERSION => {
+ builder.version(value_to_num(value, "SubComponent version")?)?
+ }
+ SUB_COMPONENT_CODE_HASH => {
+ builder.code_hash(value_to_bytes(value, "SubComponent code_hash")?)?
+ }
+ SUB_COMPONENT_AUTHORITY_HASH => {
+ builder.authority_hash(value_to_bytes(value, "SubComponent authority_hash")?)?
+ }
+ k => {
+ error!("Unknown key in SubComponent: {}", k);
+ return Err(RequestProcessingError::InvalidDiceChain);
+ }
+ }
+ }
+ builder.build()
+ }
+}
+
+#[derive(Debug, Clone, Default)]
+struct SubComponentBuilder {
+ name: OnceCell<String>,
+ version: OnceCell<u64>,
+ code_hash: OnceCell<Vec<u8>>,
+ authority_hash: OnceCell<Vec<u8>>,
+}
+
+impl SubComponentBuilder {
+ fn name(&mut self, name: String) -> Result<()> {
+ set_once(&self.name, name, "SubComponent name")
+ }
+
+ fn version(&mut self, version: u64) -> Result<()> {
+ set_once(&self.version, version, "SubComponent version")
+ }
+
+ fn code_hash(&mut self, code_hash: Vec<u8>) -> Result<()> {
+ set_once(&self.code_hash, code_hash, "SubComponent code_hash")
+ }
+
+ fn authority_hash(&mut self, authority_hash: Vec<u8>) -> Result<()> {
+ set_once(&self.authority_hash, authority_hash, "SubComponent authority_hash")
+ }
+
+ fn build(mut self) -> Result<SubComponent> {
+ let name = take_value(&mut self.name, "SubComponent name")?;
+ let version = take_value(&mut self.version, "SubComponent version")?;
+ let code_hash = take_value(&mut self.code_hash, "SubComponent code_hash")?;
+ let authority_hash = take_value(&mut self.authority_hash, "SubComponent authority_hash")?;
+ Ok(SubComponent { name, version, code_hash, authority_hash })
+ }
+}
+
+fn to_mode(value: Value) -> Result<DiceMode> {
+ let mode = match value {
+ // Mode is supposed to be encoded as a 1-byte bstr, but some implementations instead
+ // encode it as an integer. Accept either. See b/273552826.
+ // If Mode is omitted, it should be treated as if it was NotConfigured, according to
+ // the Open Profile for DICE spec.
+ Value::Bytes(bytes) => {
+ if bytes.len() != 1 {
+ error!("Bytes array with invalid length for mode: {:?}", bytes.len());
+ return Err(RequestProcessingError::InvalidDiceChain);
+ }
+ bytes[0].into()
+ }
+ Value::Integer(i) => i,
+ v => return Err(CoseError::UnexpectedItem(cbor_value_type(&v), "bstr or int").into()),
+ };
+ let mode = match mode {
+ x if x == (DiceMode::kDiceModeNormal as i64).into() => DiceMode::kDiceModeNormal,
+ x if x == (DiceMode::kDiceModeDebug as i64).into() => DiceMode::kDiceModeDebug,
+ x if x == (DiceMode::kDiceModeMaintenance as i64).into() => DiceMode::kDiceModeMaintenance,
+ // If Mode is invalid, it should be treated as if it was NotConfigured, according to
+ // the Open Profile for DICE spec.
+ _ => DiceMode::kDiceModeNotInitialized,
+ };
+ Ok(mode)
+}
+
+#[derive(Default, Debug, Clone)]
+struct PayloadBuilder {
+ subject_public_key: OnceCell<PublicKey>,
+ mode: OnceCell<DiceMode>,
+ code_hash: OnceCell<[u8; HASH_SIZE]>,
+ authority_hash: OnceCell<[u8; HASH_SIZE]>,
+ config_descriptor: OnceCell<ConfigDescriptor>,
+}
+
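+/// Sets a builder field exactly once; reports an invalid DICE chain if the field is duplicated.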
+fn set_once<T>(field: &OnceCell<T>, value: T, field_name: &str) -> Result<()> {
+ field.set(value).map_err(|_| {
+ error!("Field '{field_name}' is duplicated in the Payload");
+ RequestProcessingError::InvalidDiceChain
+ })
+}
+
+fn take_value<T>(field: &mut OnceCell<T>, field_name: &str) -> Result<T> {
+ field.take().ok_or_else(|| {
+ error!("Field '{field_name}' is missing in the Payload");
+ RequestProcessingError::InvalidDiceChain
+ })
+}
+
+impl PayloadBuilder {
+ fn subject_public_key(&mut self, key: PublicKey) -> Result<()> {
+ set_once(&self.subject_public_key, key, "subject_public_key")
+ }
+
+ fn mode(&mut self, mode: DiceMode) -> Result<()> {
+ set_once(&self.mode, mode, "mode")
+ }
+
+ fn code_hash(&mut self, code_hash: [u8; HASH_SIZE]) -> Result<()> {
+ set_once(&self.code_hash, code_hash, "code_hash")
+ }
+
+ fn authority_hash(&mut self, authority_hash: [u8; HASH_SIZE]) -> Result<()> {
+ set_once(&self.authority_hash, authority_hash, "authority_hash")
+ }
+
+ fn config_descriptor(&mut self, config_descriptor: ConfigDescriptor) -> Result<()> {
+ set_once(&self.config_descriptor, config_descriptor, "config_descriptor")
+ }
+
+ fn build(mut self) -> Result<DiceChainEntryPayload> {
+ let subject_public_key = take_value(&mut self.subject_public_key, "subject_public_key")?;
+ // If Mode is omitted, it should be treated as if it was NotConfigured, according to
+ // the Open Profile for DICE spec.
+ let mode = self.mode.take().unwrap_or(DiceMode::kDiceModeNotInitialized);
+ let code_hash = take_value(&mut self.code_hash, "code_hash")?;
+ let authority_hash = take_value(&mut self.authority_hash, "authority_hash")?;
+ let config_descriptor = take_value(&mut self.config_descriptor, "config_descriptor")?;
+ Ok(DiceChainEntryPayload {
+ subject_public_key,
+ mode,
+ code_hash,
+ authority_hash,
+ config_descriptor,
+ })
+ }
+}
diff --git a/service_vm/requests/src/keyblob.rs b/service_vm/requests/src/keyblob.rs
index 456c879..1fb7a67 100644
--- a/service_vm/requests/src/keyblob.rs
+++ b/service_vm/requests/src/keyblob.rs
@@ -20,8 +20,6 @@
use core::result;
use serde::{Deserialize, Serialize};
use service_vm_comm::RequestProcessingError;
-// TODO(b/241428146): This will be used once the retrieval mechanism is available.
-#[cfg(test)]
use zeroize::Zeroizing;
type Result<T> = result::Result<T, RequestProcessingError>;
@@ -61,9 +59,6 @@
EncryptedKeyBlobV1::new(private_key, kek_secret).map(Self::V1)
}
- // TODO(b/241428146): Use this function to decrypt the retrieved keyblob once the retrieval
- // mechanism is available.
- #[cfg(test)]
pub(crate) fn decrypt_private_key(&self, kek_secret: &[u8]) -> Result<Zeroizing<Vec<u8>>> {
match self {
Self::V1(blob) => blob.decrypt_private_key(kek_secret),
@@ -85,7 +80,6 @@
Ok(Self { kek_salt, encrypted_private_key: ciphertext.to_vec() })
}
- #[cfg(test)]
fn decrypt_private_key(&self, kek_secret: &[u8]) -> Result<Zeroizing<Vec<u8>>> {
let kek = hkdf::<32>(kek_secret, &self.kek_salt, KEK_INFO, Digester::sha512())?;
let mut out = Zeroizing::new(vec![0u8; self.encrypted_private_key.len()]);
@@ -101,6 +95,15 @@
}
}
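+/// Deserializes a CBOR-encoded `EncryptedKeyBlob` and decrypts the private key it carries
+/// with the given KEK secret.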
+pub(crate) fn decrypt_private_key(
+ encrypted_key_blob: &[u8],
+ kek_secret: &[u8],
+) -> Result<Zeroizing<Vec<u8>>> {
+ let key_blob: EncryptedKeyBlob = cbor_util::deserialize(encrypted_key_blob)?;
+ let private_key = key_blob.decrypt_private_key(kek_secret)?;
+ Ok(private_key)
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -127,8 +130,7 @@
fn decrypting_keyblob_succeeds_with_the_same_kek() -> Result<()> {
let encrypted_key_blob =
cbor_util::serialize(&EncryptedKeyBlob::new(&TEST_KEY, &TEST_SECRET1)?)?;
- let encrypted_key_blob: EncryptedKeyBlob = cbor_util::deserialize(&encrypted_key_blob)?;
- let decrypted_key = encrypted_key_blob.decrypt_private_key(&TEST_SECRET1)?;
+ let decrypted_key = decrypt_private_key(&encrypted_key_blob, &TEST_SECRET1)?;
assert_eq!(TEST_KEY, decrypted_key.as_slice());
Ok(())
@@ -138,8 +140,7 @@
fn decrypting_keyblob_fails_with_a_different_kek() -> Result<()> {
let encrypted_key_blob =
cbor_util::serialize(&EncryptedKeyBlob::new(&TEST_KEY, &TEST_SECRET1)?)?;
- let encrypted_key_blob: EncryptedKeyBlob = cbor_util::deserialize(&encrypted_key_blob)?;
- let err = encrypted_key_blob.decrypt_private_key(&TEST_SECRET2).unwrap_err();
+ let err = decrypt_private_key(&encrypted_key_blob, &TEST_SECRET2).unwrap_err();
let expected_err: RequestProcessingError =
Error::CallFailed(ApiName::EVP_AEAD_CTX_open, CipherError::BadDecrypt.into()).into();
diff --git a/service_vm/requests/src/lib.rs b/service_vm/requests/src/lib.rs
index e3c5794..0dfac09 100644
--- a/service_vm/requests/src/lib.rs
+++ b/service_vm/requests/src/lib.rs
@@ -19,6 +19,9 @@
extern crate alloc;
mod api;
+mod cert;
+mod client_vm;
+mod dice;
mod keyblob;
mod pub_key;
mod rkp;
diff --git a/service_vm/requests/src/rkp.rs b/service_vm/requests/src/rkp.rs
index 8d7d771..9901a92 100644
--- a/service_vm/requests/src/rkp.rs
+++ b/service_vm/requests/src/rkp.rs
@@ -44,11 +44,12 @@
dice_artifacts: &dyn DiceArtifacts,
) -> Result<EcdsaP256KeyPair> {
let hmac_key = derive_hmac_key(dice_artifacts)?;
- let ec_key = EcKey::new_p256()?;
+ let mut ec_key = EcKey::new_p256()?;
+ ec_key.generate_key()?;
let maced_public_key = build_maced_public_key(ec_key.cose_public_key()?, hmac_key.as_ref())?;
let key_blob =
- EncryptedKeyBlob::new(ec_key.private_key()?.as_slice(), dice_artifacts.cdi_seal())?;
+ EncryptedKeyBlob::new(ec_key.ec_private_key()?.as_slice(), dice_artifacts.cdi_seal())?;
let key_pair =
EcdsaP256KeyPair { maced_public_key, key_blob: cbor_util::serialize(&key_blob)? };
@@ -75,10 +76,13 @@
public_keys.push(public_key.to_cbor_value()?);
}
// Builds `CsrPayload`.
+ // TODO(b/299256925): The device information is currently empty as we do not
+ // have sufficient details to include.
+ let device_info = Value::Map(Vec::new());
let csr_payload = cbor!([
Value::Integer(CSR_PAYLOAD_SCHEMA_V3.into()),
Value::Text(String::from(CERTIFICATE_TYPE)),
- // TODO(b/299256925): Add device info in CBOR format here.
+ device_info,
Value::Array(public_keys),
])?;
let csr_payload = cbor_util::serialize(&csr_payload)?;
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
index 937fbee..be13196 100644
--- a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
+++ b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
@@ -36,6 +36,8 @@
import java.io.File;
import java.io.FileNotFoundException;
import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
public abstract class MicrodroidHostTestCaseBase extends BaseHostJUnit4Test {
protected static final String TEST_ROOT = "/data/local/tmp/virt/";
@@ -52,6 +54,9 @@
(int) (MICRODROID_ADB_CONNECT_TIMEOUT_MINUTES * 60 * 1000
/ MICRODROID_COMMAND_RETRY_INTERVAL_MILLIS);
+ protected static final Set<String> SUPPORTED_GKI_VERSIONS =
+ new HashSet<>(Arrays.asList("android14-6.1"));
+
public static void prepareVirtualizationTestSetup(ITestDevice androidDevice)
throws DeviceNotAvailableException {
CommandRunner android = new CommandRunner(androidDevice);
diff --git a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
index 001dfeb..a54a22a 100644
--- a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
@@ -25,6 +25,7 @@
import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeFalse;
import static org.junit.Assume.assumeTrue;
@@ -102,14 +103,24 @@
}
}
- @Parameterized.Parameters(name = "protectedVm={0}")
+ @Parameterized.Parameters(name = "protectedVm={0},gki={1}")
public static Collection<Object[]> params() {
- return List.of(new Object[] {true}, new Object[] {false});
+ List<Object[]> ret = new ArrayList<>();
+ ret.add(new Object[] {true /* protectedVm */, null /* use microdroid kernel */});
+ ret.add(new Object[] {false /* protectedVm */, null /* use microdroid kernel */});
+ for (String gki : SUPPORTED_GKI_VERSIONS) {
+ ret.add(new Object[] {true /* protectedVm */, gki});
+ ret.add(new Object[] {false /* protectedVm */, gki});
+ }
+ return ret;
}
@Parameterized.Parameter(0)
public boolean mProtectedVm;
+ @Parameterized.Parameter(1)
+ public String mGki;
+
@Rule public TestLogData mTestLogs = new TestLogData();
@Rule public TestName mTestName = new TestName();
@Rule public TestMetrics mMetrics = new TestMetrics();
@@ -164,6 +175,12 @@
if (!updateBootconfigs) {
command.add("--do_not_update_bootconfigs");
}
+ // In some cases we run a CTS binary that is built from a different branch than the /system
+ // image under test. In such cases we might end up in a situation where the avb_version used
+ // in the CTS binary and the avb_version used to sign the com.android.virt APEX do not match.
+ // This is an unusual configuration, but unfortunately it can happen, hence we pass the
+ // --do_not_validate_avb_version flag here to make sure that CTS doesn't fail on it.
+ command.add("--do_not_validate_avb_version");
keyOverrides.forEach(
(filename, keyFile) ->
command.add("--key_override " + filename + "=" + keyFile.getPath()));
@@ -316,7 +333,8 @@
// - its idsig
// Load etc/microdroid.json
- File microdroidConfigFile = new File(virtApexEtcDir, "microdroid.json");
+ String os = mGki != null ? "microdroid_gki-" + mGki : "microdroid";
+ File microdroidConfigFile = new File(virtApexEtcDir, os + ".json");
JSONObject config = new JSONObject(FileUtil.readStringFromFile(microdroidConfigFile));
// Replace paths so that the config uses re-signed images from TEST_ROOT
@@ -332,7 +350,7 @@
}
// Add partitions to the second disk
- final String initrdPath = TEST_ROOT + "etc/microdroid_initrd_debuggable.img";
+ final String initrdPath = TEST_ROOT + "etc/" + os + "_initrd_debuggable.img";
config.put("initrd", initrdPath);
// Add instance image as a partition in disks[1]
disks.put(
@@ -400,6 +418,7 @@
.memoryMib(minMemorySize())
.cpuTopology("match_host")
.protectedVm(true)
+ .gki(mGki)
.build(getAndroidDevice());
// Assert
@@ -450,7 +469,9 @@
key, keyOverrides, /* isProtected= */ false, /* updateBootconfigs= */ true);
assertThatEventually(
100000,
- () -> getDevice().pullFileContents(LOG_PATH),
+ () ->
+ getDevice().pullFileContents(CONSOLE_PATH)
+ + getDevice().pullFileContents(LOG_PATH),
containsString("boot completed, time to run payload"));
vmInfo.mProcess.destroy();
@@ -524,6 +545,7 @@
.memoryMib(minMemorySize())
.cpuTopology("match_host")
.protectedVm(protectedVm)
+ .gki(mGki)
.build(getAndroidDevice());
mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
mMicrodroidDevice.enableAdbRoot();
@@ -678,6 +700,7 @@
.memoryMib(minMemorySize())
.cpuTopology("match_host")
.protectedVm(mProtectedVm)
+ .gki(mGki)
.build(device);
microdroid.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
device.shutdownMicrodroid(microdroid);
@@ -731,19 +754,10 @@
.isLessThan(atomVmExited.getElapsedTimeMillis());
}
- @Test
- @CddTest(requirements = {"9.17/C-1-1", "9.17/C-1-2", "9.17/C/1-3"})
- public void testMicrodroidBoots() throws Exception {
+ private void testMicrodroidBootsWithBuilder(MicrodroidBuilder builder) throws Exception {
CommandRunner android = new CommandRunner(getDevice());
- final String configPath = "assets/vm_config.json"; // path inside the APK
- mMicrodroidDevice =
- MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
- .debugLevel("full")
- .memoryMib(minMemorySize())
- .cpuTopology("match_host")
- .protectedVm(mProtectedVm)
- .build(getAndroidDevice());
+ mMicrodroidDevice = builder.build(getAndroidDevice());
mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
CommandRunner microdroid = new CommandRunner(mMicrodroidDevice);
@@ -807,6 +821,19 @@
}
@Test
+ @CddTest(requirements = {"9.17/C-1-1", "9.17/C-1-2", "9.17/C/1-3"})
+ public void testMicrodroidBoots() throws Exception {
+ final String configPath = "assets/vm_config.json"; // path inside the APK
+ testMicrodroidBootsWithBuilder(
+ MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
+ .debugLevel("full")
+ .memoryMib(minMemorySize())
+ .cpuTopology("match_host")
+ .protectedVm(mProtectedVm)
+ .gki(mGki));
+ }
+
+ @Test
public void testMicrodroidRamUsage() throws Exception {
final String configPath = "assets/vm_config.json";
mMicrodroidDevice =
@@ -815,6 +842,7 @@
.memoryMib(minMemorySize())
.cpuTopology("match_host")
.protectedVm(mProtectedVm)
+ .gki(mGki)
.build(getAndroidDevice());
mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
mMicrodroidDevice.enableAdbRoot();
@@ -970,11 +998,21 @@
.cpuTopology("match_host")
.protectedVm(true)
.addAssignableDevice(devices.get(0))
+ .gki(mGki)
.build(getAndroidDevice());
mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
}
+ @Test
+ public void testGkiVersions() throws Exception {
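+ // Every GKI version reported by the device must be known to the test infrastructure.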
+ for (String gki : getSupportedGKIVersions()) {
+ assertTrue(
+ "Unknown gki \"" + gki + "\". Supported gkis: " + SUPPORTED_GKI_VERSIONS,
+ SUPPORTED_GKI_VERSIONS.contains(gki));
+ }
+ }
+
@Before
public void setUp() throws Exception {
assumeDeviceIsCapable(getDevice());
@@ -989,6 +1027,12 @@
assumeTrue(
"Microdroid is not supported for specific VM protection type",
getAndroidDevice().supportsMicrodroid(mProtectedVm));
+
+ if (mGki != null) {
+ assumeTrue(
+ "GKI version \"" + mGki + "\" is not supported on this device",
+ getSupportedGKIVersions().contains(mGki));
+ }
}
@After
@@ -1029,21 +1073,28 @@
&& device.doesFileExist("/sys/bus/platform/drivers/vfio-platform"));
}
- private List<String> getAssignableDevices() throws Exception {
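+ // Runs "vm info" and parses the JSON string array printed on the line starting with the header.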
+ private List<String> parseStringArrayFieldsFromVmInfo(String header) throws Exception {
CommandRunner android = new CommandRunner(getDevice());
String result = android.run("/apex/com.android.virt/bin/vm", "info");
- List<String> devices = new ArrayList<>();
+ List<String> ret = new ArrayList<>();
for (String line : result.split("\n")) {
- final String header = "Assignable devices: ";
if (!line.startsWith(header)) continue;
JSONArray jsonArray = new JSONArray(line.substring(header.length()));
for (int i = 0; i < jsonArray.length(); i++) {
- devices.add(jsonArray.getString(i));
+ ret.add(jsonArray.getString(i));
}
break;
}
- return devices;
+ return ret;
+ }
+
+ private List<String> getAssignableDevices() throws Exception {
+ return parseStringArrayFieldsFromVmInfo("Assignable devices: ");
+ }
+
+ private List<String> getSupportedGKIVersions() throws Exception {
+ return parseStringArrayFieldsFromVmInfo("Available gki versions: ");
}
private TestDevice getAndroidDevice() {
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index c6291e4..b06eea6 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -1293,7 +1293,7 @@
assertThat(payloadStarted.getNow(false)).isTrue();
assertThat(exitCodeFuture.getNow(0)).isNotEqualTo(0);
- assertThat(listener.getLogOutput()).contains(reason);
+ assertThat(listener.getConsoleOutput() + listener.getLogOutput()).contains(reason);
}
@Test
@@ -1698,7 +1698,7 @@
.command(
"logcat",
"-e",
- "virtualizationmanager::aidl: Log.*executing main task",
+ "virtualizationmanager::aidl: (Console|Log).*executing main task",
"-t",
time)
.start();
diff --git a/virtualizationmanager/Android.bp b/virtualizationmanager/Android.bp
index 33897b2..60c94fc 100644
--- a/virtualizationmanager/Android.bp
+++ b/virtualizationmanager/Android.bp
@@ -5,7 +5,11 @@
rust_defaults {
name: "virtualizationmanager_defaults",
crate_name: "virtualizationmanager",
- defaults: ["avf_build_flags_rust"],
+ defaults: [
+ "avf_build_flags_rust",
+ "secretkeeper_use_latest_hal_aidl_rust",
+ "authgraph_use_latest_hal_aidl_rust",
+ ],
edition: "2021",
// Only build on targets which crosvm builds on.
enabled: false,
@@ -34,6 +38,7 @@
"libclap",
"libcommand_fds",
"libdisk",
+ "libhex",
"libhypervisor_props",
"liblazy_static",
"liblibc",
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index 70e9be9..8c2099f 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -52,6 +52,14 @@
use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::IVirtualMachineService::{
BnVirtualMachineService, IVirtualMachineService,
};
+use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper::ISecretkeeper::{BnSecretkeeper, ISecretkeeper};
+use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper::SecretId::SecretId;
+use android_hardware_security_authgraph::aidl::android::hardware::security::authgraph::{
+ Arc::Arc as AuthgraphArc, IAuthGraphKeyExchange::IAuthGraphKeyExchange,
+ IAuthGraphKeyExchange::BnAuthGraphKeyExchange, Identity::Identity, KeInitResult::KeInitResult,
+ Key::Key, PubKey::PubKey, SessionIdSignature::SessionIdSignature, SessionInfo::SessionInfo,
+ SessionInitiationInfo::SessionInitiationInfo,
+};
use anyhow::{anyhow, bail, Context, Result};
use apkverify::{HashAlgorithm, V4Signature};
use avflog::LogResult;
@@ -66,6 +74,7 @@
use log::{debug, error, info, warn};
use microdroid_payload_config::{OsConfig, Task, TaskType, VmPayloadConfig};
use nix::unistd::pipe;
+use regex::Regex;
use rpcbinder::RpcServer;
use rustutils::system_properties;
use semver::VersionReq;
@@ -101,9 +110,13 @@
const MICRODROID_OS_NAME: &str = "microdroid";
+// TODO(b/291213394): Use 'default' instance for secretkeeper instead of 'nonsecure'
+const SECRETKEEPER_IDENTIFIER: &str =
+ "android.hardware.security.secretkeeper.ISecretkeeper/nonsecure";
+
const UNFORMATTED_STORAGE_MAGIC: &str = "UNFORMATTED-STORAGE";
-/// Roughly estimated sufficient size for storing vendor public key into DTBO.
+/// Roughly estimated size sufficient for storing the root digest of the vendor hashtree descriptor in the DTBO.
const EMPTY_VENDOR_DT_OVERLAY_BUF_SIZE: usize = 10000;
/// crosvm requires all partitions to be a multiple of 4KiB.
@@ -113,6 +126,8 @@
pub static ref GLOBAL_SERVICE: Strong<dyn IVirtualizationServiceInternal> =
wait_for_interface(BINDER_SERVICE_IDENTIFIER)
.expect("Could not connect to VirtualizationServiceInternal");
+ static ref MICRODROID_GKI_OS_NAME_PATTERN: Regex =
+ Regex::new(r"^microdroid_gki-android\d+-\d+\.\d+$").expect("Failed to construct Regex");
}
fn create_or_update_idsig_file(
@@ -168,23 +183,23 @@
}
impl Interface for VirtualizationService {
- fn dump(&self, mut file: &File, _args: &[&CStr]) -> Result<(), StatusCode> {
+ fn dump(&self, writer: &mut dyn Write, _args: &[&CStr]) -> Result<(), StatusCode> {
check_permission("android.permission.DUMP").or(Err(StatusCode::PERMISSION_DENIED))?;
let state = &mut *self.state.lock().unwrap();
let vms = state.vms();
- writeln!(file, "Running {0} VMs:", vms.len()).or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(writer, "Running {0} VMs:", vms.len()).or(Err(StatusCode::UNKNOWN_ERROR))?;
for vm in vms {
- writeln!(file, "VM CID: {}", vm.cid).or(Err(StatusCode::UNKNOWN_ERROR))?;
- writeln!(file, "\tState: {:?}", vm.vm_state.lock().unwrap())
+ writeln!(writer, "VM CID: {}", vm.cid).or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(writer, "\tState: {:?}", vm.vm_state.lock().unwrap())
.or(Err(StatusCode::UNKNOWN_ERROR))?;
- writeln!(file, "\tPayload state {:?}", vm.payload_state())
+ writeln!(writer, "\tPayload state {:?}", vm.payload_state())
.or(Err(StatusCode::UNKNOWN_ERROR))?;
- writeln!(file, "\tProtected: {}", vm.protected).or(Err(StatusCode::UNKNOWN_ERROR))?;
- writeln!(file, "\ttemporary_directory: {}", vm.temporary_directory.to_string_lossy())
+ writeln!(writer, "\tProtected: {}", vm.protected).or(Err(StatusCode::UNKNOWN_ERROR))?;
+ writeln!(writer, "\ttemporary_directory: {}", vm.temporary_directory.to_string_lossy())
.or(Err(StatusCode::UNKNOWN_ERROR))?;
- writeln!(file, "\trequester_uid: {}", vm.requester_uid)
+ writeln!(writer, "\trequester_uid: {}", vm.requester_uid)
.or(Err(StatusCode::UNKNOWN_ERROR))?;
- writeln!(file, "\trequester_debug_pid: {}", vm.requester_debug_pid)
+ writeln!(writer, "\trequester_debug_pid: {}", vm.requester_debug_pid)
.or(Err(StatusCode::UNKNOWN_ERROR))?;
}
Ok(())
@@ -366,13 +381,17 @@
check_gdb_allowed(config)?;
}
- let vendor_public_key = extract_vendor_public_key(config)
- .context("Failed to extract vendor public key")
- .or_service_specific_exception(-1)?;
- let dtbo_vendor = if let Some(vendor_public_key) = vendor_public_key {
+ let vendor_hashtree_descriptor_root_digest =
+ extract_vendor_hashtree_descriptor_root_digest(config)
+ .context("Failed to extract root digest of vendor")
+ .or_service_specific_exception(-1)?;
+ let dtbo_vendor = if let Some(vendor_hashtree_descriptor_root_digest) =
+ vendor_hashtree_descriptor_root_digest
+ {
+ let root_digest_hex = hex::encode(vendor_hashtree_descriptor_root_digest);
let dtbo_for_vendor_image = temporary_directory.join("dtbo_vendor");
- create_dtbo_for_vendor_image(&vendor_public_key, &dtbo_for_vendor_image)
- .context("Failed to write vendor_public_key")
+ create_dtbo_for_vendor_image(root_digest_hex.as_bytes(), &dtbo_for_vendor_image)
+ .context("Failed to write root digest of vendor")
.or_service_specific_exception(-1)?;
let file = File::open(dtbo_for_vendor_image)
.context("Failed to open dtbo_vendor")
@@ -480,7 +499,7 @@
}
};
- let vfio_devices = if !config.devices.is_empty() {
+ let (vfio_devices, dtbo) = if !config.devices.is_empty() {
let mut set = HashSet::new();
for device in config.devices.iter() {
let path = canonicalize(device)
@@ -491,16 +510,25 @@
.or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
}
}
- GLOBAL_SERVICE
+ let devices = GLOBAL_SERVICE
.bindDevicesToVfioDriver(&config.devices)?
.into_iter()
.map(|x| VfioDevice {
sysfs_path: PathBuf::from(&x.sysfsPath),
dtbo_label: x.dtboLabel,
})
- .collect::<Vec<_>>()
+ .collect::<Vec<_>>();
+ let dtbo_file = File::from(
+ GLOBAL_SERVICE
+ .getDtboFile()?
+ .as_ref()
+ .try_clone()
+ .context("Failed to create File from ParcelFileDescriptor")
+ .or_binder_exception(ExceptionCode::BAD_PARCELABLE)?,
+ );
+ (devices, Some(dtbo_file))
} else {
- vec![]
+ (vec![], None)
};
// Actually start the VM.
@@ -527,6 +555,7 @@
detect_hangup: is_app_config,
gdb_port,
vfio_devices,
+ dtbo,
dtbo_vendor,
};
let instance = Arc::new(
@@ -546,7 +575,9 @@
}
}
-fn extract_vendor_public_key(config: &VirtualMachineConfig) -> Result<Option<Vec<u8>>> {
+fn extract_vendor_hashtree_descriptor_root_digest(
+ config: &VirtualMachineConfig,
+) -> Result<Option<Vec<u8>>> {
let VirtualMachineConfig::AppConfig(config) = config else {
return Ok(None);
};
@@ -561,15 +592,19 @@
let size = file.metadata().context("Failed to get metadata from microdroid-vendor.img")?.len();
let vbmeta = VbMetaImage::verify_reader_region(&file, 0, size)
.context("Failed to get vbmeta from microdroid-vendor.img")?;
- let vendor_public_key = vbmeta
- .public_key()
- .ok_or(anyhow!("No public key is extracted from microdroid-vendor.img"))?
- .to_vec();
- Ok(Some(vendor_public_key))
+ for descriptor in vbmeta.descriptors()?.iter() {
+ if let vbmeta::Descriptor::Hashtree(_) = descriptor {
+ return Ok(Some(descriptor.to_hashtree()?.root_digest().to_vec()));
+ }
+ }
+ Err(anyhow!("No root digest is extracted from microdroid-vendor.img"))
}
-fn create_dtbo_for_vendor_image(vendor_public_key: &[u8], dtbo: &PathBuf) -> Result<()> {
+fn create_dtbo_for_vendor_image(
+ vendor_hashtree_descriptor_root_digest: &[u8],
+ dtbo: &PathBuf,
+) -> Result<()> {
if dtbo.exists() {
return Err(anyhow!("DTBO file already exists"));
}
@@ -597,10 +632,16 @@
let mut avf_node = overlay_node
.add_subnode(avf_node_name.as_c_str())
.map_err(|e| anyhow!("Failed to create avf node: {:?}", e))?;
- let vendor_public_key_name = CString::new("vendor_public_key")?;
+ let vendor_hashtree_descriptor_root_digest_name =
+ CString::new("vendor_hashtree_descriptor_root_digest")?;
avf_node
- .setprop(vendor_public_key_name.as_c_str(), vendor_public_key)
- .map_err(|e| anyhow!("Failed to set avf/vendor_public_key: {:?}", e))?;
+ .setprop(
+ vendor_hashtree_descriptor_root_digest_name.as_c_str(),
+ vendor_hashtree_descriptor_root_digest,
+ )
+ .map_err(|e| {
+ anyhow!("Failed to set avf/vendor_hashtree_descriptor_root_digest: {:?}", e)
+ })?;
fdt.pack().map_err(|e| anyhow!("Failed to pack fdt: {:?}", e))?;
let mut file = File::create(dtbo)?;
@@ -694,6 +735,16 @@
}
}
+fn is_valid_os(os_name: &str) -> bool {
+ if os_name == MICRODROID_OS_NAME {
+ true
+ } else if cfg!(vendor_modules) && MICRODROID_GKI_OS_NAME_PATTERN.is_match(os_name) {
+ PathBuf::from(format!("/apex/com.android.virt/etc/{}.json", os_name)).exists()
+ } else {
+ false
+ }
+}
+
fn load_app_config(
config: &VirtualMachineAppConfig,
debug_config: &DebugConfig,
@@ -717,9 +768,9 @@
Payload::PayloadConfig(payload_config) => create_vm_payload_config(payload_config)?,
};
- // For now, the only supported OS is Microdroid
+ // For now, the only supported OSes are Microdroid and Microdroid GKI
let os_name = vm_payload_config.os.name.as_str();
- if os_name != MICRODROID_OS_NAME {
+ if !is_valid_os(os_name) {
bail!("Unknown OS \"{}\"", os_name);
}
@@ -753,7 +804,7 @@
vm_config.cpuTopology = config.cpuTopology;
// Microdroid takes additional init ramdisk & (optionally) storage image
- add_microdroid_system_images(config, instance_file, storage_image, &mut vm_config)?;
+ add_microdroid_system_images(config, instance_file, storage_image, os_name, &mut vm_config)?;
// Include Microdroid payload disk (contains apks, idsigs) in vm config
add_microdroid_payload_images(
@@ -788,8 +839,9 @@
}
let task = Task { type_: TaskType::MicrodroidLauncher, command: payload_binary_name.clone() };
+ let name = payload_config.osName.clone();
Ok(VmPayloadConfig {
- os: OsConfig { name: MICRODROID_OS_NAME.to_owned() },
+ os: OsConfig { name },
task: Some(task),
apexes: vec![],
extra_apks: vec![],
@@ -871,7 +923,7 @@
/// Check that a file SELinux label is acceptable.
///
/// We only want to allow code in a VM to be sourced from places that apps, and the
-/// system, do not have write access to.
+/// system or vendor, do not have write access to.
///
/// Note that sepolicy must also grant read access for these types to both virtualization
/// service and crosvm.
@@ -885,6 +937,7 @@
| "staging_data_file" // updated/staged APEX images
| "system_file" // immutable dm-verity protected partition
| "virtualizationservice_data_file" // files created by VS / VirtMgr
+ | "vendor_microdroid_file" // immutable dm-verity protected partition (/vendor/etc/avf/microdroid/.*)
=> Ok(()),
_ => bail!("Label {} is not allowed", context),
}
@@ -1114,6 +1167,7 @@
.try_clone()
.context("Failed to clone File from ParcelFileDescriptor")
.or_binder_exception(ExceptionCode::BAD_PARCELABLE)
+ .map(File::from)
}
/// Converts an `&Option<ParcelFileDescriptor>` to an `Option<File>` by cloning the file.
@@ -1194,10 +1248,24 @@
Ok(())
}
+fn check_no_devices(config: &VirtualMachineConfig) -> binder::Result<()> {
+ let VirtualMachineConfig::AppConfig(config) = config else { return Ok(()) };
+ if let Some(custom_config) = &config.customConfig {
+ if !custom_config.devices.is_empty() {
+ return Err(anyhow!("device assignment feature is disabled"))
+ .or_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION);
+ }
+ }
+ Ok(())
+}
+
fn check_config_features(config: &VirtualMachineConfig) -> binder::Result<()> {
if !cfg!(vendor_modules) {
check_no_vendor_modules(config)?;
}
+ if !cfg!(device_assignment) {
+ check_no_devices(config)?;
+ }
Ok(())
}
@@ -1330,8 +1398,22 @@
}
}
+ fn getSecretkeeper(&self) -> binder::Result<Option<Strong<dyn ISecretkeeper>>> {
+ let sk = match binder::get_interface(SECRETKEEPER_IDENTIFIER) {
+ Ok(sk) => {
+ Some(BnSecretkeeper::new_binder(SecretkeeperProxy(sk), BinderFeatures::default()))
+ }
+ Err(StatusCode::NAME_NOT_FOUND) => None,
+ Err(e) => {
+ error!("unexpected error while fetching connection to Secretkeeper {:?}", e);
+ return Err(e.into());
+ }
+ };
+ Ok(sk)
+ }
+
fn requestAttestation(&self, csr: &[u8]) -> binder::Result<Vec<Certificate>> {
- GLOBAL_SERVICE.requestAttestation(csr)
+ GLOBAL_SERVICE.requestAttestation(csr, get_calling_uid() as i32)
}
}
@@ -1462,13 +1544,14 @@
#[test]
fn test_create_dtbo_for_vendor_image() -> Result<()> {
- let vendor_public_key = String::from("foo");
- let vendor_public_key = vendor_public_key.as_bytes();
+ let vendor_hashtree_descriptor_root_digest = String::from("foo");
+ let vendor_hashtree_descriptor_root_digest =
+ vendor_hashtree_descriptor_root_digest.as_bytes();
let tmp_dir = tempfile::TempDir::new()?;
let dtbo_path = tmp_dir.path().to_path_buf().join("bar");
- create_dtbo_for_vendor_image(vendor_public_key, &dtbo_path)?;
+ create_dtbo_for_vendor_image(vendor_hashtree_descriptor_root_digest, &dtbo_path)?;
let data = std::fs::read(dtbo_path)?;
let fdt = Fdt::from_slice(&data).unwrap();
@@ -1489,9 +1572,11 @@
let Some(avf_node) = avf_node else {
bail!("avf_node shouldn't be None.");
};
- let vendor_public_key_name = CString::new("vendor_public_key")?;
- let key_from_dtbo = avf_node.getprop(vendor_public_key_name.as_c_str()).unwrap();
- assert_eq!(key_from_dtbo, Some(vendor_public_key));
+ let vendor_hashtree_descriptor_root_digest_name =
+ CString::new("vendor_hashtree_descriptor_root_digest")?;
+ let digest_from_dtbo =
+ avf_node.getprop(vendor_hashtree_descriptor_root_digest_name.as_c_str()).unwrap();
+ assert_eq!(digest_from_dtbo, Some(vendor_hashtree_descriptor_root_digest));
tmp_dir.close()?;
Ok(())
@@ -1499,18 +1584,84 @@
#[test]
fn test_create_dtbo_for_vendor_image_throws_error_if_already_exists() -> Result<()> {
- let vendor_public_key = String::from("foo");
- let vendor_public_key = vendor_public_key.as_bytes();
+ let vendor_hashtree_descriptor_root_digest = String::from("foo");
+ let vendor_hashtree_descriptor_root_digest =
+ vendor_hashtree_descriptor_root_digest.as_bytes();
let tmp_dir = tempfile::TempDir::new()?;
let dtbo_path = tmp_dir.path().to_path_buf().join("bar");
- create_dtbo_for_vendor_image(vendor_public_key, &dtbo_path)?;
+ create_dtbo_for_vendor_image(vendor_hashtree_descriptor_root_digest, &dtbo_path)?;
- let ret_second_trial = create_dtbo_for_vendor_image(vendor_public_key, &dtbo_path);
+ let ret_second_trial =
+ create_dtbo_for_vendor_image(vendor_hashtree_descriptor_root_digest, &dtbo_path);
assert!(ret_second_trial.is_err(), "should fail");
tmp_dir.close()?;
Ok(())
}
}
+
+struct SecretkeeperProxy(Strong<dyn ISecretkeeper>);
+
+impl Interface for SecretkeeperProxy {}
+
+impl ISecretkeeper for SecretkeeperProxy {
+ fn processSecretManagementRequest(&self, req: &[u8]) -> binder::Result<Vec<u8>> {
+ // Pass the request to the channel, and read the response.
+ self.0.processSecretManagementRequest(req)
+ }
+
+ fn getAuthGraphKe(&self) -> binder::Result<Strong<dyn IAuthGraphKeyExchange>> {
+ let ag = AuthGraphKeyExchangeProxy(self.0.getAuthGraphKe()?);
+ Ok(BnAuthGraphKeyExchange::new_binder(ag, BinderFeatures::default()))
+ }
+
+ fn deleteIds(&self, ids: &[SecretId]) -> binder::Result<()> {
+ self.0.deleteIds(ids)
+ }
+
+ fn deleteAll(&self) -> binder::Result<()> {
+ self.0.deleteAll()
+ }
+}
+
+struct AuthGraphKeyExchangeProxy(Strong<dyn IAuthGraphKeyExchange>);
+
+impl Interface for AuthGraphKeyExchangeProxy {}
+
+impl IAuthGraphKeyExchange for AuthGraphKeyExchangeProxy {
+ fn create(&self) -> binder::Result<SessionInitiationInfo> {
+ self.0.create()
+ }
+
+ fn init(
+ &self,
+ peer_pub_key: &PubKey,
+ peer_id: &Identity,
+ peer_nonce: &[u8],
+ peer_version: i32,
+ ) -> binder::Result<KeInitResult> {
+ self.0.init(peer_pub_key, peer_id, peer_nonce, peer_version)
+ }
+
+ fn finish(
+ &self,
+ peer_pub_key: &PubKey,
+ peer_id: &Identity,
+ peer_signature: &SessionIdSignature,
+ peer_nonce: &[u8],
+ peer_version: i32,
+ own_key: &Key,
+ ) -> binder::Result<SessionInfo> {
+ self.0.finish(peer_pub_key, peer_id, peer_signature, peer_nonce, peer_version, own_key)
+ }
+
+ fn authenticationComplete(
+ &self,
+ peer_signature: &SessionIdSignature,
+ shared_keys: &[AuthgraphArc; 2],
+ ) -> binder::Result<[AuthgraphArc; 2]> {
+ self.0.authenticationComplete(peer_signature, shared_keys)
+ }
+}
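
The new is_valid_os helper above accepts "microdroid" unconditionally and, when the vendor_modules cfg is set, any OS name matching MICRODROID_GKI_OS_NAME_PATTERN for which a JSON config exists under /apex/com.android.virt/etc/. A minimal standalone sketch of the name-matching part only, assuming the regex crate (the filesystem check is omitted):

    use regex::Regex;

    fn main() {
        // Same pattern as MICRODROID_GKI_OS_NAME_PATTERN above.
        let pattern = Regex::new(r"^microdroid_gki-android\d+-\d+\.\d+$").unwrap();

        assert!(pattern.is_match("microdroid_gki-android14-6.1"));
        // Plain Microdroid is accepted separately, without consulting the pattern.
        assert!(!pattern.is_match("microdroid"));
        // A config file name or an arbitrary string does not qualify as an OS name.
        assert!(!pattern.is_match("microdroid_gki-android14-6.1.json"));
        assert!(!pattern.is_match("gki-android14-6.1"));
    }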
diff --git a/virtualizationmanager/src/composite.rs b/virtualizationmanager/src/composite.rs
index fe17ff4..a4b7eae 100644
--- a/virtualizationmanager/src/composite.rs
+++ b/virtualizationmanager/src/composite.rs
@@ -93,7 +93,8 @@
.context("Invalid partition image file descriptor")?
.as_ref()
.try_clone()
- .context("Failed to clone partition image file descriptor")?;
+ .context("Failed to clone partition image file descriptor")?
+ .into();
let path = fd_path_for_file(&file);
let size = get_partition_size(&file, &path)?;
files.push(file);
diff --git a/virtualizationmanager/src/crosvm.rs b/virtualizationmanager/src/crosvm.rs
index 8d3abed..f0c3e4b 100644
--- a/virtualizationmanager/src/crosvm.rs
+++ b/virtualizationmanager/src/crosvm.rs
@@ -116,6 +116,7 @@
pub detect_hangup: bool,
pub gdb_port: Option<NonZeroU16>,
pub vfio_devices: Vec<VfioDevice>,
+ pub dtbo: Option<File>,
pub dtbo_vendor: Option<File>,
}
@@ -717,17 +718,29 @@
}
if let Some(p) = path.to_str() {
- Ok(format!("--vfio={p},iommu=viommu,dt-symbol={0}", device.dtbo_label))
+ Ok(format!("--vfio={p},iommu=pkvm-iommu,dt-symbol={0}", device.dtbo_label))
} else {
bail!("invalid path {path:?}");
}
}
-fn append_platform_devices(command: &mut Command, config: &CrosvmConfig) -> Result<(), Error> {
+fn append_platform_devices(
+ command: &mut Command,
+ preserved_fds: &mut Vec<RawFd>,
+ config: &CrosvmConfig,
+) -> Result<(), Error> {
+ if config.vfio_devices.is_empty() {
+ return Ok(());
+ }
+
+ let Some(dtbo) = &config.dtbo else {
+ bail!("VFIO devices assigned but no DTBO available");
+ };
+ command.arg(format!("--device-tree-overlay={},filter", add_preserved_fd(preserved_fds, dtbo)));
+
for device in &config.vfio_devices {
command.arg(vfio_argument_for_platform_device(device)?);
}
- // TODO(b/291192693): add dtbo to command line when assigned device is not empty.
Ok(())
}
@@ -891,7 +904,7 @@
command.arg("--device-tree-overlay").arg(add_preserved_fd(&mut preserved_fds, dtbo_vendor));
}
- append_platform_devices(&mut command, &config)?;
+ append_platform_devices(&mut command, &mut preserved_fds, &config)?;
debug!("Preserving FDs {:?}", preserved_fds);
command.preserved_fds(preserved_fds);
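
With the change above, whenever VFIO devices are assigned crosvm receives the VM DTBO as a filtered device-tree overlay, followed by one --vfio argument per device using the pkvm-iommu IOMMU type. A rough sketch of the resulting argument shapes, assuming add_preserved_fd yields a /proc/self/fd path and using a hypothetical fd number, sysfs path, and DTBO label:

    fn main() {
        // Hypothetical values, for illustration only.
        let dtbo_fd = 10;
        let sysfs_path = "/sys/bus/platform/devices/16d00000.eh";
        let dtbo_label = "eh";

        // Mirrors the format strings used in crosvm.rs above.
        let overlay_arg = format!("--device-tree-overlay=/proc/self/fd/{dtbo_fd},filter");
        let vfio_arg = format!("--vfio={sysfs_path},iommu=pkvm-iommu,dt-symbol={dtbo_label}");

        println!("{overlay_arg} {vfio_arg}");
    }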
diff --git a/virtualizationmanager/src/payload.rs b/virtualizationmanager/src/payload.rs
index 3bfad33..c19c103 100644
--- a/virtualizationmanager/src/payload.rs
+++ b/virtualizationmanager/src/payload.rs
@@ -425,6 +425,7 @@
config: &VirtualMachineAppConfig,
instance_file: File,
storage_image: Option<File>,
+ os_name: &str,
vm_config: &mut VirtualMachineRawConfig,
) -> Result<()> {
let debug_suffix = match config.debugLevel {
@@ -432,7 +433,7 @@
DebugLevel::FULL => "debuggable",
_ => return Err(anyhow!("unsupported debug level: {:?}", config.debugLevel)),
};
- let initrd = format!("/apex/com.android.virt/etc/microdroid_initrd_{}.img", debug_suffix);
+ let initrd = format!("/apex/com.android.virt/etc/{os_name}_initrd_{debug_suffix}.img");
vm_config.initrd = Some(open_parcel_file(Path::new(&initrd), false)?);
let mut writable_partitions = vec![Partition {
diff --git a/virtualizationmanager/src/selinux.rs b/virtualizationmanager/src/selinux.rs
index 0485943..ba62b7f 100644
--- a/virtualizationmanager/src/selinux.rs
+++ b/virtualizationmanager/src/selinux.rs
@@ -17,11 +17,10 @@
use anyhow::{anyhow, bail, Context, Result};
use std::ffi::{CStr, CString};
use std::fmt;
-use std::fs::File;
use std::io;
use std::ops::Deref;
+use std::os::fd::AsRawFd;
use std::os::raw::c_char;
-use std::os::unix::io::AsRawFd;
use std::ptr;
// Partially copied from system/security/keystore2/selinux/src/lib.rs
@@ -102,7 +101,7 @@
}
}
-pub fn getfilecon(file: &File) -> Result<SeContext> {
+pub fn getfilecon<F: AsRawFd>(file: &F) -> Result<SeContext> {
let fd = file.as_raw_fd();
let mut con: *mut c_char = ptr::null_mut();
// SAFETY: the returned pointer `con` is wrapped in SeContext::Raw which is freed with
diff --git a/virtualizationservice/Android.bp b/virtualizationservice/Android.bp
index c00445d..3f8d193 100644
--- a/virtualizationservice/Android.bp
+++ b/virtualizationservice/Android.bp
@@ -2,8 +2,8 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-rust_binary {
- name: "virtualizationservice",
+rust_defaults {
+ name: "virtualizationservice_defaults",
crate_name: "virtualizationservice",
defaults: ["avf_build_flags_rust"],
edition: "2021",
@@ -31,9 +31,11 @@
"libanyhow",
"libavflog",
"libbinder_rs",
+ "libhypervisor_props",
"liblibc",
"liblog_rust",
"libnix",
+ "librkpd_client",
"librustutils",
"libvmclient",
"libstatslog_virtualization_rust",
@@ -43,13 +45,39 @@
"libserde_xml_rs",
"libservice_vm_comm",
"libservice_vm_manager",
+ "libx509_parser",
],
apex_available: ["com.android.virt"],
}
+rust_binary {
+ name: "virtualizationservice",
+ defaults: ["virtualizationservice_defaults"],
+}
+
xsd_config {
name: "assignable_devices",
srcs: ["assignable_devices.xsd"],
api_dir: "schema",
package_name: "android.system.virtualizationservice",
}
+
+rust_test {
+ name: "virtualizationservice_test",
+ defaults: ["virtualizationservice_defaults"],
+ test_suites: ["general-tests"],
+ data: [
+ ":test_rkp_cert_chain",
+ ],
+}
+
+// The chain originates from a CTS test for Keymint, with the Keymint certificate
+// (leaf certificate) truncated.
+//
+// The certificate chain begins with a leaf certificate obtained from RKP and ends
+// with a root certificate. Each certificate in the chain is signed with the private key
+// of the subsequent certificate in the chain.
+filegroup {
+ name: "test_rkp_cert_chain",
+ srcs: ["testdata/rkp_cert_chain.der"],
+}
diff --git a/virtualizationservice/TEST_MAPPING b/virtualizationservice/TEST_MAPPING
new file mode 100644
index 0000000..4fef83c
--- /dev/null
+++ b/virtualizationservice/TEST_MAPPING
@@ -0,0 +1,9 @@
+// When adding or removing tests here, don't forget to amend _all_modules list in
+// wireless/android/busytown/ath_config/configs/prod/avf/tests.gcl
+{
+ "avf-presubmit" : [
+ {
+ "name" : "virtualizationservice_test"
+ }
+ ]
+}
diff --git a/virtualizationservice/aidl/Android.bp b/virtualizationservice/aidl/Android.bp
index 91d91aa..8ca375a 100644
--- a/virtualizationservice/aidl/Android.bp
+++ b/virtualizationservice/aidl/Android.bp
@@ -57,11 +57,14 @@
aidl_interface {
name: "android.system.virtualmachineservice",
srcs: ["android/system/virtualmachineservice/**/*.aidl"],
- imports: ["android.system.virtualizationcommon"],
+ imports: [
+ "android.hardware.security.secretkeeper-V1",
+ "android.system.virtualizationcommon",
+ ],
unstable: true,
backend: {
java: {
- sdk_version: "module_current",
+ enabled: false,
},
rust: {
enabled: true,
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachinePayloadConfig.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachinePayloadConfig.aidl
index 55c2f5d..f3f54f3 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachinePayloadConfig.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachinePayloadConfig.aidl
@@ -23,4 +23,9 @@
* function invoked.
*/
@utf8InCpp String payloadBinaryName;
+
+ /**
+ * Name of the OS to run the payload on. Currently "microdroid" and "microdroid_gki" are supported.
+ */
+ @utf8InCpp String osName = "microdroid";
}
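
The new osName field defaults to "microdroid"; a client opts into a GKI-based Microdroid by naming it explicitly. A minimal sketch of that selection logic, mirroring what the --gki option added to the vm tool later in this patch does (the gki argument stands in for the parsed CLI option):

    fn os_name_for(gki: Option<&str>) -> String {
        match gki {
            Some(ver) => format!("microdroid_gki-{ver}"),
            None => "microdroid".to_owned(),
        }
    }

    fn main() {
        assert_eq!(os_name_for(None), "microdroid");
        assert_eq!(os_name_for(Some("android14-6.1")), "microdroid_gki-android14-6.1");
    }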
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
index 2592135..a2cb693 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
@@ -60,10 +60,14 @@
* Requests a certificate chain for the provided certificate signing request (CSR).
*
* @param csr The certificate signing request.
+ * @param requesterUid The UID of the app that requests remote attestation. The client VM to be
+ * attested is owned by this app.
+ * The uniqueness of the UID ensures that no two VMs owned by different apps
+ * are able to correlate keys.
* @return A sequence of DER-encoded X.509 certificates that make up the attestation
* key's certificate chain. The attestation key is provided in the CSR.
*/
- Certificate[] requestAttestation(in byte[] csr);
+ Certificate[] requestAttestation(in byte[] csr, int requesterUid);
/**
* Get a list of assignable devices.
@@ -77,4 +81,7 @@
* @return a list of pairs (sysfs path, DTBO node label) for devices.
*/
BoundDevice[] bindDevicesToVfioDriver(in String[] devices);
+
+ /** Returns a read-only file descriptor of the VM DTBO file. */
+ ParcelFileDescriptor getDtboFile();
}
diff --git a/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl b/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
index 3c60478..cf91302 100644
--- a/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
+++ b/virtualizationservice/aidl/android/system/virtualmachineservice/IVirtualMachineService.aidl
@@ -15,6 +15,7 @@
*/
package android.system.virtualmachineservice;
+import android.hardware.security.secretkeeper.ISecretkeeper;
import android.system.virtualizationcommon.Certificate;
import android.system.virtualizationcommon.ErrorCode;
@@ -54,4 +55,11 @@
* key's certificate chain. The attestation key is provided in the CSR.
*/
Certificate[] requestAttestation(in byte[] csr);
+
+ /**
+ * Requests a connection to Secretkeeper. This is used by a pVM to store Anti-Rollback
+ * protected secrets. Note that the return value is nullable to reflect that the
+ * Secretkeeper HAL may not be present.
+ */
+ @nullable ISecretkeeper getSecretkeeper();
}
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index 2be2b19..3ac1e60 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -14,7 +14,7 @@
//! Implementation of the AIDL interface of the VirtualizationService.
-use crate::{get_calling_pid, get_calling_uid};
+use crate::{get_calling_pid, get_calling_uid, REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME};
use crate::atom::{forward_vm_booted_atom, forward_vm_creation_atom, forward_vm_exited_atom};
use crate::rkpvm::request_attestation;
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
@@ -39,10 +39,11 @@
use binder::{self, wait_for_interface, BinderFeatures, ExceptionCode, Interface, LazyServiceGuard, Status, Strong, IntoBinderResult};
use libc::VMADDR_CID_HOST;
use log::{error, info, warn};
+use rkpd_client::get_rkpd_attestation_key;
use rustutils::system_properties;
use serde::Deserialize;
use std::collections::{HashMap, HashSet};
-use std::fs::{self, create_dir, remove_dir_all, set_permissions, File, Permissions};
+use std::fs::{self, create_dir, remove_dir_all, remove_file, set_permissions, File, Permissions};
use std::io::{Read, Write};
use std::os::unix::fs::PermissionsExt;
use std::os::unix::raw::{pid_t, uid_t};
@@ -51,6 +52,7 @@
use tombstoned_client::{DebuggerdDumpType, TombstonedConnection};
use vsock::{VsockListener, VsockStream};
use nix::unistd::{chown, Uid};
+use x509_parser::{traits::FromDer, certificate::X509Certificate};
/// The unique ID of a VM used (together with a port number) for vsock communication.
pub type Cid = u32;
@@ -159,24 +161,52 @@
Ok(cids)
}
- fn requestAttestation(&self, csr: &[u8]) -> binder::Result<Vec<Certificate>> {
+ fn requestAttestation(
+ &self,
+ csr: &[u8],
+ requester_uid: i32,
+ ) -> binder::Result<Vec<Certificate>> {
check_manage_access()?;
- info!("Received csr. Requestting attestation...");
- if cfg!(remote_attestation) {
- request_attestation(csr)
- .context("Failed to request attestation")
- .with_log()
- .or_service_specific_exception(-1)
- } else {
- Err(Status::new_exception_str(
+ if !cfg!(remote_attestation) {
+ return Err(Status::new_exception_str(
ExceptionCode::UNSUPPORTED_OPERATION,
Some(
"requestAttestation is not supported with the remote_attestation feature \
disabled",
),
))
- .with_log()
+ .with_log();
}
+ info!("Received csr. Requestting attestation...");
+ let attestation_key = get_rkpd_attestation_key(
+ REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME,
+ requester_uid as u32,
+ )
+ .context("Failed to retrieve the remotely provisioned keys")
+ .with_log()
+ .or_service_specific_exception(-1)?;
+ let mut certificate_chain = split_x509_certificate_chain(&attestation_key.encodedCertChain)
+ .context("Failed to split the remotely provisioned certificate chain")
+ .with_log()
+ .or_service_specific_exception(-1)?;
+ if certificate_chain.is_empty() {
+ return Err(Status::new_service_specific_error_str(
+ -1,
+ Some("The certificate chain should contain at least 1 certificate"),
+ ))
+ .with_log();
+ }
+ let certificate = request_attestation(
+ csr.to_vec(),
+ attestation_key.keyBlob,
+ certificate_chain[0].encodedCertificate.clone(),
+ )
+ .context("Failed to request attestation")
+ .with_log()
+ .or_service_specific_exception(-1)?;
+ certificate_chain.insert(0, Certificate { encodedCertificate: certificate });
+
+ Ok(certificate_chain)
}
fn getAssignableDevices(&self) -> binder::Result<Vec<AssignableDevice>> {
@@ -194,18 +224,8 @@
let vfio_service: Strong<dyn IVfioHandler> =
wait_for_interface(<BpVfioHandler as IVfioHandler>::get_descriptor())?;
-
vfio_service.bindDevicesToVfioDriver(devices)?;
- let dtbo_path = Path::new(TEMPORARY_DIRECTORY).join("common").join("dtbo");
- if !dtbo_path.exists() {
- // open a writable file descriptor for vfio_handler
- let dtbo = File::create(&dtbo_path)
- .context("Failed to create VM DTBO file")
- .or_service_specific_exception(-1)?;
- vfio_service.writeVmDtbo(&ParcelFileDescriptor::new(dtbo))?;
- }
-
Ok(get_assignable_devices()?
.device
.into_iter()
@@ -218,6 +238,14 @@
})
.collect::<Vec<_>>())
}
+
+ fn getDtboFile(&self) -> binder::Result<ParcelFileDescriptor> {
+ check_use_custom_virtual_machine()?;
+
+ let state = &mut *self.state.lock().unwrap();
+ let file = state.get_dtbo_file().or_service_specific_exception(-1)?;
+ Ok(ParcelFileDescriptor::new(file))
+ }
}
// KEEP IN SYNC WITH assignable_devices.xsd
@@ -272,6 +300,17 @@
Ok(devices)
}
+fn split_x509_certificate_chain(mut cert_chain: &[u8]) -> Result<Vec<Certificate>> {
+ let mut out = Vec::new();
+ while !cert_chain.is_empty() {
+ let (remaining, _) = X509Certificate::from_der(cert_chain)?;
+ let end = cert_chain.len() - remaining.len();
+ out.push(Certificate { encodedCertificate: cert_chain[..end].to_vec() });
+ cert_chain = remaining;
+ }
+ Ok(out)
+}
+
#[derive(Debug, Default)]
struct GlobalVmInstance {
/// The unique CID assigned to the VM for vsock communication.
@@ -296,6 +335,9 @@
/// VM contexts currently allocated to running VMs. A CID is never recycled as long
/// as there is a strong reference held by a GlobalVmContext.
held_contexts: HashMap<Cid, Weak<GlobalVmInstance>>,
+
+ /// Cached read-only FD of VM DTBO file. Also serves as a lock for creating the file.
+ dtbo_file: Mutex<Option<File>>,
}
impl GlobalState {
@@ -359,26 +401,64 @@
let cid = self.get_next_available_cid()?;
let instance = Arc::new(GlobalVmInstance { cid, requester_uid, requester_debug_pid });
- create_temporary_directory(&instance.get_temp_dir(), requester_uid)?;
+ create_temporary_directory(&instance.get_temp_dir(), Some(requester_uid))?;
self.held_contexts.insert(cid, Arc::downgrade(&instance));
let binder = GlobalVmContext { instance, ..Default::default() };
Ok(BnGlobalVmContext::new_binder(binder, BinderFeatures::default()))
}
+
+ fn get_dtbo_file(&mut self) -> Result<File> {
+ let mut file = self.dtbo_file.lock().unwrap();
+
+ let fd = if let Some(ref_fd) = &*file {
+ ref_fd.try_clone()?
+ } else {
+ let path = get_or_create_common_dir()?.join("vm.dtbo");
+ if path.exists() {
+ // All temporary files are deleted when the service is started.
+ // If the file exists but the FD is not cached, the file is
+ // likely corrupted.
+ remove_file(&path).context("Failed to remove the stale VM DTBO file")?;
+ }
+
+ // Open a write-only file descriptor for vfio_handler.
+ let write_fd = File::create(&path).context("Failed to create VM DTBO file")?;
+
+ let vfio_service: Strong<dyn IVfioHandler> =
+ wait_for_interface(<BpVfioHandler as IVfioHandler>::get_descriptor())?;
+ vfio_service.writeVmDtbo(&ParcelFileDescriptor::new(write_fd))?;
+
+ // Open read-only. This FD will be cached and returned to clients.
+ let read_fd = File::open(&path).context("Failed to open VM DTBO file")?;
+ let read_fd_clone =
+ read_fd.try_clone().context("Failed to clone VM DTBO file descriptor")?;
+ *file = Some(read_fd);
+ read_fd_clone
+ };
+
+ Ok(fd)
+ }
}
-fn create_temporary_directory(path: &PathBuf, requester_uid: uid_t) -> Result<()> {
+fn create_temporary_directory(path: &PathBuf, requester_uid: Option<uid_t>) -> Result<()> {
+ // The directory may exist if a previous attempt to create it failed.
+ // Delete it before trying again.
if path.as_path().exists() {
remove_temporary_dir(path).unwrap_or_else(|e| {
warn!("Could not delete temporary directory {:?}: {}", path, e);
});
}
- // Create a directory that is owned by client's UID but system's GID, and permissions 0700.
+ // Create directory.
+ create_dir(path).with_context(|| format!("Could not create temporary directory {:?}", path))?;
+ // If provided, change ownership to the client's UID, keeping the system's GID and permissions 0700.
// If the chown() fails, this will leave behind an empty directory that will get removed
// at the next attempt, or if virtualizationservice is restarted.
- create_dir(path).with_context(|| format!("Could not create temporary directory {:?}", path))?;
- chown(path, Some(Uid::from_raw(requester_uid)), None)
- .with_context(|| format!("Could not set ownership of temporary directory {:?}", path))?;
+ if let Some(uid) = requester_uid {
+ chown(path, Some(Uid::from_raw(uid)), None).with_context(|| {
+ format!("Could not set ownership of temporary directory {:?}", path)
+ })?;
+ }
Ok(())
}
@@ -392,6 +472,14 @@
Ok(())
}
+fn get_or_create_common_dir() -> Result<PathBuf> {
+ let path = Path::new(TEMPORARY_DIRECTORY).join("common");
+ if !path.exists() {
+ create_temporary_directory(&path, None)?;
+ }
+ Ok(path)
+}
+
/// Implementation of the AIDL `IGlobalVmContext` interface.
#[derive(Debug, Default)]
struct GlobalVmContext {
@@ -496,3 +584,24 @@
fn check_use_custom_virtual_machine() -> binder::Result<()> {
check_permission("android.permission.USE_CUSTOM_VIRTUAL_MACHINE")
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::fs;
+
+ const TEST_RKP_CERT_CHAIN_PATH: &str = "testdata/rkp_cert_chain.der";
+
+ #[test]
+ fn splitting_x509_certificate_chain_succeeds() -> Result<()> {
+ let bytes = fs::read(TEST_RKP_CERT_CHAIN_PATH)?;
+ let cert_chain = split_x509_certificate_chain(&bytes)?;
+
+ assert_eq!(4, cert_chain.len());
+ for cert in cert_chain {
+ let (remaining, _) = X509Certificate::from_der(&cert.encodedCertificate)?;
+ assert!(remaining.is_empty());
+ }
+ Ok(())
+ }
+}
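
split_x509_certificate_chain above walks a concatenated DER buffer, using the number of bytes consumed by each X509Certificate::from_der call to slice out one certificate at a time, leaf first. A minimal sketch of inspecting such a chain with the x509-parser crate ("chain.der" is a placeholder path, not the test data file):

    use x509_parser::{certificate::X509Certificate, traits::FromDer};

    fn main() {
        let data = std::fs::read("chain.der").expect("failed to read certificate chain");
        let mut remaining: &[u8] = &data;
        while !remaining.is_empty() {
            // from_der returns the unparsed tail alongside the parsed certificate.
            let (rest, cert) = X509Certificate::from_der(remaining).expect("invalid certificate");
            println!("subject={} issuer={}", cert.subject(), cert.issuer());
            remaining = rest;
        }
    }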
diff --git a/virtualizationservice/src/main.rs b/virtualizationservice/src/main.rs
index fd668bc..ea073bf 100644
--- a/virtualizationservice/src/main.rs
+++ b/virtualizationservice/src/main.rs
@@ -33,8 +33,8 @@
use std::path::Path;
const LOG_TAG: &str = "VirtualizationService";
-const _REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME: &str =
- "android.system.virtualization.IRemotelyProvisionedComponent/avf";
+pub(crate) const REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME: &str =
+ "android.hardware.security.keymint.IRemotelyProvisionedComponent/avf";
fn get_calling_pid() -> pid_t {
ThreadState::get_calling_pid()
@@ -69,10 +69,17 @@
register_lazy_service(BINDER_SERVICE_IDENTIFIER, service.as_binder()).unwrap();
info!("Registered Binder service {}.", BINDER_SERVICE_IDENTIFIER);
- // The IRemotelyProvisionedComponent service is only supposed to be triggered by rkpd for
- // RKP VM attestation.
- let _remote_provisioning_service = remote_provisioning::new_binder();
- // TODO(b/274881098): Register the RKP service when the implementation is ready.
+ if cfg!(remote_attestation) {
+ // The IRemotelyProvisionedComponent service is only supposed to be triggered by rkpd for
+ // RKP VM attestation.
+ let remote_provisioning_service = remote_provisioning::new_binder();
+ register_lazy_service(
+ REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME,
+ remote_provisioning_service.as_binder(),
+ )
+ .unwrap();
+ info!("Registered Binder service {}.", REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME);
+ }
ProcessState::join_thread_pool();
}
diff --git a/virtualizationservice/src/remote_provisioning.rs b/virtualizationservice/src/remote_provisioning.rs
index a9a07a5..40f54db 100644
--- a/virtualizationservice/src/remote_provisioning.rs
+++ b/virtualizationservice/src/remote_provisioning.rs
@@ -27,7 +27,11 @@
};
use anyhow::Context;
use avflog::LogResult;
-use binder::{BinderFeatures, Interface, IntoBinderResult, Result as BinderResult, Status, Strong};
+use binder::{
+ BinderFeatures, ExceptionCode, Interface, IntoBinderResult, Result as BinderResult, Status,
+ Strong,
+};
+use hypervisor_props::is_protected_vm_supported;
use service_vm_comm::{RequestProcessingError, Response};
/// Constructs a binder object that implements `IRemotelyProvisionedComponent`.
@@ -45,11 +49,13 @@
#[allow(non_snake_case)]
impl IRemotelyProvisionedComponent for AvfRemotelyProvisionedComponent {
fn getHardwareInfo(&self) -> BinderResult<RpcHardwareInfo> {
+ check_protected_vm_is_supported()?;
+
Ok(RpcHardwareInfo {
versionNumber: 3,
rpcAuthorName: String::from("Android Virtualization Framework"),
supportedEekCurve: CURVE_NONE,
- uniqueId: Some(String::from("Android Virtualization Framework 1")),
+ uniqueId: Some(String::from("AVF Remote Provisioning 1")),
supportedNumKeysInCsr: MIN_SUPPORTED_NUM_KEYS_IN_CSR,
})
}
@@ -59,6 +65,8 @@
testMode: bool,
macedPublicKey: &mut MacedPublicKey,
) -> BinderResult<Vec<u8>> {
+ check_protected_vm_is_supported()?;
+
if testMode {
return Err(Status::new_service_specific_error_str(
STATUS_REMOVED,
@@ -101,6 +109,8 @@
keysToSign: &[MacedPublicKey],
challenge: &[u8],
) -> BinderResult<Vec<u8>> {
+ check_protected_vm_is_supported()?;
+
const MAX_CHALLENGE_SIZE: usize = 64;
if challenge.len() > MAX_CHALLENGE_SIZE {
let message = format!(
@@ -123,6 +133,18 @@
}
}
+fn check_protected_vm_is_supported() -> BinderResult<()> {
+ if is_protected_vm_supported().unwrap_or(false) {
+ Ok(())
+ } else {
+ Err(Status::new_exception_str(
+ ExceptionCode::UNSUPPORTED_OPERATION,
+ Some("Protected VM support is missing for this operation"),
+ ))
+ .with_log()
+ }
+}
+
fn to_service_specific_error(response: Response) -> Status {
match response {
Response::Err(e) => match e {
diff --git a/virtualizationservice/src/rkpvm.rs b/virtualizationservice/src/rkpvm.rs
index 8f1de6b..79e09b0 100644
--- a/virtualizationservice/src/rkpvm.rs
+++ b/virtualizationservice/src/rkpvm.rs
@@ -17,23 +17,24 @@
//! serves as a trusted platform to attest a client VM.
use android_hardware_security_rkp::aidl::android::hardware::security::keymint::MacedPublicKey::MacedPublicKey;
-use android_system_virtualizationcommon::aidl::android::system::virtualizationcommon::Certificate::Certificate;
use anyhow::{bail, Context, Result};
-use service_vm_comm::{GenerateCertificateRequestParams, Request, Response};
+use service_vm_comm::{
+ ClientVmAttestationParams, GenerateCertificateRequestParams, Request, Response,
+};
use service_vm_manager::ServiceVm;
-pub(crate) fn request_attestation(csr: &[u8]) -> Result<Vec<Certificate>> {
+pub(crate) fn request_attestation(
+ csr: Vec<u8>,
+ remotely_provisioned_key_blob: Vec<u8>,
+ remotely_provisioned_cert: Vec<u8>,
+) -> Result<Vec<u8>> {
let mut vm = ServiceVm::start()?;
- // TODO(b/271275206): Send the correct request type with client VM's
- // information to be attested.
- let request = Request::Reverse(csr.to_vec());
+ let params =
+ ClientVmAttestationParams { csr, remotely_provisioned_key_blob, remotely_provisioned_cert };
+ let request = Request::RequestClientVmAttestation(params);
match vm.process_request(request).context("Failed to process request")? {
- // TODO(b/271275206): Adjust the response type.
- Response::Reverse(cert) => {
- let cert = Certificate { encodedCertificate: cert };
- Ok(vec![cert])
- }
+ Response::RequestClientVmAttestation(cert) => Ok(cert),
_ => bail!("Incorrect response type"),
}
}
diff --git a/virtualizationservice/testdata/rkp_cert_chain.der b/virtualizationservice/testdata/rkp_cert_chain.der
new file mode 100644
index 0000000..f32065d
--- /dev/null
+++ b/virtualizationservice/testdata/rkp_cert_chain.der
Binary files differ
diff --git a/virtualizationservice/vfio_handler/src/aidl.rs b/virtualizationservice/vfio_handler/src/aidl.rs
index 2968ff9..63f19c6 100644
--- a/virtualizationservice/vfio_handler/src/aidl.rs
+++ b/virtualizationservice/vfio_handler/src/aidl.rs
@@ -282,11 +282,13 @@
.or_binder_exception(ExceptionCode::ILLEGAL_STATE)?;
let buffer = read_values(dtbo_img_file, dt_size, entry.dt_offset.get().into())?;
- let mut dtbo_fd = dtbo_fd
- .as_ref()
- .try_clone()
- .context("Failed to clone File from ParcelFileDescriptor")
- .or_binder_exception(ExceptionCode::BAD_PARCELABLE)?;
+ let mut dtbo_fd = File::from(
+ dtbo_fd
+ .as_ref()
+ .try_clone()
+ .context("Failed to create File from ParcelFileDescriptor")
+ .or_binder_exception(ExceptionCode::BAD_PARCELABLE)?,
+ );
dtbo_fd
.write_all(&buffer)
diff --git a/vm/src/main.rs b/vm/src/main.rs
index 0af9791..9a92f13 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -27,6 +27,7 @@
use clap::{Args, Parser};
use create_idsig::command_create_idsig;
use create_partition::command_create_partition;
+use glob::glob;
use run::{command_run, command_run_app, command_run_microdroid};
use std::num::NonZeroU16;
use std::path::{Path, PathBuf};
@@ -97,33 +98,24 @@
#[arg(long)]
storage_size: Option<u64>,
- /// Path to custom kernel image to use when booting Microdroid.
- #[cfg(vendor_modules)]
- #[arg(long)]
- kernel: Option<PathBuf>,
-
/// Path to disk image containing vendor-specific modules.
#[cfg(vendor_modules)]
#[arg(long)]
vendor: Option<PathBuf>,
/// SysFS nodes of devices to assign to VM
+ #[cfg(device_assignment)]
#[arg(long)]
devices: Vec<PathBuf>,
+
+ /// Version of GKI to use. If set, the GKI kernel is used instead of the Microdroid kernel.
+ #[cfg(vendor_modules)]
+ #[arg(long)]
+ gki: Option<String>,
}
impl MicrodroidConfig {
#[cfg(vendor_modules)]
- fn kernel(&self) -> &Option<PathBuf> {
- &self.kernel
- }
-
- #[cfg(not(vendor_modules))]
- fn kernel(&self) -> Option<PathBuf> {
- None
- }
-
- #[cfg(vendor_modules)]
fn vendor(&self) -> &Option<PathBuf> {
&self.vendor
}
@@ -132,6 +124,26 @@
fn vendor(&self) -> Option<PathBuf> {
None
}
+
+ #[cfg(vendor_modules)]
+ fn gki(&self) -> Option<&str> {
+ self.gki.as_deref()
+ }
+
+ #[cfg(not(vendor_modules))]
+ fn gki(&self) -> Option<&str> {
+ None
+ }
+
+ #[cfg(device_assignment)]
+ fn devices(&self) -> &Vec<PathBuf> {
+ &self.devices
+ }
+
+ #[cfg(not(device_assignment))]
+ fn devices(&self) -> Vec<PathBuf> {
+ Vec::new()
+ }
}
#[derive(Args)]
@@ -304,6 +316,12 @@
Ok(())
}
+fn extract_gki_version(gki_config: &Path) -> Option<&str> {
+ let name = gki_config.file_name()?;
+ let name_str = name.to_str()?;
+ name_str.strip_prefix("microdroid_gki-")?.strip_suffix(".json")
+}
+
/// Print information about supported VM types.
fn command_info() -> Result<(), Error> {
let non_protected_vm_supported = hypervisor_props::is_vm_supported()?;
@@ -343,6 +361,12 @@
let devices = devices.into_iter().map(|x| x.node).collect::<Vec<_>>();
println!("Assignable devices: {}", serde_json::to_string(&devices)?);
+ let gki_configs =
+ glob("/apex/com.android.virt/etc/microdroid_gki-*.json")?.collect::<Result<Vec<_>, _>>()?;
+ let gki_versions =
+ gki_configs.iter().filter_map(|x| extract_gki_version(x)).collect::<Vec<_>>();
+ println!("Available gki versions: {}", serde_json::to_string(&gki_versions)?);
+
Ok(())
}
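
extract_gki_version above recovers the GKI version by stripping the microdroid_gki- prefix and .json suffix from a config file name, so that `vm info` can list the available versions. A couple of illustrative cases (the paths are examples of the expected naming, not taken from a device):

    use std::path::Path;

    // Same logic as extract_gki_version in vm/src/main.rs above.
    fn extract_gki_version(gki_config: &Path) -> Option<&str> {
        let name_str = gki_config.file_name()?.to_str()?;
        name_str.strip_prefix("microdroid_gki-")?.strip_suffix(".json")
    }

    fn main() {
        let path = Path::new("/apex/com.android.virt/etc/microdroid_gki-android14-6.1.json");
        assert_eq!(extract_gki_version(path), Some("android14-6.1"));
        // Non-GKI configs yield None and are filtered out of the listing.
        assert_eq!(extract_gki_version(Path::new("microdroid.json")), None);
    }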
diff --git a/vm/src/run.rs b/vm/src/run.rs
index 1ba9dec..8721e71 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -98,9 +98,6 @@
None
};
- let kernel =
- config.microdroid.kernel().as_ref().map(|p| open_parcel_file(p, false)).transpose()?;
-
let vendor =
config.microdroid.vendor().as_ref().map(|p| open_parcel_file(p, false)).transpose()?;
@@ -114,8 +111,14 @@
}
Payload::ConfigPath(config_path)
} else if let Some(payload_binary_name) = config.payload_binary_name {
+ let os_name = if let Some(ver) = config.microdroid.gki() {
+ format!("microdroid_gki-{ver}")
+ } else {
+ "microdroid".to_owned()
+ };
Payload::PayloadConfig(VirtualMachinePayloadConfig {
payloadBinaryName: payload_binary_name,
+ osName: os_name,
})
} else {
bail!("Either --config-path or --payload-binary-name must be defined")
@@ -124,13 +127,13 @@
let payload_config_str = format!("{:?}!{:?}", config.apk, payload);
let custom_config = CustomConfig {
- customKernelImage: kernel,
+ customKernelImage: None,
gdbPort: config.debug.gdb.map(u16::from).unwrap_or(0) as i32, // 0 means no gdb
taskProfiles: config.common.task_profiles,
vendorImage: vendor,
devices: config
.microdroid
- .devices
+ .devices()
.iter()
.map(|x| {
x.to_str().map(String::from).ok_or(anyhow!("Failed to convert {x:?} to String"))
diff --git a/vm_payload/Android.bp b/vm_payload/Android.bp
index 7f2b9df..286612c 100644
--- a/vm_payload/Android.bp
+++ b/vm_payload/Android.bp
@@ -8,7 +8,7 @@
crate_name: "vm_payload",
defaults: ["avf_build_flags_rust"],
visibility: ["//visibility:private"],
- srcs: ["src/*.rs"],
+ srcs: ["src/lib.rs"],
include_dirs: ["include"],
prefer_rlib: true,
rustlibs: [
diff --git a/vm_payload/include/vm_payload.h b/vm_payload/include/vm_payload.h
index 2dfa2cb..3483e1d 100644
--- a/vm_payload/include/vm_payload.h
+++ b/vm_payload/include/vm_payload.h
@@ -19,7 +19,6 @@
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
-#include <stdnoreturn.h>
#include <sys/cdefs.h>
#include "vm_main.h"
@@ -43,11 +42,14 @@
/** The remote attestation completes successfully. */
ATTESTATION_OK = 0,
- /** The remote attestation has failed due to an unspecified cause. */
- ATTESTATION_UNKNOWN_ERROR = -10000,
-
/** The challenge size is not between 0 and 64. */
ATTESTATION_ERROR_INVALID_CHALLENGE = -10001,
+
+ /** Failed to attest the VM. Please retry at a later time. */
+ ATTESTATION_ERROR_ATTESTATION_FAILED = -10002,
+
+ /** Remote attestation is not supported in the current environment. */
+ ATTESTATION_ERROR_UNSUPPORTED = -10003,
} attestation_status_t;
/**
@@ -78,9 +80,9 @@
* callback will be called at most once.
* \param param parameter to be passed to the `on_ready` callback.
*/
-noreturn void AVmPayload_runVsockRpcServer(AIBinder* _Nonnull service, uint32_t port,
- void (*_Nullable on_ready)(void* _Nullable param),
- void* _Nullable param);
+__attribute__((noreturn)) void AVmPayload_runVsockRpcServer(
+ AIBinder* _Nonnull service, uint32_t port,
+ void (*_Nullable on_ready)(void* _Nullable param), void* _Nullable param);
/**
* Returns all or part of a 32-byte secret that is bound to this unique VM
@@ -222,8 +224,8 @@
* Gets the number of certificates in the certificate chain.
*
* The certificate chain consists of a sequence of DER-encoded X.509 certificates that form
- * the attestation key's certificate chain. It starts with a root certificate and ends with a
- * leaf certificate covering the attested public key.
+ * the attestation key's certificate chain. It starts with a leaf certificate covering the attested
+ * public key and ends with a root certificate.
*
* \param result A pointer to the attestation result obtained from `AVmPayload_requestAttestation`
* when the attestation succeeds.
@@ -238,8 +240,8 @@
* attestation result.
*
* The certificate chain consists of a sequence of DER-encoded X.509 certificates that form
- * the attestation key's certificate chain. It starts with a root certificate and ends with a
- * leaf certificate covering the attested public key.
+ * the attestation key's certificate chain. It starts with a leaf certificate covering the attested
+ * public key and ends with a root certificate.
*
* \param result A pointer to the attestation result obtained from `AVmPayload_requestAttestation`
* when the attestation succeeds.
diff --git a/vm_payload/src/api.rs b/vm_payload/src/api.rs
index 64f8d6a..c76f2d3 100644
--- a/vm_payload/src/api.rs
+++ b/vm_payload/src/api.rs
@@ -21,7 +21,7 @@
use anyhow::{bail, ensure, Context, Result};
use binder::{
unstable_api::{new_spibinder, AIBinder},
- Strong,
+ Strong, ExceptionCode,
};
use lazy_static::lazy_static;
use log::{error, info, Level};
@@ -296,15 +296,24 @@
// `challenge_size` bytes and `challenge_size` is not zero.
unsafe { std::slice::from_raw_parts(challenge, challenge_size) }
};
- let attestation_res = unwrap_or_abort(try_request_attestation(challenge));
- *res = Box::into_raw(Box::new(attestation_res));
- attestation_status_t::ATTESTATION_OK
+ let service = unwrap_or_abort(get_vm_payload_service());
+ match service.requestAttestation(challenge) {
+ Ok(attestation_res) => {
+ *res = Box::into_raw(Box::new(attestation_res));
+ attestation_status_t::ATTESTATION_OK
+ }
+ Err(e) => {
+ error!("Remote attestation failed: {e:?}");
+ binder_status_to_attestation_status(e)
+ }
+ }
}
-fn try_request_attestation(public_key: &[u8]) -> Result<AttestationResult> {
- get_vm_payload_service()?
- .requestAttestation(public_key)
- .context("Failed to request attestation")
+fn binder_status_to_attestation_status(status: binder::Status) -> attestation_status_t {
+ match status.exception_code() {
+ ExceptionCode::UNSUPPORTED_OPERATION => attestation_status_t::ATTESTATION_ERROR_UNSUPPORTED,
+ _ => attestation_status_t::ATTESTATION_ERROR_ATTESTATION_FAILED,
+ }
}
/// Converts the return value from `AVmPayload_requestAttestation` to a text string
@@ -320,8 +329,12 @@
attestation_status_t::ATTESTATION_ERROR_INVALID_CHALLENGE => {
CStr::from_bytes_with_nul(b"The challenge size is not between 0 and 64.\0").unwrap()
}
- _ => CStr::from_bytes_with_nul(
- b"The remote attestation has failed due to an unspecified cause.\0",
+ attestation_status_t::ATTESTATION_ERROR_ATTESTATION_FAILED => {
+ CStr::from_bytes_with_nul(b"Failed to attest the VM. Please retry at a later time.\0")
+ .unwrap()
+ }
+ attestation_status_t::ATTESTATION_ERROR_UNSUPPORTED => CStr::from_bytes_with_nul(
+ b"Remote attestation is not supported in the current environment.\0",
)
.unwrap(),
};
diff --git a/vm_payload/src/lib.rs b/vm_payload/src/lib.rs
index e305769..9e10895 100644
--- a/vm_payload/src/lib.rs
+++ b/vm_payload/src/lib.rs
@@ -18,7 +18,7 @@
pub use api::{
AVmAttestationResult_free, AVmAttestationResult_getCertificateAt,
- AVmAttestationResult_getCertificatesCount, AVmAttestationResult_getPrivateKey,
+ AVmAttestationResult_getCertificateCount, AVmAttestationResult_getPrivateKey,
AVmAttestationResult_resultToString, AVmAttestationResult_sign,
AVmPayload_getDiceAttestationCdi, AVmPayload_getDiceAttestationChain,
AVmPayload_getVmInstanceSecret, AVmPayload_notifyPayloadReady, AVmPayload_requestAttestation,
diff --git a/vmbase/Android.bp b/vmbase/Android.bp
index b2b1549..e682773 100644
--- a/vmbase/Android.bp
+++ b/vmbase/Android.bp
@@ -76,6 +76,7 @@
rustlibs: [
"libaarch64_paging",
"libbuddy_system_allocator",
+ "libcstr",
"libfdtpci",
"libhyp",
"liblibfdt",
diff --git a/vmbase/example/Android.bp b/vmbase/example/Android.bp
index ae1a593..fe9de44 100644
--- a/vmbase/example/Android.bp
+++ b/vmbase/example/Android.bp
@@ -9,6 +9,7 @@
srcs: ["src/main.rs"],
rustlibs: [
"libaarch64_paging",
+ "libcstr",
"libdiced_open_dice_nostd",
"libfdtpci",
"liblibfdt",
diff --git a/vmbase/example/src/main.rs b/vmbase/example/src/main.rs
index ebd981c..6f513ee 100644
--- a/vmbase/example/src/main.rs
+++ b/vmbase/example/src/main.rs
@@ -28,11 +28,12 @@
use aarch64_paging::paging::MemoryRegion;
use aarch64_paging::MapError;
use alloc::{vec, vec::Vec};
+use cstr::cstr;
use fdtpci::PciInfo;
use libfdt::Fdt;
use log::{debug, error, info, trace, warn, LevelFilter};
use vmbase::{
- bionic, configure_heap, cstr,
+ bionic, configure_heap,
layout::{dtb_range, rodata_range, scratch_range, text_range},
linker, logger, main,
memory::{PageTable, SIZE_64KB},
diff --git a/vmbase/src/bionic.rs b/vmbase/src/bionic.rs
index f8db1fe..a049616 100644
--- a/vmbase/src/bionic.rs
+++ b/vmbase/src/bionic.rs
@@ -22,11 +22,12 @@
use core::str;
use crate::console;
-use crate::cstr;
use crate::eprintln;
use crate::rand::fill_with_entropy;
use crate::read_sysreg;
+use cstr::cstr;
+
const EOF: c_int = -1;
const EIO: c_int = 5;
diff --git a/vmbase/src/fdt.rs b/vmbase/src/fdt.rs
index 537ca03..4101f7e 100644
--- a/vmbase/src/fdt.rs
+++ b/vmbase/src/fdt.rs
@@ -14,8 +14,8 @@
//! High-level FDT functions.
-use crate::cstr;
use core::ops::Range;
+use cstr::cstr;
use libfdt::{self, Fdt, FdtError};
/// Represents information about a SWIOTLB buffer.
diff --git a/vmbase/src/heap.rs b/vmbase/src/heap.rs
index ec03d38..99c06aa 100644
--- a/vmbase/src/heap.rs
+++ b/vmbase/src/heap.rs
@@ -86,6 +86,21 @@
}
#[no_mangle]
+unsafe extern "C" fn __memset_chk(
+ dest: *mut c_void,
+ val: u8,
+ len: usize,
+ destlen: usize,
+) -> *mut c_void {
+ assert!(len <= destlen, "memset buffer overflow detected");
+ // SAFETY: `dest` is valid for writes of `len` bytes.
+ unsafe {
+ ptr::write_bytes(dest, val, len);
+ }
+ dest
+}
+
+#[no_mangle]
/// SAFETY: ptr must be null or point to a currently-allocated block returned by allocate (either
/// directly or via malloc or calloc). Note that this function is called directly from C, so we have
/// to trust that the C code is doing the right thing; there are checks below which will catch some
diff --git a/vmbase/src/memory/dbm.rs b/vmbase/src/memory/dbm.rs
index 401022e..108cd5d 100644
--- a/vmbase/src/memory/dbm.rs
+++ b/vmbase/src/memory/dbm.rs
@@ -14,7 +14,7 @@
//! Hardware management of the access flag and dirty state.
-use super::page_table::{is_leaf_pte, PageTable};
+use super::page_table::PageTable;
use super::util::flush_region;
use crate::{dsb, isb, read_sysreg, tlbi, write_sysreg};
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion};
@@ -52,14 +52,10 @@
/// Flushes a memory range the descriptor refers to, if the descriptor is in writable-dirty state.
pub(super) fn flush_dirty_range(
va_range: &MemoryRegion,
- desc: &mut Descriptor,
- level: usize,
+ desc: &Descriptor,
+ _level: usize,
) -> Result<(), ()> {
- // Only flush ranges corresponding to dirty leaf PTEs.
let flags = desc.flags().ok_or(())?;
- if !is_leaf_pte(&flags, level) {
- return Ok(());
- }
if !flags.contains(Attributes::READ_ONLY) {
flush_region(va_range.start().0, va_range.len());
}
@@ -71,12 +67,9 @@
pub(super) fn mark_dirty_block(
va_range: &MemoryRegion,
desc: &mut Descriptor,
- level: usize,
+ _level: usize,
) -> Result<(), ()> {
let flags = desc.flags().ok_or(())?;
- if !is_leaf_pte(&flags, level) {
- return Ok(());
- }
if flags.contains(Attributes::DBM) {
assert!(flags.contains(Attributes::READ_ONLY), "unexpected PTE writable state");
desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index e067e96..dc346e7 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -16,7 +16,7 @@
use crate::read_sysreg;
use aarch64_paging::idmap::IdMap;
-use aarch64_paging::paging::{Attributes, MemoryRegion, PteUpdater};
+use aarch64_paging::paging::{Attributes, Constraints, Descriptor, MemoryRegion};
use aarch64_paging::MapError;
use core::result;
@@ -83,7 +83,9 @@
/// code being currently executed. Otherwise, the Rust execution model (on which the borrow
/// checker relies) would be violated.
pub unsafe fn activate(&mut self) {
- self.idmap.activate()
+ // SAFETY: the caller of this unsafe function asserts that switching to a different
+ // translation is safe
+ unsafe { self.idmap.activate() }
}
/// Maps the given range of virtual addresses to the physical addresses as lazily mapped
@@ -107,7 +109,15 @@
/// Maps the given range of virtual addresses to the physical addresses as non-executable,
/// read-only and writable-clean normal memory.
pub fn map_data_dbm(&mut self, range: &MemoryRegion) -> Result<()> {
- self.idmap.map_range(range, DATA_DBM)
+ // Map the region down to pages to minimize the size of the regions that will be marked
+ // dirty once a store hits them, but also to ensure that we can clear the read-only
+ // attribute while the mapping is live without causing break-before-make (BBM) violations.
+ // The latter implies that we must avoid the use of the contiguous hint as well.
+ self.idmap.map_range_with_constraints(
+ range,
+ DATA_DBM,
+ Constraints::NO_BLOCK_MAPPINGS | Constraints::NO_CONTIGUOUS_HINT,
+ )
}
/// Maps the given range of virtual addresses to the physical addresses as read-only
@@ -124,18 +134,20 @@
/// Applies the provided updater function to a number of PTEs corresponding to a given memory
/// range.
- pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<()> {
+ pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<()>
+ where
+ F: Fn(&MemoryRegion, &mut Descriptor, usize) -> result::Result<(), ()>,
+ {
self.idmap.modify_range(range, f)
}
-}
-/// Checks whether a PTE at given level is a page or block descriptor.
-#[inline]
-pub(super) fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
- const LEAF_PTE_LEVEL: usize = 3;
- if flags.contains(Attributes::TABLE_OR_PAGE) {
- level == LEAF_PTE_LEVEL
- } else {
- level < LEAF_PTE_LEVEL
+ /// Applies the provided callback function to a number of PTEs corresponding to a given memory
+ /// range.
+ pub fn walk_range<F>(&self, range: &MemoryRegion, f: &F) -> Result<()>
+ where
+ F: Fn(&MemoryRegion, &Descriptor, usize) -> result::Result<(), ()>,
+ {
+ let mut callback = |mr: &MemoryRegion, d: &Descriptor, l: usize| f(mr, d, l);
+ self.idmap.walk_range(range, &mut callback)
}
}
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 6c8a844..dd433d4 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -16,12 +16,14 @@
use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
use super::error::MemoryTrackerError;
-use super::page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
+use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::{page_4kb_of, virt_to_phys};
use crate::dsb;
use crate::exceptions::HandleExceptionError;
use crate::util::RangeExt as _;
-use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
+use aarch64_paging::paging::{
+ Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress, BITS_PER_LEVEL, PAGE_SIZE,
+};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::boxed::Box;
use alloc::vec::Vec;
@@ -253,7 +255,7 @@
if get_mmio_guard().is_some() {
for range in &self.mmio_regions {
self.page_table
- .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
+ .walk_range(&get_va_range(range), &mmio_guard_unmap_page)
.map_err(|_| MemoryTrackerError::FailedToUnmap)?;
}
}
@@ -319,14 +321,24 @@
/// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
let page_start = VirtualAddress(page_4kb_of(addr.0));
+ assert_eq!(page_start.0 % MMIO_GUARD_GRANULE_SIZE, 0);
let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
let mmio_guard = get_mmio_guard().unwrap();
+ // This must be safe and free from break-before-make (BBM) violations, given that the
+ // initial lazy mapping has the valid bit cleared, and each valid descriptor newly
+ // created inside the mapping has the same size and alignment.
self.page_table
- .modify_range(&page_range, &verify_lazy_mapped_block)
+ .modify_range(&page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
+ let flags = desc.flags().expect("Unsupported PTE flags set");
+ if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
+ desc.modify_flags(Attributes::VALID, Attributes::empty());
+ Ok(())
+ } else {
+ Err(())
+ }
+ })
.map_err(|_| MemoryTrackerError::InvalidPte)?;
- mmio_guard.map(page_start.0)?;
- // Maps a single device page, breaking up block mappings if necessary.
- self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
+ Ok(mmio_guard.map(page_start.0)?)
}
/// Flush all memory regions marked as writable-dirty.
@@ -340,7 +352,7 @@
// Now flush writable-dirty pages in those regions.
for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
self.page_table
- .modify_range(&get_va_range(range), &flush_dirty_range)
+ .walk_range(&get_va_range(range), &flush_dirty_range)
.map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
}
Ok(())
@@ -467,33 +479,13 @@
}
}
-/// Checks whether block flags indicate it should be MMIO guard mapped.
-fn verify_lazy_mapped_block(
- _range: &VaRange,
- desc: &mut Descriptor,
- level: usize,
-) -> result::Result<(), ()> {
- let flags = desc.flags().expect("Unsupported PTE flags set");
- if !is_leaf_pte(&flags, level) {
- return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
- }
- if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
- Ok(())
- } else {
- Err(())
- }
-}
-
/// MMIO guard unmaps page
fn mmio_guard_unmap_page(
va_range: &VaRange,
- desc: &mut Descriptor,
+ desc: &Descriptor,
level: usize,
) -> result::Result<(), ()> {
let flags = desc.flags().expect("Unsupported PTE flags set");
- if !is_leaf_pte(&flags, level) {
- return Ok(());
- }
// This function will be called on an address range that corresponds to a device. Only if a
// page has been accessed (written to or read from), will it contain the VALID flag and be MMIO
// guard mapped. Therefore, we can skip unmapping invalid pages, they were never MMIO guard
@@ -503,9 +495,11 @@
flags.contains(MMIO_LAZY_MAP_FLAG),
"Attempting MMIO guard unmap for non-device pages"
);
+ const MMIO_GUARD_GRANULE_SHIFT: u32 = MMIO_GUARD_GRANULE_SIZE.ilog2() - PAGE_SIZE.ilog2();
+ const MMIO_GUARD_GRANULE_LEVEL: usize =
+ 3 - (MMIO_GUARD_GRANULE_SHIFT as usize / BITS_PER_LEVEL);
assert_eq!(
- va_range.len(),
- MMIO_GUARD_GRANULE_SIZE,
+ level, MMIO_GUARD_GRANULE_LEVEL,
"Failed to break down block mapping before MMIO guard mapping"
);
let page_base = va_range.start().0;
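
The assertion above now checks the translation-table level implied by the MMIO guard granule rather than comparing the raw range length. A worked sketch of that arithmetic, assuming 4 KiB pages and 9 bits per level (the values the patch's constants imply for the common configuration):

    fn main() {
        const PAGE_SIZE: usize = 4096; // assumed 4 KiB translation granule
        const BITS_PER_LEVEL: u32 = 9; // 512 entries per table with 4 KiB pages

        let level_for = |granule: usize| -> u32 {
            let shift = granule.ilog2() - PAGE_SIZE.ilog2();
            3 - shift / BITS_PER_LEVEL
        };

        // A 4 KiB MMIO guard granule maps at level 3 (page descriptors).
        assert_eq!(level_for(4096), 3);
        // A hypothetical 2 MiB granule would map at level 2 (block descriptors).
        assert_eq!(level_for(2 * 1024 * 1024), 2);
    }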
diff --git a/vmbase/src/util.rs b/vmbase/src/util.rs
index 25586bc..8c230a1 100644
--- a/vmbase/src/util.rs
+++ b/vmbase/src/util.rs
@@ -16,19 +16,6 @@
use core::ops::Range;
-/// Create &CStr out of &str literal
-#[macro_export]
-macro_rules! cstr {
- ($str:literal) => {{
- const S: &str = concat!($str, "\0");
- const C: &::core::ffi::CStr = match ::core::ffi::CStr::from_bytes_with_nul(S.as_bytes()) {
- Ok(v) => v,
- Err(_) => panic!("string contains interior NUL"),
- };
- C
- }};
-}
-
/// Flatten [[T; N]] into &[T]
/// TODO: use slice::flatten when it graduates from experimental
pub fn flatten<T, const N: usize>(original: &[[T; N]]) -> &[T] {
diff --git a/vmclient/src/lib.rs b/vmclient/src/lib.rs
index 9f1d7d1..a2a88d8 100644
--- a/vmclient/src/lib.rs
+++ b/vmclient/src/lib.rs
@@ -74,11 +74,7 @@
// Create new POSIX socketpair, suitable for use with RpcBinder UDS bootstrap
// transport. Make it O_CLOEXEC to align with how Rust creates file
// descriptors (expected by SharedChild).
- let (raw1, raw2) =
- socketpair(AddressFamily::Unix, SockType::Stream, None, SockFlag::SOCK_CLOEXEC)?;
-
- // SAFETY: Taking ownership of brand new FDs.
- unsafe { Ok((OwnedFd::from_raw_fd(raw1), OwnedFd::from_raw_fd(raw2))) }
+ Ok(socketpair(AddressFamily::Unix, SockType::Stream, None, SockFlag::SOCK_CLOEXEC)?)
}
/// A running instance of virtmgr which is hosting a VirtualizationService