Merge "defer-rollback-protection: property in guest DT" into main
diff --git a/README.md b/README.md
index 1b092f6..827e55c 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@
AVF components:
* [pVM firmware](pvmfw/README.md)
+* [Android Bootloader (ABL)](docs/abl.md)
* [Microdroid](microdroid/README.md)
* [Microdroid kernel](microdroid/kernel/README.md)
* [Microdroid payload](microdroid/payload/README.md)
diff --git a/TEST_MAPPING b/TEST_MAPPING
index f146b4e..ec9042c 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -12,6 +12,9 @@
"name": "MicrodroidTestApp"
},
{
+ "name": "MicrodroidTestAppNoPerm"
+ },
+ {
"name": "VmAttestationTestApp"
},
{
@@ -54,6 +57,11 @@
},
{
"name": "AVFHostTestCases"
+ },
+ {
+ // TODO(b/325610326): Add this target to presubmit once there is enough
+ // SLO data for it.
+ "name": "AvfRkpdAppIntegrationTests"
}
],
"postsubmit": [
diff --git a/apex/Android.bp b/apex/Android.bp
index cc59b16..3b5141e 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -81,7 +81,6 @@
// TODO(b/295593640) Unfortunately these are added to the apex even though they are unused.
// Once the build system is fixed, remove this.
unwanted_transitive_deps: [
- "libdrm",
"libsso",
"libutils",
],
@@ -139,6 +138,7 @@
"microdroid_initrd_normal",
"microdroid.json",
"microdroid_kernel",
+ "com.android.virt.init.rc",
],
host_required: [
"vm_shell",
@@ -172,13 +172,9 @@
],
},
release_avf_enable_remote_attestation: {
- prebuilts: ["com.android.virt.init_attestation_enabled.rc"],
vintf_fragments: [
"virtualizationservice.xml",
],
- conditions_default: {
- prebuilts: ["com.android.virt.init.rc"],
- },
},
},
}
@@ -200,16 +196,35 @@
certificate: "com.android.virt",
}
-prebuilt_etc {
- name: "com.android.virt.init.rc",
- src: "virtualizationservice.rc",
- filename: "virtualizationservice.rc",
- installable: false,
+soong_config_module_type {
+ name: "avf_flag_aware_genrule",
+ module_type: "genrule",
+ config_namespace: "ANDROID",
+ bool_variables: [
+ "release_avf_enable_llpvm_changes",
+ "release_avf_enable_remote_attestation",
+ ],
+ properties: ["srcs"],
+}
+
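+// Builds the final virtualizationservice.rc by concatenating the base .rc file with the
+// fragments enabled by the release build flags declared above.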
+avf_flag_aware_genrule {
+ name: "virtualizationservice_rc_combined",
+ srcs: ["virtualizationservice.rc.base"],
+ soong_config_variables: {
+ release_avf_enable_llpvm_changes: {
+ srcs: ["virtualizationservice.rc.llpvm"],
+ },
+ release_avf_enable_remote_attestation: {
+ srcs: ["virtualizationservice.rc.ra"],
+ },
+ },
+ out: ["virtualizationservice.rc"],
+ cmd: "cat $(in) > $(out)",
}
prebuilt_etc {
- name: "com.android.virt.init_attestation_enabled.rc",
- src: "virtualizationservice_attestation_enabled.rc",
+ name: "com.android.virt.init.rc",
+ src: ":virtualizationservice_rc_combined",
filename: "virtualizationservice.rc",
installable: false,
}
@@ -239,6 +254,7 @@
"initrd_bootconfig",
"lpmake",
"lpunpack",
+ "lz4",
"simg2img",
],
}
@@ -259,6 +275,7 @@
"initrd_bootconfig",
"lpmake",
"lpunpack",
+ "lz4",
"sign_virt_apex",
"simg2img",
],
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
index 0b6137b..7c59b54 100644
--- a/apex/sign_virt_apex.py
+++ b/apex/sign_virt_apex.py
@@ -153,12 +153,18 @@
'--key', key, '--output', output])
+def is_lz4(args, path):
+ # error 44: Unrecognized header
+ result = RunCommand(args, ['lz4', '-t', path], expected_return_values={0, 44})
+ return result[1] == 0
+
+
def AvbInfo(args, image_path):
"""Parses avbtool --info image output
Args:
args: program arguments.
- image_path: The path to the image.
+ image_path: The path to the image, either raw or lz4-compressed.
descriptor_name: Descriptor name of interest.
Returns:
@@ -169,6 +175,11 @@
if not os.path.exists(image_path):
raise ValueError(f'Failed to find image: {image_path}')
+ if is_lz4(args, image_path):
+ with tempfile.NamedTemporaryFile() as decompressed_image:
+ RunCommand(args, ['lz4', '-d', '-f', image_path, decompressed_image.name])
+ return AvbInfo(args, decompressed_image.name)
+
output, ret_code = RunCommand(
args, ['avbtool', 'info_image', '--image', image_path], expected_return_values={0, 1})
if ret_code == 1:
@@ -560,11 +571,7 @@
wait=[vbmeta_f])
# Re-sign kernel. Note kernel's vbmeta contain addition descriptor from ramdisk(s)
- def resign_kernel(kernel, initrd_normal, initrd_debug):
- kernel_file = files[kernel]
- initrd_normal_file = files[initrd_normal]
- initrd_debug_file = files[initrd_debug]
-
+ def resign_decompressed_kernel(kernel_file, initrd_normal_file, initrd_debug_file):
_, kernel_image_descriptors = AvbInfo(args, kernel_file)
salts = extract_hash_descriptors(
kernel_image_descriptors, lambda descriptor: descriptor['Salt'])
@@ -580,21 +587,47 @@
additional_images=[initrd_normal_hashdesc, initrd_debug_hashdesc],
wait=[initrd_n_f, initrd_d_f])
+ def resign_compressed_kernel(kernel_file, initrd_normal_file, initrd_debug_file):
+ # decompress, re-sign, compress again
+ with tempfile.TemporaryDirectory() as work_dir:
+ decompressed_kernel_file = os.path.join(work_dir, os.path.basename(kernel_file))
+ RunCommand(args, ['lz4', '-d', kernel_file, decompressed_kernel_file])
+ resign_decompressed_kernel(decompressed_kernel_file, initrd_normal_file,
+ initrd_debug_file).result()
+ RunCommand(args, ['lz4', '-9', '-f', decompressed_kernel_file, kernel_file])
+
+ def resign_kernel(kernel, initrd_normal, initrd_debug):
+ kernel_file = files[kernel]
+ initrd_normal_file = files[initrd_normal]
+ initrd_debug_file = files[initrd_debug]
+
+ # kernel may be compressed with lz4.
+ if is_lz4(args, kernel_file):
+ return Async(resign_compressed_kernel, kernel_file, initrd_normal_file,
+ initrd_debug_file)
+ else:
+ return resign_decompressed_kernel(kernel_file, initrd_normal_file, initrd_debug_file)
+
_, original_kernel_descriptors = AvbInfo(args, files['kernel'])
- resign_kernel_task = resign_kernel('kernel', 'initrd_normal.img', 'initrd_debuggable.img')
+ resign_kernel_tasks = [resign_kernel('kernel', 'initrd_normal.img', 'initrd_debuggable.img')]
+ original_kernels = {"kernel" : original_kernel_descriptors}
for ver in gki_versions:
if f'gki-{ver}_kernel' in files:
- resign_kernel(
- f'gki-{ver}_kernel',
+ kernel_name = f'gki-{ver}_kernel'
+ _, original_kernel_descriptors = AvbInfo(args, files[kernel_name])
+ task = resign_kernel(
+ kernel_name,
f'gki-{ver}_initrd_normal.img',
f'gki-{ver}_initrd_debuggable.img')
+ resign_kernel_tasks.append(task)
+ original_kernels[kernel_name] = original_kernel_descriptors
# Re-sign rialto if it exists. Rialto only exists in arm64 environment.
if os.path.exists(files['rialto']):
update_initrd_digests_task = Async(
- update_initrd_digests_in_rialto, original_kernel_descriptors, args,
- files, wait=[resign_kernel_task])
+ update_initrd_digests_of_kernels_in_rialto, original_kernels, args, files,
+ wait=resign_kernel_tasks)
Async(resign_rialto, args, key, files['rialto'], wait=[update_initrd_digests_task])
def resign_rialto(args, key, rialto_path):
@@ -628,18 +661,7 @@
f"Value of '{key}' should change for '{context}'" \
f"Original value: {original[key]}, updated value: {updated[key]}"
-def update_initrd_digests_in_rialto(original_descriptors, args, files):
- _, updated_descriptors = AvbInfo(args, files['kernel'])
-
- original_digests = extract_hash_descriptors(
- original_descriptors, lambda x: binascii.unhexlify(x['Digest']))
- updated_digests = extract_hash_descriptors(
- updated_descriptors, lambda x: binascii.unhexlify(x['Digest']))
- assert original_digests.pop("boot") == updated_digests.pop("boot"), \
- "Hash descriptor of boot should not change for kernel. " \
- f"Original descriptors: {original_descriptors}, " \
- f"updated descriptors: {updated_descriptors}"
-
+def update_initrd_digests_of_kernels_in_rialto(original_kernels, args, files):
# Update the hashes of initrd_normal and initrd_debug in rialto if the
# bootconfigs in them are updated.
if args.do_not_update_bootconfigs:
@@ -648,6 +670,26 @@
with open(files['rialto'], "rb") as file:
content = file.read()
+ for kernel_name, descriptors in original_kernels.items():
+ content = update_initrd_digests_in_rialto(
+ descriptors, args, files, kernel_name, content)
+
+ with open(files['rialto'], "wb") as file:
+ file.write(content)
+
+def update_initrd_digests_in_rialto(
+ original_descriptors, args, files, kernel_name, content):
+ _, updated_descriptors = AvbInfo(args, files[kernel_name])
+
+ original_digests = extract_hash_descriptors(
+ original_descriptors, lambda x: binascii.unhexlify(x['Digest']))
+ updated_digests = extract_hash_descriptors(
+ updated_descriptors, lambda x: binascii.unhexlify(x['Digest']))
+ assert original_digests.pop("boot") == updated_digests.pop("boot"), \
+ "Hash descriptor of boot should not change for " + kernel_name + \
+ f"\nOriginal descriptors: {original_descriptors}, " \
+ f"\nUpdated descriptors: {updated_descriptors}"
+
# Check that the original and updated digests are different before updating rialto.
partition_names = {'initrd_normal', 'initrd_debug'}
assert set(original_digests.keys()) == set(updated_digests.keys()) == partition_names, \
@@ -671,8 +713,7 @@
f"original digest of the partition {partition_name} not found."
content = new_content
- with open(files['rialto'], "wb") as file:
- file.write(content)
+ return content
def extract_hash_descriptors(descriptors, f=lambda x: x):
return {desc["Partition Name"]: f(desc) for desc in
diff --git a/apex/virtualizationservice.rc b/apex/virtualizationservice.rc.base
similarity index 99%
rename from apex/virtualizationservice.rc
rename to apex/virtualizationservice.rc.base
index 02b2081..688db10 100644
--- a/apex/virtualizationservice.rc
+++ b/apex/virtualizationservice.rc.base
@@ -16,6 +16,6 @@
class main
user system
group system
- interface aidl android.system.virtualizationservice
disabled
oneshot
+ interface aidl android.system.virtualizationservice
diff --git a/apex/virtualizationservice.rc.llpvm b/apex/virtualizationservice.rc.llpvm
new file mode 100644
index 0000000..916d508
--- /dev/null
+++ b/apex/virtualizationservice.rc.llpvm
@@ -0,0 +1 @@
+ interface aidl android.system.virtualizationmaintenance
diff --git a/apex/virtualizationservice.rc.ra b/apex/virtualizationservice.rc.ra
new file mode 100644
index 0000000..3554259
--- /dev/null
+++ b/apex/virtualizationservice.rc.ra
@@ -0,0 +1 @@
+ interface aidl android.hardware.security.keymint.IRemotelyProvisionedComponent/avf
diff --git a/apex/virtualizationservice_attestation_enabled.rc b/apex/virtualizationservice_attestation_enabled.rc
deleted file mode 100644
index 8eaccae..0000000
--- a/apex/virtualizationservice_attestation_enabled.rc
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-service virtualizationservice /apex/com.android.virt/bin/virtualizationservice
- class main
- user system
- group system
- interface aidl android.system.virtualizationservice
- interface aidl android.hardware.security.keymint.IRemotelyProvisionedComponent/avf
- disabled
- oneshot
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index b0294dd..75f0c4f 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -7,6 +7,7 @@
srcs: ["src/composd_main.rs"],
edition: "2021",
prefer_rlib: true,
+ defaults: ["avf_build_flags_rust"],
rustlibs: [
"android.system.composd-rust",
"android.system.virtualizationservice-rust",
diff --git a/docs/abl.md b/docs/abl.md
new file mode 100644
index 0000000..b08464e
--- /dev/null
+++ b/docs/abl.md
@@ -0,0 +1,53 @@
+# Android Bootloader (ABL)
+
+[ABL](https://source.android.com/docs/core/architecture/bootloader) is not a component of AVF, but
+it plays a crucial role in loading the necessary AVF components and initializing them correctly.
+This doc explains the responsibilities of ABL from the perspective of AVF.
+
+## pVM firmware (pvmfw)
+
+ABL is responsible for the following:
+
+* locating the pvmfw binary in the pvmfw partition,
+* verifying it as part of the [verified
+ boot](https://source.android.com/docs/security/features/verifiedboot) process,
+* loading it into memory, and
+* describing the region where pvmfw is loaded using a DT and passing it to the hypervisor.
+
+See [ABL Support](../pvmfw/README.md#android-bootloader-abl_support) for more detail.
+
+ABL is also responsible for constructing the pvmfw configuration data. The data consists of the
+following info:
+
+* DICE chain (also known as BCC Handover)
+* DTBO describing [debug policy](debug/README.md#debug-policy) (if available)
+* DTBO describing [assignable devices](device_assignment.md) (if available)
+* Reference DT carrying extra information that needs to be passed to the guest VM
+
+See [Configuration Data](../pvmfw/README.md#configuration-data) for more detail.
+
+## Android
+
+ABL is responsible for setting the following bootconfigs describing the status and capabilities of
+the hypervisor.
+
+* `androidboot.hypervisor.version`: free-form description of the hypervisor
+* `androidboot.hypervisor.vm.supported`: whether traditional VMs (i.e. non-protected VMs) are
+ supported or not
+* `androidboot.hypervisor.protected_vm.supported`: whether protected VMs are supported or not
+
+These bootconfigs are converted into system properties by the init process.
+
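+For example, a device whose hypervisor supports both VM types might pass bootconfigs like the
+following (illustrative values only), which init then exposes as `ro.boot.hypervisor.*` system
+properties:
+
+```
+androidboot.hypervisor.version = "pkvm-1.0"
+androidboot.hypervisor.vm.supported = "1"
+androidboot.hypervisor.protected_vm.supported = "1"
+```
+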
+See
+[HypervisorProperties.prop](https://android.googlesource.com/platform/system/libsysprop/+/refs/heads/main/srcs/android/sysprop/HypervisorProperties.sysprop)
+for more detail.
diff --git a/docs/updatable_vm.md b/docs/updatable_vm.md
new file mode 100644
index 0000000..de5552e
--- /dev/null
+++ b/docs/updatable_vm.md
@@ -0,0 +1,3 @@
+# Updatable VM
+
+(To be filled)
diff --git a/docs/vm_remote_attestation.md b/docs/vm_remote_attestation.md
new file mode 100644
index 0000000..093418b
--- /dev/null
+++ b/docs/vm_remote_attestation.md
@@ -0,0 +1,3 @@
+# VM Remote Attestation
+
+(To be filled)
diff --git a/java/service/Android.bp b/java/service/Android.bp
index fdfb203..8bac7be 100644
--- a/java/service/Android.bp
+++ b/java/service/Android.bp
@@ -29,6 +29,9 @@
"framework",
"services.core",
],
+ static_libs: [
+ "android.system.virtualizationmaintenance-java",
+ ],
sdk_version: "core_platform",
apex_available: ["com.android.virt"],
installable: true,
diff --git a/java/service/src/com/android/system/virtualmachine/VirtualizationSystemService.java b/java/service/src/com/android/system/virtualmachine/VirtualizationSystemService.java
index 2905acd..3f973b4 100644
--- a/java/service/src/com/android/system/virtualmachine/VirtualizationSystemService.java
+++ b/java/service/src/com/android/system/virtualmachine/VirtualizationSystemService.java
@@ -16,16 +16,121 @@
package com.android.system.virtualmachine;
+import android.content.BroadcastReceiver;
import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.os.Handler;
+import android.os.IBinder;
+import android.os.ServiceManager;
+import android.os.UserHandle;
+import android.system.virtualizationmaintenance.IVirtualizationMaintenance;
+import android.util.Log;
+
+import com.android.internal.os.BackgroundThread;
import com.android.server.SystemService;
-/** TODO */
+/**
+ * This class exists to notify virtualization service of relevant things happening in the Android
+ * framework.
+ *
+ * <p>It currently is responsible for Secretkeeper-related maintenance - ensuring that we are not
+ * storing secrets for apps or users that no longer exist.
+ */
public class VirtualizationSystemService extends SystemService {
+ private static final String TAG = VirtualizationSystemService.class.getName();
+ private static final String SERVICE_NAME = "android.system.virtualizationmaintenance";
+ private Handler mHandler;
public VirtualizationSystemService(Context context) {
super(context);
}
@Override
- public void onStart() {}
+ public void onStart() {
+ // Nothing needed here - we don't expose any binder service. The binder service we use is
+ // exposed as a lazy service by the virtualizationservice native binary.
+ }
+
+ @Override
+ public void onBootPhase(int phase) {
+ if (phase != PHASE_BOOT_COMPLETED) return;
+
+ mHandler = BackgroundThread.getHandler();
+ new Receiver().registerForBroadcasts();
+ }
+
+ private void notifyAppRemoved(int uid) {
+ try {
+ IVirtualizationMaintenance maintenance = connectToMaintenanceService();
+ maintenance.appRemoved(UserHandle.getUserId(uid), UserHandle.getAppId(uid));
+ } catch (Exception e) {
+ Log.e(TAG, "notifyAppRemoved failed", e);
+ }
+ }
+
+ private void notifyUserRemoved(int userId) {
+ try {
+ IVirtualizationMaintenance maintenance = connectToMaintenanceService();
+ maintenance.userRemoved(userId);
+ } catch (Exception e) {
+ Log.e(TAG, "notifyUserRemoved failed", e);
+ }
+ }
+
+ private static IVirtualizationMaintenance connectToMaintenanceService() {
+ IBinder binder = ServiceManager.waitForService(SERVICE_NAME);
+ IVirtualizationMaintenance maintenance =
+ IVirtualizationMaintenance.Stub.asInterface(binder);
+ if (maintenance == null) {
+ throw new IllegalStateException("Failed to connect to " + SERVICE_NAME);
+ }
+ return maintenance;
+ }
+
+ private class Receiver extends BroadcastReceiver {
+ public void registerForBroadcasts() {
+ Context allUsers = getContext().createContextAsUser(UserHandle.ALL, 0 /* flags */);
+
+ allUsers.registerReceiver(this, new IntentFilter(Intent.ACTION_USER_REMOVED));
+
+ IntentFilter packageFilter = new IntentFilter(Intent.ACTION_PACKAGE_REMOVED);
+ packageFilter.addDataScheme("package");
+ allUsers.registerReceiver(this, packageFilter);
+ }
+
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ switch (intent.getAction()) {
+ case Intent.ACTION_USER_REMOVED:
+ onUserRemoved(intent);
+ break;
+ case Intent.ACTION_PACKAGE_REMOVED:
+ onPackageRemoved(intent);
+ break;
+ default:
+ Log.e(TAG, "received unexpected intent: " + intent.getAction());
+ break;
+ }
+ }
+
+ private void onUserRemoved(Intent intent) {
+ int userId = intent.getIntExtra(Intent.EXTRA_USER_HANDLE, UserHandle.USER_NULL);
+ if (userId != UserHandle.USER_NULL) {
+ mHandler.post(() -> notifyUserRemoved(userId));
+ }
+ }
+
+ private void onPackageRemoved(Intent intent) {
+ if (intent.getBooleanExtra(Intent.EXTRA_REPLACING, false)
+ || !intent.getBooleanExtra(Intent.EXTRA_DATA_REMOVED, false)) {
+ // Package is being updated rather than uninstalled.
+ return;
+ }
+ int uid = intent.getIntExtra(Intent.EXTRA_UID, -1);
+ if (uid != -1) {
+ mHandler.post(() -> notifyAppRemoved(uid));
+ }
+ }
+ }
}
diff --git a/libs/libfdt/src/iterators.rs b/libs/libfdt/src/iterators.rs
index cb7afda..743c52b 100644
--- a/libs/libfdt/src/iterators.rs
+++ b/libs/libfdt/src/iterators.rs
@@ -33,7 +33,7 @@
impl<'a> CompatibleIterator<'a> {
pub(crate) fn new(fdt: &'a Fdt, compatible: &'a CStr) -> Result<Self, FdtError> {
- let node = fdt.root()?;
+ let node = fdt.root();
Ok(Self { node, compatible })
}
}
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 3339262..8ea9cd9 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -478,14 +478,38 @@
self.delete_and_next(next_offset)
}
- /// Returns the next node
+ /// Returns the next node. Use this API to traverse the descendants of a node.
+ ///
+ /// The returned depth is relative to the initial node on which the next-node API was first
+ /// called. Returns `None` when the end of the FDT is reached or the depth becomes negative.
+ ///
+ /// See also: [`next_node_skip_subnodes`] and [`delete_and_next_node`]
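+ ///
+ /// A minimal traversal sketch (assuming `node` is an `FdtNodeMut` and the caller can use `?`):
+ ///
+ /// ```ignore
+ /// let mut iter = node.next_node(0)?;
+ /// while let Some((n, depth)) = iter {
+ ///     // Visit `n`; `depth` is relative to the starting node.
+ ///     iter = n.next_node(depth)?;
+ /// }
+ /// ```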
pub fn next_node(self, depth: usize) -> Result<Option<(Self, usize)>> {
let next = self.fdt.next_node(self.offset, depth)?;
Ok(next.map(|(offset, depth)| (Self { fdt: self.fdt, offset }, depth)))
}
- /// Deletes this and returns the next node
+ /// Returns the next node, skipping its subnodes. Use this API to traverse the descendants of
+ /// a node while ignoring the subtrees of certain nodes.
+ ///
+ /// The returned depth is relative to the initial node on which the next-node API was first
+ /// called. Returns `None` when the end of the FDT is reached or the depth becomes negative.
+ ///
+ /// See also: [`next_node`] and [`delete_and_next_node`]
+ pub fn next_node_skip_subnodes(self, depth: usize) -> Result<Option<(Self, usize)>> {
+ let next = self.fdt.next_node_skip_subnodes(self.offset, depth)?;
+
+ Ok(next.map(|(offset, depth)| (Self { fdt: self.fdt, offset }, depth)))
+ }
+
+ /// Deletes this node and returns the next one. Use this API to traverse the descendants of a
+ /// node while removing certain nodes.
+ ///
+ /// The returned depth is relative to the initial node on which the next-node API was first
+ /// called. Returns `None` when the end of the FDT is reached or the depth becomes negative.
+ ///
+ /// See also: [`next_node`] and [`next_node_skip_subnodes`]
pub fn delete_and_next_node(self, depth: usize) -> Result<Option<(Self, usize)>> {
let next_node = self.fdt.next_node_skip_subnodes(self.offset, depth)?;
if let Some((offset, depth)) = next_node {
@@ -669,7 +693,7 @@
///
/// NOTE: This does not support individual "/memory@XXXX" banks.
pub fn memory(&self) -> Result<MemRegIterator> {
- let node = self.root()?.subnode(cstr!("memory"))?.ok_or(FdtError::NotFound)?;
+ let node = self.root().subnode(cstr!("memory"))?.ok_or(FdtError::NotFound)?;
if node.device_type()? != Some(cstr!("memory")) {
return Err(FdtError::BadValue);
}
@@ -683,7 +707,7 @@
/// Returns the standard /chosen node.
pub fn chosen(&self) -> Result<Option<FdtNode>> {
- self.root()?.subnode(cstr!("chosen"))
+ self.root().subnode(cstr!("chosen"))
}
/// Returns the standard /chosen node as mutable.
@@ -692,13 +716,13 @@
}
/// Returns the root node of the tree.
- pub fn root(&self) -> Result<FdtNode> {
- Ok(FdtNode { fdt: self, offset: NodeOffset::ROOT })
+ pub fn root(&self) -> FdtNode {
+ FdtNode { fdt: self, offset: NodeOffset::ROOT }
}
/// Returns the standard /__symbols__ node.
pub fn symbols(&self) -> Result<Option<FdtNode>> {
- self.root()?.subnode(cstr!("__symbols__"))
+ self.root().subnode(cstr!("__symbols__"))
}
/// Returns the standard /__symbols__ node as mutable
@@ -738,8 +762,8 @@
}
/// Returns the mutable root node of the tree.
- pub fn root_mut(&mut self) -> Result<FdtNodeMut> {
- Ok(FdtNodeMut { fdt: self, offset: NodeOffset::ROOT })
+ pub fn root_mut(&mut self) -> FdtNodeMut {
+ FdtNodeMut { fdt: self, offset: NodeOffset::ROOT }
}
/// Returns a mutable tree node by its full path.
diff --git a/libs/libfdt/tests/api_test.rs b/libs/libfdt/tests/api_test.rs
index 8f5b76d..f521a00 100644
--- a/libs/libfdt/tests/api_test.rs
+++ b/libs/libfdt/tests/api_test.rs
@@ -81,7 +81,7 @@
let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
let fdt = Fdt::from_slice(&data).unwrap();
- let root = fdt.root().unwrap();
+ let root = fdt.root();
assert_eq!(root.name(), Ok(cstr!("")));
let chosen = fdt.chosen().unwrap().unwrap();
@@ -96,7 +96,7 @@
fn node_subnodes() {
let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
let fdt = Fdt::from_slice(&data).unwrap();
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let expected = [Ok(cstr!("cpus")), Ok(cstr!("randomnode")), Ok(cstr!("chosen"))];
let root_subnodes = root.subnodes().unwrap();
@@ -108,7 +108,7 @@
fn node_properties() {
let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
let fdt = Fdt::from_slice(&data).unwrap();
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let one_be = 0x1_u32.to_be_bytes();
type Result<T> = core::result::Result<T, FdtError>;
let expected: Vec<(Result<&CStr>, Result<&[u8]>)> = vec![
@@ -290,7 +290,7 @@
let fdt = Fdt::from_slice(&data).unwrap();
let name = cstr!("node_a");
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let node = root.subnode(name).unwrap();
assert_ne!(None, node);
let node = node.unwrap();
@@ -304,7 +304,7 @@
let fdt = Fdt::from_slice(&data).unwrap();
let name = b"node_aaaaa";
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let node = root.subnode_with_name_bytes(&name[0..6]).unwrap();
assert_ne!(None, node);
let node = node.unwrap();
@@ -319,7 +319,7 @@
let name = cstr!("node_a");
let node = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.subnode(name).unwrap().unwrap()
};
@@ -378,7 +378,7 @@
let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
let fdt = Fdt::from_mut_slice(&mut data).unwrap();
- let root = fdt.root_mut().unwrap();
+ let root = fdt.root_mut();
let mut subnode_iter = root.first_subnode().unwrap();
while let Some(subnode) = subnode_iter {
@@ -389,7 +389,7 @@
}
}
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let expected_names = vec![
Ok(cstr!("node_a")),
Ok(cstr!("node_b")),
@@ -416,7 +416,7 @@
];
let mut expected_nodes_iter = expected_nodes.iter();
- let mut iter = fdt.root_mut().unwrap().next_node(0).unwrap();
+ let mut iter = fdt.root_mut().next_node(0).unwrap();
while let Some((node, depth)) = iter {
let node_name = node.as_node().name();
if node_name == Ok(cstr!("node_a")) || node_name == Ok(cstr!("node_zz")) {
@@ -431,7 +431,7 @@
}
assert_eq!(None, expected_nodes_iter.next());
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let all_descendants: Vec<_> =
root.descendants().map(|(node, depth)| (node.name(), depth)).collect();
assert_eq!(expected_nodes, all_descendants);
@@ -442,12 +442,12 @@
let mut data = fs::read(TEST_TREE_WITH_EMPTY_MEMORY_RANGE_PATH).unwrap();
let fdt = Fdt::from_mut_slice(&mut data).unwrap();
- let mut iter = fdt.root_mut().unwrap().next_node(0).unwrap();
+ let mut iter = fdt.root_mut().next_node(0).unwrap();
while let Some((node, depth)) = iter {
iter = node.delete_and_next_node(depth).unwrap();
}
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let all_descendants: Vec<_> =
root.descendants().map(|(node, depth)| (node.name(), depth)).collect();
assert!(all_descendants.is_empty(), "{all_descendants:?}");
@@ -460,7 +460,7 @@
let fdt = Fdt::from_slice(&data).unwrap();
let name = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.name()
// Make root to be dropped
};
@@ -472,12 +472,12 @@
let mut data = vec![0_u8; 1000];
let fdt = Fdt::create_empty_tree(&mut data).unwrap();
- let root = fdt.root_mut().unwrap();
+ let root = fdt.root_mut();
let names = [cstr!("a"), cstr!("b")];
root.add_subnodes(&names).unwrap();
let expected: HashSet<_> = names.into_iter().collect();
- let subnodes = fdt.root().unwrap().subnodes().unwrap();
+ let subnodes = fdt.root().subnodes().unwrap();
let names: HashSet<_> = subnodes.map(|node| node.name().unwrap()).collect();
assert_eq!(expected, names);
@@ -491,7 +491,7 @@
let name = {
let node_a = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.subnode(cstr!("node_a")).unwrap()
// Make root to be dropped
};
@@ -511,7 +511,7 @@
let first_subnode_name = {
let first_subnode = {
let mut subnodes_iter = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.subnodes().unwrap()
// Make root to be dropped
};
@@ -533,7 +533,7 @@
let first_descendant_name = {
let (first_descendant, _) = {
let mut descendants_iter = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.descendants()
// Make root to be dropped
};
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 4aaa793..999dc52 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -559,7 +559,7 @@
avb_add_hash_footer {
name: "microdroid_gki-android14-6.1_kernel_signed",
defaults: ["microdroid_kernel_signed_defaults"],
- filename: "microdroid_gki-android14-6.1_kernel",
+ filename: "microdroid_gki-android14-6.1_kernel_signed",
arch: {
arm64: {
src: ":microdroid_gki_kernel_prebuilts-6.1-arm64",
@@ -574,13 +574,29 @@
],
}
+// HACK: use cc_genrule for arch-specific properties
+cc_genrule {
+ name: "microdroid_gki-android14-6.1_kernel_signed-lz4",
+ out: ["microdroid_gki-android14-6.1_kernel_signed-lz4"],
+ srcs: [":empty_file"],
+ arch: {
+ arm64: {
+ srcs: [":microdroid_gki-android14-6.1_kernel_signed"],
+ exclude_srcs: [":empty_file"],
+ },
+ },
+ tools: ["lz4"],
+ cmd: "$(location lz4) -9 $(in) $(out)",
+}
+
prebuilt_etc {
name: "microdroid_gki-android14-6.1_kernel",
+ filename: "microdroid_gki-android14-6.1_kernel",
src: ":empty_file",
relative_install_path: "fs",
arch: {
arm64: {
- src: ":microdroid_gki-android14-6.1_kernel_signed",
+ src: ":microdroid_gki-android14-6.1_kernel_signed-lz4",
},
x86_64: {
src: ":microdroid_gki-android14-6.1_kernel_signed",
@@ -599,3 +615,41 @@
defaults: ["microdroid_initrd_debug_defaults"],
src: ":microdroid_gki-android14-6.1_initrd_debuggable",
}
+
+python_binary_host {
+ name: "extract_microdroid_kernel_hashes",
+ srcs: ["extract_microdroid_kernel_hashes.py"],
+}
+
+// HACK: use cc_genrule for arch-specific properties
+cc_genrule {
+ name: "microdroid_kernel_hashes_rs",
+ srcs: [":microdroid_kernel"],
+ arch: {
+ arm64: {
+ srcs: [":microdroid_gki-android14-6.1_kernel_signed"],
+ },
+ x86_64: {
+ srcs: [":microdroid_gki-android14-6.1_kernel_signed"],
+ },
+ },
+ out: ["lib.rs"],
+ tools: [
+ "extract_microdroid_kernel_hashes",
+ "avbtool",
+ ],
+ cmd: "$(location extract_microdroid_kernel_hashes) --avbtool $(location avbtool) " +
+ "--kernel $(in) > $(out)",
+}
+
+rust_library_rlib {
+ name: "libmicrodroid_kernel_hashes",
+ srcs: [":microdroid_kernel_hashes_rs"],
+ crate_name: "microdroid_kernel_hashes",
+ prefer_rlib: true,
+ no_stdlibs: true,
+ stdlibs: [
+ "libcompiler_builtins.rust_sysroot",
+ "libcore.rust_sysroot",
+ ],
+}
diff --git a/microdroid/extract_microdroid_kernel_hashes.py b/microdroid/extract_microdroid_kernel_hashes.py
new file mode 100644
index 0000000..f2c6ae7
--- /dev/null
+++ b/microdroid/extract_microdroid_kernel_hashes.py
@@ -0,0 +1,104 @@
+"""Extracts the following hashes from the AVB footer of Microdroid's kernel:
+
+- kernel hash
+- initrd_normal hash
+- initrd_debug hash
+
+The hashes are written to stdout as a Rust file.
+
+In unsupported environments such as x86, where the kernel is just an empty file,
+the output Rust file has the same hash constant fields for compatibility
+reasons, but all of them are empty.
+"""
+#!/usr/bin/env python3
+
+import argparse
+from collections import defaultdict
+import subprocess
+from typing import Dict
+
+PARTITION_NAME_BOOT = 'boot'
+PARTITION_NAME_INITRD_NORMAL = 'initrd_normal'
+PARTITION_NAME_INITRD_DEBUG = 'initrd_debug'
+HASH_SIZE = 32
+
+def main(args):
+ """Main function."""
+ avbtool = args.avbtool
+ num_kernel_images = len(args.kernel)
+
+ print("//! This file is generated by extract_microdroid_kernel_hashes.py.")
+ print("//! It contains the hashes of the kernel and initrds.\n")
+ print("#![no_std]\n#![allow(missing_docs)]\n")
+
+ print("pub const HASH_SIZE: usize = " + str(HASH_SIZE) + ";\n")
+ print("pub struct OsHashes {")
+ print(" pub kernel: [u8; HASH_SIZE],")
+ print(" pub initrd_normal: [u8; HASH_SIZE],")
+ print(" pub initrd_debug: [u8; HASH_SIZE],")
+ print("}\n")
+
+ hashes = defaultdict(list)
+ for kernel_image_path in args.kernel:
+ collected_hashes = collect_hashes(avbtool, kernel_image_path)
+
+ if collected_hashes.keys() == {PARTITION_NAME_BOOT,
+ PARTITION_NAME_INITRD_NORMAL,
+ PARTITION_NAME_INITRD_DEBUG}:
+ for partition_name, v in collected_hashes.items():
+ hashes[partition_name].append(v)
+ else:
+ # Microdroid's kernel is just an empty file in unsupported
+ # environments such as x86; in that case the hashes should be empty.
+ print("/// The kernel is empty, no hashes are available.")
+ hashes[PARTITION_NAME_BOOT].append("")
+ hashes[PARTITION_NAME_INITRD_NORMAL].append("")
+ hashes[PARTITION_NAME_INITRD_DEBUG].append("")
+
+ print("pub const OS_HASHES: [OsHashes; " + str(num_kernel_images) + "] = [")
+ for i in range(num_kernel_images):
+ print("OsHashes {")
+ print(" kernel: [" +
+ format_hex_string(hashes[PARTITION_NAME_BOOT][i]) + "],")
+ print(" initrd_normal: [" +
+ format_hex_string(hashes[PARTITION_NAME_INITRD_NORMAL][i]) + "],")
+ print(" initrd_debug: [" +
+ format_hex_string(hashes[PARTITION_NAME_INITRD_DEBUG][i]) + "],")
+ print("},")
+ print("];")
+
+def collect_hashes(avbtool: str, kernel_image_path: str) -> Dict[str, str]:
+ """Collects the hashes from the AVB footer of the kernel image."""
+ hashes = {}
+ with subprocess.Popen(
+ [avbtool, 'print_partition_digests', '--image', kernel_image_path],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
+ stdout, _ = proc.communicate()
+ for line in stdout.decode("utf-8").split("\n"):
+ line = line.replace(" ", "").split(":")
+ if len(line) == 2:
+ partition_name, hash_ = line
+ hashes[partition_name] = hash_
+ return hashes
+
+def format_hex_string(hex_string: str) -> str:
+ """Formats a hex string into a Rust array."""
+ if not hex_string:
+ return "0x00, " * HASH_SIZE
+ assert len(hex_string) == HASH_SIZE * 2, \
+ "Hex string must have length " + str(HASH_SIZE * 2) + ": " + \
+ hex_string
+ return ", ".join(["\n0x" + hex_string[i:i+2] if i % 32 == 0
+ else "0x" + hex_string[i:i+2]
+ for i in range(0, len(hex_string), 2)])
+
+def parse_args():
+ """Parses the command line arguments."""
+ parser = argparse.ArgumentParser(
+ "Extracts the hashes from the kernels' AVB footer")
+ parser.add_argument('--avbtool', help='Path to the avbtool binary')
+ parser.add_argument('--kernel', help='Path to the kernel image', nargs='+')
+ return parser.parse_args()
+
+if __name__ == '__main__':
+ main(parse_args())
diff --git a/microdroid_manager/src/vm_payload_service.rs b/microdroid_manager/src/vm_payload_service.rs
index 959197a..7f4317b 100644
--- a/microdroid_manager/src/vm_payload_service.rs
+++ b/microdroid_manager/src/vm_payload_service.rs
@@ -73,7 +73,6 @@
challenge: &[u8],
test_mode: bool,
) -> binder::Result<AttestationResult> {
- self.check_restricted_apis_allowed()?;
let ClientVmAttestationData { private_key, csr } =
generate_attestation_key_and_csr(challenge, self.secret.dice_artifacts())
.map_err(|e| {
diff --git a/pvmfw/src/device_assignment.rs b/pvmfw/src/device_assignment.rs
index 54b5a47..2c47f9e 100644
--- a/pvmfw/src/device_assignment.rs
+++ b/pvmfw/src/device_assignment.rs
@@ -294,6 +294,26 @@
.map_or(false, |name| name == b"__overlay__")
}
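+/// Removes properties from /__symbols__ whose target paths no longer resolve to a node in the
+/// FDT, e.g. references to pvIOMMU nodes removed during device assignment filtering.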
+fn filter_dangling_symbols(fdt: &mut Fdt) -> Result<()> {
+ if let Some(symbols) = fdt.symbols()? {
+ let mut removed = vec![];
+ for prop in symbols.properties()? {
+ let path = CStr::from_bytes_with_nul(prop.value()?)
+ .map_err(|_| DeviceAssignmentError::Internal)?;
+ if fdt.node(path)?.is_none() {
+ let name = prop.name()?;
+ removed.push(CString::from(name));
+ }
+ }
+
+ let mut symbols = fdt.symbols_mut()?.unwrap();
+ for name in removed {
+ symbols.nop_property(&name)?;
+ }
+ }
+ Ok(())
+}
+
impl AsRef<Fdt> for VmDtbo {
fn as_ref(&self) -> &Fdt {
&self.0
@@ -715,7 +735,7 @@
}
fn patch_pviommus(&self, fdt: &mut Fdt) -> Result<BTreeMap<PvIommu, Phandle>> {
- let mut compatible = fdt.root_mut()?.next_compatible(Self::PVIOMMU_COMPATIBLE)?;
+ let mut compatible = fdt.root_mut().next_compatible(Self::PVIOMMU_COMPATIBLE)?;
let mut pviommu_phandles = BTreeMap::new();
for pviommu in &self.pviommus {
@@ -744,7 +764,8 @@
device.patch(fdt, &pviommu_phandles)?;
}
- Ok(())
+ // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
+ filter_dangling_symbols(fdt)
}
}
@@ -1020,6 +1041,39 @@
}
#[test]
+ fn device_info_patch_no_pviommus() {
+ let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+ let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();
+
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
+ iommu_tokens: BTreeMap::new(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
+ device_info.filter(vm_dtbo).unwrap();
+
+ // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
+ unsafe {
+ platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
+ }
+ device_info.patch(platform_dt).unwrap();
+
+ let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu")).unwrap();
+ assert_eq!(None, compatible);
+
+ if let Some(symbols) = platform_dt.symbols().unwrap() {
+ for prop in symbols.properties().unwrap() {
+ let path = CStr::from_bytes_with_nul(prop.value().unwrap()).unwrap();
+ assert_ne!(None, platform_dt.node(path).unwrap());
+ }
+ }
+ }
+
+ #[test]
fn device_info_overlay_iommu() {
let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index 146d012..51ba112 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -368,7 +368,7 @@
n: usize,
compat: &CStr,
) -> libfdt::Result<Option<FdtNodeMut<'a>>> {
- let mut node = fdt.root_mut()?.next_compatible(compat)?;
+ let mut node = fdt.root_mut().next_compatible(compat)?;
for _ in 0..n {
node = node.ok_or(FdtError::NoSpace)?.next_compatible(compat)?;
}
@@ -479,7 +479,7 @@
vm_ref_dt: &Fdt,
props_info: &BTreeMap<CString, Vec<u8>>,
) -> libfdt::Result<()> {
- let root_vm_dt = vm_dt.root_mut()?;
+ let root_vm_dt = vm_dt.root_mut();
let mut avf_vm_dt = root_vm_dt.add_subnode(cstr!("avf"))?;
// TODO(b/318431677): Validate nodes beyond /avf.
let avf_node = vm_ref_dt.node(cstr!("/avf"))?.ok_or(FdtError::NotFound)?;
@@ -714,10 +714,8 @@
}
fn patch_pci_info(fdt: &mut Fdt, pci_info: &PciInfo) -> libfdt::Result<()> {
- let mut node = fdt
- .root_mut()?
- .next_compatible(cstr!("pci-host-cam-generic"))?
- .ok_or(FdtError::NotFound)?;
+ let mut node =
+ fdt.root_mut().next_compatible(cstr!("pci-host-cam-generic"))?.ok_or(FdtError::NotFound)?;
let irq_masks_size = pci_info.irq_masks.len() * size_of::<PciIrqMask>();
node.trimprop(cstr!("interrupt-map-mask"), irq_masks_size)?;
@@ -758,7 +756,7 @@
/// Patch the DT by deleting the ns16550a compatible nodes whose address are unknown
fn patch_serial_info(fdt: &mut Fdt, serial_info: &SerialInfo) -> libfdt::Result<()> {
let name = cstr!("ns16550a");
- let mut next = fdt.root_mut()?.next_compatible(name);
+ let mut next = fdt.root_mut().next_compatible(name);
while let Some(current) = next? {
let reg =
current.as_node().reg()?.ok_or(FdtError::NotFound)?.next().ok_or(FdtError::NotFound)?;
@@ -806,7 +804,7 @@
fn patch_swiotlb_info(fdt: &mut Fdt, swiotlb_info: &SwiotlbInfo) -> libfdt::Result<()> {
let mut node =
- fdt.root_mut()?.next_compatible(cstr!("restricted-dma-pool"))?.ok_or(FdtError::NotFound)?;
+ fdt.root_mut().next_compatible(cstr!("restricted-dma-pool"))?.ok_or(FdtError::NotFound)?;
if let Some(range) = swiotlb_info.fixed_range() {
node.setprop_addrrange_inplace(
@@ -845,7 +843,7 @@
let value = [addr0, size0.unwrap(), addr1, size1.unwrap()];
let mut node =
- fdt.root_mut()?.next_compatible(cstr!("arm,gic-v3"))?.ok_or(FdtError::NotFound)?;
+ fdt.root_mut().next_compatible(cstr!("arm,gic-v3"))?.ok_or(FdtError::NotFound)?;
node.setprop_inplace(cstr!("reg"), flatten(&value))
}
@@ -869,7 +867,7 @@
let value = value.into_inner();
let mut node =
- fdt.root_mut()?.next_compatible(cstr!("arm,armv8-timer"))?.ok_or(FdtError::NotFound)?;
+ fdt.root_mut().next_compatible(cstr!("arm,armv8-timer"))?.ok_or(FdtError::NotFound)?;
node.setprop_inplace(cstr!("interrupts"), value.as_bytes())
}
@@ -877,7 +875,7 @@
let avf_node = if let Some(node) = fdt.node_mut(cstr!("/avf"))? {
node
} else {
- fdt.root_mut()?.add_subnode(cstr!("avf"))?
+ fdt.root_mut().add_subnode(cstr!("avf"))?
};
// The node shouldn't already be present; if it is, return the error.
diff --git a/service_vm/demo_apk/src/main.rs b/service_vm/demo_apk/src/main.rs
index 0d1efb0..8ea4e65 100644
--- a/service_vm/demo_apk/src/main.rs
+++ b/service_vm/demo_apk/src/main.rs
@@ -23,10 +23,10 @@
result,
};
use vm_payload_bindgen::{
- attestation_status_t, AVmAttestationResult, AVmAttestationResult_free,
- AVmAttestationResult_getCertificateAt, AVmAttestationResult_getCertificateCount,
- AVmAttestationResult_getPrivateKey, AVmAttestationResult_resultToString,
- AVmAttestationResult_sign, AVmPayload_requestAttestation,
+ AVmAttestationResult, AVmAttestationResult_free, AVmAttestationResult_getCertificateAt,
+ AVmAttestationResult_getCertificateCount, AVmAttestationResult_getPrivateKey,
+ AVmAttestationResult_sign, AVmAttestationStatus, AVmAttestationStatus_toString,
+ AVmPayload_requestAttestation,
};
/// Entry point of the Service VM client.
@@ -56,7 +56,7 @@
ensure!(res.is_err());
let status = res.unwrap_err();
ensure!(
- status == attestation_status_t::ATTESTATION_ERROR_INVALID_CHALLENGE,
+ status == AVmAttestationStatus::ATTESTATION_ERROR_INVALID_CHALLENGE,
"Unexpected status: {:?}",
status
);
@@ -89,7 +89,7 @@
struct AttestationResult(NonNull<AVmAttestationResult>);
impl AttestationResult {
- fn request_attestation(challenge: &[u8]) -> result::Result<Self, attestation_status_t> {
+ fn request_attestation(challenge: &[u8]) -> result::Result<Self, AVmAttestationStatus> {
let mut res: *mut AVmAttestationResult = ptr::null_mut();
// SAFETY: It is safe as we only read the challenge within its bounds and the
// function does not retain any reference to it.
@@ -100,7 +100,7 @@
&mut res,
)
};
- if status == attestation_status_t::ATTESTATION_OK {
+ if status == AVmAttestationStatus::ATTESTATION_OK {
info!("Attestation succeeds. Status: {:?}", status_to_cstr(status));
let res = NonNull::new(res).expect("The attestation result is null");
Ok(Self(res))
@@ -219,11 +219,11 @@
Ok(signature.into_boxed_slice())
}
-fn status_to_cstr(status: attestation_status_t) -> &'static CStr {
+fn status_to_cstr(status: AVmAttestationStatus) -> &'static CStr {
// SAFETY: The function only reads the given enum status and returns a pointer to a
// static string.
- let message = unsafe { AVmAttestationResult_resultToString(status) };
- // SAFETY: The pointer returned by `AVmAttestationResult_resultToString` is guaranteed to
+ let message = unsafe { AVmAttestationStatus_toString(status) };
+ // SAFETY: The pointer returned by `AVmAttestationStatus_toString` is guaranteed to
// point to a valid C String that lives forever.
unsafe { CStr::from_ptr(message) }
}
diff --git a/service_vm/fake_chain/src/client_vm.rs b/service_vm/fake_chain/src/client_vm.rs
index 44ea898..6f956a7 100644
--- a/service_vm/fake_chain/src/client_vm.rs
+++ b/service_vm/fake_chain/src/client_vm.rs
@@ -29,7 +29,7 @@
HIDDEN_SIZE,
};
use log::error;
-use microdroid_kernel_hashes::{INITRD_DEBUG_HASH, KERNEL_HASH};
+use microdroid_kernel_hashes::OS_HASHES;
type CborResult<T> = result::Result<T, ciborium::value::Error>;
@@ -176,6 +176,7 @@
}
fn kernel_code_hash() -> Result<[u8; HASH_SIZE]> {
- let code_hash = [KERNEL_HASH, INITRD_DEBUG_HASH].concat();
+ let os_hashes = &OS_HASHES[0];
+ let code_hash = [os_hashes.kernel, os_hashes.initrd_debug].concat();
hash(&code_hash)
}
diff --git a/service_vm/kernel/Android.bp b/service_vm/kernel/Android.bp
deleted file mode 100644
index 79158e6..0000000
--- a/service_vm/kernel/Android.bp
+++ /dev/null
@@ -1,31 +0,0 @@
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-python_binary_host {
- name: "extract_microdroid_kernel_hashes",
- srcs: ["extract_microdroid_kernel_hashes.py"],
-}
-
-genrule {
- name: "microdroid_kernel_hashes_rs",
- srcs: [":microdroid_kernel"],
- out: ["lib.rs"],
- tools: [
- "extract_microdroid_kernel_hashes",
- "avbtool",
- ],
- cmd: "$(location extract_microdroid_kernel_hashes) $(location avbtool) $(in) > $(out)",
-}
-
-rust_library_rlib {
- name: "libmicrodroid_kernel_hashes",
- srcs: [":microdroid_kernel_hashes_rs"],
- crate_name: "microdroid_kernel_hashes",
- prefer_rlib: true,
- no_stdlibs: true,
- stdlibs: [
- "libcompiler_builtins.rust_sysroot",
- "libcore.rust_sysroot",
- ],
-}
diff --git a/service_vm/kernel/extract_microdroid_kernel_hashes.py b/service_vm/kernel/extract_microdroid_kernel_hashes.py
deleted file mode 100644
index 148e8be..0000000
--- a/service_vm/kernel/extract_microdroid_kernel_hashes.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Extracts the following hashes from the AVB footer of Microdroid's kernel:
-
-- kernel hash
-- initrd_normal hash
-- initrd_debug hash
-
-The hashes are written to stdout as a Rust file.
-
-In unsupportive environments such as x86, when the kernel is just an empty file,
-the output Rust file has the same hash constant fields for compatibility
-reasons, but all of them are empty.
-"""
-#!/usr/bin/env python3
-
-import sys
-import subprocess
-from typing import Dict
-
-PARTITION_NAME_BOOT = 'boot'
-PARTITION_NAME_INITRD_NORMAL = 'initrd_normal'
-PARTITION_NAME_INITRD_DEBUG = 'initrd_debug'
-
-def main(args):
- """Main function."""
- avbtool = args[0]
- kernel_image_path = args[1]
- hashes = collect_hashes(avbtool, kernel_image_path)
-
- print("//! This file is generated by extract_microdroid_kernel_hashes.py.")
- print("//! It contains the hashes of the kernel and initrds.\n")
- print("#![no_std]\n#![allow(missing_docs)]\n")
-
- # Microdroid's kernel is just an empty file in unsupportive environments
- # such as x86, in this case the hashes should be empty.
- if hashes.keys() != {PARTITION_NAME_BOOT,
- PARTITION_NAME_INITRD_NORMAL,
- PARTITION_NAME_INITRD_DEBUG}:
- print("/// The kernel is empty, no hashes are available.")
- hashes[PARTITION_NAME_BOOT] = ""
- hashes[PARTITION_NAME_INITRD_NORMAL] = ""
- hashes[PARTITION_NAME_INITRD_DEBUG] = ""
-
- print("pub const KERNEL_HASH: &[u8] = &["
- f"{format_hex_string(hashes[PARTITION_NAME_BOOT])}];\n")
- print("pub const INITRD_NORMAL_HASH: &[u8] = &["
- f"{format_hex_string(hashes[PARTITION_NAME_INITRD_NORMAL])}];\n")
- print("pub const INITRD_DEBUG_HASH: &[u8] = &["
- f"{format_hex_string(hashes[PARTITION_NAME_INITRD_DEBUG])}];")
-
-def collect_hashes(avbtool: str, kernel_image_path: str) -> Dict[str, str]:
- """Collects the hashes from the AVB footer of the kernel image."""
- hashes = {}
- with subprocess.Popen(
- [avbtool, 'print_partition_digests', '--image', kernel_image_path],
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
- stdout, _ = proc.communicate()
- for line in stdout.decode("utf-8").split("\n"):
- line = line.replace(" ", "").split(":")
- if len(line) == 2:
- partition_name, hash_ = line
- hashes[partition_name] = hash_
- return hashes
-
-def format_hex_string(hex_string: str) -> str:
- """Formats a hex string into a Rust array."""
- assert len(hex_string) % 2 == 0, \
- "Hex string must have even length: " + hex_string
- return ", ".join(["\n0x" + hex_string[i:i+2] if i % 32 == 0
- else "0x" + hex_string[i:i+2]
- for i in range(0, len(hex_string), 2)])
-
-if __name__ == '__main__':
- main(sys.argv[1:])
diff --git a/service_vm/requests/src/client_vm.rs b/service_vm/requests/src/client_vm.rs
index d4474cf..15a3bd0 100644
--- a/service_vm/requests/src/client_vm.rs
+++ b/service_vm/requests/src/client_vm.rs
@@ -29,7 +29,7 @@
use der::{Decode, Encode};
use diced_open_dice::{DiceArtifacts, HASH_SIZE};
use log::{error, info};
-use microdroid_kernel_hashes::{INITRD_DEBUG_HASH, INITRD_NORMAL_HASH, KERNEL_HASH};
+use microdroid_kernel_hashes::{HASH_SIZE as KERNEL_HASH_SIZE, OS_HASHES};
use service_vm_comm::{ClientVmAttestationParams, Csr, CsrPayload, RequestProcessingError};
use x509_cert::{certificate::Certificate, name::Name};
@@ -159,10 +159,10 @@
/// embedded during the build time.
fn validate_kernel_code_hash(dice_chain: &ClientVmDiceChain) -> Result<()> {
let kernel = dice_chain.microdroid_kernel();
- if expected_kernel_code_hash_normal()? == kernel.code_hash {
+ if matches_any_kernel_code_hash(&kernel.code_hash, /* is_debug= */ false)? {
return Ok(());
}
- if expected_kernel_code_hash_debug()? == kernel.code_hash {
+ if matches_any_kernel_code_hash(&kernel.code_hash, /* is_debug= */ true)? {
if dice_chain.all_entries_are_secure() {
error!("The Microdroid kernel has debug initrd but the DICE chain is secure");
return Err(RequestProcessingError::InvalidDiceChain);
@@ -173,18 +173,20 @@
Err(RequestProcessingError::InvalidDiceChain)
}
-fn expected_kernel_code_hash_normal() -> bssl_avf::Result<Vec<u8>> {
- let mut code_hash = [0u8; 64];
- code_hash[0..32].copy_from_slice(KERNEL_HASH);
- code_hash[32..].copy_from_slice(INITRD_NORMAL_HASH);
- Digester::sha512().digest(&code_hash)
-}
-
-fn expected_kernel_code_hash_debug() -> bssl_avf::Result<Vec<u8>> {
- let mut code_hash = [0u8; 64];
- code_hash[0..32].copy_from_slice(KERNEL_HASH);
- code_hash[32..].copy_from_slice(INITRD_DEBUG_HASH);
- Digester::sha512().digest(&code_hash)
+fn matches_any_kernel_code_hash(actual_code_hash: &[u8], is_debug: bool) -> bssl_avf::Result<bool> {
+ for os_hash in OS_HASHES {
+ let mut code_hash = [0u8; KERNEL_HASH_SIZE * 2];
+ code_hash[0..KERNEL_HASH_SIZE].copy_from_slice(&os_hash.kernel);
+ if is_debug {
+ code_hash[KERNEL_HASH_SIZE..].copy_from_slice(&os_hash.initrd_debug);
+ } else {
+ code_hash[KERNEL_HASH_SIZE..].copy_from_slice(&os_hash.initrd_normal);
+ }
+ if Digester::sha512().digest(&code_hash)? == actual_code_hash {
+ return Ok(true);
+ }
+ }
+ Ok(false)
}
fn expected_kernel_authority_hash(service_vm_entry: &Value) -> Result<[u8; HASH_SIZE]> {
diff --git a/service_vm/test_apk/assets/config.json b/service_vm/test_apk/assets/config.json
deleted file mode 100644
index caae3ce..0000000
--- a/service_vm/test_apk/assets/config.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "os": {
- "name": "microdroid"
- },
- "task": {
- "type": "microdroid_launcher",
- "command": "libvm_attestation_test_payload.so"
- },
- "export_tombstones": true
- }
\ No newline at end of file
diff --git a/service_vm/test_apk/src/java/com/android/virt/vm_attestation/testapp/VmAttestationTests.java b/service_vm/test_apk/src/java/com/android/virt/vm_attestation/testapp/VmAttestationTests.java
index 7771e83..af99711 100644
--- a/service_vm/test_apk/src/java/com/android/virt/vm_attestation/testapp/VmAttestationTests.java
+++ b/service_vm/test_apk/src/java/com/android/virt/vm_attestation/testapp/VmAttestationTests.java
@@ -40,7 +40,7 @@
@RunWith(Parameterized.class)
public class VmAttestationTests extends MicrodroidDeviceTestBase {
private static final String TAG = "VmAttestationTest";
- private static final String DEFAULT_CONFIG = "assets/config.json";
+ private static final String VM_PAYLOAD_PATH = "libvm_attestation_test_payload.so";
@Parameterized.Parameter(0)
public String mGki;
@@ -71,7 +71,7 @@
assumeFeatureEnabled(VirtualMachineManager.FEATURE_REMOTE_ATTESTATION);
VirtualMachineConfig.Builder builder =
- newVmConfigBuilderWithPayloadConfig(DEFAULT_CONFIG)
+ newVmConfigBuilderWithPayloadBinary(VM_PAYLOAD_PATH)
.setDebugLevel(DEBUG_LEVEL_FULL)
.setVmOutputCaptured(true);
VirtualMachineConfig config = builder.build();
diff --git a/service_vm/test_apk/src/native/main.rs b/service_vm/test_apk/src/native/main.rs
index d5d599d..199b45c 100644
--- a/service_vm/test_apk/src/native/main.rs
+++ b/service_vm/test_apk/src/native/main.rs
@@ -31,10 +31,10 @@
sync::{Arc, Mutex},
};
use vm_payload_bindgen::{
- attestation_status_t, AIBinder, AVmAttestationResult, AVmAttestationResult_free,
+ AIBinder, AVmAttestationResult, AVmAttestationResult_free,
AVmAttestationResult_getCertificateAt, AVmAttestationResult_getCertificateCount,
- AVmAttestationResult_getPrivateKey, AVmAttestationResult_resultToString,
- AVmAttestationResult_sign, AVmPayload_notifyPayloadReady,
+ AVmAttestationResult_getPrivateKey, AVmAttestationResult_sign, AVmAttestationStatus,
+ AVmAttestationStatus_toString, AVmPayload_notifyPayloadReady,
AVmPayload_requestAttestationForTesting, AVmPayload_runVsockRpcServer,
};
@@ -116,7 +116,7 @@
unsafe impl Send for AttestationResult {}
impl AttestationResult {
- fn request_attestation(challenge: &[u8]) -> result::Result<Self, attestation_status_t> {
+ fn request_attestation(challenge: &[u8]) -> result::Result<Self, AVmAttestationStatus> {
let mut res: *mut AVmAttestationResult = ptr::null_mut();
// SAFETY: It is safe as we only read the challenge within its bounds and the
// function does not retain any reference to it.
@@ -127,7 +127,7 @@
&mut res,
)
};
- if status == attestation_status_t::ATTESTATION_OK {
+ if status == AVmAttestationStatus::ATTESTATION_OK {
info!("Attestation succeeds. Status: {:?}", status_to_cstr(status));
let res = NonNull::new(res).expect("The attestation result is null");
Ok(Self(res))
@@ -261,11 +261,11 @@
Ok(signature.into_boxed_slice())
}
-fn status_to_cstr(status: attestation_status_t) -> &'static CStr {
+fn status_to_cstr(status: AVmAttestationStatus) -> &'static CStr {
// SAFETY: The function only reads the given enum status and returns a pointer to a
// static string.
- let message = unsafe { AVmAttestationResult_resultToString(status) };
- // SAFETY: The pointer returned by `AVmAttestationResult_resultToString` is guaranteed to
+ let message = unsafe { AVmAttestationStatus_toString(status) };
+ // SAFETY: The pointer returned by `AVmAttestationStatus_toString` is guaranteed to
// point to a valid C String that lives forever.
unsafe { CStr::from_ptr(message) }
}
diff --git a/tests/hostside/Android.bp b/tests/hostside/Android.bp
index 13a9925..41d244d 100644
--- a/tests/hostside/Android.bp
+++ b/tests/hostside/Android.bp
@@ -35,6 +35,7 @@
"initrd_bootconfig",
"lpmake",
"lpunpack",
+ "lz4",
"sign_virt_apex",
"simg2img",
"dtdiff",
diff --git a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
index 9b95461..4f502ab 100644
--- a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
@@ -553,7 +553,8 @@
mMicrodroidDevice.enableAdbRoot();
CommandRunner microdroid = new CommandRunner(mMicrodroidDevice);
- microdroid.run(crashCommand);
+ // The VM may crash while crashCommand is running, so a failing command is OK.
+ microdroid.tryRun(crashCommand);
// check until microdroid is shut down
waitForCrosvmExit(android, testStartTime);
@@ -953,7 +954,43 @@
assertThat(hasDebugPolicy).isFalse();
}
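+ // Returns true if the file at the given path is lz4-compressed, determined by test-decoding
+ // it with "lz4 -t".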
+ private boolean isLz4(String path) throws Exception {
+ File lz4tool = findTestFile("lz4");
+ CommandResult result =
+ new RunUtil().runTimedCmd(5000, lz4tool.getAbsolutePath(), "-t", path);
+ return result.getStatus() == CommandStatus.SUCCESS;
+ }
+
+ private void decompressLz4(String inputPath, String outputPath) throws Exception {
+ File lz4tool = findTestFile("lz4");
+ CommandResult result =
+ new RunUtil()
+ .runTimedCmd(
+ 5000, lz4tool.getAbsolutePath(), "-d", "-f", inputPath, outputPath);
+ String out = result.getStdout();
+ String err = result.getStderr();
+ assertWithMessage(
+ "lz4 image "
+ + inputPath
+ + " decompression failed."
+ + "\n\tout: "
+ + out
+ + "\n\terr: "
+ + err
+ + "\n")
+ .about(command_results())
+ .that(result)
+ .isSuccess();
+ }
+
private String avbInfo(String image_path) throws Exception {
+ if (isLz4(image_path)) {
+ File decompressedImage = FileUtil.createTempFile("decompressed", ".img");
+ decompressedImage.deleteOnExit();
+ decompressLz4(image_path, decompressedImage.getAbsolutePath());
+ image_path = decompressedImage.getAbsolutePath();
+ }
+
File avbtool = findTestFile("avbtool");
List<String> command =
Arrays.asList(avbtool.getAbsolutePath(), "info_image", "--image", image_path);
diff --git a/tests/libs/libdts/Android.bp b/tests/libs/libdts/Android.bp
new file mode 100644
index 0000000..512c50b
--- /dev/null
+++ b/tests/libs/libdts/Android.bp
@@ -0,0 +1,17 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library_rlib {
+ name: "libdts",
+ crate_name: "dts",
+ defaults: ["avf_build_flags_rust"],
+ srcs: ["src/lib.rs"],
+ edition: "2021",
+ prefer_rlib: true,
+ rustlibs: [
+ "libanyhow",
+ "liblibfdt",
+ ],
+ apex_available: ["com.android.virt"],
+}
diff --git a/tests/libs/libdts/README.md b/tests/libs/libdts/README.md
new file mode 100644
index 0000000..ed63bd0
--- /dev/null
+++ b/tests/libs/libdts/README.md
@@ -0,0 +1,16 @@
+Device tree source (DTS) decompiler on an Android device.
+
+This is an alternative to dtdiff, which only supports bash.
+
+How to use for rust_test
+========================
+
+The following dependencies are needed in addition to libdts.
+
+```
+rust_test {
+ ...
+ data_bins: ["dtc_static"],
+ compile_multilib: "first",
+}
+```
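+
+With those in place, a test might compare device trees like this (an illustrative sketch; the
+reference dtb path is hypothetical):
+
+```rust
+use std::path::Path;
+use dts::Dts;
+
+// Decompile both sources to sorted dts and compare them textually.
+let expected = Dts::from_dtb(Path::new("expected.dtb")).unwrap();
+let actual = Dts::from_fs(Path::new("/proc/device-tree")).unwrap();
+assert_eq!(expected, actual);
+```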
diff --git a/tests/libs/libdts/src/lib.rs b/tests/libs/libdts/src/lib.rs
new file mode 100644
index 0000000..0ee9b66
--- /dev/null
+++ b/tests/libs/libdts/src/lib.rs
@@ -0,0 +1,75 @@
+// Copyright 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Device tree source (dts) for comparing device tree contents
+//! i.e. sorted dts decompiled by `dtc -s -O dts`.
+
+use anyhow::{anyhow, Result};
+use libfdt::Fdt;
+use std::io::Write;
+use std::path::Path;
+use std::process::{Command, Stdio};
+
+/// Device tree source (dts)
+#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
+pub struct Dts {
+ dts: String,
+}
+
+impl Dts {
+ /// Creates a device tree source from /proc/device-tree style directory
+ pub fn from_fs(path: &Path) -> Result<Self> {
+ let path = path.to_str().unwrap();
+ let res = Command::new("./dtc_static")
+ .args(["-f", "-s", "-I", "fs", "-O", "dts", path])
+ .output()?;
+ if !res.status.success() {
+ return Err(anyhow!("Failed to run dtc_static, res={res:?}"));
+ }
+ Ok(Self { dts: String::from_utf8(res.stdout)? })
+ }
+
+ /// Creates a device tree source from dtb
+ pub fn from_dtb(path: &Path) -> Result<Self> {
+ let path = path.to_str().unwrap();
+ let res = Command::new("./dtc_static")
+ .args(["-f", "-s", "-I", "dtb", "-O", "dts", path])
+ .output()?;
+ if !res.status.success() {
+ return Err(anyhow!("Failed to run dtc_static, res={res:?}"));
+ }
+ Ok(Self { dts: String::from_utf8(res.stdout)? })
+ }
+
+ /// Creates a device tree source from Fdt
+ pub fn from_fdt(fdt: &Fdt) -> Result<Self> {
+ let mut dtc = Command::new("./dtc_static")
+ .args(["-f", "-s", "-I", "dtb", "-O", "dts"])
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .spawn()?;
+
+ {
+ let mut stdin = dtc.stdin.take().unwrap();
+ stdin.write_all(fdt.as_slice())?;
+ // Explicitly drop stdin to avoid indefinite blocking
+ }
+
+ let res = dtc.wait_with_output()?;
+ if !res.status.success() {
+ return Err(anyhow!("Failed to run dtc_static, res={res:?}"));
+ }
+ Ok(Self { dts: String::from_utf8(res.stdout)? })
+ }
+}
diff --git a/tests/testapk_no_perm/Android.bp b/tests/testapk_no_perm/Android.bp
new file mode 100644
index 0000000..22616de
--- /dev/null
+++ b/tests/testapk_no_perm/Android.bp
@@ -0,0 +1,26 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+android_test {
+ name: "MicrodroidTestAppNoPerm",
+ static_libs: [
+ "MicrodroidDeviceTestHelper",
+ "MicrodroidTestHelper",
+ "androidx.test.runner",
+ "androidx.test.ext.junit",
+ "com.android.microdroid.testservice-java",
+ "truth",
+ "compatibility-common-util-devicesidelib",
+ ],
+ jni_libs: [
+ "MicrodroidTestNativeLib",
+ ],
+ test_suites: [
+ "general-tests",
+ "cts",
+ ],
+ srcs: ["src/java/**/*.java"],
+ defaults: ["MicrodroidTestAppsDefaults"],
+ min_sdk_version: "33",
+}
diff --git a/tests/testapk_no_perm/AndroidManifest.xml b/tests/testapk_no_perm/AndroidManifest.xml
new file mode 100644
index 0000000..44aa92a
--- /dev/null
+++ b/tests/testapk_no_perm/AndroidManifest.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2024 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.microdroid.test_no_perm">
+ <uses-sdk android:minSdkVersion="33" android:targetSdkVersion="33" />
+ <uses-feature android:name="android.software.virtualization_framework" android:required="false" />
+ <application />
+ <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+ android:targetPackage="com.android.microdroid.test_no_perm"
+ android:label="No Permission Microdroid Test" />
+</manifest>
diff --git a/tests/testapk_no_perm/AndroidTest.xml b/tests/testapk_no_perm/AndroidTest.xml
new file mode 100644
index 0000000..d4a818f
--- /dev/null
+++ b/tests/testapk_no_perm/AndroidTest.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2024 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs Microdroid Tests with no permission">
+ <option name="test-suite-tag" value="cts" />
+ <option name="config-descriptor:metadata" key="component" value="security" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_instant_app" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_multi_abi" />
+ <option name="config-descriptor:metadata" key="parameter" value="secondary_user" />
+ <target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
+ <option name="test-file-name" value="MicrodroidTestAppNoPerm.apk" />
+ </target_preparer>
+ <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
+ <option name="package" value="com.android.microdroid.test_no_perm" />
+ <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
+ <option name="shell-timeout" value="300000" />
+ <option name="test-timeout" value="300000" />
+ </test>
+</configuration>
diff --git a/tests/testapk_no_perm/src/java/com/android/microdroid/test/MicrodroidTestAppNoPerm.java b/tests/testapk_no_perm/src/java/com/android/microdroid/test/MicrodroidTestAppNoPerm.java
new file mode 100644
index 0000000..1772e6b
--- /dev/null
+++ b/tests/testapk_no_perm/src/java/com/android/microdroid/test/MicrodroidTestAppNoPerm.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.microdroid.test;
+
+import android.system.virtualmachine.VirtualMachineConfig;
+
+import com.android.compatibility.common.util.CddTest;
+import com.android.microdroid.test.device.MicrodroidDeviceTestBase;
+
+import static com.google.common.truth.Truth.assertThat;
+import static org.junit.Assert.assertThrows;
+
+import org.junit.Before;
+import org.junit.runners.Parameterized;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+/**
+ * Test that the android.permission.MANAGE_VIRTUAL_MACHINE permission is enforced and that an app
+ * cannot launch a VM without it.
+ */
+@RunWith(Parameterized.class)
+public class MicrodroidTestAppNoPerm extends MicrodroidDeviceTestBase {
+
+ @Parameterized.Parameters(name = "protectedVm={0}")
+ public static Object[] protectedVmConfigs() {
+ return new Object[] {false, true};
+ }
+
+ @Parameterized.Parameter public boolean mProtectedVm;
+
+ @Before
+ public void setup() {
+ prepareTestSetup(mProtectedVm, null);
+ }
+
+ @Test
+ @CddTest(
+ requirements = {
+ "9.17/C-1-1",
+ "9.17/C-1-2",
+ "9.17/C-1-4",
+ })
+ public void createVmRequiresPermission() {
+ assumeSupportedDevice();
+
+ VirtualMachineConfig config =
+ newVmConfigBuilderWithPayloadBinary("MicrodroidTestNativeLib.so").build();
+
+ SecurityException e =
+ assertThrows(
+ SecurityException.class,
+ () -> forceCreateNewVirtualMachine("test_vm_requires_permission", config));
+ assertThat(e)
+ .hasMessageThat()
+ .contains("android.permission.MANAGE_VIRTUAL_MACHINE permission");
+ }
+}
diff --git a/virtualizationmanager/fsfdt/Android.bp b/virtualizationmanager/fsfdt/Android.bp
index 7a1e5ed..1d03522 100644
--- a/virtualizationmanager/fsfdt/Android.bp
+++ b/virtualizationmanager/fsfdt/Android.bp
@@ -41,6 +41,7 @@
defaults: ["libfsfdt_default"],
data: ["testdata/**/*"],
data_bins: ["dtc_static"],
- rustlibs: ["libtempfile"],
+ prefer_rlib: true,
+ rustlibs: ["libdts"],
compile_multilib: "first",
}
diff --git a/virtualizationmanager/fsfdt/src/lib.rs b/virtualizationmanager/fsfdt/src/lib.rs
index 84e50c1..e176b7b 100644
--- a/virtualizationmanager/fsfdt/src/lib.rs
+++ b/virtualizationmanager/fsfdt/src/lib.rs
@@ -114,51 +114,20 @@
#[cfg(test)]
mod test {
use super::*;
- use std::io::Write;
- use std::process::Command;
- use tempfile::NamedTempFile;
+ use dts::Dts;
const TEST_FS_FDT_ROOT_PATH: &str = "testdata/fs";
const BUF_SIZE_MAX: usize = 1024;
- fn dts_from_fs(path: &Path) -> String {
- let path = path.to_str().unwrap();
- let res = Command::new("./dtc_static")
- .args(["-f", "-s", "-I", "fs", "-O", "dts", path])
- .output()
- .unwrap();
- assert!(res.status.success(), "{res:?}");
- String::from_utf8(res.stdout).unwrap()
- }
-
- fn dts_from_dtb(path: &Path) -> String {
- let path = path.to_str().unwrap();
- let res = Command::new("./dtc_static")
- .args(["-f", "-s", "-I", "dtb", "-O", "dts", path])
- .output()
- .unwrap();
- assert!(res.status.success(), "{res:?}");
- String::from_utf8(res.stdout).unwrap()
- }
-
- fn to_temp_file(fdt: &Fdt) -> Result<NamedTempFile> {
- let mut file = NamedTempFile::new()?;
- file.as_file_mut().write_all(fdt.as_slice())?;
- file.as_file_mut().sync_all()?;
-
- Ok(file)
- }
-
#[test]
fn test_from_fs() {
let fs_path = Path::new(TEST_FS_FDT_ROOT_PATH);
let mut data = vec![0_u8; BUF_SIZE_MAX];
let fdt = Fdt::from_fs(fs_path, &mut data).unwrap();
- let file = to_temp_file(fdt).unwrap();
- let expected = dts_from_fs(fs_path);
- let actual = dts_from_dtb(file.path());
+ let expected = Dts::from_fs(fs_path).unwrap();
+ let actual = Dts::from_fdt(fdt).unwrap();
assert_eq!(&expected, &actual);
// Again append fdt from TEST_FS_FDT_ROOT_PATH at root & ensure it succeeds when some
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index f06a3ab..ea3a481 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -83,7 +83,7 @@
use std::io::{BufRead, BufReader, Error, ErrorKind, Seek, SeekFrom, Write};
use std::iter;
use std::num::{NonZeroU16, NonZeroU32};
-use std::os::unix::io::{FromRawFd, IntoRawFd};
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use std::os::unix::raw::pid_t;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, Weak};
@@ -493,6 +493,11 @@
.try_for_each(check_label_for_partition)
.or_service_specific_exception(-1)?;
+ // Check that files for payloads and bases do NOT come from /vendor or /odm, as they may
+ // have unstable interfaces.
+ // TODO(b/316431494): remove once Treble interfaces are stabilized.
+ check_partitions_for_files(config).or_service_specific_exception(-1)?;
+
let kernel = maybe_clone_file(&config.kernel)?;
let initrd = maybe_clone_file(&config.initrd)?;
@@ -864,6 +869,38 @@
Ok(vm_config)
}
+fn check_partition_for_file(fd: &ParcelFileDescriptor) -> Result<()> {
+ let path = format!("/proc/self/fd/{}", fd.as_raw_fd());
+ let link = fs::read_link(&path).context(format!("can't read_link {path}"))?;
+
+ // microdroid vendor image is OK
+ if cfg!(vendor_modules) && link == Path::new("/vendor/etc/avf/microdroid/microdroid_vendor.img")
+ {
+ return Ok(());
+ }
+
+ if link.starts_with("/vendor") || link.starts_with("/odm") {
+ bail!("vendor or odm file {} can't be used for VM", link.display());
+ }
+
+ Ok(())
+}
+
+fn check_partitions_for_files(config: &VirtualMachineRawConfig) -> Result<()> {
+ config
+ .disks
+ .iter()
+ .flat_map(|disk| disk.partitions.iter())
+ .filter_map(|partition| partition.image.as_ref())
+ .try_for_each(check_partition_for_file)?;
+
+ config.kernel.as_ref().map_or(Ok(()), check_partition_for_file)?;
+ config.initrd.as_ref().map_or(Ok(()), check_partition_for_file)?;
+ config.bootloader.as_ref().map_or(Ok(()), check_partition_for_file)?;
+
+ Ok(())
+}
+
fn load_vm_payload_config_from_file(apk_file: &File, config_path: &str) -> Result<VmPayloadConfig> {
let mut apk_zip = ZipArchive::new(apk_file)?;
let config_file = apk_zip.by_name(config_path)?;
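The check above works by resolving each `ParcelFileDescriptor` back to a path through the `/proc/self/fd/<fd>` symlink. A standalone sketch of that resolution step, with an illustrative path only:

```
use std::fs::{self, File};
use std::io;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;

// /proc/self/fd/<fd> is a symlink to the file backing an open descriptor.
fn backing_path(file: &File) -> io::Result<PathBuf> {
    fs::read_link(format!("/proc/self/fd/{}", file.as_raw_fd()))
}

fn main() -> io::Result<()> {
    let f = File::open("/system/etc/hosts")?; // illustrative path only
    println!("fd resolves to {}", backing_path(&f)?.display());
    Ok(())
}
```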
diff --git a/virtualizationmanager/src/crosvm.rs b/virtualizationmanager/src/crosvm.rs
index ddd3e68..97a27e0 100644
--- a/virtualizationmanager/src/crosvm.rs
+++ b/virtualizationmanager/src/crosvm.rs
@@ -595,6 +595,35 @@
}
}
+// Check the Cpus_allowed_list in /proc/self/status to see whether this process may run on all CPUs
+fn check_if_all_cpus_allowed() -> Result<bool> {
+ let file = read_to_string("/proc/self/status")?;
+ let lines: Vec<_> = file.split('\n').collect();
+
+ for line in lines {
+ if line.contains("Cpus_allowed_list") {
+ let prop: Vec<_> = line.split_whitespace().collect();
+ if prop.len() != 2 {
+ return Ok(false);
+ }
+ let cpu_list: Vec<_> = prop[1].split('-').collect();
+ // Only a contiguous CPU list is allowed
+ if cpu_list.len() != 2 {
+ return Ok(false);
+ }
+ if let Some(cpus) = get_num_cpus() {
+ let max_cpu = cpu_list[1].parse::<usize>()?;
+ if max_cpu == cpus - 1 {
+ return Ok(true);
+ } else {
+ return Ok(false);
+ }
+ }
+ }
+ }
+ Ok(false)
+}
+
// Get guest time from /proc/[crosvm pid]/stat
fn get_guest_time(pid: u32) -> Result<i64> {
let file = read_to_string(format!("/proc/{}/stat", pid))?;
@@ -809,7 +838,7 @@
}
if config.host_cpu_topology {
- if cfg!(virt_cpufreq) {
+ if cfg!(virt_cpufreq) && check_if_all_cpus_allowed()? {
command.arg("--host-cpu-topology");
cfg_if::cfg_if! {
if #[cfg(any(target_arch = "aarch64"))] {
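The `Cpus_allowed_list` check above only accepts a contiguous `0-N` range covering every online CPU before passing `--host-cpu-topology` to crosvm. A self-contained sketch of that parsing logic, assuming an 8-CPU device purely for illustration:

```
// Accepts only a contiguous range whose upper bound is num_cpus - 1.
fn covers_all_cpus(line: &str, num_cpus: usize) -> bool {
    let fields: Vec<&str> = line.split_whitespace().collect();
    if fields.len() != 2 {
        return false;
    }
    let range: Vec<&str> = fields[1].split('-').collect();
    if range.len() != 2 {
        return false;
    }
    matches!(range[1].parse::<usize>(), Ok(max) if max == num_cpus - 1)
}

fn main() {
    assert!(covers_all_cpus("Cpus_allowed_list:\t0-7", 8));
    assert!(!covers_all_cpus("Cpus_allowed_list:\t0-3", 8)); // restricted affinity
}
```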
diff --git a/virtualizationmanager/src/dt_overlay.rs b/virtualizationmanager/src/dt_overlay.rs
index b39ba3a..108ed61 100644
--- a/virtualizationmanager/src/dt_overlay.rs
+++ b/virtualizationmanager/src/dt_overlay.rs
@@ -61,8 +61,8 @@
let fdt =
Fdt::create_empty_tree(buffer).map_err(|e| anyhow!("Failed to create empty Fdt: {e:?}"))?;
- let root = fdt.root_mut().map_err(|e| anyhow!("Failed to get root node: {e:?}"))?;
- let mut fragment = root
+ let mut fragment = fdt
+ .root_mut()
.add_subnode(cstr!("fragment@0"))
.map_err(|e| anyhow!("Failed to add fragment node: {e:?}"))?;
fragment
diff --git a/virtualizationservice/Android.bp b/virtualizationservice/Android.bp
index 2cbc805..fc7fcd2 100644
--- a/virtualizationservice/Android.bp
+++ b/virtualizationservice/Android.bp
@@ -5,7 +5,10 @@
rust_defaults {
name: "virtualizationservice_defaults",
crate_name: "virtualizationservice",
- defaults: ["avf_build_flags_rust"],
+ defaults: [
+ "avf_build_flags_rust",
+ "secretkeeper_use_latest_hal_aidl_rust",
+ ],
edition: "2021",
srcs: ["src/main.rs"],
// Only build on targets which crosvm builds on.
@@ -23,6 +26,7 @@
rustlibs: [
"android.hardware.security.rkp-V3-rust",
"android.system.virtualizationcommon-rust",
+ "android.system.virtualizationmaintenance-rust",
"android.system.virtualizationservice-rust",
"android.system.virtualizationservice_internal-rust",
"android.system.virtualmachineservice-rust",
@@ -35,11 +39,13 @@
"libhypervisor_props",
"liblazy_static",
"liblibc",
+ "liblibsqlite3_sys",
"liblog_rust",
"libnix",
"libopenssl",
"librand",
"librkpd_client",
+ "librusqlite",
"librustutils",
"libstatslog_virtualization_rust",
"libtombstoned_client_rust",
@@ -66,7 +72,10 @@
rust_test {
name: "virtualizationservice_test",
- defaults: ["virtualizationservice_defaults"],
+ defaults: [
+ "authgraph_use_latest_hal_aidl_rust",
+ "virtualizationservice_defaults",
+ ],
test_suites: ["general-tests"],
data: [
":test_rkp_cert_chain",
diff --git a/virtualizationservice/aidl/Android.bp b/virtualizationservice/aidl/Android.bp
index 8ca375a..112e1cc 100644
--- a/virtualizationservice/aidl/Android.bp
+++ b/virtualizationservice/aidl/Android.bp
@@ -55,6 +55,26 @@
}
aidl_interface {
+ name: "android.system.virtualizationmaintenance",
+ srcs: ["android/system/virtualizationmaintenance/**/*.aidl"],
+ unstable: true,
+ backend: {
+ java: {
+ sdk_version: "module_current",
+ apex_available: [
+ "com.android.virt",
+ ],
+ },
+ rust: {
+ enabled: true,
+ apex_available: [
+ "com.android.virt",
+ ],
+ },
+ },
+}
+
+aidl_interface {
name: "android.system.virtualmachineservice",
srcs: ["android/system/virtualmachineservice/**/*.aidl"],
imports: [
diff --git a/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl b/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl
new file mode 100644
index 0000000..76d7309
--- /dev/null
+++ b/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.system.virtualizationmaintenance;
+
+interface IVirtualizationMaintenance {
+ /**
+ * Notification that an app has been permanently removed, to allow related global state to
+ * be removed.
+ *
+ * @param userId The Android user ID for whom the notification applies.
+ * @param appId The Android app ID of the app that was removed.
+ */
+ void appRemoved(int userId, int appId);
+
+ /**
+ * Notification that a user has been removed, to allow related global state to be removed.
+ *
+ * @param userId The Android user ID of the user.
+ */
+ void userRemoved(int userId);
+
+ // TODO(b/294177871): Something for daily reconciliation
+}
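For illustration only, a hypothetical Rust client of this unstable interface might look like the sketch below; it assumes the Rust AIDL backend and the `android.system.virtualizationmaintenance` lazy service name registered by `virtualizationservice` later in this change, and in practice the caller would need the corresponding SELinux policy:

```
use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance::IVirtualizationMaintenance::IVirtualizationMaintenance;
use anyhow::{anyhow, Result};

fn notify_app_removed(user_id: i32, app_id: i32) -> Result<()> {
    // Connect to the lazy maintenance service registered by virtualizationservice.
    let maintenance = binder::get_interface::<dyn IVirtualizationMaintenance>(
        "android.system.virtualizationmaintenance",
    )
    .map_err(|e| anyhow!("failed to connect to maintenance service: {e:?}"))?;
    maintenance
        .appRemoved(user_id, app_id)
        .map_err(|e| anyhow!("appRemoved call failed: {e:?}"))?;
    Ok(())
}
```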
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
index fc36190..8af881b 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
@@ -96,4 +96,11 @@
* Allocate an instance_id to the (newly created) VM.
*/
byte[64] allocateInstanceId();
+
+ /**
+ * Notification that state associated with a VM should be removed.
+ *
+ * @param instanceId The ID for the VM.
+ */
+ void removeVmInstance(in byte[64] instanceId);
}
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index 79ff89a..c0024f1 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -15,11 +15,13 @@
//! Implementation of the AIDL interface of the VirtualizationService.
use crate::atom::{forward_vm_booted_atom, forward_vm_creation_atom, forward_vm_exited_atom};
+use crate::maintenance;
use crate::remote_provisioning;
use crate::rkpvm::{generate_ecdsa_p256_key_pair, request_attestation};
use crate::{get_calling_pid, get_calling_uid, REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME};
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
use android_system_virtualizationcommon::aidl::android::system::virtualizationcommon;
+use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance;
use android_system_virtualizationservice::aidl::android::system::virtualizationservice;
use android_system_virtualizationservice_internal as android_vs_internal;
use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice;
@@ -49,6 +51,7 @@
use std::sync::{Arc, Mutex, Weak};
use tombstoned_client::{DebuggerdDumpType, TombstonedConnection};
use virtualizationcommon::Certificate::Certificate;
+use virtualizationmaintenance::IVirtualizationMaintenance::IVirtualizationMaintenance;
use virtualizationservice::{
AssignableDevice::AssignableDevice, VirtualMachineDebugInfo::VirtualMachineDebugInfo,
};
@@ -68,8 +71,6 @@
/// The unique ID of a VM used (together with a port number) for vsock communication.
pub type Cid = u32;
-pub const BINDER_SERVICE_IDENTIFIER: &str = "android.system.virtualizationservice";
-
/// Directory in which to write disk image files used while running VMs.
pub const TEMPORARY_DIRECTORY: &str = "/data/misc/virtualizationservice";
@@ -160,14 +161,15 @@
/// Singleton service for allocating globally-unique VM resources, such as the CID, and running
/// singleton servers, like tombstone receiver.
-#[derive(Debug, Default)]
+#[derive(Clone)]
pub struct VirtualizationServiceInternal {
state: Arc<Mutex<GlobalState>>,
}
impl VirtualizationServiceInternal {
pub fn init() -> VirtualizationServiceInternal {
- let service = VirtualizationServiceInternal::default();
+ let service =
+ VirtualizationServiceInternal { state: Arc::new(Mutex::new(GlobalState::new())) };
std::thread::spawn(|| {
if let Err(e) = handle_stream_connection_tombstoned() {
@@ -390,6 +392,41 @@
info!("Allocated a VM's instance_id: {:?}, for uid: {:?}", hex::encode(id), uid);
Ok(id)
}
+
+ fn removeVmInstance(&self, instance_id: &[u8; 64]) -> binder::Result<()> {
+ let state = &mut *self.state.lock().unwrap();
+ if let Some(sk_state) = &mut state.sk_state {
+ info!("removeVmInstance(): delete secret");
+ sk_state.delete_ids(&[*instance_id]);
+ } else {
+ info!("ignoring removeVmInstance() as no ISecretkeeper");
+ }
+ Ok(())
+ }
+}
+
+impl IVirtualizationMaintenance for VirtualizationServiceInternal {
+ fn appRemoved(&self, user_id: i32, app_id: i32) -> binder::Result<()> {
+ let state = &mut *self.state.lock().unwrap();
+ if let Some(sk_state) = &mut state.sk_state {
+ info!("packageRemoved(user_id={user_id}, app_id={app_id})");
+ sk_state.delete_ids_for_app(user_id, app_id).or_service_specific_exception(-1)?;
+ } else {
+ info!("ignoring packageRemoved(user_id={user_id}, app_id={app_id})");
+ }
+ Ok(())
+ }
+
+ fn userRemoved(&self, user_id: i32) -> binder::Result<()> {
+ let state = &mut *self.state.lock().unwrap();
+ if let Some(sk_state) = &mut state.sk_state {
+ info!("userRemoved({user_id})");
+ sk_state.delete_ids_for_user(user_id).or_service_specific_exception(-1)?;
+ } else {
+ info!("ignoring userRemoved(user_id={user_id})");
+ }
+ Ok(())
+ }
}
// KEEP IN SYNC WITH assignable_devices.xsd
@@ -474,7 +511,6 @@
/// The mutable state of the VirtualizationServiceInternal. There should only be one instance
/// of this struct.
-#[derive(Debug, Default)]
struct GlobalState {
/// VM contexts currently allocated to running VMs. A CID is never recycled as long
/// as there is a strong reference held by a GlobalVmContext.
@@ -482,9 +518,20 @@
/// Cached read-only FD of VM DTBO file. Also serves as a lock for creating the file.
dtbo_file: Mutex<Option<File>>,
+
+ /// State relating to secrets held by (optional) Secretkeeper instance on behalf of VMs.
+ sk_state: Option<maintenance::State>,
}
impl GlobalState {
+ fn new() -> Self {
+ Self {
+ held_contexts: HashMap::new(),
+ dtbo_file: Mutex::new(None),
+ sk_state: maintenance::State::new(),
+ }
+ }
+
/// Get the next available CID, or an error if we have run out. The last CID used is stored in
/// a system property so that restart of virtualizationservice doesn't reuse CID while the host
/// Android is up.
@@ -729,7 +776,6 @@
#[cfg(test)]
mod tests {
use super::*;
- use std::fs;
const TEST_RKP_CERT_CHAIN_PATH: &str = "testdata/rkp_cert_chain.der";
diff --git a/virtualizationservice/src/main.rs b/virtualizationservice/src/main.rs
index ad21e89..bcea1bc 100644
--- a/virtualizationservice/src/main.rs
+++ b/virtualizationservice/src/main.rs
@@ -16,18 +16,21 @@
mod aidl;
mod atom;
+mod maintenance;
mod remote_provisioning;
mod rkpvm;
-use crate::aidl::{
- remove_temporary_dir, BINDER_SERVICE_IDENTIFIER, TEMPORARY_DIRECTORY,
- VirtualizationServiceInternal
-};
+use crate::aidl::{remove_temporary_dir, TEMPORARY_DIRECTORY, VirtualizationServiceInternal};
use android_logger::{Config, FilterBuilder};
-use android_system_virtualizationservice_internal::aidl::android::system::virtualizationservice_internal::IVirtualizationServiceInternal::BnVirtualizationServiceInternal;
-use anyhow::Error;
+use android_system_virtualizationservice_internal::aidl::android::system::{
+ virtualizationservice_internal::IVirtualizationServiceInternal::BnVirtualizationServiceInternal
+};
+use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance::{
+ IVirtualizationMaintenance::BnVirtualizationMaintenance
+};
+use anyhow::{bail, Context, Error, Result};
use binder::{register_lazy_service, BinderFeatures, ProcessState, ThreadState};
-use log::{info, LevelFilter};
+use log::{error, info, LevelFilter};
use std::fs::{create_dir, read_dir};
use std::os::unix::raw::{pid_t, uid_t};
use std::path::Path;
@@ -35,6 +38,8 @@
const LOG_TAG: &str = "VirtualizationService";
pub(crate) const REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME: &str =
"android.hardware.security.keymint.IRemotelyProvisionedComponent/avf";
+const INTERNAL_SERVICE_NAME: &str = "android.system.virtualizationservice";
+const MAINTENANCE_SERVICE_NAME: &str = "android.system.virtualizationmaintenance";
fn get_calling_pid() -> pid_t {
ThreadState::get_calling_pid()
@@ -45,6 +50,13 @@
}
fn main() {
+ if let Err(e) = try_main() {
+ error!("failed with {e:?}");
+ std::process::exit(1);
+ }
+}
+
+fn try_main() -> Result<()> {
android_logger::init_once(
Config::default()
.with_tag(LOG_TAG)
@@ -57,31 +69,42 @@
),
);
- clear_temporary_files().expect("Failed to delete old temporary files");
+ clear_temporary_files().context("Failed to delete old temporary files")?;
let common_dir_path = Path::new(TEMPORARY_DIRECTORY).join("common");
- create_dir(common_dir_path).expect("Failed to create common directory");
+ create_dir(common_dir_path).context("Failed to create common directory")?;
ProcessState::start_thread_pool();
+ // One instance of `VirtualizationServiceInternal` implements both the internal interface
+ // and (optionally) the maintenance interface.
let service = VirtualizationServiceInternal::init();
- let service = BnVirtualizationServiceInternal::new_binder(service, BinderFeatures::default());
- register_lazy_service(BINDER_SERVICE_IDENTIFIER, service.as_binder()).unwrap();
- info!("Registered Binder service {}.", BINDER_SERVICE_IDENTIFIER);
+ let internal_service =
+ BnVirtualizationServiceInternal::new_binder(service.clone(), BinderFeatures::default());
+ register(INTERNAL_SERVICE_NAME, internal_service)?;
if cfg!(remote_attestation) {
// The IRemotelyProvisionedComponent service is only supposed to be triggered by rkpd for
// RKP VM attestation.
let remote_provisioning_service = remote_provisioning::new_binder();
- register_lazy_service(
- REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME,
- remote_provisioning_service.as_binder(),
- )
- .unwrap();
- info!("Registered Binder service {}.", REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME);
+ register(REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME, remote_provisioning_service)?;
+ }
+
+ if cfg!(llpvm_changes) {
+ let maintenance_service =
+ BnVirtualizationMaintenance::new_binder(service.clone(), BinderFeatures::default());
+ register(MAINTENANCE_SERVICE_NAME, maintenance_service)?;
}
ProcessState::join_thread_pool();
+ bail!("Thread pool unexpectedly ended");
+}
+
+fn register<T: binder::FromIBinder + ?Sized>(name: &str, service: binder::Strong<T>) -> Result<()> {
+ register_lazy_service(name, service.as_binder())
+ .with_context(|| format!("Failed to register {name}"))?;
+ info!("Registered Binder service {name}.");
+ Ok(())
}
/// Remove any files under `TEMPORARY_DIRECTORY`.
diff --git a/virtualizationservice/src/maintenance.rs b/virtualizationservice/src/maintenance.rs
new file mode 100644
index 0000000..7fc2f37
--- /dev/null
+++ b/virtualizationservice/src/maintenance.rs
@@ -0,0 +1,256 @@
+// Copyright 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper::{
+ ISecretkeeper::ISecretkeeper, SecretId::SecretId,
+};
+use anyhow::Result;
+use log::{error, info, warn};
+
+mod vmdb;
+use vmdb::{VmId, VmIdDb};
+
+/// Interface name for the Secretkeeper HAL.
+const SECRETKEEPER_SERVICE: &str = "android.hardware.security.secretkeeper.ISecretkeeper/default";
+
+/// Directory in which to write persistent state.
+const PERSISTENT_DIRECTORY: &str = "/data/misc/apexdata/com.android.virt";
+
+/// Maximum number of VM IDs to delete at once. Must be smaller than the maximum number of
+/// SQLite parameters (999) and small enough that an ISecretkeeper::deleteIds parcel fits
+/// within the maximum AIDL message size.
+const DELETE_MAX_BATCH_SIZE: usize = 100;
+
+/// State related to VM secrets.
+pub struct State {
+ sk: binder::Strong<dyn ISecretkeeper>,
+ /// Database of VM IDs.
+ vm_id_db: VmIdDb,
+ batch_size: usize,
+}
+
+impl State {
+ pub fn new() -> Option<Self> {
+ let sk = match Self::find_sk() {
+ Some(sk) => sk,
+ None => {
+ warn!("failed to find a Secretkeeper instance; skipping secret management");
+ return None;
+ }
+ };
+ let (vm_id_db, created) = match VmIdDb::new(PERSISTENT_DIRECTORY) {
+ Ok(v) => v,
+ Err(e) => {
+ error!("skipping secret management, failed to connect to database: {e:?}");
+ return None;
+ }
+ };
+ if created {
+ // If the database did not previously exist, then this appears to be the first run of
+ // `virtualizationservice` since device setup or factory reset. In case of the latter,
+ // delete any secrets that may be left over from before reset, thus ensuring that the
+ // local database state matches that of the TA (i.e. empty).
+ warn!("no existing VM ID DB; clearing any previous secrets to match fresh DB");
+ if let Err(e) = sk.deleteAll() {
+ error!("failed to delete previous secrets, dropping database: {e:?}");
+ vm_id_db.delete_db_file(PERSISTENT_DIRECTORY);
+ return None;
+ }
+ } else {
+ info!("re-using existing VM ID DB");
+ }
+ Some(Self { sk, vm_id_db, batch_size: DELETE_MAX_BATCH_SIZE })
+ }
+
+ fn find_sk() -> Option<binder::Strong<dyn ISecretkeeper>> {
+ if let Ok(true) = binder::is_declared(SECRETKEEPER_SERVICE) {
+ match binder::get_interface(SECRETKEEPER_SERVICE) {
+ Ok(sk) => Some(sk),
+ Err(e) => {
+ error!("failed to connect to {SECRETKEEPER_SERVICE}: {e:?}");
+ None
+ }
+ }
+ } else {
+ info!("instance {SECRETKEEPER_SERVICE} not declared");
+ None
+ }
+ }
+
+ /// Delete the VM IDs associated with Android user ID `user_id`.
+ pub fn delete_ids_for_user(&mut self, user_id: i32) -> Result<()> {
+ let vm_ids = self.vm_id_db.vm_ids_for_user(user_id)?;
+ info!(
+ "delete_ids_for_user(user_id={user_id}) triggers deletion of {} secrets",
+ vm_ids.len()
+ );
+ self.delete_ids(&vm_ids);
+ Ok(())
+ }
+
+ /// Delete the VM IDs associated with `(user_id, app_id)`.
+ pub fn delete_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<()> {
+ let vm_ids = self.vm_id_db.vm_ids_for_app(user_id, app_id)?;
+ info!(
+ "delete_ids_for_app(user_id={user_id}, app_id={app_id}) removes {} secrets",
+ vm_ids.len()
+ );
+ self.delete_ids(&vm_ids);
+ Ok(())
+ }
+
+ /// Delete the provided VM IDs from both Secretkeeper and the database.
+ pub fn delete_ids(&mut self, mut vm_ids: &[VmId]) {
+ while !vm_ids.is_empty() {
+ let len = std::cmp::min(vm_ids.len(), self.batch_size);
+ let batch = &vm_ids[..len];
+ self.delete_ids_batch(batch);
+ vm_ids = &vm_ids[len..];
+ }
+ }
+
+ /// Delete a batch of VM IDs from both Secretkeeper and the database. The batch is assumed
+ /// to be smaller than both:
+ /// - the corresponding limit for number of database parameters
+ /// - the corresponding limit for maximum size of a single AIDL message for `ISecretkeeper`.
+ fn delete_ids_batch(&mut self, vm_ids: &[VmId]) {
+ let secret_ids: Vec<SecretId> = vm_ids.iter().map(|id| SecretId { id: *id }).collect();
+ if let Err(e) = self.sk.deleteIds(&secret_ids) {
+ error!("failed to delete all secrets from Secretkeeper: {e:?}");
+ }
+ if let Err(e) = self.vm_id_db.delete_vm_ids(vm_ids) {
+ error!("failed to remove secret IDs from database: {e:?}");
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::sync::{Arc, Mutex};
+ use android_hardware_security_authgraph::aidl::android::hardware::security::authgraph::{
+ IAuthGraphKeyExchange::IAuthGraphKeyExchange,
+ };
+ use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper::{
+ ISecretkeeper::BnSecretkeeper
+ };
+
+ /// Fake implementation of Secretkeeper that keeps a history of what operations were invoked.
+ #[derive(Default)]
+ struct FakeSk {
+ history: Arc<Mutex<Vec<SkOp>>>,
+ }
+
+ #[derive(Clone, PartialEq, Eq, Debug)]
+ enum SkOp {
+ Management,
+ DeleteIds(Vec<VmId>),
+ DeleteAll,
+ }
+
+ impl ISecretkeeper for FakeSk {
+ fn processSecretManagementRequest(&self, _req: &[u8]) -> binder::Result<Vec<u8>> {
+ self.history.lock().unwrap().push(SkOp::Management);
+ Ok(vec![])
+ }
+
+ fn getAuthGraphKe(&self) -> binder::Result<binder::Strong<dyn IAuthGraphKeyExchange>> {
+ unimplemented!()
+ }
+
+ fn deleteIds(&self, ids: &[SecretId]) -> binder::Result<()> {
+ self.history.lock().unwrap().push(SkOp::DeleteIds(ids.iter().map(|s| s.id).collect()));
+ Ok(())
+ }
+
+ fn deleteAll(&self) -> binder::Result<()> {
+ self.history.lock().unwrap().push(SkOp::DeleteAll);
+ Ok(())
+ }
+ }
+ impl binder::Interface for FakeSk {}
+
+ fn new_test_state(history: Arc<Mutex<Vec<SkOp>>>, batch_size: usize) -> State {
+ let vm_id_db = vmdb::new_test_db();
+ let sk = FakeSk { history };
+ let sk = BnSecretkeeper::new_binder(sk, binder::BinderFeatures::default());
+ State { sk, vm_id_db, batch_size }
+ }
+
+ const VM_ID1: VmId = [1u8; 64];
+ const VM_ID2: VmId = [2u8; 64];
+ const VM_ID3: VmId = [3u8; 64];
+ const VM_ID4: VmId = [4u8; 64];
+ const VM_ID5: VmId = [5u8; 64];
+
+ #[test]
+ fn test_sk_state_batching() {
+ let history = Arc::new(Mutex::new(Vec::new()));
+ let mut sk_state = new_test_state(history.clone(), 2);
+ sk_state.delete_ids(&[VM_ID1, VM_ID2, VM_ID3, VM_ID4, VM_ID5]);
+ let got = (*history.lock().unwrap()).clone();
+ assert_eq!(
+ got,
+ vec![
+ SkOp::DeleteIds(vec![VM_ID1, VM_ID2]),
+ SkOp::DeleteIds(vec![VM_ID3, VM_ID4]),
+ SkOp::DeleteIds(vec![VM_ID5]),
+ ]
+ );
+ }
+
+ #[test]
+ fn test_sk_state_no_batching() {
+ let history = Arc::new(Mutex::new(Vec::new()));
+ let mut sk_state = new_test_state(history.clone(), 6);
+ sk_state.delete_ids(&[VM_ID1, VM_ID2, VM_ID3, VM_ID4, VM_ID5]);
+ let got = (*history.lock().unwrap()).clone();
+ assert_eq!(got, vec![SkOp::DeleteIds(vec![VM_ID1, VM_ID2, VM_ID3, VM_ID4, VM_ID5])]);
+ }
+
+ #[test]
+ fn test_sk_state() {
+ const USER1: i32 = 1;
+ const USER2: i32 = 2;
+ const USER3: i32 = 3;
+ const APP_A: i32 = 50;
+ const APP_B: i32 = 60;
+ const APP_C: i32 = 70;
+
+ let history = Arc::new(Mutex::new(Vec::new()));
+ let mut sk_state = new_test_state(history.clone(), 2);
+
+ sk_state.vm_id_db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+ sk_state.vm_id_db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ sk_state.vm_id_db.add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
+ sk_state.vm_id_db.add_vm_id(&VM_ID4, USER3, APP_A).unwrap();
+ sk_state.vm_id_db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
+ assert_eq!((*history.lock().unwrap()).clone(), vec![]);
+
+ sk_state.delete_ids_for_app(USER2, APP_B).unwrap();
+ assert_eq!((*history.lock().unwrap()).clone(), vec![SkOp::DeleteIds(vec![VM_ID3])]);
+
+ sk_state.delete_ids_for_user(USER3).unwrap();
+ assert_eq!(
+ (*history.lock().unwrap()).clone(),
+ vec![SkOp::DeleteIds(vec![VM_ID3]), SkOp::DeleteIds(vec![VM_ID4, VM_ID5]),]
+ );
+
+ assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_app(USER1, APP_A).unwrap());
+ let empty: Vec<VmId> = Vec::new();
+ assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_app(USER2, APP_B).unwrap());
+ assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_user(USER3).unwrap());
+ }
+}
diff --git a/virtualizationservice/src/maintenance/vmdb.rs b/virtualizationservice/src/maintenance/vmdb.rs
new file mode 100644
index 0000000..bdff034
--- /dev/null
+++ b/virtualizationservice/src/maintenance/vmdb.rs
@@ -0,0 +1,265 @@
+// Copyright 2024, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Database of VM IDs.
+
+use anyhow::{Context, Result};
+use log::{debug, error, info, warn};
+use rusqlite::{params, params_from_iter, Connection, OpenFlags, Rows};
+use std::path::PathBuf;
+
+/// Subdirectory to hold the database.
+const DB_DIR: &str = "vmdb";
+
+/// Name of the file that holds the database.
+const DB_FILENAME: &str = "vmids.sqlite";
+
+/// Maximum number of host parameters in a single SQL statement.
+/// (Default value of `SQLITE_LIMIT_VARIABLE_NUMBER` for <= 3.32.0)
+const MAX_VARIABLES: usize = 999;
+
+/// Identifier for a VM and its corresponding secret.
+pub type VmId = [u8; 64];
+
+/// Representation of an on-disk database of VM IDs.
+pub struct VmIdDb {
+ conn: Connection,
+}
+
+impl VmIdDb {
+ /// Connect to the VM ID database file held in the given directory, creating it if necessary.
+ /// The second return value indicates whether a new database file was created.
+ ///
+ /// This function assumes no other threads/processes are attempting to connect concurrently.
+ pub fn new(db_dir: &str) -> Result<(Self, bool)> {
+ let mut db_path = PathBuf::from(db_dir);
+ db_path.push(DB_DIR);
+ if !db_path.exists() {
+ std::fs::create_dir(&db_path).context(format!("failed to create {db_path:?}"))?;
+ info!("created persistent db dir {db_path:?}");
+ }
+
+ db_path.push(DB_FILENAME);
+ let (flags, created) = if db_path.exists() {
+ debug!("connecting to existing database {db_path:?}");
+ (
+ OpenFlags::SQLITE_OPEN_READ_WRITE
+ | OpenFlags::SQLITE_OPEN_URI
+ | OpenFlags::SQLITE_OPEN_NO_MUTEX,
+ false,
+ )
+ } else {
+ info!("creating fresh database {db_path:?}");
+ (
+ OpenFlags::SQLITE_OPEN_READ_WRITE
+ | OpenFlags::SQLITE_OPEN_CREATE
+ | OpenFlags::SQLITE_OPEN_URI
+ | OpenFlags::SQLITE_OPEN_NO_MUTEX,
+ true,
+ )
+ };
+ let mut result = Self {
+ conn: Connection::open_with_flags(db_path, flags)
+ .context(format!("failed to open/create DB with {flags:?}"))?,
+ };
+
+ if created {
+ result.init_tables().context("failed to create tables")?;
+ }
+ Ok((result, created))
+ }
+
+ /// Delete the associated database file.
+ pub fn delete_db_file(self, db_dir: &str) {
+ let mut db_path = PathBuf::from(db_dir);
+ db_path.push(DB_DIR);
+ db_path.push(DB_FILENAME);
+
+ // Drop the connection before removing the backing file.
+ drop(self);
+ warn!("removing database file {db_path:?}");
+ if let Err(e) = std::fs::remove_file(&db_path) {
+ error!("failed to remove database file {db_path:?}: {e:?}");
+ }
+ }
+
+ /// Create the database table and indices.
+ fn init_tables(&mut self) -> Result<()> {
+ self.conn
+ .execute(
+ "CREATE TABLE IF NOT EXISTS main.vmids (
+ vm_id BLOB PRIMARY KEY,
+ user_id INTEGER,
+ app_id INTEGER
+ ) WITHOUT ROWID;",
+ (),
+ )
+ .context("failed to create table")?;
+ self.conn
+ .execute("CREATE INDEX IF NOT EXISTS main.vmids_user_index ON vmids(user_id);", [])
+ .context("Failed to create user index")?;
+ self.conn
+ .execute(
+ "CREATE INDEX IF NOT EXISTS main.vmids_app_index ON vmids(user_id, app_id);",
+ [],
+ )
+ .context("Failed to create app index")?;
+ Ok(())
+ }
+
+ /// Add the given VM ID into the database.
+ #[allow(dead_code)] // TODO(b/294177871): connect this up
+ pub fn add_vm_id(&mut self, vm_id: &VmId, user_id: i32, app_id: i32) -> Result<()> {
+ let _rows = self
+ .conn
+ .execute(
+ "REPLACE INTO main.vmids (vm_id, user_id, app_id) VALUES (?1, ?2, ?3);",
+ params![vm_id, &user_id, &app_id],
+ )
+ .context("failed to add VM ID")?;
+ Ok(())
+ }
+
+ /// Remove the given VM IDs from the database. The collection of IDs is assumed to be smaller
+ /// than the maximum number of SQLite parameters.
+ pub fn delete_vm_ids(&mut self, vm_ids: &[VmId]) -> Result<()> {
+ assert!(vm_ids.len() < MAX_VARIABLES);
+ let mut vars = "?,".repeat(vm_ids.len());
+ vars.pop(); // remove trailing comma
+ let sql = format!("DELETE FROM main.vmids WHERE vm_id IN ({});", vars);
+ let mut stmt = self.conn.prepare(&sql).context("failed to prepare DELETE stmt")?;
+ let _rows = stmt.execute(params_from_iter(vm_ids)).context("failed to delete VM IDs")?;
+ Ok(())
+ }
+
+ /// Return the VM IDs associated with Android user ID `user_id`.
+ pub fn vm_ids_for_user(&mut self, user_id: i32) -> Result<Vec<VmId>> {
+ let mut stmt = self
+ .conn
+ .prepare("SELECT vm_id FROM main.vmids WHERE user_id = ?;")
+ .context("failed to prepare SELECT stmt")?;
+ let rows = stmt.query(params![user_id]).context("query failed")?;
+ Self::vm_ids_from_rows(rows)
+ }
+
+ /// Return the VM IDs associated with `(user_id, app_id)`.
+ pub fn vm_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<Vec<VmId>> {
+ let mut stmt = self
+ .conn
+ .prepare("SELECT vm_id FROM main.vmids WHERE user_id = ? AND app_id = ?;")
+ .context("failed to prepare SELECT stmt")?;
+ let rows = stmt.query(params![user_id, app_id]).context("query failed")?;
+ Self::vm_ids_from_rows(rows)
+ }
+
+ /// Retrieve a collection of VM IDs from database rows.
+ fn vm_ids_from_rows(mut rows: Rows) -> Result<Vec<VmId>> {
+ let mut vm_ids: Vec<VmId> = Vec::new();
+ while let Some(row) = rows.next().context("failed row unpack")? {
+ match row.get(0) {
+ Ok(vm_id) => vm_ids.push(vm_id),
+ Err(e) => log::error!("failed to parse row: {e:?}"),
+ }
+ }
+
+ Ok(vm_ids)
+ }
+}
+
+#[cfg(test)]
+pub fn new_test_db() -> VmIdDb {
+ let mut db = VmIdDb { conn: Connection::open_in_memory().unwrap() };
+ db.init_tables().unwrap();
+ db
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ const VM_ID1: VmId = [1u8; 64];
+ const VM_ID2: VmId = [2u8; 64];
+ const VM_ID3: VmId = [3u8; 64];
+ const VM_ID4: VmId = [4u8; 64];
+ const VM_ID5: VmId = [5u8; 64];
+ const USER1: i32 = 1;
+ const USER2: i32 = 2;
+ const USER3: i32 = 3;
+ const USER_UNKNOWN: i32 = 4;
+ const APP_A: i32 = 50;
+ const APP_B: i32 = 60;
+ const APP_C: i32 = 70;
+ const APP_UNKNOWN: i32 = 99;
+
+ #[test]
+ fn test_add_remove() {
+ let mut db = new_test_db();
+ db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID3, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID4, USER2, APP_B).unwrap();
+ db.add_vm_id(&VM_ID5, USER3, APP_A).unwrap();
+ db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
+ let empty: Vec<VmId> = Vec::new();
+
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_app(USER1, APP_A).unwrap());
+ assert_eq!(vec![VM_ID4], db.vm_ids_for_app(USER2, APP_B).unwrap());
+ assert_eq!(vec![VM_ID5], db.vm_ids_for_user(USER3).unwrap());
+ assert_eq!(empty, db.vm_ids_for_user(USER_UNKNOWN).unwrap());
+ assert_eq!(empty, db.vm_ids_for_app(USER1, APP_UNKNOWN).unwrap());
+
+ db.delete_vm_ids(&[VM_ID2, VM_ID3]).unwrap();
+
+ assert_eq!(vec![VM_ID1], db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1], db.vm_ids_for_app(USER1, APP_A).unwrap());
+
+ // OK to delete things that don't exist.
+ db.delete_vm_ids(&[VM_ID2, VM_ID3]).unwrap();
+
+ assert_eq!(vec![VM_ID1], db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1], db.vm_ids_for_app(USER1, APP_A).unwrap());
+
+ db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID3, USER1, APP_A).unwrap();
+
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_app(USER1, APP_A).unwrap());
+ assert_eq!(vec![VM_ID4], db.vm_ids_for_app(USER2, APP_B).unwrap());
+ assert_eq!(vec![VM_ID5], db.vm_ids_for_user(USER3).unwrap());
+ assert_eq!(empty, db.vm_ids_for_user(USER_UNKNOWN).unwrap());
+ assert_eq!(empty, db.vm_ids_for_app(USER1, APP_UNKNOWN).unwrap());
+ }
+
+ #[test]
+ fn test_invalid_vm_id() {
+ let mut db = new_test_db();
+ db.add_vm_id(&VM_ID3, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+
+ // Note that results are returned in `vm_id` order, because the table is `WITHOUT ROWID`.
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_user(USER1).unwrap());
+
+ // Manually insert a row with a VM ID that's the wrong size.
+ db.conn
+ .execute(
+ "REPLACE INTO main.vmids (vm_id, user_id, app_id) VALUES (?1, ?2, ?3);",
+ params![&[99u8; 60], &USER1, APP_A],
+ )
+ .unwrap();
+
+ // Invalid row is skipped and remainder returned.
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_user(USER1).unwrap());
+ }
+}
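As a worked example of the dynamic `IN` clause that `delete_vm_ids` prepares, the standalone sketch below reproduces the placeholder construction for a batch of three IDs:

```
// Reproduces the statement text delete_vm_ids builds for a batch of n IDs.
fn delete_stmt(n: usize) -> String {
    let mut vars = "?,".repeat(n);
    vars.pop(); // remove trailing comma
    format!("DELETE FROM main.vmids WHERE vm_id IN ({});", vars)
}

fn main() {
    assert_eq!(delete_stmt(3), "DELETE FROM main.vmids WHERE vm_id IN (?,?,?);");
}
```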
diff --git a/virtualizationservice/src/rkpvm.rs b/virtualizationservice/src/rkpvm.rs
index 79e09b0..67ba740 100644
--- a/virtualizationservice/src/rkpvm.rs
+++ b/virtualizationservice/src/rkpvm.rs
@@ -35,7 +35,7 @@
let request = Request::RequestClientVmAttestation(params);
match vm.process_request(request).context("Failed to process request")? {
Response::RequestClientVmAttestation(cert) => Ok(cert),
- _ => bail!("Incorrect response type"),
+ other => bail!("Incorrect response type {other:?}"),
}
}
diff --git a/vm_payload/Android.bp b/vm_payload/Android.bp
index a745fd6..80d289b 100644
--- a/vm_payload/Android.bp
+++ b/vm_payload/Android.bp
@@ -34,7 +34,7 @@
source_stem: "bindings",
bindgen_flags: [
"--default-enum-style rust",
- "--allowlist-type=attestation_status_t",
+ "--allowlist-type=AVmAttestationStatus",
],
visibility: [":__subpackages__"],
}
diff --git a/vm_payload/include-restricted/vm_payload_restricted.h b/vm_payload/include-restricted/vm_payload_restricted.h
index d7324a8..5dd12ad 100644
--- a/vm_payload/include-restricted/vm_payload_restricted.h
+++ b/vm_payload/include-restricted/vm_payload_restricted.h
@@ -72,7 +72,7 @@
* succeeds. The result remains valid until it is freed with
* `AVmPayload_freeAttestationResult`.
*/
-attestation_status_t AVmPayload_requestAttestationForTesting(
+AVmAttestationStatus AVmPayload_requestAttestationForTesting(
const void* _Nonnull challenge, size_t challenge_size,
struct AVmAttestationResult* _Nullable* _Nonnull result) __INTRODUCED_IN(__ANDROID_API_V__);
diff --git a/vm_payload/include/vm_payload.h b/vm_payload/include/vm_payload.h
index af755c9..5e15607 100644
--- a/vm_payload/include/vm_payload.h
+++ b/vm_payload/include/vm_payload.h
@@ -25,20 +25,19 @@
__BEGIN_DECLS
-struct AIBinder;
typedef struct AIBinder AIBinder;
/**
* Introduced in API 35.
* Remote attestation result if the attestation succeeds.
*/
-struct AVmAttestationResult;
+typedef struct AVmAttestationResult AVmAttestationResult;
/**
* Introduced in API 35.
* Remote attestation status types returned from remote attestation functions.
*/
-typedef enum attestation_status_t : int32_t {
+typedef enum AVmAttestationStatus : int32_t {
/** The remote attestation completes successfully. */
ATTESTATION_OK = 0,
@@ -50,7 +49,7 @@
/** Remote attestation is not supported in the current environment. */
ATTESTATION_ERROR_UNSUPPORTED = -10003,
-} attestation_status_t;
+} AVmAttestationStatus;
/**
* Notifies the host that the payload is ready.
@@ -151,9 +150,10 @@
*
* \return ATTESTATION_OK upon successful attestation.
*/
-attestation_status_t AVmPayload_requestAttestation(
- const void* _Nonnull challenge, size_t challenge_size,
- struct AVmAttestationResult* _Nullable* _Nonnull result) __INTRODUCED_IN(__ANDROID_API_V__);
+AVmAttestationStatus AVmPayload_requestAttestation(const void* _Nonnull challenge,
+ size_t challenge_size,
+ AVmAttestationResult* _Nullable* _Nonnull result)
+ __INTRODUCED_IN(__ANDROID_API_V__);
/**
* Converts the return value from `AVmPayload_requestAttestation` to a text string
@@ -162,7 +162,7 @@
* \return a constant string value representing the status code. The string should not
* be deleted or freed by the application and remains valid for the lifetime of the VM.
*/
-const char* _Nonnull AVmAttestationResult_resultToString(attestation_status_t status)
+const char* _Nonnull AVmAttestationStatus_toString(AVmAttestationStatus status)
__INTRODUCED_IN(__ANDROID_API_V__);
/**
@@ -173,7 +173,7 @@
*
* \param result A pointer to the attestation result.
*/
-void AVmAttestationResult_free(struct AVmAttestationResult* _Nullable result)
+void AVmAttestationResult_free(AVmAttestationResult* _Nullable result)
__INTRODUCED_IN(__ANDROID_API_V__);
/**
@@ -192,7 +192,7 @@
*
* [RFC 5915 s3]: https://datatracker.ietf.org/doc/html/rfc5915#section-3
*/
-size_t AVmAttestationResult_getPrivateKey(const struct AVmAttestationResult* _Nonnull result,
+size_t AVmAttestationResult_getPrivateKey(const AVmAttestationResult* _Nonnull result,
void* _Nullable data, size_t size)
__INTRODUCED_IN(__ANDROID_API_V__);
@@ -215,7 +215,7 @@
*
* [RFC 6979]: https://datatracker.ietf.org/doc/html/rfc6979
*/
-size_t AVmAttestationResult_sign(const struct AVmAttestationResult* _Nonnull result,
+size_t AVmAttestationResult_sign(const AVmAttestationResult* _Nonnull result,
const void* _Nonnull message, size_t message_size,
void* _Nullable data, size_t size)
__INTRODUCED_IN(__ANDROID_API_V__);
@@ -232,7 +232,7 @@
*
* \return The number of certificates in the certificate chain.
*/
-size_t AVmAttestationResult_getCertificateCount(const struct AVmAttestationResult* _Nonnull result)
+size_t AVmAttestationResult_getCertificateCount(const AVmAttestationResult* _Nonnull result)
__INTRODUCED_IN(__ANDROID_API_V__);
/**
@@ -256,7 +256,7 @@
*
* \return The total size of the certificate at the given `index`.
*/
-size_t AVmAttestationResult_getCertificateAt(const struct AVmAttestationResult* _Nonnull result,
+size_t AVmAttestationResult_getCertificateAt(const AVmAttestationResult* _Nonnull result,
size_t index, void* _Nullable data, size_t size)
__INTRODUCED_IN(__ANDROID_API_V__);
diff --git a/vm_payload/libvm_payload.map.txt b/vm_payload/libvm_payload.map.txt
index caf8f84..3daad00 100644
--- a/vm_payload/libvm_payload.map.txt
+++ b/vm_payload/libvm_payload.map.txt
@@ -12,7 +12,7 @@
AVmAttestationResult_getPrivateKey; # systemapi introduced=VanillaIceCream
AVmAttestationResult_sign; # systemapi introduced=VanillaIceCream
AVmAttestationResult_free; # systemapi introduced=VanillaIceCream
- AVmAttestationResult_resultToString; # systemapi introduced=VanillaIceCream
+ AVmAttestationStatus_toString; # systemapi introduced=VanillaIceCream
AVmAttestationResult_getCertificateCount; # systemapi introduced=VanillaIceCream
AVmAttestationResult_getCertificateAt; # systemapi introduced=VanillaIceCream
local:
diff --git a/vm_payload/src/lib.rs b/vm_payload/src/lib.rs
index 6188b21..5cc4431 100644
--- a/vm_payload/src/lib.rs
+++ b/vm_payload/src/lib.rs
@@ -37,7 +37,7 @@
atomic::{AtomicBool, Ordering},
Mutex,
};
-use vm_payload_status_bindgen::attestation_status_t;
+use vm_payload_status_bindgen::AVmAttestationStatus;
/// Maximum size of an ECDSA signature for EC P-256 key is 72 bytes.
const MAX_ECDSA_P256_SIGNATURE_SIZE: usize = 72;
@@ -283,7 +283,7 @@
challenge: *const u8,
challenge_size: usize,
res: &mut *mut AttestationResult,
-) -> attestation_status_t {
+) -> AVmAttestationStatus {
// SAFETY: The caller guarantees that `challenge` is valid for reads and `res` is valid
// for writes.
unsafe {
@@ -310,7 +310,7 @@
challenge: *const u8,
challenge_size: usize,
res: &mut *mut AttestationResult,
-) -> attestation_status_t {
+) -> AVmAttestationStatus {
// SAFETY: The caller guarantees that `challenge` is valid for reads and `res` is valid
// for writes.
unsafe {
@@ -337,11 +337,11 @@
challenge_size: usize,
test_mode: bool,
res: &mut *mut AttestationResult,
-) -> attestation_status_t {
+) -> AVmAttestationStatus {
initialize_logging();
const MAX_CHALLENGE_SIZE: usize = 64;
if challenge_size > MAX_CHALLENGE_SIZE {
- return attestation_status_t::ATTESTATION_ERROR_INVALID_CHALLENGE;
+ return AVmAttestationStatus::ATTESTATION_ERROR_INVALID_CHALLENGE;
}
let challenge = if challenge_size == 0 {
&[]
@@ -354,7 +354,7 @@
match service.requestAttestation(challenge, test_mode) {
Ok(attestation_res) => {
*res = Box::into_raw(Box::new(attestation_res));
- attestation_status_t::ATTESTATION_OK
+ AVmAttestationStatus::ATTESTATION_OK
}
Err(e) => {
error!("Remote attestation failed: {e:?}");
@@ -363,31 +363,29 @@
}
}
-fn binder_status_to_attestation_status(status: binder::Status) -> attestation_status_t {
+fn binder_status_to_attestation_status(status: binder::Status) -> AVmAttestationStatus {
match status.exception_code() {
- ExceptionCode::UNSUPPORTED_OPERATION => attestation_status_t::ATTESTATION_ERROR_UNSUPPORTED,
- _ => attestation_status_t::ATTESTATION_ERROR_ATTESTATION_FAILED,
+ ExceptionCode::UNSUPPORTED_OPERATION => AVmAttestationStatus::ATTESTATION_ERROR_UNSUPPORTED,
+ _ => AVmAttestationStatus::ATTESTATION_ERROR_ATTESTATION_FAILED,
}
}
/// Converts the return value from `AVmPayload_requestAttestation` to a text string
/// representing the error code.
#[no_mangle]
-pub extern "C" fn AVmAttestationResult_resultToString(
- status: attestation_status_t,
-) -> *const c_char {
+pub extern "C" fn AVmAttestationStatus_toString(status: AVmAttestationStatus) -> *const c_char {
let message = match status {
- attestation_status_t::ATTESTATION_OK => {
+ AVmAttestationStatus::ATTESTATION_OK => {
CStr::from_bytes_with_nul(b"The remote attestation completes successfully.\0").unwrap()
}
- attestation_status_t::ATTESTATION_ERROR_INVALID_CHALLENGE => {
+ AVmAttestationStatus::ATTESTATION_ERROR_INVALID_CHALLENGE => {
CStr::from_bytes_with_nul(b"The challenge size is not between 0 and 64.\0").unwrap()
}
- attestation_status_t::ATTESTATION_ERROR_ATTESTATION_FAILED => {
+ AVmAttestationStatus::ATTESTATION_ERROR_ATTESTATION_FAILED => {
CStr::from_bytes_with_nul(b"Failed to attest the VM. Please retry at a later time.\0")
.unwrap()
}
- attestation_status_t::ATTESTATION_ERROR_UNSUPPORTED => CStr::from_bytes_with_nul(
+ AVmAttestationStatus::ATTESTATION_ERROR_UNSUPPORTED => CStr::from_bytes_with_nul(
b"Remote attestation is not supported in the current environment.\0",
)
.unwrap(),