Merge "pVMs to use Sk for rollback protected secrets" into main
diff --git a/README.md b/README.md
index 1b092f6..827e55c 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@
AVF components:
* [pVM firmware](pvmfw/README.md)
+* [Android Bootloader (ABL)](docs/abl.md)
* [Microdroid](microdroid/README.md)
* [Microdroid kernel](microdroid/kernel/README.md)
* [Microdroid payload](microdroid/payload/README.md)
diff --git a/TEST_MAPPING b/TEST_MAPPING
index f146b4e..89d4552 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -12,6 +12,9 @@
"name": "MicrodroidTestApp"
},
{
+ "name": "MicrodroidTestAppNoPerm"
+ },
+ {
"name": "VmAttestationTestApp"
},
{
diff --git a/apex/Android.bp b/apex/Android.bp
index cc59b16..3b5141e 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -81,7 +81,6 @@
// TODO(b/295593640) Unfortunately these are added to the apex even though they are unused.
// Once the build system is fixed, remove this.
unwanted_transitive_deps: [
- "libdrm",
"libsso",
"libutils",
],
@@ -139,6 +138,7 @@
"microdroid_initrd_normal",
"microdroid.json",
"microdroid_kernel",
+ "com.android.virt.init.rc",
],
host_required: [
"vm_shell",
@@ -172,13 +172,9 @@
],
},
release_avf_enable_remote_attestation: {
- prebuilts: ["com.android.virt.init_attestation_enabled.rc"],
vintf_fragments: [
"virtualizationservice.xml",
],
- conditions_default: {
- prebuilts: ["com.android.virt.init.rc"],
- },
},
},
}
@@ -200,16 +196,35 @@
certificate: "com.android.virt",
}
-prebuilt_etc {
- name: "com.android.virt.init.rc",
- src: "virtualizationservice.rc",
- filename: "virtualizationservice.rc",
- installable: false,
+soong_config_module_type {
+ name: "avf_flag_aware_genrule",
+ module_type: "genrule",
+ config_namespace: "ANDROID",
+ bool_variables: [
+ "release_avf_enable_llpvm_changes",
+ "release_avf_enable_remote_attestation",
+ ],
+ properties: ["srcs"],
+}
+
+avf_flag_aware_genrule {
+ name: "virtualizationservice_rc_combined",
+ srcs: ["virtualizationservice.rc.base"],
+ soong_config_variables: {
+ release_avf_enable_llpvm_changes: {
+ srcs: ["virtualizationservice.rc.llpvm"],
+ },
+ release_avf_enable_remote_attestation: {
+ srcs: ["virtualizationservice.rc.ra"],
+ },
+ },
+ out: ["virtualizationservice.rc"],
+ cmd: "cat $(in) > $(out)",
}
prebuilt_etc {
- name: "com.android.virt.init_attestation_enabled.rc",
- src: "virtualizationservice_attestation_enabled.rc",
+ name: "com.android.virt.init.rc",
+ src: ":virtualizationservice_rc_combined",
filename: "virtualizationservice.rc",
installable: false,
}
@@ -239,6 +254,7 @@
"initrd_bootconfig",
"lpmake",
"lpunpack",
+ "lz4",
"simg2img",
],
}
@@ -259,6 +275,7 @@
"initrd_bootconfig",
"lpmake",
"lpunpack",
+ "lz4",
"sign_virt_apex",
"simg2img",
],
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
index 0b6137b..7c59b54 100644
--- a/apex/sign_virt_apex.py
+++ b/apex/sign_virt_apex.py
@@ -153,12 +153,18 @@
'--key', key, '--output', output])
+def is_lz4(args, path):
+ # error 44: Unrecognized header
+ result = RunCommand(args, ['lz4', '-t', path], expected_return_values={0, 44})
+ return result[1] == 0
+
+
def AvbInfo(args, image_path):
"""Parses avbtool --info image output
Args:
args: program arguments.
- image_path: The path to the image.
+ image_path: The path to the image, either raw or lz4 compressed
descriptor_name: Descriptor name of interest.
Returns:
@@ -169,6 +175,11 @@
if not os.path.exists(image_path):
raise ValueError(f'Failed to find image: {image_path}')
+ if is_lz4(args, image_path):
+ with tempfile.NamedTemporaryFile() as decompressed_image:
+ RunCommand(args, ['lz4', '-d', '-f', image_path, decompressed_image.name])
+ return AvbInfo(args, decompressed_image.name)
+
output, ret_code = RunCommand(
args, ['avbtool', 'info_image', '--image', image_path], expected_return_values={0, 1})
if ret_code == 1:
@@ -560,11 +571,7 @@
wait=[vbmeta_f])
# Re-sign kernel. Note kernel's vbmeta contain addition descriptor from ramdisk(s)
- def resign_kernel(kernel, initrd_normal, initrd_debug):
- kernel_file = files[kernel]
- initrd_normal_file = files[initrd_normal]
- initrd_debug_file = files[initrd_debug]
-
+ def resign_decompressed_kernel(kernel_file, initrd_normal_file, initrd_debug_file):
_, kernel_image_descriptors = AvbInfo(args, kernel_file)
salts = extract_hash_descriptors(
kernel_image_descriptors, lambda descriptor: descriptor['Salt'])
@@ -580,21 +587,47 @@
additional_images=[initrd_normal_hashdesc, initrd_debug_hashdesc],
wait=[initrd_n_f, initrd_d_f])
+ def resign_compressed_kernel(kernel_file, initrd_normal_file, initrd_debug_file):
+ # decompress, re-sign, compress again
+ with tempfile.TemporaryDirectory() as work_dir:
+ decompressed_kernel_file = os.path.join(work_dir, os.path.basename(kernel_file))
+ RunCommand(args, ['lz4', '-d', kernel_file, decompressed_kernel_file])
+ resign_decompressed_kernel(decompressed_kernel_file, initrd_normal_file,
+ initrd_debug_file).result()
+ RunCommand(args, ['lz4', '-9', '-f', decompressed_kernel_file, kernel_file])
+
+ def resign_kernel(kernel, initrd_normal, initrd_debug):
+ kernel_file = files[kernel]
+ initrd_normal_file = files[initrd_normal]
+ initrd_debug_file = files[initrd_debug]
+
+ # kernel may be compressed with lz4.
+ if is_lz4(args, kernel_file):
+ return Async(resign_compressed_kernel, kernel_file, initrd_normal_file,
+ initrd_debug_file)
+ else:
+ return resign_decompressed_kernel(kernel_file, initrd_normal_file, initrd_debug_file)
+
_, original_kernel_descriptors = AvbInfo(args, files['kernel'])
- resign_kernel_task = resign_kernel('kernel', 'initrd_normal.img', 'initrd_debuggable.img')
+ resign_kernel_tasks = [resign_kernel('kernel', 'initrd_normal.img', 'initrd_debuggable.img')]
+ original_kernels = {"kernel" : original_kernel_descriptors}
for ver in gki_versions:
if f'gki-{ver}_kernel' in files:
- resign_kernel(
- f'gki-{ver}_kernel',
+ kernel_name = f'gki-{ver}_kernel'
+ _, original_kernel_descriptors = AvbInfo(args, files[kernel_name])
+ task = resign_kernel(
+ kernel_name,
f'gki-{ver}_initrd_normal.img',
f'gki-{ver}_initrd_debuggable.img')
+ resign_kernel_tasks.append(task)
+ original_kernels[kernel_name] = original_kernel_descriptors
# Re-sign rialto if it exists. Rialto only exists in arm64 environment.
if os.path.exists(files['rialto']):
update_initrd_digests_task = Async(
- update_initrd_digests_in_rialto, original_kernel_descriptors, args,
- files, wait=[resign_kernel_task])
+ update_initrd_digests_of_kernels_in_rialto, original_kernels, args, files,
+ wait=resign_kernel_tasks)
Async(resign_rialto, args, key, files['rialto'], wait=[update_initrd_digests_task])
def resign_rialto(args, key, rialto_path):
@@ -628,18 +661,7 @@
f"Value of '{key}' should change for '{context}'" \
f"Original value: {original[key]}, updated value: {updated[key]}"
-def update_initrd_digests_in_rialto(original_descriptors, args, files):
- _, updated_descriptors = AvbInfo(args, files['kernel'])
-
- original_digests = extract_hash_descriptors(
- original_descriptors, lambda x: binascii.unhexlify(x['Digest']))
- updated_digests = extract_hash_descriptors(
- updated_descriptors, lambda x: binascii.unhexlify(x['Digest']))
- assert original_digests.pop("boot") == updated_digests.pop("boot"), \
- "Hash descriptor of boot should not change for kernel. " \
- f"Original descriptors: {original_descriptors}, " \
- f"updated descriptors: {updated_descriptors}"
-
+def update_initrd_digests_of_kernels_in_rialto(original_kernels, args, files):
# Update the hashes of initrd_normal and initrd_debug in rialto if the
# bootconfigs in them are updated.
if args.do_not_update_bootconfigs:
@@ -648,6 +670,26 @@
with open(files['rialto'], "rb") as file:
content = file.read()
+ for kernel_name, descriptors in original_kernels.items():
+ content = update_initrd_digests_in_rialto(
+ descriptors, args, files, kernel_name, content)
+
+ with open(files['rialto'], "wb") as file:
+ file.write(content)
+
+def update_initrd_digests_in_rialto(
+ original_descriptors, args, files, kernel_name, content):
+ _, updated_descriptors = AvbInfo(args, files[kernel_name])
+
+ original_digests = extract_hash_descriptors(
+ original_descriptors, lambda x: binascii.unhexlify(x['Digest']))
+ updated_digests = extract_hash_descriptors(
+ updated_descriptors, lambda x: binascii.unhexlify(x['Digest']))
+ assert original_digests.pop("boot") == updated_digests.pop("boot"), \
+ "Hash descriptor of boot should not change for " + kernel_name + \
+ f"\nOriginal descriptors: {original_descriptors}, " \
+ f"\nUpdated descriptors: {updated_descriptors}"
+
# Check that the original and updated digests are different before updating rialto.
partition_names = {'initrd_normal', 'initrd_debug'}
assert set(original_digests.keys()) == set(updated_digests.keys()) == partition_names, \
@@ -671,8 +713,7 @@
f"original digest of the partition {partition_name} not found."
content = new_content
- with open(files['rialto'], "wb") as file:
- file.write(content)
+ return content
def extract_hash_descriptors(descriptors, f=lambda x: x):
return {desc["Partition Name"]: f(desc) for desc in
diff --git a/apex/virtualizationservice.rc b/apex/virtualizationservice.rc.base
similarity index 99%
rename from apex/virtualizationservice.rc
rename to apex/virtualizationservice.rc.base
index 02b2081..688db10 100644
--- a/apex/virtualizationservice.rc
+++ b/apex/virtualizationservice.rc.base
@@ -16,6 +16,6 @@
class main
user system
group system
- interface aidl android.system.virtualizationservice
disabled
oneshot
+ interface aidl android.system.virtualizationservice
diff --git a/apex/virtualizationservice.rc.llpvm b/apex/virtualizationservice.rc.llpvm
new file mode 100644
index 0000000..916d508
--- /dev/null
+++ b/apex/virtualizationservice.rc.llpvm
@@ -0,0 +1 @@
+ interface aidl android.system.virtualizationmaintenance
diff --git a/apex/virtualizationservice.rc.ra b/apex/virtualizationservice.rc.ra
new file mode 100644
index 0000000..3554259
--- /dev/null
+++ b/apex/virtualizationservice.rc.ra
@@ -0,0 +1 @@
+ interface aidl android.hardware.security.keymint.IRemotelyProvisionedComponent/avf
diff --git a/apex/virtualizationservice_attestation_enabled.rc b/apex/virtualizationservice_attestation_enabled.rc
deleted file mode 100644
index 8eaccae..0000000
--- a/apex/virtualizationservice_attestation_enabled.rc
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-service virtualizationservice /apex/com.android.virt/bin/virtualizationservice
- class main
- user system
- group system
- interface aidl android.system.virtualizationservice
- interface aidl android.hardware.security.keymint.IRemotelyProvisionedComponent/avf
- disabled
- oneshot
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index b0294dd..75f0c4f 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -7,6 +7,7 @@
srcs: ["src/composd_main.rs"],
edition: "2021",
prefer_rlib: true,
+ defaults: ["avf_build_flags_rust"],
rustlibs: [
"android.system.composd-rust",
"android.system.virtualizationservice-rust",
diff --git a/docs/abl.md b/docs/abl.md
new file mode 100644
index 0000000..b08464e
--- /dev/null
+++ b/docs/abl.md
@@ -0,0 +1,53 @@
+# Android Bootloader (ABL)
+
+[ABL](https://source.android.com/docs/core/architecture/bootloader) is not a component of AVF, but
+it plays a crucial role in loading the necessary AVF components and initializing them correctly.
+This doc explains the responsibilities of ABL from the perspective of AVF.
+
+## pVM firmware (pvmfw)
+
+ABL is responsible for the following:
+
+* locating the pvmfw binary in the pvmfw partition,
+* verifying it as part of the [verified
+ boot](https://source.android.com/docs/security/features/verifiedboot) process,
+* loading it into memory, and
+* describing the region where pvmfw is loaded using the DT and passing it to the hypervisor.
+
+See [ABL Support](../pvmfw/README.md#android-bootloader-abl_support) for more detail.
+
+ABL is also responsible for constructing the pvmfw configuration data. The data consists of the
+following info:
+
+* DICE chain (also known as BCC Handover)
+* DTBO describing [debug policy](debug/README.md#debug-policy) (if available)
+* DTBO describing [assignable devices](device_assignment.md) (if available)
+* Reference DT carrying extra information that needs to be passed to the guest VM
+
+See [Configuration Data](../pvmfw/README.md#configuration-data) for more detail.
+
+## Android
+
+ABL is responsible for setting the following bootconfigs, which describe the status and
+capabilities of the hypervisor:
+
+* `androidboot.hypervisor.version`: free-form description of the hypervisor
+* `androidboot.hypervisor.vm.supported`: whether traditional VMs (i.e. non-protected VMs) are
+ supported or not
+* `androidboot.hypervisor.protected_vm.supported`: whether protected VMs are supported or not
+
+These bootconfigs are converted into system properties by the init process.
+
+See
+[HypervisorProperties.prop](https://android.googlesource.com/platform/system/libsysprop/+/refs/heads/main/srcs/android/sysprop/HypervisorProperties.sysprop)
+for more detail.
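+
+For example, a native service can check the resulting system properties at runtime. The sketch
+below is illustrative only and assumes the AOSP `librustutils` crate
+(`rustutils::system_properties`):
+
+```rust
+use rustutils::system_properties;
+
+/// Returns whether the hypervisor advertises support for protected VMs.
+/// init surfaces `androidboot.*` bootconfigs as `ro.boot.*` system properties.
+fn protected_vm_supported() -> bool {
+    system_properties::read_bool("ro.boot.hypervisor.protected_vm.supported", false)
+        .unwrap_or(false)
+}
+```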
diff --git a/docs/updatable_vm.md b/docs/updatable_vm.md
new file mode 100644
index 0000000..de5552e
--- /dev/null
+++ b/docs/updatable_vm.md
@@ -0,0 +1,3 @@
+# Updatable VM
+
+(To be filled)
diff --git a/docs/vm_remote_attestation.md b/docs/vm_remote_attestation.md
new file mode 100644
index 0000000..093418b
--- /dev/null
+++ b/docs/vm_remote_attestation.md
@@ -0,0 +1,3 @@
+# VM Remote Attestation
+
+(To be filled)
diff --git a/java/service/Android.bp b/java/service/Android.bp
index fdfb203..8bac7be 100644
--- a/java/service/Android.bp
+++ b/java/service/Android.bp
@@ -29,6 +29,9 @@
"framework",
"services.core",
],
+ static_libs: [
+ "android.system.virtualizationmaintenance-java",
+ ],
sdk_version: "core_platform",
apex_available: ["com.android.virt"],
installable: true,
diff --git a/java/service/src/com/android/system/virtualmachine/VirtualizationSystemService.java b/java/service/src/com/android/system/virtualmachine/VirtualizationSystemService.java
index 2905acd..3f973b4 100644
--- a/java/service/src/com/android/system/virtualmachine/VirtualizationSystemService.java
+++ b/java/service/src/com/android/system/virtualmachine/VirtualizationSystemService.java
@@ -16,16 +16,121 @@
package com.android.system.virtualmachine;
+import android.content.BroadcastReceiver;
import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.os.Handler;
+import android.os.IBinder;
+import android.os.ServiceManager;
+import android.os.UserHandle;
+import android.system.virtualizationmaintenance.IVirtualizationMaintenance;
+import android.util.Log;
+
+import com.android.internal.os.BackgroundThread;
import com.android.server.SystemService;
-/** TODO */
+/**
+ * This class exists to notify virtualization service of relevant things happening in the Android
+ * framework.
+ *
+ * <p>It is currently responsible for Secretkeeper-related maintenance: ensuring that we are not
+ * storing secrets for apps or users that no longer exist.
+ */
public class VirtualizationSystemService extends SystemService {
+ private static final String TAG = VirtualizationSystemService.class.getName();
+ private static final String SERVICE_NAME = "android.system.virtualizationmaintenance";
+ private Handler mHandler;
public VirtualizationSystemService(Context context) {
super(context);
}
@Override
- public void onStart() {}
+ public void onStart() {
+ // Nothing needed here - we don't expose any binder service. The binder service we use is
+ // exposed as a lazy service by the virtualizationservice native binary.
+ }
+
+ @Override
+ public void onBootPhase(int phase) {
+ if (phase != PHASE_BOOT_COMPLETED) return;
+
+ mHandler = BackgroundThread.getHandler();
+ new Receiver().registerForBroadcasts();
+ }
+
+ private void notifyAppRemoved(int uid) {
+ try {
+ IVirtualizationMaintenance maintenance = connectToMaintenanceService();
+ maintenance.appRemoved(UserHandle.getUserId(uid), UserHandle.getAppId(uid));
+ } catch (Exception e) {
+ Log.e(TAG, "notifyAppRemoved failed", e);
+ }
+ }
+
+ private void notifyUserRemoved(int userId) {
+ try {
+ IVirtualizationMaintenance maintenance = connectToMaintenanceService();
+ maintenance.userRemoved(userId);
+ } catch (Exception e) {
+ Log.e(TAG, "notifyUserRemoved failed", e);
+ }
+ }
+
+ private static IVirtualizationMaintenance connectToMaintenanceService() {
+ IBinder binder = ServiceManager.waitForService(SERVICE_NAME);
+ IVirtualizationMaintenance maintenance =
+ IVirtualizationMaintenance.Stub.asInterface(binder);
+ if (maintenance == null) {
+ throw new IllegalStateException("Failed to connect to " + SERVICE_NAME);
+ }
+ return maintenance;
+ }
+
+ private class Receiver extends BroadcastReceiver {
+ public void registerForBroadcasts() {
+ Context allUsers = getContext().createContextAsUser(UserHandle.ALL, 0 /* flags */);
+
+ allUsers.registerReceiver(this, new IntentFilter(Intent.ACTION_USER_REMOVED));
+
+ IntentFilter packageFilter = new IntentFilter(Intent.ACTION_PACKAGE_REMOVED);
+ packageFilter.addDataScheme("package");
+ allUsers.registerReceiver(this, packageFilter);
+ }
+
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ switch (intent.getAction()) {
+ case Intent.ACTION_USER_REMOVED:
+ onUserRemoved(intent);
+ break;
+ case Intent.ACTION_PACKAGE_REMOVED:
+ onPackageRemoved(intent);
+ break;
+ default:
+ Log.e(TAG, "received unexpected intent: " + intent.getAction());
+ break;
+ }
+ }
+
+ private void onUserRemoved(Intent intent) {
+ int userId = intent.getIntExtra(Intent.EXTRA_USER_HANDLE, UserHandle.USER_NULL);
+ if (userId != UserHandle.USER_NULL) {
+ mHandler.post(() -> notifyUserRemoved(userId));
+ }
+ }
+
+ private void onPackageRemoved(Intent intent) {
+ if (intent.getBooleanExtra(Intent.EXTRA_REPLACING, false)
+ || !intent.getBooleanExtra(Intent.EXTRA_DATA_REMOVED, false)) {
+ // Package is being updated rather than uninstalled.
+ return;
+ }
+ int uid = intent.getIntExtra(Intent.EXTRA_UID, -1);
+ if (uid != -1) {
+ mHandler.post(() -> notifyAppRemoved(uid));
+ }
+ }
+ }
}
diff --git a/libs/libfdt/src/iterators.rs b/libs/libfdt/src/iterators.rs
index cb7afda..743c52b 100644
--- a/libs/libfdt/src/iterators.rs
+++ b/libs/libfdt/src/iterators.rs
@@ -33,7 +33,7 @@
impl<'a> CompatibleIterator<'a> {
pub(crate) fn new(fdt: &'a Fdt, compatible: &'a CStr) -> Result<Self, FdtError> {
- let node = fdt.root()?;
+ let node = fdt.root();
Ok(Self { node, compatible })
}
}
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 3339262..8ea9cd9 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -478,14 +478,38 @@
self.delete_and_next(next_offset)
}
- /// Returns the next node
+    /// Returns the next node. Use this API to traverse the descendants of a node.
+    ///
+    /// The returned depth is relative to the initial node on which the first next-node API was
+    /// called. Returns `None` when the end of the FDT is reached or the depth becomes negative.
+    ///
+    /// See also [`next_node_skip_subnodes`] and [`delete_and_next_node`].
pub fn next_node(self, depth: usize) -> Result<Option<(Self, usize)>> {
let next = self.fdt.next_node(self.offset, depth)?;
Ok(next.map(|(offset, depth)| (Self { fdt: self.fdt, offset }, depth)))
}
- /// Deletes this and returns the next node
+    /// Returns the next node, skipping its subnodes. Use this API to traverse the descendants of
+    /// a node while ignoring certain nodes.
+    ///
+    /// The returned depth is relative to the initial node on which the first next-node API was
+    /// called. Returns `None` when the end of the FDT is reached or the depth becomes negative.
+    ///
+    /// See also [`next_node`] and [`delete_and_next_node`].
+ pub fn next_node_skip_subnodes(self, depth: usize) -> Result<Option<(Self, usize)>> {
+ let next = self.fdt.next_node_skip_subnodes(self.offset, depth)?;
+
+ Ok(next.map(|(offset, depth)| (Self { fdt: self.fdt, offset }, depth)))
+ }
+
+    /// Deletes this node and returns the next one. Use this API to traverse the descendants of a
+    /// node while removing certain nodes.
+    ///
+    /// The returned depth is relative to the initial node on which the first next-node API was
+    /// called. Returns `None` when the end of the FDT is reached or the depth becomes negative.
+    ///
+    /// See also [`next_node`] and [`next_node_skip_subnodes`].
pub fn delete_and_next_node(self, depth: usize) -> Result<Option<(Self, usize)>> {
let next_node = self.fdt.next_node_skip_subnodes(self.offset, depth)?;
if let Some((offset, depth)) = next_node {
@@ -669,7 +693,7 @@
///
/// NOTE: This does not support individual "/memory@XXXX" banks.
pub fn memory(&self) -> Result<MemRegIterator> {
- let node = self.root()?.subnode(cstr!("memory"))?.ok_or(FdtError::NotFound)?;
+ let node = self.root().subnode(cstr!("memory"))?.ok_or(FdtError::NotFound)?;
if node.device_type()? != Some(cstr!("memory")) {
return Err(FdtError::BadValue);
}
@@ -683,7 +707,7 @@
/// Returns the standard /chosen node.
pub fn chosen(&self) -> Result<Option<FdtNode>> {
- self.root()?.subnode(cstr!("chosen"))
+ self.root().subnode(cstr!("chosen"))
}
/// Returns the standard /chosen node as mutable.
@@ -692,13 +716,13 @@
}
/// Returns the root node of the tree.
- pub fn root(&self) -> Result<FdtNode> {
- Ok(FdtNode { fdt: self, offset: NodeOffset::ROOT })
+ pub fn root(&self) -> FdtNode {
+ FdtNode { fdt: self, offset: NodeOffset::ROOT }
}
/// Returns the standard /__symbols__ node.
pub fn symbols(&self) -> Result<Option<FdtNode>> {
- self.root()?.subnode(cstr!("__symbols__"))
+ self.root().subnode(cstr!("__symbols__"))
}
/// Returns the standard /__symbols__ node as mutable
@@ -738,8 +762,8 @@
}
/// Returns the mutable root node of the tree.
- pub fn root_mut(&mut self) -> Result<FdtNodeMut> {
- Ok(FdtNodeMut { fdt: self, offset: NodeOffset::ROOT })
+ pub fn root_mut(&mut self) -> FdtNodeMut {
+ FdtNodeMut { fdt: self, offset: NodeOffset::ROOT }
}
/// Returns a mutable tree node by its full path.
diff --git a/libs/libfdt/tests/api_test.rs b/libs/libfdt/tests/api_test.rs
index 8f5b76d..f521a00 100644
--- a/libs/libfdt/tests/api_test.rs
+++ b/libs/libfdt/tests/api_test.rs
@@ -81,7 +81,7 @@
let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
let fdt = Fdt::from_slice(&data).unwrap();
- let root = fdt.root().unwrap();
+ let root = fdt.root();
assert_eq!(root.name(), Ok(cstr!("")));
let chosen = fdt.chosen().unwrap().unwrap();
@@ -96,7 +96,7 @@
fn node_subnodes() {
let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
let fdt = Fdt::from_slice(&data).unwrap();
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let expected = [Ok(cstr!("cpus")), Ok(cstr!("randomnode")), Ok(cstr!("chosen"))];
let root_subnodes = root.subnodes().unwrap();
@@ -108,7 +108,7 @@
fn node_properties() {
let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
let fdt = Fdt::from_slice(&data).unwrap();
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let one_be = 0x1_u32.to_be_bytes();
type Result<T> = core::result::Result<T, FdtError>;
let expected: Vec<(Result<&CStr>, Result<&[u8]>)> = vec![
@@ -290,7 +290,7 @@
let fdt = Fdt::from_slice(&data).unwrap();
let name = cstr!("node_a");
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let node = root.subnode(name).unwrap();
assert_ne!(None, node);
let node = node.unwrap();
@@ -304,7 +304,7 @@
let fdt = Fdt::from_slice(&data).unwrap();
let name = b"node_aaaaa";
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let node = root.subnode_with_name_bytes(&name[0..6]).unwrap();
assert_ne!(None, node);
let node = node.unwrap();
@@ -319,7 +319,7 @@
let name = cstr!("node_a");
let node = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.subnode(name).unwrap().unwrap()
};
@@ -378,7 +378,7 @@
let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
let fdt = Fdt::from_mut_slice(&mut data).unwrap();
- let root = fdt.root_mut().unwrap();
+ let root = fdt.root_mut();
let mut subnode_iter = root.first_subnode().unwrap();
while let Some(subnode) = subnode_iter {
@@ -389,7 +389,7 @@
}
}
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let expected_names = vec![
Ok(cstr!("node_a")),
Ok(cstr!("node_b")),
@@ -416,7 +416,7 @@
];
let mut expected_nodes_iter = expected_nodes.iter();
- let mut iter = fdt.root_mut().unwrap().next_node(0).unwrap();
+ let mut iter = fdt.root_mut().next_node(0).unwrap();
while let Some((node, depth)) = iter {
let node_name = node.as_node().name();
if node_name == Ok(cstr!("node_a")) || node_name == Ok(cstr!("node_zz")) {
@@ -431,7 +431,7 @@
}
assert_eq!(None, expected_nodes_iter.next());
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let all_descendants: Vec<_> =
root.descendants().map(|(node, depth)| (node.name(), depth)).collect();
assert_eq!(expected_nodes, all_descendants);
@@ -442,12 +442,12 @@
let mut data = fs::read(TEST_TREE_WITH_EMPTY_MEMORY_RANGE_PATH).unwrap();
let fdt = Fdt::from_mut_slice(&mut data).unwrap();
- let mut iter = fdt.root_mut().unwrap().next_node(0).unwrap();
+ let mut iter = fdt.root_mut().next_node(0).unwrap();
while let Some((node, depth)) = iter {
iter = node.delete_and_next_node(depth).unwrap();
}
- let root = fdt.root().unwrap();
+ let root = fdt.root();
let all_descendants: Vec<_> =
root.descendants().map(|(node, depth)| (node.name(), depth)).collect();
assert!(all_descendants.is_empty(), "{all_descendants:?}");
@@ -460,7 +460,7 @@
let fdt = Fdt::from_slice(&data).unwrap();
let name = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.name()
// Make root to be dropped
};
@@ -472,12 +472,12 @@
let mut data = vec![0_u8; 1000];
let fdt = Fdt::create_empty_tree(&mut data).unwrap();
- let root = fdt.root_mut().unwrap();
+ let root = fdt.root_mut();
let names = [cstr!("a"), cstr!("b")];
root.add_subnodes(&names).unwrap();
let expected: HashSet<_> = names.into_iter().collect();
- let subnodes = fdt.root().unwrap().subnodes().unwrap();
+ let subnodes = fdt.root().subnodes().unwrap();
let names: HashSet<_> = subnodes.map(|node| node.name().unwrap()).collect();
assert_eq!(expected, names);
@@ -491,7 +491,7 @@
let name = {
let node_a = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.subnode(cstr!("node_a")).unwrap()
// Make root to be dropped
};
@@ -511,7 +511,7 @@
let first_subnode_name = {
let first_subnode = {
let mut subnodes_iter = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.subnodes().unwrap()
// Make root to be dropped
};
@@ -533,7 +533,7 @@
let first_descendant_name = {
let (first_descendant, _) = {
let mut descendants_iter = {
- let root = fdt.root().unwrap();
+ let root = fdt.root();
root.descendants()
// Make root to be dropped
};
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 4aaa793..999dc52 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -559,7 +559,7 @@
avb_add_hash_footer {
name: "microdroid_gki-android14-6.1_kernel_signed",
defaults: ["microdroid_kernel_signed_defaults"],
- filename: "microdroid_gki-android14-6.1_kernel",
+ filename: "microdroid_gki-android14-6.1_kernel_signed",
arch: {
arm64: {
src: ":microdroid_gki_kernel_prebuilts-6.1-arm64",
@@ -574,13 +574,29 @@
],
}
+// HACK: use cc_genrule for arch-specific properties
+cc_genrule {
+ name: "microdroid_gki-android14-6.1_kernel_signed-lz4",
+ out: ["microdroid_gki-android14-6.1_kernel_signed-lz4"],
+ srcs: [":empty_file"],
+ arch: {
+ arm64: {
+ srcs: [":microdroid_gki-android14-6.1_kernel_signed"],
+ exclude_srcs: [":empty_file"],
+ },
+ },
+ tools: ["lz4"],
+ cmd: "$(location lz4) -9 $(in) $(out)",
+}
+
prebuilt_etc {
name: "microdroid_gki-android14-6.1_kernel",
+ filename: "microdroid_gki-android14-6.1_kernel",
src: ":empty_file",
relative_install_path: "fs",
arch: {
arm64: {
- src: ":microdroid_gki-android14-6.1_kernel_signed",
+ src: ":microdroid_gki-android14-6.1_kernel_signed-lz4",
},
x86_64: {
src: ":microdroid_gki-android14-6.1_kernel_signed",
@@ -599,3 +615,41 @@
defaults: ["microdroid_initrd_debug_defaults"],
src: ":microdroid_gki-android14-6.1_initrd_debuggable",
}
+
+python_binary_host {
+ name: "extract_microdroid_kernel_hashes",
+ srcs: ["extract_microdroid_kernel_hashes.py"],
+}
+
+// HACK: use cc_genrule for arch-specific properties
+cc_genrule {
+ name: "microdroid_kernel_hashes_rs",
+ srcs: [":microdroid_kernel"],
+ arch: {
+ arm64: {
+ srcs: [":microdroid_gki-android14-6.1_kernel_signed"],
+ },
+ x86_64: {
+ srcs: [":microdroid_gki-android14-6.1_kernel_signed"],
+ },
+ },
+ out: ["lib.rs"],
+ tools: [
+ "extract_microdroid_kernel_hashes",
+ "avbtool",
+ ],
+ cmd: "$(location extract_microdroid_kernel_hashes) --avbtool $(location avbtool) " +
+ "--kernel $(in) > $(out)",
+}
+
+rust_library_rlib {
+ name: "libmicrodroid_kernel_hashes",
+ srcs: [":microdroid_kernel_hashes_rs"],
+ crate_name: "microdroid_kernel_hashes",
+ prefer_rlib: true,
+ no_stdlibs: true,
+ stdlibs: [
+ "libcompiler_builtins.rust_sysroot",
+ "libcore.rust_sysroot",
+ ],
+}
diff --git a/microdroid/extract_microdroid_kernel_hashes.py b/microdroid/extract_microdroid_kernel_hashes.py
new file mode 100644
index 0000000..f2c6ae7
--- /dev/null
+++ b/microdroid/extract_microdroid_kernel_hashes.py
@@ -0,0 +1,104 @@
+"""Extracts the following hashes from the AVB footer of Microdroid's kernel:
+
+- kernel hash
+- initrd_normal hash
+- initrd_debug hash
+
+The hashes are written to stdout as a Rust file.
+
+In unsupported environments such as x86, where the kernel is just an empty file,
+the output Rust file has the same hash constant fields for compatibility
+reasons, but they are filled with zeros.
+"""
+
+import argparse
+from collections import defaultdict
+import subprocess
+from typing import Dict
+
+PARTITION_NAME_BOOT = 'boot'
+PARTITION_NAME_INITRD_NORMAL = 'initrd_normal'
+PARTITION_NAME_INITRD_DEBUG = 'initrd_debug'
+HASH_SIZE = 32
+
+def main(args):
+ """Main function."""
+ avbtool = args.avbtool
+ num_kernel_images = len(args.kernel)
+
+ print("//! This file is generated by extract_microdroid_kernel_hashes.py.")
+ print("//! It contains the hashes of the kernel and initrds.\n")
+ print("#![no_std]\n#![allow(missing_docs)]\n")
+
+ print("pub const HASH_SIZE: usize = " + str(HASH_SIZE) + ";\n")
+ print("pub struct OsHashes {")
+ print(" pub kernel: [u8; HASH_SIZE],")
+ print(" pub initrd_normal: [u8; HASH_SIZE],")
+ print(" pub initrd_debug: [u8; HASH_SIZE],")
+ print("}\n")
+
+ hashes = defaultdict(list)
+ for kernel_image_path in args.kernel:
+ collected_hashes = collect_hashes(avbtool, kernel_image_path)
+
+ if collected_hashes.keys() == {PARTITION_NAME_BOOT,
+ PARTITION_NAME_INITRD_NORMAL,
+ PARTITION_NAME_INITRD_DEBUG}:
+ for partition_name, v in collected_hashes.items():
+ hashes[partition_name].append(v)
+ else:
+            # Microdroid's kernel is just an empty file in unsupported
+            # environments such as x86; in this case the hashes should be empty.
+ print("/// The kernel is empty, no hashes are available.")
+ hashes[PARTITION_NAME_BOOT].append("")
+ hashes[PARTITION_NAME_INITRD_NORMAL].append("")
+ hashes[PARTITION_NAME_INITRD_DEBUG].append("")
+
+ print("pub const OS_HASHES: [OsHashes; " + str(num_kernel_images) + "] = [")
+ for i in range(num_kernel_images):
+ print("OsHashes {")
+ print(" kernel: [" +
+ format_hex_string(hashes[PARTITION_NAME_BOOT][i]) + "],")
+ print(" initrd_normal: [" +
+ format_hex_string(hashes[PARTITION_NAME_INITRD_NORMAL][i]) + "],")
+ print(" initrd_debug: [" +
+ format_hex_string(hashes[PARTITION_NAME_INITRD_DEBUG][i]) + "],")
+ print("},")
+ print("];")
+
+def collect_hashes(avbtool: str, kernel_image_path: str) -> Dict[str, str]:
+ """Collects the hashes from the AVB footer of the kernel image."""
+ hashes = {}
+ with subprocess.Popen(
+ [avbtool, 'print_partition_digests', '--image', kernel_image_path],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
+ stdout, _ = proc.communicate()
+ for line in stdout.decode("utf-8").split("\n"):
+ line = line.replace(" ", "").split(":")
+ if len(line) == 2:
+ partition_name, hash_ = line
+ hashes[partition_name] = hash_
+ return hashes
+
+def format_hex_string(hex_string: str) -> str:
+ """Formats a hex string into a Rust array."""
+ if not hex_string:
+ return "0x00, " * HASH_SIZE
+ assert len(hex_string) == HASH_SIZE * 2, \
+ "Hex string must have length " + str(HASH_SIZE * 2) + ": " + \
+ hex_string
+ return ", ".join(["\n0x" + hex_string[i:i+2] if i % 32 == 0
+ else "0x" + hex_string[i:i+2]
+ for i in range(0, len(hex_string), 2)])
+
+def parse_args():
+ """Parses the command line arguments."""
+ parser = argparse.ArgumentParser(
+        description="Extracts the hashes from the kernels' AVB footers")
+ parser.add_argument('--avbtool', help='Path to the avbtool binary')
+ parser.add_argument('--kernel', help='Path to the kernel image', nargs='+')
+ return parser.parse_args()
+
+if __name__ == '__main__':
+ main(parse_args())
diff --git a/microdroid_manager/src/vm_payload_service.rs b/microdroid_manager/src/vm_payload_service.rs
index 959197a..7f4317b 100644
--- a/microdroid_manager/src/vm_payload_service.rs
+++ b/microdroid_manager/src/vm_payload_service.rs
@@ -73,7 +73,6 @@
challenge: &[u8],
test_mode: bool,
) -> binder::Result<AttestationResult> {
- self.check_restricted_apis_allowed()?;
let ClientVmAttestationData { private_key, csr } =
generate_attestation_key_and_csr(challenge, self.secret.dice_artifacts())
.map_err(|e| {
diff --git a/pvmfw/src/device_assignment.rs b/pvmfw/src/device_assignment.rs
index 54b5a47..2c47f9e 100644
--- a/pvmfw/src/device_assignment.rs
+++ b/pvmfw/src/device_assignment.rs
@@ -294,6 +294,26 @@
.map_or(false, |name| name == b"__overlay__")
}
+fn filter_dangling_symbols(fdt: &mut Fdt) -> Result<()> {
+ if let Some(symbols) = fdt.symbols()? {
+ let mut removed = vec![];
+ for prop in symbols.properties()? {
+ let path = CStr::from_bytes_with_nul(prop.value()?)
+ .map_err(|_| DeviceAssignmentError::Internal)?;
+ if fdt.node(path)?.is_none() {
+ let name = prop.name()?;
+ removed.push(CString::from(name));
+ }
+ }
+
+ let mut symbols = fdt.symbols_mut()?.unwrap();
+ for name in removed {
+ symbols.nop_property(&name)?;
+ }
+ }
+ Ok(())
+}
+
impl AsRef<Fdt> for VmDtbo {
fn as_ref(&self) -> &Fdt {
&self.0
@@ -715,7 +735,7 @@
}
fn patch_pviommus(&self, fdt: &mut Fdt) -> Result<BTreeMap<PvIommu, Phandle>> {
- let mut compatible = fdt.root_mut()?.next_compatible(Self::PVIOMMU_COMPATIBLE)?;
+ let mut compatible = fdt.root_mut().next_compatible(Self::PVIOMMU_COMPATIBLE)?;
let mut pviommu_phandles = BTreeMap::new();
for pviommu in &self.pviommus {
@@ -744,7 +764,8 @@
device.patch(fdt, &pviommu_phandles)?;
}
- Ok(())
+ // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
+ filter_dangling_symbols(fdt)
}
}
@@ -1020,6 +1041,39 @@
}
#[test]
+ fn device_info_patch_no_pviommus() {
+ let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
+ let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
+ let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
+ let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
+ let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
+ let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();
+
+ let hypervisor = MockHypervisor {
+ mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
+ iommu_tokens: BTreeMap::new(),
+ };
+ let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
+ device_info.filter(vm_dtbo).unwrap();
+
+ // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
+ unsafe {
+ platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
+ }
+ device_info.patch(platform_dt).unwrap();
+
+ let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu")).unwrap();
+ assert_eq!(None, compatible);
+
+ if let Some(symbols) = platform_dt.symbols().unwrap() {
+ for prop in symbols.properties().unwrap() {
+ let path = CStr::from_bytes_with_nul(prop.value().unwrap()).unwrap();
+ assert_ne!(None, platform_dt.node(path).unwrap());
+ }
+ }
+ }
+
+ #[test]
fn device_info_overlay_iommu() {
let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index 146d012..51ba112 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -368,7 +368,7 @@
n: usize,
compat: &CStr,
) -> libfdt::Result<Option<FdtNodeMut<'a>>> {
- let mut node = fdt.root_mut()?.next_compatible(compat)?;
+ let mut node = fdt.root_mut().next_compatible(compat)?;
for _ in 0..n {
node = node.ok_or(FdtError::NoSpace)?.next_compatible(compat)?;
}
@@ -479,7 +479,7 @@
vm_ref_dt: &Fdt,
props_info: &BTreeMap<CString, Vec<u8>>,
) -> libfdt::Result<()> {
- let root_vm_dt = vm_dt.root_mut()?;
+ let root_vm_dt = vm_dt.root_mut();
let mut avf_vm_dt = root_vm_dt.add_subnode(cstr!("avf"))?;
// TODO(b/318431677): Validate nodes beyond /avf.
let avf_node = vm_ref_dt.node(cstr!("/avf"))?.ok_or(FdtError::NotFound)?;
@@ -714,10 +714,8 @@
}
fn patch_pci_info(fdt: &mut Fdt, pci_info: &PciInfo) -> libfdt::Result<()> {
- let mut node = fdt
- .root_mut()?
- .next_compatible(cstr!("pci-host-cam-generic"))?
- .ok_or(FdtError::NotFound)?;
+ let mut node =
+ fdt.root_mut().next_compatible(cstr!("pci-host-cam-generic"))?.ok_or(FdtError::NotFound)?;
let irq_masks_size = pci_info.irq_masks.len() * size_of::<PciIrqMask>();
node.trimprop(cstr!("interrupt-map-mask"), irq_masks_size)?;
@@ -758,7 +756,7 @@
/// Patch the DT by deleting the ns16550a compatible nodes whose address are unknown
fn patch_serial_info(fdt: &mut Fdt, serial_info: &SerialInfo) -> libfdt::Result<()> {
let name = cstr!("ns16550a");
- let mut next = fdt.root_mut()?.next_compatible(name);
+ let mut next = fdt.root_mut().next_compatible(name);
while let Some(current) = next? {
let reg =
current.as_node().reg()?.ok_or(FdtError::NotFound)?.next().ok_or(FdtError::NotFound)?;
@@ -806,7 +804,7 @@
fn patch_swiotlb_info(fdt: &mut Fdt, swiotlb_info: &SwiotlbInfo) -> libfdt::Result<()> {
let mut node =
- fdt.root_mut()?.next_compatible(cstr!("restricted-dma-pool"))?.ok_or(FdtError::NotFound)?;
+ fdt.root_mut().next_compatible(cstr!("restricted-dma-pool"))?.ok_or(FdtError::NotFound)?;
if let Some(range) = swiotlb_info.fixed_range() {
node.setprop_addrrange_inplace(
@@ -845,7 +843,7 @@
let value = [addr0, size0.unwrap(), addr1, size1.unwrap()];
let mut node =
- fdt.root_mut()?.next_compatible(cstr!("arm,gic-v3"))?.ok_or(FdtError::NotFound)?;
+ fdt.root_mut().next_compatible(cstr!("arm,gic-v3"))?.ok_or(FdtError::NotFound)?;
node.setprop_inplace(cstr!("reg"), flatten(&value))
}
@@ -869,7 +867,7 @@
let value = value.into_inner();
let mut node =
- fdt.root_mut()?.next_compatible(cstr!("arm,armv8-timer"))?.ok_or(FdtError::NotFound)?;
+ fdt.root_mut().next_compatible(cstr!("arm,armv8-timer"))?.ok_or(FdtError::NotFound)?;
node.setprop_inplace(cstr!("interrupts"), value.as_bytes())
}
@@ -877,7 +875,7 @@
let avf_node = if let Some(node) = fdt.node_mut(cstr!("/avf"))? {
node
} else {
- fdt.root_mut()?.add_subnode(cstr!("avf"))?
+ fdt.root_mut().add_subnode(cstr!("avf"))?
};
// The node shouldn't already be present; if it is, return the error.
diff --git a/service_vm/fake_chain/src/client_vm.rs b/service_vm/fake_chain/src/client_vm.rs
index 44ea898..6f956a7 100644
--- a/service_vm/fake_chain/src/client_vm.rs
+++ b/service_vm/fake_chain/src/client_vm.rs
@@ -29,7 +29,7 @@
HIDDEN_SIZE,
};
use log::error;
-use microdroid_kernel_hashes::{INITRD_DEBUG_HASH, KERNEL_HASH};
+use microdroid_kernel_hashes::OS_HASHES;
type CborResult<T> = result::Result<T, ciborium::value::Error>;
@@ -176,6 +176,7 @@
}
fn kernel_code_hash() -> Result<[u8; HASH_SIZE]> {
- let code_hash = [KERNEL_HASH, INITRD_DEBUG_HASH].concat();
+ let os_hashes = &OS_HASHES[0];
+ let code_hash = [os_hashes.kernel, os_hashes.initrd_debug].concat();
hash(&code_hash)
}
diff --git a/service_vm/kernel/Android.bp b/service_vm/kernel/Android.bp
deleted file mode 100644
index 79158e6..0000000
--- a/service_vm/kernel/Android.bp
+++ /dev/null
@@ -1,31 +0,0 @@
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-python_binary_host {
- name: "extract_microdroid_kernel_hashes",
- srcs: ["extract_microdroid_kernel_hashes.py"],
-}
-
-genrule {
- name: "microdroid_kernel_hashes_rs",
- srcs: [":microdroid_kernel"],
- out: ["lib.rs"],
- tools: [
- "extract_microdroid_kernel_hashes",
- "avbtool",
- ],
- cmd: "$(location extract_microdroid_kernel_hashes) $(location avbtool) $(in) > $(out)",
-}
-
-rust_library_rlib {
- name: "libmicrodroid_kernel_hashes",
- srcs: [":microdroid_kernel_hashes_rs"],
- crate_name: "microdroid_kernel_hashes",
- prefer_rlib: true,
- no_stdlibs: true,
- stdlibs: [
- "libcompiler_builtins.rust_sysroot",
- "libcore.rust_sysroot",
- ],
-}
diff --git a/service_vm/kernel/extract_microdroid_kernel_hashes.py b/service_vm/kernel/extract_microdroid_kernel_hashes.py
deleted file mode 100644
index 148e8be..0000000
--- a/service_vm/kernel/extract_microdroid_kernel_hashes.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Extracts the following hashes from the AVB footer of Microdroid's kernel:
-
-- kernel hash
-- initrd_normal hash
-- initrd_debug hash
-
-The hashes are written to stdout as a Rust file.
-
-In unsupportive environments such as x86, when the kernel is just an empty file,
-the output Rust file has the same hash constant fields for compatibility
-reasons, but all of them are empty.
-"""
-#!/usr/bin/env python3
-
-import sys
-import subprocess
-from typing import Dict
-
-PARTITION_NAME_BOOT = 'boot'
-PARTITION_NAME_INITRD_NORMAL = 'initrd_normal'
-PARTITION_NAME_INITRD_DEBUG = 'initrd_debug'
-
-def main(args):
- """Main function."""
- avbtool = args[0]
- kernel_image_path = args[1]
- hashes = collect_hashes(avbtool, kernel_image_path)
-
- print("//! This file is generated by extract_microdroid_kernel_hashes.py.")
- print("//! It contains the hashes of the kernel and initrds.\n")
- print("#![no_std]\n#![allow(missing_docs)]\n")
-
- # Microdroid's kernel is just an empty file in unsupportive environments
- # such as x86, in this case the hashes should be empty.
- if hashes.keys() != {PARTITION_NAME_BOOT,
- PARTITION_NAME_INITRD_NORMAL,
- PARTITION_NAME_INITRD_DEBUG}:
- print("/// The kernel is empty, no hashes are available.")
- hashes[PARTITION_NAME_BOOT] = ""
- hashes[PARTITION_NAME_INITRD_NORMAL] = ""
- hashes[PARTITION_NAME_INITRD_DEBUG] = ""
-
- print("pub const KERNEL_HASH: &[u8] = &["
- f"{format_hex_string(hashes[PARTITION_NAME_BOOT])}];\n")
- print("pub const INITRD_NORMAL_HASH: &[u8] = &["
- f"{format_hex_string(hashes[PARTITION_NAME_INITRD_NORMAL])}];\n")
- print("pub const INITRD_DEBUG_HASH: &[u8] = &["
- f"{format_hex_string(hashes[PARTITION_NAME_INITRD_DEBUG])}];")
-
-def collect_hashes(avbtool: str, kernel_image_path: str) -> Dict[str, str]:
- """Collects the hashes from the AVB footer of the kernel image."""
- hashes = {}
- with subprocess.Popen(
- [avbtool, 'print_partition_digests', '--image', kernel_image_path],
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
- stdout, _ = proc.communicate()
- for line in stdout.decode("utf-8").split("\n"):
- line = line.replace(" ", "").split(":")
- if len(line) == 2:
- partition_name, hash_ = line
- hashes[partition_name] = hash_
- return hashes
-
-def format_hex_string(hex_string: str) -> str:
- """Formats a hex string into a Rust array."""
- assert len(hex_string) % 2 == 0, \
- "Hex string must have even length: " + hex_string
- return ", ".join(["\n0x" + hex_string[i:i+2] if i % 32 == 0
- else "0x" + hex_string[i:i+2]
- for i in range(0, len(hex_string), 2)])
-
-if __name__ == '__main__':
- main(sys.argv[1:])
diff --git a/service_vm/requests/src/client_vm.rs b/service_vm/requests/src/client_vm.rs
index d4474cf..15a3bd0 100644
--- a/service_vm/requests/src/client_vm.rs
+++ b/service_vm/requests/src/client_vm.rs
@@ -29,7 +29,7 @@
use der::{Decode, Encode};
use diced_open_dice::{DiceArtifacts, HASH_SIZE};
use log::{error, info};
-use microdroid_kernel_hashes::{INITRD_DEBUG_HASH, INITRD_NORMAL_HASH, KERNEL_HASH};
+use microdroid_kernel_hashes::{HASH_SIZE as KERNEL_HASH_SIZE, OS_HASHES};
use service_vm_comm::{ClientVmAttestationParams, Csr, CsrPayload, RequestProcessingError};
use x509_cert::{certificate::Certificate, name::Name};
@@ -159,10 +159,10 @@
/// embedded during the build time.
fn validate_kernel_code_hash(dice_chain: &ClientVmDiceChain) -> Result<()> {
let kernel = dice_chain.microdroid_kernel();
- if expected_kernel_code_hash_normal()? == kernel.code_hash {
+ if matches_any_kernel_code_hash(&kernel.code_hash, /* is_debug= */ false)? {
return Ok(());
}
- if expected_kernel_code_hash_debug()? == kernel.code_hash {
+ if matches_any_kernel_code_hash(&kernel.code_hash, /* is_debug= */ true)? {
if dice_chain.all_entries_are_secure() {
error!("The Microdroid kernel has debug initrd but the DICE chain is secure");
return Err(RequestProcessingError::InvalidDiceChain);
@@ -173,18 +173,20 @@
Err(RequestProcessingError::InvalidDiceChain)
}
-fn expected_kernel_code_hash_normal() -> bssl_avf::Result<Vec<u8>> {
- let mut code_hash = [0u8; 64];
- code_hash[0..32].copy_from_slice(KERNEL_HASH);
- code_hash[32..].copy_from_slice(INITRD_NORMAL_HASH);
- Digester::sha512().digest(&code_hash)
-}
-
-fn expected_kernel_code_hash_debug() -> bssl_avf::Result<Vec<u8>> {
- let mut code_hash = [0u8; 64];
- code_hash[0..32].copy_from_slice(KERNEL_HASH);
- code_hash[32..].copy_from_slice(INITRD_DEBUG_HASH);
- Digester::sha512().digest(&code_hash)
+fn matches_any_kernel_code_hash(actual_code_hash: &[u8], is_debug: bool) -> bssl_avf::Result<bool> {
+ for os_hash in OS_HASHES {
+ let mut code_hash = [0u8; KERNEL_HASH_SIZE * 2];
+ code_hash[0..KERNEL_HASH_SIZE].copy_from_slice(&os_hash.kernel);
+ if is_debug {
+ code_hash[KERNEL_HASH_SIZE..].copy_from_slice(&os_hash.initrd_debug);
+ } else {
+ code_hash[KERNEL_HASH_SIZE..].copy_from_slice(&os_hash.initrd_normal);
+ }
+ if Digester::sha512().digest(&code_hash)? == actual_code_hash {
+ return Ok(true);
+ }
+ }
+ Ok(false)
}
fn expected_kernel_authority_hash(service_vm_entry: &Value) -> Result<[u8; HASH_SIZE]> {
diff --git a/service_vm/test_apk/assets/config.json b/service_vm/test_apk/assets/config.json
deleted file mode 100644
index caae3ce..0000000
--- a/service_vm/test_apk/assets/config.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "os": {
- "name": "microdroid"
- },
- "task": {
- "type": "microdroid_launcher",
- "command": "libvm_attestation_test_payload.so"
- },
- "export_tombstones": true
- }
\ No newline at end of file
diff --git a/service_vm/test_apk/src/java/com/android/virt/vm_attestation/testapp/VmAttestationTests.java b/service_vm/test_apk/src/java/com/android/virt/vm_attestation/testapp/VmAttestationTests.java
index 7771e83..af99711 100644
--- a/service_vm/test_apk/src/java/com/android/virt/vm_attestation/testapp/VmAttestationTests.java
+++ b/service_vm/test_apk/src/java/com/android/virt/vm_attestation/testapp/VmAttestationTests.java
@@ -40,7 +40,7 @@
@RunWith(Parameterized.class)
public class VmAttestationTests extends MicrodroidDeviceTestBase {
private static final String TAG = "VmAttestationTest";
- private static final String DEFAULT_CONFIG = "assets/config.json";
+ private static final String VM_PAYLOAD_PATH = "libvm_attestation_test_payload.so";
@Parameterized.Parameter(0)
public String mGki;
@@ -71,7 +71,7 @@
assumeFeatureEnabled(VirtualMachineManager.FEATURE_REMOTE_ATTESTATION);
VirtualMachineConfig.Builder builder =
- newVmConfigBuilderWithPayloadConfig(DEFAULT_CONFIG)
+ newVmConfigBuilderWithPayloadBinary(VM_PAYLOAD_PATH)
.setDebugLevel(DEBUG_LEVEL_FULL)
.setVmOutputCaptured(true);
VirtualMachineConfig config = builder.build();
diff --git a/tests/hostside/Android.bp b/tests/hostside/Android.bp
index 13a9925..41d244d 100644
--- a/tests/hostside/Android.bp
+++ b/tests/hostside/Android.bp
@@ -35,6 +35,7 @@
"initrd_bootconfig",
"lpmake",
"lpunpack",
+ "lz4",
"sign_virt_apex",
"simg2img",
"dtdiff",
diff --git a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
index 9b95461..4f502ab 100644
--- a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
@@ -553,7 +553,8 @@
mMicrodroidDevice.enableAdbRoot();
CommandRunner microdroid = new CommandRunner(mMicrodroidDevice);
- microdroid.run(crashCommand);
+        // The VM can crash in the middle of crashCommand, so a failure here is OK.
+ microdroid.tryRun(crashCommand);
// check until microdroid is shut down
waitForCrosvmExit(android, testStartTime);
@@ -953,7 +954,43 @@
assertThat(hasDebugPolicy).isFalse();
}
+ private boolean isLz4(String path) throws Exception {
+ File lz4tool = findTestFile("lz4");
+ CommandResult result =
+ new RunUtil().runTimedCmd(5000, lz4tool.getAbsolutePath(), "-t", path);
+ return result.getStatus() == CommandStatus.SUCCESS;
+ }
+
+ private void decompressLz4(String inputPath, String outputPath) throws Exception {
+ File lz4tool = findTestFile("lz4");
+ CommandResult result =
+ new RunUtil()
+ .runTimedCmd(
+ 5000, lz4tool.getAbsolutePath(), "-d", "-f", inputPath, outputPath);
+ String out = result.getStdout();
+ String err = result.getStderr();
+ assertWithMessage(
+ "lz4 image "
+ + inputPath
+ + " decompression failed."
+ + "\n\tout: "
+ + out
+ + "\n\terr: "
+ + err
+ + "\n")
+ .about(command_results())
+ .that(result)
+ .isSuccess();
+ }
+
private String avbInfo(String image_path) throws Exception {
+ if (isLz4(image_path)) {
+ File decompressedImage = FileUtil.createTempFile("decompressed", ".img");
+ decompressedImage.deleteOnExit();
+ decompressLz4(image_path, decompressedImage.getAbsolutePath());
+ image_path = decompressedImage.getAbsolutePath();
+ }
+
File avbtool = findTestFile("avbtool");
List<String> command =
Arrays.asList(avbtool.getAbsolutePath(), "info_image", "--image", image_path);
diff --git a/tests/libs/libdts/Android.bp b/tests/libs/libdts/Android.bp
new file mode 100644
index 0000000..512c50b
--- /dev/null
+++ b/tests/libs/libdts/Android.bp
@@ -0,0 +1,17 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library_rlib {
+ name: "libdts",
+ crate_name: "dts",
+ defaults: ["avf_build_flags_rust"],
+ srcs: ["src/lib.rs"],
+ edition: "2021",
+ prefer_rlib: true,
+ rustlibs: [
+ "libanyhow",
+ "liblibfdt",
+ ],
+ apex_available: ["com.android.virt"],
+}
diff --git a/tests/libs/libdts/README.md b/tests/libs/libdts/README.md
new file mode 100644
index 0000000..ed63bd0
--- /dev/null
+++ b/tests/libs/libdts/README.md
@@ -0,0 +1,16 @@
+Device tree source (DTS) decompiler that runs on an Android device.
+
+This is an alternative to dtdiff, which only supports bash.
+
+How to use for rust_test
+========================
+
+The following dependencies are needed in addition to libdts:
+
+```
+rust_test {
+ ...
+ data_bins: ["dtc_static"],
+ compile_multilib: "first",
+}
+```
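+
+With the dependencies above in place, a test can compare device trees directly. A minimal sketch
+(the dtb path below is just a placeholder):
+
+```rust
+use dts::Dts;
+use std::path::Path;
+
+#[test]
+fn dtb_matches_live_tree() {
+    // Both values are sorted DTS text produced by `dtc_static`, so equality compares content.
+    let from_dtb = Dts::from_dtb(Path::new("/data/local/tmp/test.dtb")).unwrap();
+    let from_fs = Dts::from_fs(Path::new("/proc/device-tree")).unwrap();
+    assert_eq!(from_dtb, from_fs);
+}
+```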
diff --git a/tests/libs/libdts/src/lib.rs b/tests/libs/libdts/src/lib.rs
new file mode 100644
index 0000000..0ee9b66
--- /dev/null
+++ b/tests/libs/libdts/src/lib.rs
@@ -0,0 +1,75 @@
+// Copyright 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Device tree source (dts) for comparing device tree contents
+//! i.e. sorted dts decompiled by `dtc -s -O dts`.
+
+use anyhow::{anyhow, Result};
+use libfdt::Fdt;
+use std::io::Write;
+use std::path::Path;
+use std::process::{Command, Stdio};
+
+/// Device tree source (dts)
+#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
+pub struct Dts {
+ dts: String,
+}
+
+impl Dts {
+ /// Creates a device tree source from /proc/device-tree style directory
+ pub fn from_fs(path: &Path) -> Result<Self> {
+ let path = path.to_str().unwrap();
+ let res = Command::new("./dtc_static")
+ .args(["-f", "-s", "-I", "fs", "-O", "dts", path])
+ .output()?;
+ if !res.status.success() {
+ return Err(anyhow!("Failed to run dtc_static, res={res:?}"));
+ }
+ Ok(Self { dts: String::from_utf8(res.stdout)? })
+ }
+
+ /// Creates a device tree source from dtb
+ pub fn from_dtb(path: &Path) -> Result<Self> {
+ let path = path.to_str().unwrap();
+ let res = Command::new("./dtc_static")
+ .args(["-f", "-s", "-I", "dtb", "-O", "dts", path])
+ .output()?;
+ if !res.status.success() {
+ return Err(anyhow!("Failed to run dtc_static, res={res:?}"));
+ }
+ Ok(Self { dts: String::from_utf8(res.stdout)? })
+ }
+
+ /// Creates a device tree source from Fdt
+ pub fn from_fdt(fdt: &Fdt) -> Result<Self> {
+ let mut dtc = Command::new("./dtc_static")
+ .args(["-f", "-s", "-I", "dtb", "-O", "dts"])
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .spawn()?;
+
+ {
+ let mut stdin = dtc.stdin.take().unwrap();
+ stdin.write_all(fdt.as_slice())?;
+ // Explicitly drop stdin to avoid indefinite blocking
+ }
+
+ let res = dtc.wait_with_output()?;
+ if !res.status.success() {
+ return Err(anyhow!("Failed to run dtc_static, res={res:?}"));
+ }
+ Ok(Self { dts: String::from_utf8(res.stdout)? })
+ }
+}
diff --git a/tests/testapk_no_perm/Android.bp b/tests/testapk_no_perm/Android.bp
new file mode 100644
index 0000000..22616de
--- /dev/null
+++ b/tests/testapk_no_perm/Android.bp
@@ -0,0 +1,26 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+android_test {
+ name: "MicrodroidTestAppNoPerm",
+ static_libs: [
+ "MicrodroidDeviceTestHelper",
+ "MicrodroidTestHelper",
+ "androidx.test.runner",
+ "androidx.test.ext.junit",
+ "com.android.microdroid.testservice-java",
+ "truth",
+ "compatibility-common-util-devicesidelib",
+ ],
+ jni_libs: [
+ "MicrodroidTestNativeLib",
+ ],
+ test_suites: [
+ "general-tests",
+ "cts",
+ ],
+ srcs: ["src/java/**/*.java"],
+ defaults: ["MicrodroidTestAppsDefaults"],
+ min_sdk_version: "33",
+}
diff --git a/tests/testapk_no_perm/AndroidManifest.xml b/tests/testapk_no_perm/AndroidManifest.xml
new file mode 100644
index 0000000..44aa92a
--- /dev/null
+++ b/tests/testapk_no_perm/AndroidManifest.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2024 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.microdroid.test_no_perm">
+ <uses-sdk android:minSdkVersion="33" android:targetSdkVersion="33" />
+ <uses-feature android:name="android.software.virtualization_framework" android:required="false" />
+ <application />
+ <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+ android:targetPackage="com.android.microdroid.test_no_perm"
+ android:label="No Permission Microdroid Test" />
+</manifest>
diff --git a/tests/testapk_no_perm/AndroidTest.xml b/tests/testapk_no_perm/AndroidTest.xml
new file mode 100644
index 0000000..d4a818f
--- /dev/null
+++ b/tests/testapk_no_perm/AndroidTest.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2024 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs Microdroid tests without the MANAGE_VIRTUAL_MACHINE permission">
+ <option name="test-suite-tag" value="cts" />
+ <option name="config-descriptor:metadata" key="component" value="security" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_instant_app" />
+ <option name="config-descriptor:metadata" key="parameter" value="not_multi_abi" />
+ <option name="config-descriptor:metadata" key="parameter" value="secondary_user" />
+ <target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
+ <option name="test-file-name" value="MicrodroidTestAppNoPerm.apk" />
+ </target_preparer>
+ <test class="com.android.tradefed.testtype.AndroidJUnitTest">
+ <option name="package" value="com.android.microdroid.test_no_perm" />
+ <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
+ <option name="shell-timeout" value="300000" />
+ <option name="test-timeout" value="300000" />
+ </test>
+</configuration>
diff --git a/tests/testapk_no_perm/src/java/com/android/microdroid/test/MicrodroidTestAppNoPerm.java b/tests/testapk_no_perm/src/java/com/android/microdroid/test/MicrodroidTestAppNoPerm.java
new file mode 100644
index 0000000..1772e6b
--- /dev/null
+++ b/tests/testapk_no_perm/src/java/com/android/microdroid/test/MicrodroidTestAppNoPerm.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.microdroid.test;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.junit.Assert.assertThrows;
+
+import android.system.virtualmachine.VirtualMachineConfig;
+
+import com.android.compatibility.common.util.CddTest;
+import com.android.microdroid.test.device.MicrodroidDeviceTestBase;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Tests that the android.permission.MANAGE_VIRTUAL_MACHINE permission is enforced and that an
+ * app cannot create a VM without it.
+ */
+@RunWith(Parameterized.class)
+public class MicrodroidTestAppNoPerm extends MicrodroidDeviceTestBase {
+
+ @Parameterized.Parameters(name = "protectedVm={0}")
+ public static Object[] protectedVmConfigs() {
+ return new Object[] {false, true};
+ }
+
+ @Parameterized.Parameter public boolean mProtectedVm;
+
+ @Before
+ public void setup() {
+ prepareTestSetup(mProtectedVm, null);
+ }
+
+ @Test
+ @CddTest(
+ requirements = {
+ "9.17/C-1-1",
+ "9.17/C-1-2",
+ "9.17/C-1-4",
+ })
+ public void createVmRequiresPermission() {
+ assumeSupportedDevice();
+
+ VirtualMachineConfig config =
+ newVmConfigBuilderWithPayloadBinary("MicrodroidTestNativeLib.so").build();
+
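+ // Without MANAGE_VIRTUAL_MACHINE, VM creation must fail with a SecurityException.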
+ SecurityException e =
+ assertThrows(
+ SecurityException.class,
+ () -> forceCreateNewVirtualMachine("test_vm_requires_permission", config));
+ assertThat(e)
+ .hasMessageThat()
+ .contains("android.permission.MANAGE_VIRTUAL_MACHINE permission");
+ }
+}
diff --git a/virtualizationmanager/fsfdt/Android.bp b/virtualizationmanager/fsfdt/Android.bp
index 7a1e5ed..1d03522 100644
--- a/virtualizationmanager/fsfdt/Android.bp
+++ b/virtualizationmanager/fsfdt/Android.bp
@@ -41,6 +41,7 @@
defaults: ["libfsfdt_default"],
data: ["testdata/**/*"],
data_bins: ["dtc_static"],
- rustlibs: ["libtempfile"],
+ prefer_rlib: true,
+ rustlibs: ["libdts"],
compile_multilib: "first",
}
diff --git a/virtualizationmanager/fsfdt/src/lib.rs b/virtualizationmanager/fsfdt/src/lib.rs
index 84e50c1..e176b7b 100644
--- a/virtualizationmanager/fsfdt/src/lib.rs
+++ b/virtualizationmanager/fsfdt/src/lib.rs
@@ -114,51 +114,20 @@
#[cfg(test)]
mod test {
use super::*;
- use std::io::Write;
- use std::process::Command;
- use tempfile::NamedTempFile;
+ use dts::Dts;
const TEST_FS_FDT_ROOT_PATH: &str = "testdata/fs";
const BUF_SIZE_MAX: usize = 1024;
- fn dts_from_fs(path: &Path) -> String {
- let path = path.to_str().unwrap();
- let res = Command::new("./dtc_static")
- .args(["-f", "-s", "-I", "fs", "-O", "dts", path])
- .output()
- .unwrap();
- assert!(res.status.success(), "{res:?}");
- String::from_utf8(res.stdout).unwrap()
- }
-
- fn dts_from_dtb(path: &Path) -> String {
- let path = path.to_str().unwrap();
- let res = Command::new("./dtc_static")
- .args(["-f", "-s", "-I", "dtb", "-O", "dts", path])
- .output()
- .unwrap();
- assert!(res.status.success(), "{res:?}");
- String::from_utf8(res.stdout).unwrap()
- }
-
- fn to_temp_file(fdt: &Fdt) -> Result<NamedTempFile> {
- let mut file = NamedTempFile::new()?;
- file.as_file_mut().write_all(fdt.as_slice())?;
- file.as_file_mut().sync_all()?;
-
- Ok(file)
- }
-
#[test]
fn test_from_fs() {
let fs_path = Path::new(TEST_FS_FDT_ROOT_PATH);
let mut data = vec![0_u8; BUF_SIZE_MAX];
let fdt = Fdt::from_fs(fs_path, &mut data).unwrap();
- let file = to_temp_file(fdt).unwrap();
- let expected = dts_from_fs(fs_path);
- let actual = dts_from_dtb(file.path());
+ let expected = Dts::from_fs(fs_path).unwrap();
+ let actual = Dts::from_fdt(fdt).unwrap();
assert_eq!(&expected, &actual);
// Again append fdt from TEST_FS_FDT_ROOT_PATH at root & ensure it succeeds when some
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index 7a1ce37..961bb24 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -83,7 +83,7 @@
use std::io::{BufRead, BufReader, Error, ErrorKind, Seek, SeekFrom, Write};
use std::iter;
use std::num::{NonZeroU16, NonZeroU32};
-use std::os::unix::io::{FromRawFd, IntoRawFd};
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use std::os::unix::raw::pid_t;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, Weak};
@@ -488,6 +488,11 @@
.try_for_each(check_label_for_partition)
.or_service_specific_exception(-1)?;
+ // Reject payload and disk image files that come from /vendor or /odm, since files on those
+ // partitions may have unstable interfaces.
+ // TODO(b/316431494): remove once Treble interfaces are stabilized.
+ check_partitions_for_files(config).or_service_specific_exception(-1)?;
+
let kernel = maybe_clone_file(&config.kernel)?;
let initrd = maybe_clone_file(&config.initrd)?;
@@ -859,6 +864,38 @@
Ok(vm_config)
}
+fn check_partition_for_file(fd: &ParcelFileDescriptor) -> Result<()> {
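+ // Resolve the file backing the fd via the /proc/self/fd/<fd> symlink to learn which
+ // partition it lives on.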
+ let path = format!("/proc/self/fd/{}", fd.as_raw_fd());
+ let link = fs::read_link(&path).context(format!("can't read_link {path}"))?;
+
+ // microdroid vendor image is OK
+ if cfg!(vendor_modules) && link == Path::new("/vendor/etc/avf/microdroid/microdroid_vendor.img")
+ {
+ return Ok(());
+ }
+
+ if link.starts_with("/vendor") || link.starts_with("/odm") {
+ bail!("vendor or odm file {} can't be used for VM", link.display());
+ }
+
+ Ok(())
+}
+
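+/// Checks that no kernel, initrd, bootloader or disk partition image in the config is backed by
+/// a file on /vendor or /odm (the Microdroid vendor image being the only permitted exception).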
+fn check_partitions_for_files(config: &VirtualMachineRawConfig) -> Result<()> {
+ config
+ .disks
+ .iter()
+ .flat_map(|disk| disk.partitions.iter())
+ .filter_map(|partition| partition.image.as_ref())
+ .try_for_each(check_partition_for_file)?;
+
+ config.kernel.as_ref().map_or(Ok(()), check_partition_for_file)?;
+ config.initrd.as_ref().map_or(Ok(()), check_partition_for_file)?;
+ config.bootloader.as_ref().map_or(Ok(()), check_partition_for_file)?;
+
+ Ok(())
+}
+
fn load_vm_payload_config_from_file(apk_file: &File, config_path: &str) -> Result<VmPayloadConfig> {
let mut apk_zip = ZipArchive::new(apk_file)?;
let config_file = apk_zip.by_name(config_path)?;
diff --git a/virtualizationmanager/src/crosvm.rs b/virtualizationmanager/src/crosvm.rs
index ddd3e68..97a27e0 100644
--- a/virtualizationmanager/src/crosvm.rs
+++ b/virtualizationmanager/src/crosvm.rs
@@ -595,6 +595,35 @@
}
}
+// Checks whether this process is allowed to run on all CPUs, by parsing the Cpus_allowed_list
+// field of /proc/self/status.
+fn check_if_all_cpus_allowed() -> Result<bool> {
+ let file = read_to_string("/proc/self/status")?;
+ let lines: Vec<_> = file.split('\n').collect();
+
+ for line in lines {
+ if line.contains("Cpus_allowed_list") {
+ let prop: Vec<_> = line.split_whitespace().collect();
+ if prop.len() != 2 {
+ return Ok(false);
+ }
+ let cpu_list: Vec<_> = prop[1].split('-').collect();
+ // Only a single contiguous CPU range (e.g. "0-7") is accepted.
+ if cpu_list.len() != 2 {
+ return Ok(false);
+ }
+ if let Some(cpus) = get_num_cpus() {
+ let min_cpu = cpu_list[0].parse::<usize>()?;
+ let max_cpu = cpu_list[1].parse::<usize>()?;
+ // All CPUs are allowed only if the range covers CPU 0 through the last CPU.
+ return Ok(min_cpu == 0 && max_cpu == cpus - 1);
+ }
+ }
+ }
+ Ok(false)
+}
+
// Get guest time from /proc/[crosvm pid]/stat
fn get_guest_time(pid: u32) -> Result<i64> {
let file = read_to_string(format!("/proc/{}/stat", pid))?;
@@ -809,7 +838,7 @@
}
if config.host_cpu_topology {
- if cfg!(virt_cpufreq) {
+ if cfg!(virt_cpufreq) && check_if_all_cpus_allowed()? {
command.arg("--host-cpu-topology");
cfg_if::cfg_if! {
if #[cfg(any(target_arch = "aarch64"))] {
diff --git a/virtualizationmanager/src/dt_overlay.rs b/virtualizationmanager/src/dt_overlay.rs
index b39ba3a..108ed61 100644
--- a/virtualizationmanager/src/dt_overlay.rs
+++ b/virtualizationmanager/src/dt_overlay.rs
@@ -61,8 +61,8 @@
let fdt =
Fdt::create_empty_tree(buffer).map_err(|e| anyhow!("Failed to create empty Fdt: {e:?}"))?;
- let root = fdt.root_mut().map_err(|e| anyhow!("Failed to get root node: {e:?}"))?;
- let mut fragment = root
+ let mut fragment = fdt
+ .root_mut()
.add_subnode(cstr!("fragment@0"))
.map_err(|e| anyhow!("Failed to add fragment node: {e:?}"))?;
fragment
diff --git a/virtualizationservice/Android.bp b/virtualizationservice/Android.bp
index 2cbc805..5dd1e0f 100644
--- a/virtualizationservice/Android.bp
+++ b/virtualizationservice/Android.bp
@@ -23,6 +23,7 @@
rustlibs: [
"android.hardware.security.rkp-V3-rust",
"android.system.virtualizationcommon-rust",
+ "android.system.virtualizationmaintenance-rust",
"android.system.virtualizationservice-rust",
"android.system.virtualizationservice_internal-rust",
"android.system.virtualmachineservice-rust",
diff --git a/virtualizationservice/aidl/Android.bp b/virtualizationservice/aidl/Android.bp
index 8ca375a..112e1cc 100644
--- a/virtualizationservice/aidl/Android.bp
+++ b/virtualizationservice/aidl/Android.bp
@@ -55,6 +55,26 @@
}
aidl_interface {
+ name: "android.system.virtualizationmaintenance",
+ srcs: ["android/system/virtualizationmaintenance/**/*.aidl"],
+ unstable: true,
+ backend: {
+ java: {
+ sdk_version: "module_current",
+ apex_available: [
+ "com.android.virt",
+ ],
+ },
+ rust: {
+ enabled: true,
+ apex_available: [
+ "com.android.virt",
+ ],
+ },
+ },
+}
+
+aidl_interface {
name: "android.system.virtualmachineservice",
srcs: ["android/system/virtualmachineservice/**/*.aidl"],
imports: [
diff --git a/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl b/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl
new file mode 100644
index 0000000..161673a
--- /dev/null
+++ b/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.system.virtualizationmaintenance;
+
+interface IVirtualizationMaintenance {
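+ /** Notification that an app (identified by userId and appId) has been removed. */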
+ void appRemoved(int userId, int appId);
+
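+ /** Notification that a user, and all of that user's apps, has been removed. */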
+ void userRemoved(int userId);
+
+ // TODO: Something for daily reconciliation
+}
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index 79ff89a..3bc7caf 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -60,7 +60,9 @@
IGlobalVmContext::{BnGlobalVmContext, IGlobalVmContext},
IVfioHandler::VfioDev::VfioDev,
IVfioHandler::{BpVfioHandler, IVfioHandler},
- IVirtualizationServiceInternal::IVirtualizationServiceInternal,
+ IVirtualizationServiceInternal::{
+ BnVirtualizationServiceInternal, IVirtualizationServiceInternal,
+ },
};
use virtualmachineservice::IVirtualMachineService::VM_TOMBSTONES_SERVICE_PORT;
use vsock::{VsockListener, VsockStream};
@@ -68,8 +70,6 @@
/// The unique ID of a VM used (together with a port number) for vsock communication.
pub type Cid = u32;
-pub const BINDER_SERVICE_IDENTIFIER: &str = "android.system.virtualizationservice";
-
/// Directory in which to write disk image files used while running VMs.
pub const TEMPORARY_DIRECTORY: &str = "/data/misc/virtualizationservice";
@@ -166,7 +166,7 @@
}
impl VirtualizationServiceInternal {
- pub fn init() -> VirtualizationServiceInternal {
+ pub fn init() -> Strong<dyn IVirtualizationServiceInternal> {
let service = VirtualizationServiceInternal::default();
std::thread::spawn(|| {
@@ -175,7 +175,7 @@
}
});
- service
+ BnVirtualizationServiceInternal::new_binder(service, BinderFeatures::default())
}
}
diff --git a/virtualizationservice/src/main.rs b/virtualizationservice/src/main.rs
index ad21e89..97bb38f 100644
--- a/virtualizationservice/src/main.rs
+++ b/virtualizationservice/src/main.rs
@@ -16,18 +16,15 @@
mod aidl;
mod atom;
+mod maintenance;
mod remote_provisioning;
mod rkpvm;
-use crate::aidl::{
- remove_temporary_dir, BINDER_SERVICE_IDENTIFIER, TEMPORARY_DIRECTORY,
- VirtualizationServiceInternal
-};
+use crate::aidl::{remove_temporary_dir, VirtualizationServiceInternal, TEMPORARY_DIRECTORY};
use android_logger::{Config, FilterBuilder};
-use android_system_virtualizationservice_internal::aidl::android::system::virtualizationservice_internal::IVirtualizationServiceInternal::BnVirtualizationServiceInternal;
-use anyhow::Error;
-use binder::{register_lazy_service, BinderFeatures, ProcessState, ThreadState};
-use log::{info, LevelFilter};
+use anyhow::{bail, Context, Error, Result};
+use binder::{register_lazy_service, ProcessState, ThreadState};
+use log::{error, info, LevelFilter};
use std::fs::{create_dir, read_dir};
use std::os::unix::raw::{pid_t, uid_t};
use std::path::Path;
@@ -35,6 +32,8 @@
const LOG_TAG: &str = "VirtualizationService";
pub(crate) const REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME: &str =
"android.hardware.security.keymint.IRemotelyProvisionedComponent/avf";
+const INTERNAL_SERVICE_NAME: &str = "android.system.virtualizationservice";
+const MAINTENANCE_SERVICE_NAME: &str = "android.system.virtualizationmaintenance";
fn get_calling_pid() -> pid_t {
ThreadState::get_calling_pid()
@@ -45,6 +44,13 @@
}
fn main() {
+ if let Err(e) = try_main() {
+ error!("failed with {e:?}");
+ std::process::exit(1);
+ }
+}
+
+fn try_main() -> Result<()> {
android_logger::init_once(
Config::default()
.with_tag(LOG_TAG)
@@ -57,31 +63,33 @@
),
);
- clear_temporary_files().expect("Failed to delete old temporary files");
+ clear_temporary_files().context("Failed to delete old temporary files")?;
let common_dir_path = Path::new(TEMPORARY_DIRECTORY).join("common");
- create_dir(common_dir_path).expect("Failed to create common directory");
+ create_dir(common_dir_path).context("Failed to create common directory")?;
ProcessState::start_thread_pool();
-
- let service = VirtualizationServiceInternal::init();
- let service = BnVirtualizationServiceInternal::new_binder(service, BinderFeatures::default());
- register_lazy_service(BINDER_SERVICE_IDENTIFIER, service.as_binder()).unwrap();
- info!("Registered Binder service {}.", BINDER_SERVICE_IDENTIFIER);
+ register(INTERNAL_SERVICE_NAME, VirtualizationServiceInternal::init())?;
if cfg!(remote_attestation) {
// The IRemotelyProvisionedComponent service is only supposed to be triggered by rkpd for
// RKP VM attestation.
- let remote_provisioning_service = remote_provisioning::new_binder();
- register_lazy_service(
- REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME,
- remote_provisioning_service.as_binder(),
- )
- .unwrap();
- info!("Registered Binder service {}.", REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME);
+ register(REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME, remote_provisioning::new_binder())?;
+ }
+
+ if cfg!(llpvm_changes) {
+ register(MAINTENANCE_SERVICE_NAME, maintenance::new_binder())?;
}
ProcessState::join_thread_pool();
+ bail!("Thread pool unexpectedly ended");
+}
+
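+/// Registers a lazy Binder service under `name`, logging the registration on success.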
+fn register<T: binder::FromIBinder + ?Sized>(name: &str, service: binder::Strong<T>) -> Result<()> {
+ register_lazy_service(name, service.as_binder())
+ .with_context(|| format!("Failed to register {name}"))?;
+ info!("Registered Binder service {name}.");
+ Ok(())
}
/// Remove any files under `TEMPORARY_DIRECTORY`.
diff --git a/virtualizationservice/src/maintenance.rs b/virtualizationservice/src/maintenance.rs
new file mode 100644
index 0000000..191d39a
--- /dev/null
+++ b/virtualizationservice/src/maintenance.rs
@@ -0,0 +1,44 @@
+// Copyright 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance;
+use anyhow::anyhow;
+use binder::{BinderFeatures, ExceptionCode, Interface, IntoBinderResult, Strong};
+use virtualizationmaintenance::IVirtualizationMaintenance::{
+ BnVirtualizationMaintenance, IVirtualizationMaintenance,
+};
+
+pub(crate) fn new_binder() -> Strong<dyn IVirtualizationMaintenance> {
+ BnVirtualizationMaintenance::new_binder(
+ VirtualizationMaintenanceService {},
+ BinderFeatures::default(),
+ )
+}
+
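+/// Implementation of `IVirtualizationMaintenance`; the callbacks are not yet supported and
+/// currently report `UNSUPPORTED_OPERATION`.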
+pub struct VirtualizationMaintenanceService;
+
+impl Interface for VirtualizationMaintenanceService {}
+
+#[allow(non_snake_case)]
+impl IVirtualizationMaintenance for VirtualizationMaintenanceService {
+ fn appRemoved(&self, _user_id: i32, _app_id: i32) -> binder::Result<()> {
+ Err(anyhow!("appRemoved not supported"))
+ .or_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION)
+ }
+
+ fn userRemoved(&self, _user_id: i32) -> binder::Result<()> {
+ Err(anyhow!("userRemoved not supported"))
+ .or_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION)
+ }
+}
diff --git a/virtualizationservice/src/rkpvm.rs b/virtualizationservice/src/rkpvm.rs
index 79e09b0..67ba740 100644
--- a/virtualizationservice/src/rkpvm.rs
+++ b/virtualizationservice/src/rkpvm.rs
@@ -35,7 +35,7 @@
let request = Request::RequestClientVmAttestation(params);
match vm.process_request(request).context("Failed to process request")? {
Response::RequestClientVmAttestation(cert) => Ok(cert),
- _ => bail!("Incorrect response type"),
+ other => bail!("Incorrect response type {other:?}"),
}
}