Merge "[test] Fix AvfRkpdAppGoogleIntegrationTests on registered device" into main
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 33d46dd..db0b43a 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -3,6 +3,12 @@
 {
   "avf-presubmit": [
     {
+      "name": "AvfRkpdAppIntegrationTests"
+    },
+    {
+      "name": "AvfRkpdVmAttestationTestApp"
+    },
+    {
       "name": "MicrodroidHostTestCases"
     },
     {
@@ -56,16 +62,8 @@
       "name": "AVFHostTestCases"
     },
     {
-      // TODO(b/325610326): Add this target to presubmit once there is enough
-      // SLO data for it.
-      "name": "AvfRkpdAppIntegrationTests"
-    },
-    {
       "name": "AvfRkpdAppGoogleIntegrationTests",
       "keywords": ["internal"]
-    },
-    {
-      "name": "AvfRkpdVmAttestationTestApp"
     }
   ],
   "postsubmit": [
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index d0ca026..6914380 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -24,7 +24,10 @@
 use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
     CpuTopology::CpuTopology,
     IVirtualizationService::IVirtualizationService,
-    VirtualMachineAppConfig::{DebugLevel::DebugLevel, Payload::Payload, VirtualMachineAppConfig},
+    VirtualMachineAppConfig::{
+        CustomConfig::CustomConfig, DebugLevel::DebugLevel, Payload::Payload,
+        VirtualMachineAppConfig,
+    },
     VirtualMachineConfig::VirtualMachineConfig,
 };
 use anyhow::{anyhow, bail, Context, Result};
@@ -116,6 +119,11 @@
             VmCpuTopology::MatchHost => CpuTopology::MATCH_HOST,
         };
 
+        // The CompOS VM doesn't need to be updatable (by design it should run exactly twice,
+        // with the same APKs and APEXes each time). And having it so causes some interesting
+        // circular dependencies when run at boot time by odsign: b/331417880.
+        let custom_config = Some(CustomConfig { wantUpdatable: false, ..Default::default() });
+
         let config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
             name: parameters.name.clone(),
             apk: Some(apk_fd),
@@ -128,6 +136,7 @@
             protectedVm: protected_vm,
             memoryMib: parameters.memory_mib.unwrap_or(0), // 0 means use the default
             cpuTopology: cpu_topology,
+            customConfig: custom_config,
             ..Default::default()
         });
 
diff --git a/demo/java/com/android/microdroid/demo/MainActivity.java b/demo/java/com/android/microdroid/demo/MainActivity.java
index f27b23b..906d18e 100644
--- a/demo/java/com/android/microdroid/demo/MainActivity.java
+++ b/demo/java/com/android/microdroid/demo/MainActivity.java
@@ -76,8 +76,10 @@
                         model.stop();
                     } else {
                         CheckBox debugModeCheckBox = findViewById(R.id.debugMode);
+                        CheckBox protectedModeCheckBox = findViewById(R.id.protectedMode);
                         final boolean debug = debugModeCheckBox.isChecked();
-                        model.run(debug);
+                        final boolean protectedVm = protectedModeCheckBox.isChecked();
+                        model.run(debug, protectedVm);
                     }
                 });
 
@@ -157,7 +159,7 @@
         }
 
         /** Runs a VM */
-        public void run(boolean debug) {
+        public void run(boolean debug, boolean protectedVm) {
             // Create a VM and run it.
             mExecutorService = Executors.newFixedThreadPool(4);
 
@@ -243,7 +245,8 @@
                 VirtualMachineConfig.Builder builder =
                         new VirtualMachineConfig.Builder(getApplication());
                 builder.setPayloadBinaryName("MicrodroidTestNativeLib.so");
-                builder.setProtectedVm(true);
+                builder.setProtectedVm(protectedVm);
+
                 if (debug) {
                     builder.setDebugLevel(VirtualMachineConfig.DEBUG_LEVEL_FULL);
                     builder.setVmOutputCaptured(true);
diff --git a/demo/res/layout/activity_main.xml b/demo/res/layout/activity_main.xml
index f0e35d6..baa7b1f 100644
--- a/demo/res/layout/activity_main.xml
+++ b/demo/res/layout/activity_main.xml
@@ -31,6 +31,13 @@
                 android:layout_height="wrap_content"
                 android:layout_weight="1"
                 android:text="Debug mode" />
+
+            <CheckBox
+                android:id="@+id/protectedMode"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:layout_weight="1"
+                android:text="Protected vm" />
         </LinearLayout>
 
         <TextView
diff --git a/docs/custom_vm.md b/docs/custom_vm.md
index 270ea36..b218a5e 100644
--- a/docs/custom_vm.md
+++ b/docs/custom_vm.md
@@ -1,6 +1,9 @@
 # Custom VM
 
-You can spawn your own custom VMs by passing a JSON config file to the
+## Headless VMs
+
+If your VM is headless (i.e. console in/out is the primary way of interacting
+with it), you can spawn it by passing a JSON config file to the
 VirtualizationService via the `vm` tool on a rooted AVF-enabled device. If your
 device is attached over ADB, you can run:
 
@@ -21,3 +24,225 @@
 
 The `vm` command also has other subcommands for debugging; run
 `/apex/com.android.virt/bin/vm help` for details.
+
+## Graphical VMs
+
+To run OSes with graphics support, follow the instructions below.
+
+### Prepare a guest image
+
+As of today (April 2024), ChromiumOS is the only officially supported guest
+payload. We will be adding more OSes in the future.
+
+#### Build ChromiumOS for VM
+
+First, check out source code from the ChromiumOS and Chromium projects.
+
+* Checking out ChromiumOS: https://www.chromium.org/chromium-os/developer-library/guides/development/developer-guide/
+* Checking out Chromium: https://g3doc.corp.google.com/chrome/chromeos/system_services_team/dev_instructions/g3doc/setup_checkout.md?cl=headless
+
+Important: When you are at the step “Set up gclient args” in the Chromium checkout instructions, configure .gclient as follows.
+
+```
+$ cat ~/chromium/.gclient
+solutions = [
+  {
+    "name": "src",
+    "url": "https://chromium.googlesource.com/chromium/src.git",
+    "managed": False,
+    "custom_deps": {},
+    "custom_vars": {
+      "checkout_src_internal": True,
+    },
+  },
+]
+target_os = ['chromeos']
+```
+
+In this doc, it is assumed that ChromiumOS is checked out at `~/chromiumos` and
+Chromium at `~/chromium`. If you checked them out to different locations, you
+can create symlinks instead.
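+
+For example (a minimal sketch; the source paths below are placeholders for
+wherever your checkouts actually live):
+
+```
+$ ln -s /path/to/your/chromiumos ~/chromiumos
+$ ln -s /path/to/your/chromium ~/chromium
+```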
+
+Then enter into the cros sdk.
+
+```
+$ cd ~/chromiumos
+$ cros_sdk --chrome-root=$(readlink -f ~/chromium)
+```
+
+Now you are in the cros sdk. `(cr)` below means that the commands should be
+executed inside the sdk.
+
+First, choose the target board. `ferrochrome` is the name of the virtual board
+for the AVF-compatible VM.
+
+```
+(cr) setup_board --board=ferrochrome
+```
+
+Then, tell the cros sdk that you want to build chrome (the browser) from your
+local checkout, including any local modifications, instead of from prebuilts.
+
+```
+(cr) CHROME_ORIGIN=LOCAL_SOURCE
+(cr) ACCEPT_LICENSES='*'
+(cr) cros workon -b ferrochrome start \
+chromeos-base/chromeos-chrome \
+chromeos-base/chrome-icu
+(cr) cros_workon_make --board ferrochrome chromeos-chrome
+```
+
+If you have modified the kernel source code (which is under
+~/chromiumos/src/third_party/kernel/v5.15), you also have to tell the cros sdk
+to build the kernel from your modified source code rather than from the
+official HEAD.
+
+```
+(cr) cros workon -b ferrochrome start chromeos-kernel-5_15
+```
+
+Finally, build individual packages, and build the disk image out of the packages.
+
+```
+(cr) cros build-packages --board=ferrochrome --chromium --accept-licenses='*'
+(cr) cros build-image --board=ferrochrome --no-enable-rootfs-verification test
+```
+
+This takes some time. When the build is done, exit from the sdk.
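+
+For example (exiting the shell is enough to leave the sdk):
+
+```
+(cr) exit
+```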
+
+Note: If build-packages doesn’t seem to include your local changes, try
+invoking emerge directly:
+
+```
+(cr) emerge-ferrochrome -av chromeos-base/chromeos-chrome
+```
+
+Don’t forget to call `build-image` afterwards.
+
+You need two outputs:
+
+* ChromiumOS disk image: ~/chromiumos/src/build/images/ferrochrome/latest/chromiumos_test_image.bin
+* The kernel: ~/chromiumos/out/build/ferrochrome/boot/vmlinuz
+
+### Create a guest VM configuration
+
+Push the kernel and the main image to the Android device.
+
+```
+$ adb push ~/chromiumos/src/build/images/ferrochrome/latest/chromiumos_test_image.bin /data/local/tmp/
+$ adb push ~/chromiumos/out/build/ferrochrome/boot/vmlinuz /data/local/tmp/kernel
+```
+
+Create a VM config file as below.
+
+```
+$ cat > vm_config.json; adb push vm_config.json /data/local/tmp
+{
+    "name": "cros",
+    "kernel": "/data/local/tmp/kernel",
+    "disks": [
+        {
+            "image": "/data/local/tmp/chromiumos_test_image.bin",
+            "partitions": [],
+            "writable": true
+        }
+    ],
+    "params": "root=/dev/vda3 rootwait noinitrd ro enforcing=0 cros_debug cros_secure",
+    "protected": false,
+    "cpu_topology": "match_host",
+    "platform_version": "~1.0",
+    "memory_mib" : 8096
+}
+```
+
+### Running the VM
+
+First, enable the `VmLauncherApp` app. This needs to be done only once. In the
+future, this step won't be necessary.
+
+```
+$ adb root
+$ adb shell pm enable com.android.virtualization.vmlauncher/.MainActivity
+$ adb unroot
+```
+
+Then run the script below to set up the network. In the future, this step won't be necessary.
+
+```
+$ cat > setup_network.sh; adb push setup_network.sh /data/local/tmp
+#!/system/bin/sh
+
+set -e
+
+TAP_IFACE=crosvm_tap
+TAP_ADDR=192.168.1.1
+TAP_NET=192.168.1.0
+
+function setup_network() {
+  local WAN_IFACE=$(ip route get 8.8.8.8 2> /dev/null | awk -- '{printf $5}')
+  if [ "${WAN_IFACE}" == "" ]; then
+    echo "No network. Connect to a WiFi network and start again"
+    return 1
+  fi
+
+  if ip link show ${TAP_IFACE} &> /dev/null ; then
+    echo "TAP interface ${TAP_IFACE} already exists"
+    return 1
+  fi
+
+  ip tuntap add mode tap group virtualmachine vnet_hdr ${TAP_IFACE}
+  ip addr add ${TAP_ADDR}/24 dev ${TAP_IFACE}
+  ip link set ${TAP_IFACE} up
+  ip rule flush
+  ip rule add from all lookup ${WAN_IFACE}
+  ip route add ${TAP_NET}/24 dev ${TAP_IFACE} table ${WAN_IFACE}
+  sysctl net.ipv4.ip_forward=1
+  iptables -t filter -F
+  iptables -t nat -A POSTROUTING -s ${TAP_NET}/24 -j MASQUERADE
+}
+
+function setup_if_necessary() {
+  if [ "$(getprop ro.crosvm.network.setup.done)" == 1 ]; then
+    return
+  fi
+  echo "Setting up..."
+  check_privilege
+  setup_network
+  setenforce 0
+  chmod 666 /dev/tun
+  setprop ro.crosvm.network.setup.done 1
+}
+
+function check_privilege() {
+  if [ "$(id -u)" -ne 0 ]; then
+    echo "Run 'adb root' first"
+    return 1
+  fi
+}
+
+setup_if_necessary
+^D
+
+adb root; adb shell /data/local/tmp/setup_network.sh
+```
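+
+To confirm the setup worked (an optional sanity check; `crosvm_tap` is the TAP
+interface created by the script above), you can inspect it from the host side:
+
+```
+$ adb shell ip addr show crosvm_tap
+```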
+
+Then, finally, tap the VmLauncherApp icon in the launcher UI. You will see
+Ferrochrome booting!
+
+If it doesn’t work well, try
+
+```
+$ adb shell pm clear com.android.virtualization.vmlauncher
+```
+
+### Inside guest OS (for ChromiumOS only)
+
+Go to the network settings and configure them as below.
+
+* IP: 192.168.1.2 (other addresses in the 192.168.1.0/24 subnet also work)
+* netmask: 255.255.255.0
+* gateway: 192.168.1.1
+* DNS: 8.8.8.8 (or any DNS server you know)
+
+These settings are persistent (they are stored in chromiumos_test_image.bin),
+so you don’t have to repeat this the next time.
diff --git a/encryptedstore/README.md b/encryptedstore/README.md
index 544d6eb..3d55d85 100644
--- a/encryptedstore/README.md
+++ b/encryptedstore/README.md
@@ -5,7 +5,7 @@
 Any data written in encrypted storage is persisted and is available next time the VM is run.
 
 Encrypted Storage is backed by a para-virtualized block device on the guest which is further
-backed by a qcow2 disk image in the host. The block device is formatted with an ext4 filesystem.
+backed by a disk image file in the host. The block device is formatted with an ext4 filesystem.
 
 ## Security
 
diff --git a/java/framework/src/android/system/virtualmachine/VirtualMachineConfig.java b/java/framework/src/android/system/virtualmachine/VirtualMachineConfig.java
index a8f318c..1b915cd 100644
--- a/java/framework/src/android/system/virtualmachine/VirtualMachineConfig.java
+++ b/java/framework/src/android/system/virtualmachine/VirtualMachineConfig.java
@@ -601,7 +601,7 @@
         config.name = Optional.ofNullable(customImageConfig.getName()).orElse("");
         config.instanceId = new byte[64];
         config.kernel =
-                Optional.of(customImageConfig.getKernelPath())
+                Optional.ofNullable(customImageConfig.getKernelPath())
                         .map(
                                 (path) -> {
                                     try {
diff --git a/java/framework/src/android/system/virtualmachine/VirtualMachineCustomImageConfig.java b/java/framework/src/android/system/virtualmachine/VirtualMachineCustomImageConfig.java
index 8d294fd..8ec9d2c 100644
--- a/java/framework/src/android/system/virtualmachine/VirtualMachineCustomImageConfig.java
+++ b/java/framework/src/android/system/virtualmachine/VirtualMachineCustomImageConfig.java
@@ -16,7 +16,6 @@
 
 package android.system.virtualmachine;
 
-import android.annotation.NonNull;
 import android.annotation.Nullable;
 import android.os.PersistableBundle;
 
@@ -38,7 +37,7 @@
     private static final String KEY_KEYBOARD = "keyboard";
 
     @Nullable private final String name;
-    @NonNull private final String kernelPath;
+    @Nullable private final String kernelPath;
     @Nullable private final String initrdPath;
     @Nullable private final String bootloaderPath;
     @Nullable private final String[] params;
@@ -62,7 +61,7 @@
         return initrdPath;
     }
 
-    @NonNull
+    @Nullable
     public String getKernelPath() {
         return kernelPath;
     }
diff --git a/libs/bssl/error/src/lib.rs b/libs/bssl/error/src/lib.rs
index 82a2d5e..822e02d 100644
--- a/libs/bssl/error/src/lib.rs
+++ b/libs/bssl/error/src/lib.rs
@@ -88,6 +88,9 @@
     EC_KEY_new_by_curve_name,
     EC_KEY_set_public_key_affine_coordinates,
     EC_POINT_get_affine_coordinates,
+    ECDSA_SIG_from_bytes,
+    ECDSA_SIG_new,
+    ECDSA_SIG_set0,
     ECDSA_sign,
     ECDSA_size,
     ECDSA_verify,
@@ -105,6 +108,7 @@
     EVP_DigestVerifyInit,
     HKDF,
     HMAC,
+    i2d_ECDSA_SIG,
     RAND_bytes,
     SHA256,
 }
diff --git a/libs/bssl/src/ec_key.rs b/libs/bssl/src/ec_key.rs
index 897f8a1..3e2e382 100644
--- a/libs/bssl/src/ec_key.rs
+++ b/libs/bssl/src/ec_key.rs
@@ -22,15 +22,18 @@
 use alloc::vec::Vec;
 use bssl_avf_error::{ApiName, Error, Result};
 use bssl_sys::{
-    BN_bin2bn, BN_bn2bin_padded, BN_clear_free, BN_new, CBB_flush, CBB_len, ECDSA_sign, ECDSA_size,
-    ECDSA_verify, EC_GROUP_get_curve_name, EC_GROUP_new_by_curve_name, EC_KEY_check_key,
-    EC_KEY_free, EC_KEY_generate_key, EC_KEY_get0_group, EC_KEY_get0_public_key,
-    EC_KEY_marshal_private_key, EC_KEY_new_by_curve_name, EC_KEY_parse_private_key,
-    EC_KEY_set_public_key_affine_coordinates, EC_POINT_get_affine_coordinates,
-    NID_X9_62_prime256v1, NID_secp384r1, BIGNUM, EC_GROUP, EC_KEY, EC_POINT,
+    i2d_ECDSA_SIG, BN_bin2bn, BN_bn2bin_padded, BN_clear_free, BN_new, CBB_flush, CBB_len,
+    ECDSA_SIG_free, ECDSA_SIG_from_bytes, ECDSA_SIG_get0_r, ECDSA_SIG_get0_s, ECDSA_SIG_new,
+    ECDSA_SIG_set0, ECDSA_sign, ECDSA_size, ECDSA_verify, EC_GROUP_get_curve_name,
+    EC_GROUP_new_by_curve_name, EC_KEY_check_key, EC_KEY_free, EC_KEY_generate_key,
+    EC_KEY_get0_group, EC_KEY_get0_public_key, EC_KEY_marshal_private_key,
+    EC_KEY_new_by_curve_name, EC_KEY_parse_private_key, EC_KEY_set_public_key_affine_coordinates,
+    EC_POINT_get_affine_coordinates, NID_X9_62_prime256v1, NID_secp384r1, BIGNUM, ECDSA_SIG,
+    EC_GROUP, EC_KEY, EC_POINT,
 };
 use cbor_util::{get_label_value, get_label_value_as_bytes};
 use ciborium::Value;
+use core::mem;
 use core::ptr::{self, NonNull};
 use coset::{
     iana::{self, EnumI64},
@@ -144,7 +147,7 @@
     /// Verifies the DER-encoded ECDSA `signature` of the `digest` with the current `EcKey`.
     ///
     /// Returns Ok(()) if the verification succeeds, otherwise an error will be returned.
-    pub fn ecdsa_verify(&self, signature: &[u8], digest: &[u8]) -> Result<()> {
+    pub fn ecdsa_verify_der(&self, signature: &[u8], digest: &[u8]) -> Result<()> {
         // The `type` argument should be 0 as required in the BoringSSL spec.
         const TYPE: i32 = 0;
 
@@ -163,10 +166,19 @@
         check_int_result(ret, ApiName::ECDSA_verify)
     }
 
+    /// Verifies the COSE-encoded (R | S, see RFC8152) ECDSA `signature` of the `digest` with the
+    /// current `EcKey`.
+    ///
+    /// Returns Ok(()) if the verification succeeds, otherwise an error will be returned.
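+    /// (For P-256, for example, the signature is the 32-byte big-endian R value followed
+    /// by the 32-byte big-endian S value, 64 bytes in total.)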
+    pub fn ecdsa_verify_cose(&self, signature: &[u8], digest: &[u8]) -> Result<()> {
+        let signature = ec_cose_signature_to_der(signature)?;
+        self.ecdsa_verify_der(&signature, digest)
+    }
+
     /// Signs the `digest` with the current `EcKey` using ECDSA.
     ///
     /// Returns the DER-encoded ECDSA signature.
-    pub fn ecdsa_sign(&self, digest: &[u8]) -> Result<Vec<u8>> {
+    pub fn ecdsa_sign_der(&self, digest: &[u8]) -> Result<Vec<u8>> {
         // The `type` argument should be 0 as required in the BoringSSL spec.
         const TYPE: i32 = 0;
 
@@ -193,6 +205,15 @@
         }
     }
 
+    /// Signs the `digest` with the current `EcKey` using ECDSA.
+    ///
+    /// Returns the COSE-encoded (R | S, see RFC8152) ECDSA signature.
+    pub fn ecdsa_sign_cose(&self, digest: &[u8]) -> Result<Vec<u8>> {
+        let signature = self.ecdsa_sign_der(digest)?;
+        let coord_bytes = self.ec_group()?.affine_coordinate_size()?;
+        ec_der_signature_to_cose(&signature, coord_bytes)
+    }
+
     /// Returns the maximum size of an ECDSA signature using the current `EcKey`.
     fn ecdsa_size(&self) -> Result<usize> {
         // SAFETY: This function only reads the `EC_KEY` that has been initialized
@@ -324,6 +345,129 @@
     }
 }
 
+/// Convert a COSE format (R | S) ECDSA signature to a DER-encoded form.
+fn ec_cose_signature_to_der(signature: &[u8]) -> Result<Vec<u8>> {
+    let mut ec_sig = EcSignature::new()?;
+    ec_sig.load_from_cose(signature)?;
+    ec_sig.to_der()
+}
+
+/// Convert a DER-encoded signature to COSE format (R | S).
+fn ec_der_signature_to_cose(signature: &[u8], coord_bytes: usize) -> Result<Vec<u8>> {
+    let ec_sig = EcSignature::new_from_der(signature)?;
+    ec_sig.to_cose(coord_bytes)
+}
+
+/// Wrapper for an `ECDSA_SIG` object representing an EC signature.
+struct EcSignature(NonNull<ECDSA_SIG>);
+
+impl EcSignature {
+    /// Allocate a signature object.
+    fn new() -> Result<Self> {
+        // SAFETY: We take ownership of the returned pointer if it is non-null.
+        let signature = unsafe { ECDSA_SIG_new() };
+
+        let signature =
+            NonNull::new(signature).ok_or_else(|| to_call_failed_error(ApiName::ECDSA_SIG_new))?;
+        Ok(Self(signature))
+    }
+
+    /// Populate the signature parameters from a COSE encoding (R | S).
+    fn load_from_cose(&mut self, signature: &[u8]) -> Result<()> {
+        let coord_bytes = signature.len() / 2;
+        if signature.len() != 2 * coord_bytes {
+            return Err(Error::InternalError);
+        }
+        let mut r = BigNum::from_slice(&signature[..coord_bytes])?;
+        let mut s = BigNum::from_slice(&signature[coord_bytes..])?;
+
+        check_int_result(
+            // SAFETY: The ECDSA_SIG was properly allocated and not yet freed. We have ownership
+            // of the two BigNums and they are not null.
+            unsafe { ECDSA_SIG_set0(self.0.as_mut(), r.as_mut_ptr(), s.as_mut_ptr()) },
+            ApiName::ECDSA_SIG_set0,
+        )?;
+
+        // On success, the ECDSA_SIG has taken ownership of the BigNums.
+        mem::forget(r);
+        mem::forget(s);
+
+        Ok(())
+    }
+
+    fn to_cose(&self, coord_bytes: usize) -> Result<Vec<u8>> {
+        let mut result = vec![0u8; coord_bytes.checked_mul(2).unwrap()];
+        let (r_bytes, s_bytes) = result.split_at_mut(coord_bytes);
+
+        // SAFETY: The ECDSA_SIG was properly allocated and not yet freed. Always returns a valid
+        // non-null, non-owning pointer.
+        let r = unsafe { ECDSA_SIG_get0_r(self.0.as_ptr()) };
+        check_int_result(
+            // SAFETY: The r pointer is known to be valid. Only writes within the destination
+            // slice.
+            unsafe { BN_bn2bin_padded(r_bytes.as_mut_ptr(), r_bytes.len(), r) },
+            ApiName::BN_bn2bin_padded,
+        )?;
+
+        // SAFETY: The ECDSA_SIG was properly allocated and not yet freed. Always returns a valid
+        // non-null, non-owning pointer.
+        let s = unsafe { ECDSA_SIG_get0_s(self.0.as_ptr()) };
+        check_int_result(
+            // SAFETY: The s pointer is known to be valid. Only writes within the destination
+            // slice.
+            unsafe { BN_bn2bin_padded(s_bytes.as_mut_ptr(), s_bytes.len(), s) },
+            ApiName::BN_bn2bin_padded,
+        )?;
+
+        Ok(result)
+    }
+
+    /// Populate the signature parameters from a DER encoding
+    fn new_from_der(signature: &[u8]) -> Result<Self> {
+        // SAFETY: Only reads within the bounds of the slice. Returns a pointer to a new ECDSA_SIG
+        // which we take ownership of, or null on error which we check.
+        let signature = unsafe { ECDSA_SIG_from_bytes(signature.as_ptr(), signature.len()) };
+
+        let signature = NonNull::new(signature)
+            .ok_or_else(|| to_call_failed_error(ApiName::ECDSA_SIG_from_bytes))?;
+        Ok(Self(signature))
+    }
+
+    /// Return the signature encoded as DER.
+    fn to_der(&self) -> Result<Vec<u8>> {
+        // SAFETY: The ECDSA_SIG was properly allocated and not yet freed. Null is a valid
+        // value for `outp`; no output is written.
+        let len = unsafe { i2d_ECDSA_SIG(self.0.as_ptr(), ptr::null_mut()) };
+        if len < 0 {
+            return Err(to_call_failed_error(ApiName::i2d_ECDSA_SIG));
+        }
+
+        let mut buf = vec![0; len.try_into().map_err(|_| Error::InternalError)?];
+        let outp = &mut buf.as_mut_ptr();
+        // SAFETY: The ECDSA_SIG was properly allocated and not yet freed. `outp` is a non-null
+        // pointer to a mutable buffer of the right size to which the result will be written.
+        let final_len = unsafe { i2d_ECDSA_SIG(self.0.as_ptr(), outp) };
+        if final_len < 0 {
+            return Err(to_call_failed_error(ApiName::i2d_ECDSA_SIG));
+        }
+        // The input hasn't changed, so the length of the output shouldn't have. If it has we
+        // already have potentially undefined behavior so panic.
+        assert_eq!(
+            len, final_len,
+            "i2d_ECDSA_SIG returned inconsistent lengths: {len}, {final_len}"
+        );
+
+        Ok(buf)
+    }
+}
+
+impl Drop for EcSignature {
+    fn drop(&mut self) {
+        // SAFETY: The pointer was allocated by `ECDSA_SIG_new`.
+        unsafe { ECDSA_SIG_free(self.0.as_mut()) };
+    }
+}
+
 /// Wrapper of an `EC_GROUP` reference.
 struct EcGroup<'a>(&'a EC_GROUP);
 
diff --git a/libs/bssl/tests/eckey_test.rs b/libs/bssl/tests/eckey_test.rs
index 3c0e45d..0fc78a1 100644
--- a/libs/bssl/tests/eckey_test.rs
+++ b/libs/bssl/tests/eckey_test.rs
@@ -87,8 +87,8 @@
     let digest = digester.digest(MESSAGE1)?;
     assert_eq!(digest, sha256(MESSAGE1)?);
 
-    let signature = ec_key.ecdsa_sign(&digest)?;
-    ec_key.ecdsa_verify(&signature, &digest)?;
+    let signature = ec_key.ecdsa_sign_der(&digest)?;
+    ec_key.ecdsa_verify_der(&signature, &digest)?;
     // Building a `PKey` from a temporary `CoseKey` should work as the lifetime
     // of the `PKey` is not tied to the lifetime of the `CoseKey`.
     let pkey = PKey::from_cose_public_key(&ec_key.cose_public_key()?)?;
@@ -102,8 +102,8 @@
     let digester = Digester::sha384();
     let digest = digester.digest(MESSAGE1)?;
 
-    let signature = ec_key.ecdsa_sign(&digest)?;
-    ec_key.ecdsa_verify(&signature, &digest)?;
+    let signature = ec_key.ecdsa_sign_der(&digest)?;
+    ec_key.ecdsa_verify_der(&signature, &digest)?;
     let pkey = PKey::from_cose_public_key(&ec_key.cose_public_key()?)?;
     pkey.verify(&signature, MESSAGE1, Some(digester))
 }
@@ -113,11 +113,11 @@
     let mut ec_key1 = EcKey::new_p256()?;
     ec_key1.generate_key()?;
     let digest = sha256(MESSAGE1)?;
-    let signature = ec_key1.ecdsa_sign(&digest)?;
+    let signature = ec_key1.ecdsa_sign_der(&digest)?;
 
     let mut ec_key2 = EcKey::new_p256()?;
     ec_key2.generate_key()?;
-    let err = ec_key2.ecdsa_verify(&signature, &digest).unwrap_err();
+    let err = ec_key2.ecdsa_verify_der(&signature, &digest).unwrap_err();
     let expected_err = Error::CallFailed(ApiName::ECDSA_verify, EcdsaError::BadSignature.into());
     assert_eq!(expected_err, err);
 
@@ -134,10 +134,49 @@
     let mut ec_key = EcKey::new_p256()?;
     ec_key.generate_key()?;
     let digest1 = sha256(MESSAGE1)?;
-    let signature = ec_key.ecdsa_sign(&digest1)?;
+    let signature = ec_key.ecdsa_sign_der(&digest1)?;
     let digest2 = sha256(MESSAGE2)?;
 
-    let err = ec_key.ecdsa_verify(&signature, &digest2).unwrap_err();
+    let err = ec_key.ecdsa_verify_der(&signature, &digest2).unwrap_err();
+    let expected_err = Error::CallFailed(ApiName::ECDSA_verify, EcdsaError::BadSignature.into());
+    assert_eq!(expected_err, err);
+    Ok(())
+}
+
+#[test]
+fn ecdsa_cose_signing_and_verification_succeed() -> Result<()> {
+    let digest = sha256(MESSAGE1)?;
+    let mut ec_key = EcKey::new_p256()?;
+    ec_key.generate_key()?;
+
+    let signature = ec_key.ecdsa_sign_cose(&digest)?;
+    ec_key.ecdsa_verify_cose(&signature, &digest)?;
+    assert_eq!(signature.len(), 64);
+    Ok(())
+}
+
+#[test]
+fn verifying_ecdsa_cose_signed_with_a_different_message_fails() -> Result<()> {
+    let digest = sha256(MESSAGE1)?;
+    let mut ec_key = EcKey::new_p256()?;
+    ec_key.generate_key()?;
+
+    let signature = ec_key.ecdsa_sign_cose(&digest)?;
+
+    let err = ec_key.ecdsa_verify_cose(&signature, &sha256(MESSAGE2)?).unwrap_err();
+    let expected_err = Error::CallFailed(ApiName::ECDSA_verify, EcdsaError::BadSignature.into());
+    assert_eq!(expected_err, err);
+    Ok(())
+}
+
+#[test]
+fn verifying_ecdsa_cose_signed_as_der_fails() -> Result<()> {
+    let digest = sha256(MESSAGE1)?;
+    let mut ec_key = EcKey::new_p256()?;
+    ec_key.generate_key()?;
+
+    let signature = ec_key.ecdsa_sign_cose(&digest)?;
+    let err = ec_key.ecdsa_verify_der(&signature, &digest).unwrap_err();
     let expected_err = Error::CallFailed(ApiName::ECDSA_verify, EcdsaError::BadSignature.into());
     assert_eq!(expected_err, err);
     Ok(())
diff --git a/libs/libfdt/src/libfdt.rs b/libs/libfdt/src/libfdt.rs
index 1af9edf..9ddfbaa 100644
--- a/libs/libfdt/src/libfdt.rs
+++ b/libs/libfdt/src/libfdt.rs
@@ -311,6 +311,7 @@
     }
 
     /// Safe wrapper around `fdt_open_into()` (C function).
+    #[allow(dead_code)]
     fn open_into(&self, dest: &mut [u8]) -> Result<()> {
         let fdt = self.as_fdt_slice().as_ptr().cast();
 
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 98a541f..ff17ed1 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -181,28 +181,28 @@
     filename: "init.rc",
     src: "init.rc",
     relative_install_path: "init/hw",
-    installable: false, // avoid collision with system partition's init.rc
+    no_full_install: true, // avoid collision with system partition's init.rc
 }
 
 prebuilt_etc {
     name: "microdroid_ueventd_rc",
     filename: "ueventd.rc",
     src: "ueventd.rc",
-    installable: false, // avoid collision with system partition's ueventd.rc
+    no_full_install: true, // avoid collision with system partition's ueventd.rc
 }
 
 prebuilt_etc {
     name: "microdroid_etc_passwd",
     src: "microdroid_passwd",
     filename: "passwd",
-    installable: false,
+    no_full_install: true,
 }
 
 prebuilt_etc {
     name: "microdroid_etc_group",
     src: "microdroid_group",
     filename: "group",
-    installable: false,
+    no_full_install: true,
 }
 
 prebuilt_root {
@@ -217,7 +217,7 @@
             src: ":microdroid_build_prop_gen_arm64",
         },
     },
-    installable: false,
+    no_full_install: true,
 }
 
 genrule {
@@ -389,7 +389,7 @@
     name: "microdroid_fstab",
     src: "fstab.microdroid",
     filename: "fstab.microdroid",
-    installable: false,
+    no_full_install: true,
 }
 
 // python -c "import hashlib; print(hashlib.sha256(b'bootloader').hexdigest())"
@@ -441,14 +441,14 @@
     src: "microdroid_manifest.xml",
     filename: "manifest.xml",
     relative_install_path: "vintf",
-    installable: false,
+    no_full_install: true,
 }
 
 prebuilt_etc {
     name: "microdroid_event-log-tags",
     src: "microdroid_event-log-tags",
     filename: "event-log-tags",
-    installable: false,
+    no_full_install: true,
 }
 
 filegroup {
diff --git a/microdroid/kdump/Android.bp b/microdroid/kdump/Android.bp
index 6c85c43..cd68539 100644
--- a/microdroid/kdump/Android.bp
+++ b/microdroid/kdump/Android.bp
@@ -7,7 +7,7 @@
     defaults: ["avf_build_flags_cc"],
     stem: "kexec_load",
     srcs: ["kexec.c"],
-    installable: false,
+    no_full_install: true,
     static_executable: true, // required because this runs before linkerconfig
     compile_multilib: "64",
 }
@@ -18,7 +18,7 @@
     stem: "crashdump",
     srcs: ["crashdump.c"],
     static_executable: true,
-    installable: false,
+    no_full_install: true,
     compile_multilib: "64",
     sanitize: {
         hwaddress: false, // HWASAN setup fails when run as init process
diff --git a/microdroid/kdump/kernel/Android.bp b/microdroid/kdump/kernel/Android.bp
index 0705875..2bab6a8 100644
--- a/microdroid/kdump/kernel/Android.bp
+++ b/microdroid/kdump/kernel/Android.bp
@@ -21,5 +21,5 @@
             src: "x86_64/kernel-5.15",
         },
     },
-    installable: false,
+    no_full_install: true,
 }
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index 025edff..11e67cb 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -234,4 +234,4 @@
 }
 
 main!(main);
-configure_heap!(SIZE_128KB);
+configure_heap!(SIZE_128KB * 2);
diff --git a/rialto/tests/test.rs b/rialto/tests/test.rs
index 9151ce1..0d57301 100644
--- a/rialto/tests/test.rs
+++ b/rialto/tests/test.rs
@@ -228,7 +228,7 @@
     let tbs_cert = cert.tbs_certificate;
     let digest = sha256(&tbs_cert.to_der().unwrap()).unwrap();
     authority_public_key
-        .ecdsa_verify(cert.signature.raw_bytes(), &digest)
+        .ecdsa_verify_der(cert.signature.raw_bytes(), &digest)
         .expect("Failed to verify the certificate signature with the authority public key");
 
     // Checks that the certificate's subject public key is equal to the key in the CSR.
diff --git a/service_vm/client_vm_csr/src/lib.rs b/service_vm/client_vm_csr/src/lib.rs
index 512ecaf..0babfff 100644
--- a/service_vm/client_vm_csr/src/lib.rs
+++ b/service_vm/client_vm_csr/src/lib.rs
@@ -100,7 +100,7 @@
             sign(message, cdi_leaf_priv.as_array()).map(|v| v.to_vec())
         })?
         .try_add_created_signature(attestation_key_sig_headers, aad, |message| {
-            ecdsa_sign(message, attestation_key)
+            ecdsa_sign_cose(message, attestation_key)
         })?
         .build();
     Ok(signed_data)
@@ -113,12 +113,18 @@
     CoseSignatureBuilder::new().protected(protected).build()
 }
 
-fn ecdsa_sign(message: &[u8], key: &EcKeyRef<Private>) -> Result<Vec<u8>> {
+fn ecdsa_sign_cose(message: &[u8], key: &EcKeyRef<Private>) -> Result<Vec<u8>> {
     let digest = sha256(message);
     // Passes the digest to `ECDSA_do_sign` as recommended in the spec:
     // https://commondatastorage.googleapis.com/chromium-boringssl-docs/ecdsa.h.html#ECDSA_do_sign
     let sig = EcdsaSig::sign::<Private>(&digest, key)?;
-    Ok(sig.to_der()?)
+    ecdsa_sig_to_cose(&sig)
+}
+
+fn ecdsa_sig_to_cose(signature: &EcdsaSig) -> Result<Vec<u8>> {
+    let mut result = signature.r().to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?;
+    result.extend_from_slice(&signature.s().to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?);
+    Ok(result)
 }
 
 fn get_affine_coordinates(key: &EcKeyRef<Private>) -> Result<(Vec<u8>, Vec<u8>)> {
@@ -175,29 +181,38 @@
         let chain = dice::Chain::from_cbor(&session, &csr.dice_cert_chain)?;
         let public_key = chain.leaf().subject_public_key();
         cose_sign
-            .verify_signature(0, aad, |signature, message| public_key.verify(signature, message))?;
+            .verify_signature(0, aad, |signature, message| public_key.verify(signature, message))
+            .context("Verifying CDI_Leaf_Priv signature")?;
 
         // Checks the second signature is signed with attestation key.
         let attestation_public_key = CoseKey::from_slice(&csr_payload.public_key).unwrap();
         let ec_public_key = to_ec_public_key(&attestation_public_key)?;
-        cose_sign.verify_signature(1, aad, |signature, message| {
-            ecdsa_verify(signature, message, &ec_public_key)
-        })?;
+        cose_sign
+            .verify_signature(1, aad, |signature, message| {
+                ecdsa_verify_cose(signature, message, &ec_public_key)
+            })
+            .context("Verifying attestation key signature")?;
 
         // Verifies that private key and the public key form a valid key pair.
         let message = b"test message";
-        let signature = ecdsa_sign(message, &ec_private_key)?;
-        ecdsa_verify(&signature, message, &ec_public_key)?;
+        let signature = ecdsa_sign_cose(message, &ec_private_key)?;
+        ecdsa_verify_cose(&signature, message, &ec_public_key)
+            .context("Verifying signature with attested key")?;
 
         Ok(())
     }
 
-    fn ecdsa_verify(
+    fn ecdsa_verify_cose(
         signature: &[u8],
         message: &[u8],
         ec_public_key: &EcKeyRef<Public>,
     ) -> Result<()> {
-        let sig = EcdsaSig::from_der(signature)?;
+        let coord_bytes = signature.len() / 2;
+        assert_eq!(signature.len(), coord_bytes * 2);
+
+        let r = BigNum::from_slice(&signature[..coord_bytes])?;
+        let s = BigNum::from_slice(&signature[coord_bytes..])?;
+        let sig = EcdsaSig::from_private_components(r, s)?;
         let digest = sha256(message);
         if sig.verify(&digest, ec_public_key)? {
             Ok(())
diff --git a/service_vm/requests/src/client_vm.rs b/service_vm/requests/src/client_vm.rs
index d2e674b..2aa7113 100644
--- a/service_vm/requests/src/client_vm.rs
+++ b/service_vm/requests/src/client_vm.rs
@@ -66,8 +66,9 @@
     // Verifies the second signature with the public key in the CSR payload.
     let ec_public_key = EcKey::from_cose_public_key_slice(&csr_payload.public_key)?;
     cose_sign.verify_signature(ATTESTATION_KEY_SIGNATURE_INDEX, aad, |signature, message| {
-        ecdsa_verify(&ec_public_key, signature, message)
+        ecdsa_verify_cose(&ec_public_key, signature, message)
     })?;
+
     let subject_public_key_info = PKey::try_from(ec_public_key)?.subject_public_key_info()?;
 
     // Builds the TBSCertificate.
@@ -80,12 +81,9 @@
     rand_bytes(&mut serial_number)?;
     let subject = Name::encode_from_string("CN=Android Protected Virtual Machine Key")?;
     let rkp_cert = Certificate::from_der(&params.remotely_provisioned_cert)?;
+    let vm_components = client_vm_dice_chain.microdroid_payload_components()?;
     let vm_components =
-        if let Some(components) = client_vm_dice_chain.microdroid_payload_components() {
-            components.iter().map(cert::VmComponent::new).collect::<der::Result<Vec<_>>>()?
-        } else {
-            Vec::new()
-        };
+        vm_components.iter().map(cert::VmComponent::new).collect::<der::Result<Vec<_>>>()?;
 
     info!("The client VM DICE chain validation succeeded. Beginning to generate the certificate.");
     let attestation_ext = cert::AttestationExtension::new(
@@ -112,20 +110,20 @@
                 RequestProcessingError::FailedToDecryptKeyBlob
             })?;
     let ec_private_key = EcKey::from_ec_private_key(private_key.as_slice())?;
-    let signature = ecdsa_sign(&ec_private_key, &tbs_cert.to_der()?)?;
+    let signature = ecdsa_sign_der(&ec_private_key, &tbs_cert.to_der()?)?;
     let certificate = cert::build_certificate(tbs_cert, &signature)?;
     Ok(certificate.to_der()?)
 }
 
-fn ecdsa_verify(key: &EcKey, signature: &[u8], message: &[u8]) -> bssl_avf::Result<()> {
+fn ecdsa_verify_cose(key: &EcKey, signature: &[u8], message: &[u8]) -> bssl_avf::Result<()> {
     // The message was signed with ECDSA with curve P-256 and SHA-256 at the signature generation.
     let digest = sha256(message)?;
-    key.ecdsa_verify(signature, &digest)
+    key.ecdsa_verify_cose(signature, &digest)
 }
 
-fn ecdsa_sign(key: &EcKey, message: &[u8]) -> bssl_avf::Result<Vec<u8>> {
+fn ecdsa_sign_der(key: &EcKey, message: &[u8]) -> bssl_avf::Result<Vec<u8>> {
     let digest = sha256(message)?;
-    key.ecdsa_sign(&digest)
+    key.ecdsa_sign_der(&digest)
 }
 
 fn validate_service_vm_dice_chain_length(service_vm_dice_chain: &[Value]) -> Result<()> {
diff --git a/service_vm/requests/src/dice.rs b/service_vm/requests/src/dice.rs
index df29676..247c34e 100644
--- a/service_vm/requests/src/dice.rs
+++ b/service_vm/requests/src/dice.rs
@@ -15,6 +15,7 @@
 //! This module contains functions related to DICE.
 
 use alloc::string::String;
+use alloc::vec;
 use alloc::vec::Vec;
 use bssl_avf::{ed25519_verify, Digester, EcKey};
 use cbor_util::{
@@ -112,11 +113,10 @@
     ) -> Result<Self> {
         let microdroid_payload_name =
             &dice_entry_payloads[dice_entry_payloads.len() - 1].config_descriptor.component_name;
-        if MICRODROID_PAYLOAD_COMPONENT_NAME != microdroid_payload_name {
+        if Some(MICRODROID_PAYLOAD_COMPONENT_NAME) != microdroid_payload_name.as_deref() {
             error!(
                 "The last entry in the client VM DICE chain must describe the Microdroid \
-                 payload. Got '{}'",
-                microdroid_payload_name
+                 payload. Got '{microdroid_payload_name:?}'"
             );
             return Err(RequestProcessingError::InvalidDiceChain);
         }
@@ -125,11 +125,10 @@
             let index = dice_entry_payloads.len() - 2;
             let vendor_partition_name =
                 &dice_entry_payloads[index].config_descriptor.component_name;
-            if VENDOR_PARTITION_COMPONENT_NAME != vendor_partition_name {
+            if Some(VENDOR_PARTITION_COMPONENT_NAME) != vendor_partition_name.as_deref() {
                 error!(
                     "The vendor partition entry in the client VM DICE chain must describe the \
-                        vendor partition. Got '{}'",
-                    vendor_partition_name,
+                    vendor partition. Got '{vendor_partition_name:?}'"
                 );
                 return Err(RequestProcessingError::InvalidDiceChain);
             }
@@ -139,11 +138,10 @@
         };
 
         let kernel_name = &dice_entry_payloads[kernel_index].config_descriptor.component_name;
-        if KERNEL_COMPONENT_NAME != kernel_name {
+        if Some(KERNEL_COMPONENT_NAME) != kernel_name.as_deref() {
             error!(
                 "The microdroid kernel entry in the client VM DICE chain must describe the \
-                 Microdroid kernel. Got '{}'",
-                kernel_name,
+                 Microdroid kernel. Got '{kernel_name:?}'"
             );
             return Err(RequestProcessingError::InvalidDiceChain);
         }
@@ -164,8 +162,8 @@
         &self.payloads[self.payloads.len() - 1]
     }
 
-    pub(crate) fn microdroid_payload_components(&self) -> Option<&Vec<SubComponent>> {
-        self.microdroid_payload().config_descriptor.sub_components.as_ref()
+    pub(crate) fn microdroid_payload_components(&self) -> Result<Vec<SubComponent>> {
+        self.microdroid_payload().config_descriptor.sub_components()
     }
 
     /// Returns true if all payloads in the DICE chain are in normal mode.
@@ -210,8 +208,13 @@
     type Error = RequestProcessingError;
 
     fn try_from(key: CoseKey) -> Result<Self> {
-        if !key.key_ops.contains(&KeyOperation::Assigned(iana::KeyOperation::Verify)) {
-            error!("Public key does not support verification");
+        // The public key must allow use for verification.
+        // Note that an empty key_ops set implicitly allows everything.
+        let key_ops = &key.key_ops;
+        if !key_ops.is_empty()
+            && !key_ops.contains(&KeyOperation::Assigned(iana::KeyOperation::Verify))
+        {
+            error!("Public key does not support verification - key_ops: {key_ops:?}");
             return Err(RequestProcessingError::InvalidDiceChain);
         }
         Ok(Self(key))
@@ -226,6 +229,9 @@
     /// generateCertificateRequestV2.cddl:
     ///
     /// PubKeyEd25519 / PubKeyECDSA256 / PubKeyECDSA384
+    ///
+    /// The signature should be in the format defined by COSE in RFC 9053 section 2 for the
+    /// specific algorithm.
     pub(crate) fn verify(&self, signature: &[u8], message: &[u8]) -> Result<()> {
         match &self.0.kty {
             KeyType::Assigned(iana::KeyType::EC2) => {
@@ -243,7 +249,7 @@
                     }
                 };
                 let digest = digester.digest(message)?;
-                Ok(public_key.ecdsa_verify(signature, &digest)?)
+                Ok(public_key.ecdsa_verify_cose(signature, &digest)?)
             }
             KeyType::Assigned(iana::KeyType::OKP) => {
                 let curve_type =
@@ -346,15 +352,21 @@
 ///
 /// hardware/interfaces/security/rkp/aidl/android/hardware/security/keymint/
 /// generateCertificateRequestV2.cddl
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 pub(crate) struct ConfigDescriptor {
-    component_name: String,
-    sub_components: Option<Vec<SubComponent>>,
+    component_name: Option<String>,
+    sub_components: Option<Value>,
 }
 
 impl ConfigDescriptor {
     fn from_slice(data: &[u8]) -> Result<Self> {
-        let value = Value::from_slice(data)?;
+        let value = Value::from_slice(data);
+        let Ok(value) = value else {
+            // Some DICE implementations store a hash in the config descriptor. So we just
+            // skip anything that doesn't parse correctly.
+            info!("Ignoring malformed config descriptor");
+            return Ok(Default::default());
+        };
         let entries = value_to_map(value, "ConfigDescriptor")?;
         let mut builder = ConfigDescriptorBuilder::default();
         for (key, value) in entries.into_iter() {
@@ -365,24 +377,31 @@
                     builder.component_name(name)?;
                 }
                 CONFIG_DESC_SUB_COMPONENTS => {
-                    let sub_components = value_to_array(value, "ConfigDescriptor sub_components")?;
-                    let sub_components = sub_components
-                        .into_iter()
-                        .map(SubComponent::try_from)
-                        .collect::<Result<Vec<_>>>()?;
-                    builder.sub_components(sub_components)?
+                    // If this is the Microdroid payload node then these are the subcomponents. But
+                    // for any other node it could be anything - this isn't a reserved key. So defer
+                    // decoding until we know which node is which.
+                    builder.sub_components(value)?
                 }
                 _ => {}
             }
         }
         builder.build()
     }
+
+    /// Attempt to decode any Microdroid sub-components that were present in this config descriptor.
+    fn sub_components(&self) -> Result<Vec<SubComponent>> {
+        let Some(value) = &self.sub_components else {
+            return Ok(vec![]);
+        };
+        let sub_components = value_to_array(value.clone(), "ConfigDescriptor sub_components")?;
+        sub_components.into_iter().map(SubComponent::try_from).collect()
+    }
 }
 
 #[derive(Debug, Clone, Default)]
 struct ConfigDescriptorBuilder {
     component_name: OnceCell<String>,
-    sub_components: OnceCell<Vec<SubComponent>>,
+    sub_components: OnceCell<Value>,
 }
 
 impl ConfigDescriptorBuilder {
@@ -390,13 +409,12 @@
         set_once(&self.component_name, component_name, "ConfigDescriptor component_name")
     }
 
-    fn sub_components(&mut self, sub_components: Vec<SubComponent>) -> Result<()> {
+    fn sub_components(&mut self, sub_components: Value) -> Result<()> {
         set_once(&self.sub_components, sub_components, "ConfigDescriptor sub_components")
     }
 
     fn build(mut self) -> Result<ConfigDescriptor> {
-        let component_name =
-            take_value(&mut self.component_name, "ConfigDescriptor component_name")?;
+        let component_name = self.component_name.take();
         let sub_components = self.sub_components.take();
         Ok(ConfigDescriptor { component_name, sub_components })
     }
diff --git a/service_vm/requests/src/rkp.rs b/service_vm/requests/src/rkp.rs
index cac0129..4f2262f 100644
--- a/service_vm/requests/src/rkp.rs
+++ b/service_vm/requests/src/rkp.rs
@@ -28,7 +28,7 @@
 use core::result;
 use coset::{iana, AsCborValue, CoseSign1, CoseSign1Builder, HeaderBuilder};
 use diced_open_dice::{derive_cdi_leaf_priv, kdf, sign, DiceArtifacts, PrivateKey};
-use log::error;
+use log::{debug, error};
 use service_vm_comm::{EcdsaP256KeyPair, GenerateCertificateRequestParams, RequestProcessingError};
 use zeroize::Zeroizing;
 
@@ -78,6 +78,8 @@
         let public_key = validate_public_key(&key_to_sign, hmac_key.as_ref())?;
         public_keys.push(public_key.to_cbor_value()?);
     }
+    debug!("Successfully validated all '{}' public keys.", public_keys.len());
+
     // Builds `CsrPayload`.
     let csr_payload = cbor!([
         Value::Integer(CSR_PAYLOAD_SCHEMA_V3.into()),
@@ -91,6 +93,7 @@
     let signed_data_payload =
         cbor!([Value::Bytes(params.challenge.to_vec()), Value::Bytes(csr_payload)])?;
     let signed_data = build_signed_data(&signed_data_payload, dice_artifacts)?.to_cbor_value()?;
+    debug!("Successfully signed the CSR payload.");
 
     // Builds `AuthenticatedRequest<CsrPayload>`.
     // Currently `UdsCerts` is left empty because it is only needed for Samsung devices.
@@ -104,6 +107,7 @@
         dice_cert_chain,
         signed_data,
     ])?;
+    debug!("Successfully built the CBOR authenticated request.");
     Ok(cbor_util::serialize(&auth_req)?)
 }
 
diff --git a/service_vm/test_apk/Android.bp b/service_vm/test_apk/Android.bp
index 72e411e..1ba156f 100644
--- a/service_vm/test_apk/Android.bp
+++ b/service_vm/test_apk/Android.bp
@@ -6,6 +6,7 @@
     name: "vm_attestation_testapp_defaults",
     test_suites: [
         "general-tests",
+        "pts",
     ],
     static_libs: [
         "MicrodroidDeviceTestHelper",
diff --git a/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java b/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
index b176cfc..0280652 100644
--- a/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
+++ b/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
@@ -30,6 +30,7 @@
 
 import com.android.microdroid.test.common.MetricsProcessor;
 import com.android.microdroid.test.host.CommandRunner;
+import com.android.microdroid.test.host.KvmHypTracer;
 import com.android.microdroid.test.host.MicrodroidHostTestCaseBase;
 import com.android.tradefed.device.DeviceNotAvailableException;
 import com.android.tradefed.device.ITestDevice;
@@ -37,6 +38,7 @@
 import com.android.tradefed.log.LogUtil.CLog;
 import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
 import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.SimpleStats;
 
 import org.junit.After;
 import org.junit.Before;
@@ -118,6 +120,20 @@
     }
 
     @Test
+    public void testNoLongHypSections() throws Exception {
+        assumeTrue("Skip without hypervisor tracing", KvmHypTracer.isSupported(getDevice()));
+
+        KvmHypTracer tracer = new KvmHypTracer(getDevice());
+        String result = tracer.run(COMPOSD_CMD_BIN + " test-compile");
+        assertWithMessage("Failed to test compilation VM.")
+                .that(result).ignoringCase().contains("all ok");
+
+        SimpleStats stats = tracer.getDurationStats();
+        reportMetric(stats.getData(), "hyp_sections", "s");
+        CLog.i("Hypervisor traces parsed successfully.");
+    }
+
+    @Test
     public void testCameraAppStartupTime() throws Exception {
         String[] launchIntentPackages = {
             "com.android.camera2",
diff --git a/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java b/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
index 364e769..b2a77a7 100644
--- a/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
+++ b/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
@@ -48,6 +48,7 @@
 
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -206,6 +207,11 @@
         assume().withMessage("Device doesn't support AVF")
                 .that(mCtx.getPackageManager().hasSystemFeature(FEATURE_VIRTUALIZATION_FRAMEWORK))
                 .isTrue();
+        int vendorApiLevel = SystemProperties.getInt("ro.vendor.api_level", 0);
+        boolean isGsi = new File("/system/system_ext/etc/init/init.gsi.rc").exists();
+        assume().withMessage("GSI with vendor API level < 202404 may not support AVF")
+                .that(isGsi && vendorApiLevel < 202404)
+                .isFalse();
     }
 
     protected void assumeSupportedDevice() {
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/host/KvmHypTracer.java b/tests/hostside/helper/java/com/android/microdroid/test/host/KvmHypTracer.java
new file mode 100644
index 0000000..0d8ee96
--- /dev/null
+++ b/tests/hostside/helper/java/com/android/microdroid/test/host/KvmHypTracer.java
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.microdroid.test.host;
+
+import static com.google.common.truth.Truth.assertWithMessage;
+import static org.junit.Assert.assertNotNull;
+
+import com.android.microdroid.test.host.CommandRunner;
+import com.android.tradefed.device.ITestDevice;
+import com.android.tradefed.log.LogUtil.CLog;
+import com.android.tradefed.util.SimpleStats;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import javax.annotation.Nonnull;
+
+/** This class provides utilities to interact with the hyp tracing subsystem */
+public final class KvmHypTracer {
+
+    private static final String HYP_TRACING_ROOT = "/sys/kernel/tracing/hyp/";
+    private static final String HYP_EVENTS[] = { "hyp_enter", "hyp_exit" };
+    private static final int DEFAULT_BUF_SIZE_KB = 4 * 1024;
+    private static final Pattern LOST_EVENT_PATTERN = Pattern.compile(
+            "^CPU:[0-9]* \\[LOST ([0-9]*) EVENTS\\]");
+    private static final Pattern EVENT_PATTERN = Pattern.compile(
+            "^\\[([0-9]*)\\][ \t]*([0-9]*\\.[0-9]*): (" + String.join("|", HYP_EVENTS) + ") (.*)");
+
+    private final CommandRunner mRunner;
+    private final ITestDevice mDevice;
+    private final int mNrCpus;
+
+    private final ArrayList<File> mTraces;
+
+    private void setNode(String node, int val) throws Exception {
+        mRunner.run("echo " + val + " > " + HYP_TRACING_ROOT + node);
+    }
+
+    private static String eventDir(String event) {
+        return "events/hyp/" + event + "/";
+    }
+
+    public static boolean isSupported(ITestDevice device) throws Exception {
+        for (String event: HYP_EVENTS) {
+            if (!device.doesFileExist(HYP_TRACING_ROOT + eventDir(event) + "/enable"))
+                return false;
+        }
+        return true;
+    }
+
+    public KvmHypTracer(@Nonnull ITestDevice device) throws Exception {
+        assertWithMessage("Hypervisor tracing not supported")
+                .that(isSupported(device)).isTrue();
+
+        mDevice = device;
+        mRunner = new CommandRunner(mDevice);
+        mTraces = new ArrayList<File>();
+        mNrCpus = Integer.parseInt(mRunner.run("nproc"));
+    }
+
+    public String run(String payload_cmd) throws Exception {
+        mTraces.clear();
+
+        setNode("tracing_on", 0);
+        mRunner.run("echo 0 | tee " + HYP_TRACING_ROOT + "events/*/*/enable");
+        setNode("buffer_size_kb", DEFAULT_BUF_SIZE_KB);
+        for (String event: HYP_EVENTS)
+            setNode(eventDir(event) + "/enable", 1);
+        setNode("trace", 0);
+
+        /* Cat each per-cpu trace_pipe in its own tmp file in the background */
+        String cmd = "cd " + HYP_TRACING_ROOT + ";";
+        String trace_pipes[] = new String[mNrCpus];
+        for (int i = 0; i < mNrCpus; i++) {
+            trace_pipes[i] = mRunner.run("mktemp -t trace_pipe.cpu" + i + ".XXXXXXXXXX");
+            cmd += "cat per_cpu/cpu" + i + "/trace_pipe > " + trace_pipes[i] + " &";
+            cmd += "CPU" + i + "_TRACE_PIPE_PID=$!;";
+        }
+
+        /* Run the payload with tracing enabled */
+        cmd += "echo 1 > tracing_on;";
+        String cmd_stdout = mRunner.run("mktemp -t cmd_stdout.XXXXXXXXXX");
+        cmd += payload_cmd + " > " + cmd_stdout + ";";
+        cmd += "echo 0 > tracing_on;";
+
+        /* Actively kill the cat subprocesses as trace_pipe is blocking */
+        for (int i = 0; i < mNrCpus; i++)
+            cmd += "kill -9 $CPU" + i + "_TRACE_PIPE_PID;";
+        cmd += "wait";
+
+        /*
+         * The whole thing runs in a single command for simplicity as `adb
+         * shell` doesn't play well with subprocesses outliving their parent,
+         * and cat-ing a trace_pipe is blocking, so doing so from separate Java
+         * threads wouldn't be much easier as we would need to actively kill
+         * them too.
+         */
+        mRunner.run(cmd);
+
+        for (String t: trace_pipes) {
+            File trace = mDevice.pullFile(t);
+            assertNotNull(trace);
+            mTraces.add(trace);
+            mRunner.run("rm -f " + t);
+        }
+
+        String res = mRunner.run("cat " + cmd_stdout);
+        mRunner.run("rm -f " + cmd_stdout);
+        return res;
+    }
+
+    public SimpleStats getDurationStats() throws Exception {
+        SimpleStats stats = new SimpleStats();
+
+        for (File trace: mTraces) {
+            BufferedReader br = new BufferedReader(new FileReader(trace));
+            double last = 0.0, hyp_enter = 0.0;
+            String l, prev_event = "";
+            while ((l = br.readLine()) != null) {
+                Matcher matcher = LOST_EVENT_PATTERN.matcher(l);
+                if (matcher.find())
+                    throw new OutOfMemoryError("Lost " + matcher.group(1) + " events");
+
+                matcher = EVENT_PATTERN.matcher(l);
+                if (!matcher.find()) {
+                    CLog.w("Failed to parse hyp event: " + l);
+                    continue;
+                }
+
+                int cpu = Integer.parseInt(matcher.group(1));
+                if (cpu < 0 || cpu >= mNrCpus)
+                    throw new ParseException("Incorrect CPU number: " + cpu, 0);
+
+                double cur = Double.parseDouble(matcher.group(2));
+                if (cur < last)
+                    throw new ParseException("Time must not go backward: " + cur, 0);
+                last = cur;
+
+                String event = matcher.group(3);
+                if (event.equals(prev_event)) {
+                    throw new ParseException("Hyp event found twice in a row: " + trace + " - " + l,
+                                             0);
+                }
+
+                switch (event) {
+                    case "hyp_exit":
+                        if (prev_event.equals("hyp_enter"))
+                            stats.add(cur - hyp_enter);
+                        break;
+                    case "hyp_enter":
+                        hyp_enter = cur;
+                        break;
+                    default:
+                        throw new ParseException("Unexpected line in trace: " + l, 0);
+                }
+                prev_event = event;
+            }
+        }
+
+        return stats;
+    }
+}
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
index 203bcae..41ddd48 100644
--- a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
+++ b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
@@ -20,6 +20,7 @@
 
 import static com.google.common.truth.Truth.assertWithMessage;
 
+import static org.junit.Assume.assumeFalse;
 import static org.junit.Assume.assumeTrue;
 
 import com.android.compatibility.common.tradefed.build.CompatibilityBuildHelper;
@@ -136,6 +137,15 @@
                 "Requires VM support",
                 testDevice.hasFeature("android.software.virtualization_framework"));
         assumeTrue("Requires VM support", testDevice.supportsMicrodroid());
+
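+        // GSI can be flashed over an older vendor image; AVF is only guaranteed when the
+        // vendor API level is at least 202404, so skip the test on older combinations.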
+        CommandRunner android = new CommandRunner(androidDevice);
+        long vendorApiLevel = androidDevice.getIntProperty("ro.vendor.api_level", 0);
+        boolean isGsi =
+                android.runForResult("[ -e /system/system_ext/etc/init/init.gsi.rc ]").getStatus()
+                        == CommandStatus.SUCCESS;
+        assumeFalse(
+                "GSI with vendor API level < 202404 may not support AVF",
+                isGsi && vendorApiLevel < 202404);
     }
 
     public static void archiveLogThenDelete(TestLogData logs, ITestDevice device, String remotePath,
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index 0c4aa7c..f1509e2 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -67,7 +67,6 @@
     IntoBinderResult,
 };
 use cstr::cstr;
-use disk::QcowFile;
 use glob::glob;
 use lazy_static::lazy_static;
 use log::{debug, error, info, warn};
@@ -256,15 +255,17 @@
             .context("failed to move cursor to start")
             .or_service_specific_exception(-1)?;
         image.set_len(0).context("Failed to reset a file").or_service_specific_exception(-1)?;
-
-        let mut part = QcowFile::new(image, size_bytes)
-            .context("Failed to create QCOW2 image")
+        // Set the file length. In most filesystems this will not allocate any physical disk
+        // space; it only changes the logical size.
+        image
+            .set_len(size_bytes)
+            .context("Failed to extend file")
             .or_service_specific_exception(-1)?;
 
         match partition_type {
             PartitionType::RAW => Ok(()),
-            PartitionType::ANDROID_VM_INSTANCE => format_as_android_vm_instance(&mut part),
-            PartitionType::ENCRYPTEDSTORE => format_as_encryptedstore(&mut part),
+            PartitionType::ANDROID_VM_INSTANCE => format_as_android_vm_instance(&mut image),
+            PartitionType::ENCRYPTEDSTORE => format_as_encryptedstore(&mut image),
             _ => Err(Error::new(
                 ErrorKind::Unsupported,
                 format!("Unsupported partition type {:?}", partition_type),
@@ -434,7 +435,8 @@
         if cfg!(llpvm_changes) {
             instance_id = extract_instance_id(config);
             untrusted_props.push((cstr!("instance-id"), &instance_id[..]));
-            if is_secretkeeper_supported() {
+            let want_updatable = extract_want_updatable(config);
+            if want_updatable && is_secretkeeper_supported() {
                 // Let guest know that it can defer rollback protection to Secretkeeper by setting
                 // an empty property in untrusted node in DT. This enables Updatable VMs.
                 untrusted_props.push((cstr!("defer-rollback-protection"), &[]))
@@ -906,14 +908,14 @@
             append_kernel_param("androidboot.microdroid.mount_vendor=1", &mut vm_config)
         }
 
-        vm_config.devices = custom_config.devices.clone();
+        vm_config.devices.clone_from(&custom_config.devices);
     }
 
     if config.memoryMib > 0 {
         vm_config.memoryMib = config.memoryMib;
     }
 
-    vm_config.name = config.name.clone();
+    vm_config.name.clone_from(&config.name);
     vm_config.protectedVm = config.protectedVm;
     vm_config.cpuTopology = config.cpuTopology;
 
@@ -1374,6 +1376,16 @@
     }
 }
 
+fn extract_want_updatable(config: &VirtualMachineConfig) -> bool {
+    match config {
+        VirtualMachineConfig::RawConfig(_) => true,
+        VirtualMachineConfig::AppConfig(config) => {
+            let Some(custom) = &config.customConfig else { return true };
+            custom.wantUpdatable
+        }
+    }
+}
+
 fn extract_gdb_port(config: &VirtualMachineConfig) -> Option<NonZeroU16> {
     match config {
         VirtualMachineConfig::RawConfig(config) => NonZeroU16::new(config.gdbPort as u16),
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
index 890535b..417d5d3 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
@@ -118,6 +118,12 @@
 
         /** List of SysFS nodes of devices to be assigned */
         String[] devices;
+
+        /**
+         * Whether the VM should be able to keep its secret when updated, if possible. This
+         * should rarely need to be set to false.
+         */
+        boolean wantUpdatable = true;
     }
 
     /** Configuration parameters guarded by android.permission.USE_CUSTOM_VIRTUAL_MACHINE */
diff --git a/virtualizationservice/src/main.rs b/virtualizationservice/src/main.rs
index bcea1bc..8acfdd3 100644
--- a/virtualizationservice/src/main.rs
+++ b/virtualizationservice/src/main.rs
@@ -20,20 +20,18 @@
 mod remote_provisioning;
 mod rkpvm;
 
-use crate::aidl::{remove_temporary_dir, TEMPORARY_DIRECTORY, VirtualizationServiceInternal};
+use crate::aidl::{remove_temporary_dir, VirtualizationServiceInternal, TEMPORARY_DIRECTORY};
 use android_logger::{Config, FilterBuilder};
-use android_system_virtualizationservice_internal::aidl::android::system::{
-    virtualizationservice_internal::IVirtualizationServiceInternal::BnVirtualizationServiceInternal
-};
-use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance::{
-    IVirtualizationMaintenance::BnVirtualizationMaintenance
-};
+use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance;
+use android_system_virtualizationservice_internal::aidl::android::system::virtualizationservice_internal;
 use anyhow::{bail, Context, Error, Result};
 use binder::{register_lazy_service, BinderFeatures, ProcessState, ThreadState};
 use log::{error, info, LevelFilter};
 use std::fs::{create_dir, read_dir};
 use std::os::unix::raw::{pid_t, uid_t};
 use std::path::Path;
+use virtualizationmaintenance::IVirtualizationMaintenance::BnVirtualizationMaintenance;
+use virtualizationservice_internal::IVirtualizationServiceInternal::BnVirtualizationServiceInternal;
 
 const LOG_TAG: &str = "VirtualizationService";
 pub(crate) const REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME: &str =
diff --git a/virtualizationservice/src/maintenance.rs b/virtualizationservice/src/maintenance.rs
index 8efc58d..4732e1f 100644
--- a/virtualizationservice/src/maintenance.rs
+++ b/virtualizationservice/src/maintenance.rs
@@ -24,11 +24,6 @@
 mod vmdb;
 use vmdb::{VmId, VmIdDb};
 
-/// Indicate whether an app ID belongs to a system core component.
-fn core_app_id(app_id: i32) -> bool {
-    app_id < 10000
-}
-
 /// Interface name for the Secretkeeper HAL.
 const SECRETKEEPER_SERVICE: &str = "android.hardware.security.secretkeeper.ISecretkeeper/default";
 
@@ -45,6 +40,11 @@
 
 /// State related to VM secrets.
 pub struct State {
+    /// The real state, lazily created when we first need it.
+    inner: Option<InnerState>,
+}
+
+struct InnerState {
     sk: binder::Strong<dyn ISecretkeeper>,
     /// Database of VM IDs,
     vm_id_db: VmIdDb,
@@ -53,20 +53,69 @@
 
 impl State {
     pub fn new() -> Option<Self> {
-        let sk = match Self::find_sk() {
-            Some(sk) => sk,
-            None => {
-                warn!("failed to find a Secretkeeper instance; skipping secret management");
-                return None;
-            }
+        if is_sk_present() {
+            // Don't instantiate the inner state yet, that will happen when it is needed.
+            Some(Self { inner: None })
+        } else {
+            // If the Secretkeeper HAL doesn't exist, there's never any point in trying to
+            // handle maintenance for it.
+            info!("Failed to find a Secretkeeper instance; skipping secret management");
+            None
+        }
+    }
+
+    /// Return the existing inner state, or create one if there isn't one.
+    /// This is done on demand because in early boot (before Secretkeeper is needed) it may
+    /// not yet be available to connect to. See b/331417880.
+    fn get_inner(&mut self) -> Result<&mut InnerState> {
+        if self.inner.is_none() {
+            self.inner = Some(InnerState::new()?);
+        }
+        Ok(self.inner.as_mut().unwrap())
+    }
+
+    /// Record a new VM ID.  If there is an existing owner (user_id, app_id) for the VM ID,
+    /// it will be replaced.
+    pub fn add_id(&mut self, vm_id: &VmId, user_id: u32, app_id: u32) -> Result<()> {
+        self.get_inner()?.add_id(vm_id, user_id, app_id)
+    }
+
+    /// Delete the VM IDs associated with Android user ID `user_id`.
+    pub fn delete_ids_for_user(&mut self, user_id: i32) -> Result<()> {
+        self.get_inner()?.delete_ids_for_user(user_id)
+    }
+
+    /// Delete the VM IDs associated with `(user_id, app_id)`.
+    pub fn delete_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<()> {
+        self.get_inner()?.delete_ids_for_app(user_id, app_id)
+    }
+
+    /// Delete the provided VM IDs from both Secretkeeper and the database.
+    pub fn delete_ids(&mut self, vm_ids: &[VmId]) {
+        let Ok(inner) = self.get_inner() else {
+            warn!("No Secretkeeper available, not deleting secrets");
+            return;
         };
-        let (vm_id_db, created) = match VmIdDb::new(PERSISTENT_DIRECTORY) {
-            Ok(v) => v,
-            Err(e) => {
-                error!("skipping secret management, failed to connect to database: {e:?}");
-                return None;
-            }
-        };
+
+        inner.delete_ids(vm_ids)
+    }
+
+    /// Perform reconciliation to allow for possibly missed notifications of user or app removal.
+    pub fn reconcile(
+        &mut self,
+        callback: &Strong<dyn IVirtualizationReconciliationCallback>,
+    ) -> Result<()> {
+        self.get_inner()?.reconcile(callback)
+    }
+}
+
+impl InnerState {
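+    /// Connect to Secretkeeper and open (creating if necessary) the VM ID database.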
+    fn new() -> Result<Self> {
+        info!("Connecting to {SECRETKEEPER_SERVICE}");
+        let sk = binder::wait_for_interface::<dyn ISecretkeeper>(SECRETKEEPER_SERVICE)
+            .context(format!("Connecting to {SECRETKEEPER_SERVICE}"))?;
+        let (vm_id_db, created) = VmIdDb::new(PERSISTENT_DIRECTORY)
+            .context("Connecting to secret management database")?;
         if created {
             // If the database did not previously exist, then this appears to be the first run of
             // `virtualizationservice` since device setup or factory reset.  In case of the latter,
@@ -76,32 +125,15 @@
             if let Err(e) = sk.deleteAll() {
                 error!("failed to delete previous secrets, dropping database: {e:?}");
                 vm_id_db.delete_db_file(PERSISTENT_DIRECTORY);
-                return None;
+                return Err(e.into());
             }
         } else {
             info!("re-using existing VM ID DB");
         }
-        Some(Self { sk, vm_id_db, batch_size: DELETE_MAX_BATCH_SIZE })
+        Ok(Self { sk, vm_id_db, batch_size: DELETE_MAX_BATCH_SIZE })
     }
 
-    fn find_sk() -> Option<binder::Strong<dyn ISecretkeeper>> {
-        if let Ok(true) = binder::is_declared(SECRETKEEPER_SERVICE) {
-            match binder::get_interface(SECRETKEEPER_SERVICE) {
-                Ok(sk) => Some(sk),
-                Err(e) => {
-                    error!("failed to connect to {SECRETKEEPER_SERVICE}: {e:?}");
-                    None
-                }
-            }
-        } else {
-            info!("instance {SECRETKEEPER_SERVICE} not declared");
-            None
-        }
-    }
-
-    /// Record a new VM ID.  If there is an existing owner (user_id, app_id) for the VM ID,
-    /// it will be replaced.
-    pub fn add_id(&mut self, vm_id: &VmId, user_id: u32, app_id: u32) -> Result<()> {
+    fn add_id(&mut self, vm_id: &VmId, user_id: u32, app_id: u32) -> Result<()> {
         let user_id: i32 = user_id.try_into().context(format!("user_id {user_id} out of range"))?;
         let app_id: i32 = app_id.try_into().context(format!("app_id {app_id} out of range"))?;
 
@@ -125,8 +157,7 @@
         self.vm_id_db.add_vm_id(vm_id, user_id, app_id)
     }
 
-    /// Delete the VM IDs associated with Android user ID `user_id`.
-    pub fn delete_ids_for_user(&mut self, user_id: i32) -> Result<()> {
+    fn delete_ids_for_user(&mut self, user_id: i32) -> Result<()> {
         let vm_ids = self.vm_id_db.vm_ids_for_user(user_id)?;
         info!(
             "delete_ids_for_user(user_id={user_id}) triggers deletion of {} secrets",
@@ -136,8 +167,7 @@
         Ok(())
     }
 
-    /// Delete the VM IDs associated with `(user_id, app_id)`.
-    pub fn delete_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<()> {
+    fn delete_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<()> {
         let vm_ids = self.vm_id_db.vm_ids_for_app(user_id, app_id)?;
         info!(
             "delete_ids_for_app(user_id={user_id}, app_id={app_id}) removes {} secrets",
@@ -147,8 +177,7 @@
         Ok(())
     }
 
-    /// Delete the provided VM IDs from both Secretkeeper and the database.
-    pub fn delete_ids(&mut self, mut vm_ids: &[VmId]) {
+    fn delete_ids(&mut self, mut vm_ids: &[VmId]) {
         while !vm_ids.is_empty() {
             let len = std::cmp::min(vm_ids.len(), self.batch_size);
             let batch = &vm_ids[..len];
@@ -171,8 +200,7 @@
         }
     }
 
-    /// Perform reconciliation to allow for possibly missed notifications of user or app removal.
-    pub fn reconcile(
+    fn reconcile(
         &mut self,
         callback: &Strong<dyn IVirtualizationReconciliationCallback>,
     ) -> Result<()> {
@@ -245,19 +273,24 @@
     }
 }
 
+/// Indicate whether an app ID belongs to a system core component.
+fn core_app_id(app_id: i32) -> bool {
+    app_id < 10000
+}
+
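+/// Returns whether the Secretkeeper HAL is declared on this device.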
+fn is_sk_present() -> bool {
+    matches!(binder::is_declared(SECRETKEEPER_SERVICE), Ok(true))
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
+    use android_hardware_security_authgraph::aidl::android::hardware::security::authgraph;
+    use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper;
+    use authgraph::IAuthGraphKeyExchange::IAuthGraphKeyExchange;
+    use secretkeeper::ISecretkeeper::BnSecretkeeper;
     use std::sync::{Arc, Mutex};
-    use android_hardware_security_authgraph::aidl::android::hardware::security::authgraph::{
-        IAuthGraphKeyExchange::IAuthGraphKeyExchange,
-    };
-    use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper::{
-        ISecretkeeper::BnSecretkeeper
-    };
-    use virtualizationmaintenance::IVirtualizationReconciliationCallback::{
-        BnVirtualizationReconciliationCallback
-    };
+    use virtualizationmaintenance::IVirtualizationReconciliationCallback::BnVirtualizationReconciliationCallback;
 
     /// Fake implementation of Secretkeeper that keeps a history of what operations were invoked.
     #[derive(Default)]
@@ -298,7 +331,12 @@
         let vm_id_db = vmdb::new_test_db();
         let sk = FakeSk { history };
         let sk = BnSecretkeeper::new_binder(sk, binder::BinderFeatures::default());
-        State { sk, vm_id_db, batch_size }
+        let inner = InnerState { sk, vm_id_db, batch_size };
+        State { inner: Some(inner) }
+    }
+
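+    /// Test helper to access the VM ID database inside the lazily-created inner state.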
+    fn get_db(state: &mut State) -> &mut VmIdDb {
+        &mut state.inner.as_mut().unwrap().vm_id_db
     }
 
     struct Reconciliation {
@@ -360,11 +398,11 @@
         let history = Arc::new(Mutex::new(Vec::new()));
         let mut sk_state = new_test_state(history.clone(), 2);
 
-        sk_state.vm_id_db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID4, USER3, APP_A).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap(); // Overwrites APP_A
+        get_db(&mut sk_state).add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID4, USER3, APP_A).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID5, USER3, APP_C).unwrap(); // Overwrites APP_A
         assert_eq!((*history.lock().unwrap()).clone(), vec![]);
 
         sk_state.delete_ids_for_app(USER2, APP_B).unwrap();
@@ -376,11 +414,14 @@
             vec![SkOp::DeleteIds(vec![VM_ID3]), SkOp::DeleteIds(vec![VM_ID4, VM_ID5]),]
         );
 
-        assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_user(USER1).unwrap());
-        assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_app(USER1, APP_A).unwrap());
+        assert_eq!(vec![VM_ID1, VM_ID2], get_db(&mut sk_state).vm_ids_for_user(USER1).unwrap());
+        assert_eq!(
+            vec![VM_ID1, VM_ID2],
+            get_db(&mut sk_state).vm_ids_for_app(USER1, APP_A).unwrap()
+        );
         let empty: Vec<VmId> = Vec::new();
-        assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_app(USER2, APP_B).unwrap());
-        assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_user(USER3).unwrap());
+        assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_app(USER2, APP_B).unwrap());
+        assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_user(USER3).unwrap());
     }
 
     #[test]
@@ -388,16 +429,19 @@
         let history = Arc::new(Mutex::new(Vec::new()));
         let mut sk_state = new_test_state(history.clone(), 20);
 
-        sk_state.vm_id_db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID4, USER2, CORE_APP_A).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID4, USER2, CORE_APP_A).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
 
-        assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_user(USER1).unwrap());
-        assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_app(USER1, APP_A).unwrap());
-        assert_eq!(vec![VM_ID3], sk_state.vm_id_db.vm_ids_for_app(USER2, APP_B).unwrap());
-        assert_eq!(vec![VM_ID5], sk_state.vm_id_db.vm_ids_for_user(USER3).unwrap());
+        assert_eq!(vec![VM_ID1, VM_ID2], get_db(&mut sk_state).vm_ids_for_user(USER1).unwrap());
+        assert_eq!(
+            vec![VM_ID1, VM_ID2],
+            get_db(&mut sk_state).vm_ids_for_app(USER1, APP_A).unwrap()
+        );
+        assert_eq!(vec![VM_ID3], get_db(&mut sk_state).vm_ids_for_app(USER2, APP_B).unwrap());
+        assert_eq!(vec![VM_ID5], get_db(&mut sk_state).vm_ids_for_user(USER3).unwrap());
 
         // Perform a reconciliation and pretend that USER1 and [CORE_APP_A, APP_B] are gone.
         let reconciliation =
@@ -409,12 +453,12 @@
         sk_state.reconcile(&callback).unwrap();
 
         let empty: Vec<VmId> = Vec::new();
-        assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_user(USER1).unwrap());
-        assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_app(USER1, APP_A).unwrap());
+        assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_user(USER1).unwrap());
+        assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_app(USER1, APP_A).unwrap());
         // VM for core app stays even though it's reported as absent.
-        assert_eq!(vec![VM_ID4], sk_state.vm_id_db.vm_ids_for_user(USER2).unwrap());
-        assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_app(USER2, APP_B).unwrap());
-        assert_eq!(vec![VM_ID5], sk_state.vm_id_db.vm_ids_for_user(USER3).unwrap());
+        assert_eq!(vec![VM_ID4], get_db(&mut sk_state).vm_ids_for_user(USER2).unwrap());
+        assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_app(USER2, APP_B).unwrap());
+        assert_eq!(vec![VM_ID5], get_db(&mut sk_state).vm_ids_for_user(USER3).unwrap());
     }
 
     #[test]
@@ -427,11 +471,11 @@
             let mut vm_id = [0u8; 64];
             vm_id[0..8].copy_from_slice(&(idx as u64).to_be_bytes());
             sk_state.add_id(&vm_id, USER1 as u32, APP_A as u32).unwrap();
-            assert_eq!(idx + 1, sk_state.vm_id_db.count_vm_ids_for_app(USER1, APP_A).unwrap());
+            assert_eq!(idx + 1, get_db(&mut sk_state).count_vm_ids_for_app(USER1, APP_A).unwrap());
         }
         assert_eq!(
             MAX_VM_IDS_PER_APP,
-            sk_state.vm_id_db.count_vm_ids_for_app(USER1, APP_A).unwrap()
+            get_db(&mut sk_state).count_vm_ids_for_app(USER1, APP_A).unwrap()
         );
 
         // Beyond the limit it's one in, one out.
@@ -441,12 +485,12 @@
             sk_state.add_id(&vm_id, USER1 as u32, APP_A as u32).unwrap();
             assert_eq!(
                 MAX_VM_IDS_PER_APP,
-                sk_state.vm_id_db.count_vm_ids_for_app(USER1, APP_A).unwrap()
+                get_db(&mut sk_state).count_vm_ids_for_app(USER1, APP_A).unwrap()
             );
         }
         assert_eq!(
             MAX_VM_IDS_PER_APP,
-            sk_state.vm_id_db.count_vm_ids_for_app(USER1, APP_A).unwrap()
+            get_db(&mut sk_state).count_vm_ids_for_app(USER1, APP_A).unwrap()
         );
     }
 
@@ -467,10 +511,10 @@
         let history = Arc::new(Mutex::new(Vec::new()));
         let mut sk_state = new_test_state(history.clone(), 20);
 
-        sk_state.vm_id_db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
-        sk_state.vm_id_db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
+        get_db(&mut sk_state).add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
         sk_state.delete_ids_for_user(USER1).unwrap();
         sk_state.delete_ids_for_user(USER2).unwrap();
         sk_state.delete_ids_for_user(USER3).unwrap();
diff --git a/virtualizationservice/vfio_handler/src/aidl.rs b/virtualizationservice/vfio_handler/src/aidl.rs
index c0967af..b527260 100644
--- a/virtualizationservice/vfio_handler/src/aidl.rs
+++ b/virtualizationservice/vfio_handler/src/aidl.rs
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Implementation of the AIDL interface of the VirtualizationService.
+//! Implementation of the AIDL interface of VfioHandler.
 
 use anyhow::{anyhow, Context};
 use android_system_virtualizationservice_internal::aidl::android::system::virtualizationservice_internal::IBoundDevice::{IBoundDevice, BnBoundDevice};
diff --git a/vm/src/run.rs b/vm/src/run.rs
index ca3e857..f3a5987 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -149,7 +149,6 @@
     let payload_config_str = format!("{:?}!{:?}", config.apk, payload);
 
     let custom_config = CustomConfig {
-        customKernelImage: None,
         gdbPort: config.debug.gdb.map(u16::from).unwrap_or(0) as i32, // 0 means no gdb
         vendorImage: vendor,
         devices: config
@@ -160,6 +159,7 @@
                 x.to_str().map(String::from).ok_or(anyhow!("Failed to convert {x:?} to String"))
             })
             .collect::<Result<_, _>>()?,
+        ..Default::default()
     };
 
     let vm_config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
diff --git a/vm_payload/Android.bp b/vm_payload/Android.bp
index 80d289b..97d4649 100644
--- a/vm_payload/Android.bp
+++ b/vm_payload/Android.bp
@@ -72,7 +72,7 @@
     ],
     whole_static_libs: ["libvm_payload_impl"],
     export_static_lib_headers: ["libvm_payload_impl"],
-    installable: false,
+    no_full_install: true,
     version_script: "libvm_payload.map.txt",
     stubs: {
         symbol_file: "libvm_payload.map.txt",
diff --git a/vmlauncher_app/java/com/android/virtualization/vmlauncher/MainActivity.java b/vmlauncher_app/java/com/android/virtualization/vmlauncher/MainActivity.java
index 4c42bb4..ec0f8e8 100644
--- a/vmlauncher_app/java/com/android/virtualization/vmlauncher/MainActivity.java
+++ b/vmlauncher_app/java/com/android/virtualization/vmlauncher/MainActivity.java
@@ -91,7 +91,7 @@
                         .forEach(customImageConfigBuilder::addParam);
             }
             if (json.has("bootloader")) {
-                customImageConfigBuilder.setInitrdPath(json.getString("bootloader"));
+                customImageConfigBuilder.setBootloaderPath(json.getString("bootloader"));
             }
             if (json.has("disks")) {
                 JSONArray diskArr = json.getJSONArray("disks");