Merge "Add VM creation failure test for unsigned vendor image" into main
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 77ccc1d..a9193d7 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -105,6 +105,9 @@
       "path": "packages/modules/Virtualization/rialto"
     },
     {
+      "path": "packages/modules/Virtualization/service_vm/client_vm_csr"
+    },
+    {
       "path": "packages/modules/Virtualization/service_vm/comm"
     },
     {
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
index 8257aae..029ac76 100644
--- a/apex/sign_virt_apex.py
+++ b/apex/sign_virt_apex.py
@@ -27,6 +27,7 @@
 - lpmake, lpunpack, simg2img, img2simg, initrd_bootconfig
 """
 import argparse
+import builtins
 import hashlib
 import os
 import re
@@ -282,7 +283,7 @@
         avb_version_bc = re.search(
             r"androidboot.vbmeta.avb_version = \"([^\"]*)\"", bootconfigs).group(1)
         if avb_version_curr != avb_version_bc:
-            raise Exception(f'AVB version mismatch between current & one & \
+            raise builtins.Exception(f'AVB version mismatch between current & one & \
                 used to build bootconfigs:{avb_version_curr}&{avb_version_bc}')
 
     def calc_vbmeta_digest():
@@ -430,21 +431,32 @@
 
     # unpacked files (will be unpacked from super.img below)
     system_a_img = os.path.join(unpack_dir.name, 'system_a.img')
+    vendor_a_img = os.path.join(unpack_dir.name, 'vendor_a.img')
 
     # re-sign super.img
     # 1. unpack super.img
-    # 2. resign system
-    # 3. repack super.img out of resigned system
+    # 2. resign system and vendor (if it exists)
+    # 3. repack super.img out of resigned system and vendor (if it exists)
     UnpackSuperImg(args, files['super.img'], unpack_dir.name)
     system_a_f = Async(AddHashTreeFooter, args, key, system_a_img)
     partitions = {"system_a": system_a_img}
+    images = [system_a_img]
+    images_f = [system_a_f]
+
+    # if vendor_a.img exists, resign it
+    if os.path.exists(vendor_a_img):
+        partitions.update({'vendor_a': vendor_a_img})
+        images.append(vendor_a_img)
+        vendor_a_f = Async(AddHashTreeFooter, args, key, vendor_a_img)
+        images_f.append(vendor_a_f)
+
     Async(MakeSuperImage, args, partitions,
-          files['super.img'], wait=[system_a_f])
+          files['super.img'], wait=images_f)
 
     # re-generate vbmeta from re-signed system_a.img
     vbmeta_f = Async(MakeVbmetaImage, args, key, files['vbmeta.img'],
-                     images=[system_a_img],
-                     wait=[system_a_f])
+                     images=images,
+                     wait=images_f)
 
     vbmeta_bc_f = None
     if not args.do_not_update_bootconfigs:
diff --git a/compos/src/compsvc_main.rs b/compos/src/compsvc_main.rs
index b0fc323..128d581 100644
--- a/compos/src/compsvc_main.rs
+++ b/compos/src/compsvc_main.rs
@@ -54,10 +54,7 @@
     // SAFETY: We hold a strong pointer, so the raw pointer remains valid. The bindgen AIBinder
     // is the same type as sys::AIBinder. It is safe for on_ready to be invoked at any time, with
     // any parameter.
-    unsafe {
-        AVmPayload_runVsockRpcServer(service, COMPOS_VSOCK_PORT, Some(on_ready), param);
-    }
-    Ok(())
+    unsafe { AVmPayload_runVsockRpcServer(service, COMPOS_VSOCK_PORT, Some(on_ready), param) }
 }
 
 extern "C" fn on_ready(_param: *mut c_void) {
diff --git a/javalib/README.md b/javalib/README.md
index 4eb64df..cf7a6cb 100644
--- a/javalib/README.md
+++ b/javalib/README.md
@@ -15,6 +15,9 @@
 `android.permission.MANAGE_VIRTUAL_MACHINE` permission, so they are not
 available to third party apps.
 
+All of these APIs were introduced in API level 34 (Android 14). The classes may
+not exist on devices running an earlier version.
+
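For apps that also run on earlier releases, a runtime version check keeps these
classes from being touched on pre-34 devices. This is only a minimal sketch; the
`startVmFeature()` and `fallbackWithoutVm()` helpers are hypothetical:

```Java
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.UPSIDE_DOWN_CAKE) {
    // API level 34+: safe to reference android.system.virtualmachine classes.
    startVmFeature(context);     // hypothetical helper
} else {
    fallbackWithoutVm(context);  // hypothetical helper
}
```
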
 ## Detecting AVF Support
 
 The simplest way to detect whether a device has support for AVF is to retrieve
@@ -22,7 +25,7 @@
 [`VirtualMachineManager`](src/android/system/virtualmachine/VirtualMachineManager.java)
 class; if the result is not `null` then the device has support. You can then
 find out whether protected, non-protected VMs, or both are supported using the
-`getCapabilities()` method:
+`getCapabilities()` method. Note that this code requires API level 34 or higher:
 
 ```Java
 VirtualMachineManager vmm = context.getSystemService(VirtualMachineManager.class);
@@ -41,7 +44,8 @@
 ```
 
 An alternative for detecting AVF support is to query support for the
-`android.software.virtualization_framework` system feature:
+`android.software.virtualization_framework` system feature. This check works on
+any API level, and returns `false` below API level 34:
 
 ```Java
 if (getPackageManager().hasSystemFeature(PackageManager.FEATURE_VIRTUALIZATION_FRAMEWORK)) {
@@ -116,7 +120,9 @@
   reached - but there is some overhead proportional to the maximum size.)
 - How many virtual CPUs the VM has.
 - How much encrypted storage the VM has.
-- The path to the installed APK containing the code to run as the VM payload.
+- The path to the installed APK containing the code to run as the VM
+  payload. (Normally you don't need this; the APK path is determined from the
+  context passed to the config builder.)
 
 ## VM Life-cycle
 
@@ -244,7 +250,7 @@
 
 ### Binder
 
-The use of AIDL interfaces between the VM and app is support via Binder RPC,
+The use of AIDL interfaces between the VM and app is supported via Binder RPC,
 which transmits messages over an underlying vsock socket.
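
For example, assuming the app defines an AIDL interface `IPayloadService` and the
payload exposes it on a vsock port `PAYLOAD_PORT` (both names are assumptions for
this sketch), the app side can bind to it roughly like this once the VM is running:

```Java
// Connect to the Binder RPC server that the payload runs over vsock.
IBinder binder = vm.connectToVsockServer(PAYLOAD_PORT);
IPayloadService service = IPayloadService.Stub.asInterface(binder);
```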
 
 Note that Binder RPC has some limitations compared to the kernel Binder used in
@@ -304,10 +310,12 @@
 which includes the payload's exit code.
 
 Use of `stop()` should be reserved as a recovery mechanism - for example if the
-VM has not stopped within a reasonable time after being requested to.
+VM has not stopped within a reasonable time (a few seconds, say) after being
+requested to.
 
-The status of a VM will be `STATUS_STOPPED` after a successful call to `stop()`,
-or if your `onPayloadStopped()` callback is invoked.
+The status of a VM will be `STATUS_STOPPED` if your `onStopped()` callback is
+invoked, or after a successful call to `stop()`. Note that your `onStopped()`
+callback is still invoked even if the VM ended as a result of a call to `stop()`.
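
As an illustration only (a sketch using the methods described above, with error
handling reduced to the bare minimum):

```Java
try {
    vm.stop();  // Last-resort forced stop
    if (vm.getStatus() == VirtualMachine.STATUS_STOPPED) {
        // onStopped() will also be delivered to any registered callback.
    }
} catch (VirtualMachineException e) {
    // The VM could not be stopped; handle or report the failure.
}
```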
 
 # Encrypted Storage
 
@@ -333,9 +341,8 @@
 
 # Transferring a VM
 
-It is possible to make a copy of a VM instance with a new name. This can be used
-to transfer a VM from one app to another, which can be useful in some
-circumstances.
+It is possible to make a copy of a VM instance. This can be used to transfer a
+VM from one app to another, which can be useful in some circumstances.
 
 This should only be done while the VM is stopped. The first step is to call
 `toDescriptor()` on the
diff --git a/libs/bssl/error/src/lib.rs b/libs/bssl/error/src/lib.rs
index 3766c41..88929af 100644
--- a/libs/bssl/error/src/lib.rs
+++ b/libs/bssl/error/src/lib.rs
@@ -56,11 +56,13 @@
     BN_bn2bin_padded,
     CBB_flush,
     CBB_len,
+    EC_GROUP_new_by_curve_name,
     EC_KEY_check_key,
     EC_KEY_generate_key,
     EC_KEY_get0_group,
     EC_KEY_get0_public_key,
     EC_KEY_marshal_private_key,
+    EC_KEY_parse_private_key,
     EC_KEY_new_by_curve_name,
     EC_POINT_get_affine_coordinates,
     EVP_AEAD_CTX_new,
diff --git a/libs/bssl/src/cbs.rs b/libs/bssl/src/cbs.rs
new file mode 100644
index 0000000..9718903
--- /dev/null
+++ b/libs/bssl/src/cbs.rs
@@ -0,0 +1,55 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Helpers for using BoringSSL CBS (crypto byte string) objects.
+
+use bssl_ffi::{CBS_init, CBS};
+use core::marker::PhantomData;
+use core::mem::MaybeUninit;
+
+/// CRYPTO ByteString.
+///
+/// Wraps a `CBS` that references an existing fixed-sized buffer; no memory is allocated, but the
+/// buffer cannot grow.
+pub struct Cbs<'a> {
+    cbs: CBS,
+    /// The CBS contains a reference to the buffer, disguised as a pointer.
+    /// Make sure the borrow checker knows that.
+    _buffer: PhantomData<&'a [u8]>,
+}
+
+impl<'a> Cbs<'a> {
+    /// Creates a new CBS that points to the given buffer.
+    pub fn new(buffer: &'a [u8]) -> Self {
+        let mut cbs = MaybeUninit::uninit();
+        // SAFETY: `CBS_init()` only sets `cbs` to point to `buffer`. It doesn't take ownership
+        // of data.
+        unsafe { CBS_init(cbs.as_mut_ptr(), buffer.as_ptr(), buffer.len()) };
+        // SAFETY: `cbs` has just been initialized by `CBS_init()`.
+        let cbs = unsafe { cbs.assume_init() };
+        Self { cbs, _buffer: PhantomData }
+    }
+}
+
+impl<'a> AsRef<CBS> for Cbs<'a> {
+    fn as_ref(&self) -> &CBS {
+        &self.cbs
+    }
+}
+
+impl<'a> AsMut<CBS> for Cbs<'a> {
+    fn as_mut(&mut self) -> &mut CBS {
+        &mut self.cbs
+    }
+}
diff --git a/libs/bssl/src/ec_key.rs b/libs/bssl/src/ec_key.rs
index 7038e21..4c1ba5c 100644
--- a/libs/bssl/src/ec_key.rs
+++ b/libs/bssl/src/ec_key.rs
@@ -16,14 +16,15 @@
 //! BoringSSL.
 
 use crate::cbb::CbbFixed;
+use crate::cbs::Cbs;
 use crate::util::{check_int_result, to_call_failed_error};
 use alloc::vec::Vec;
 use bssl_avf_error::{ApiName, Error, Result};
 use bssl_ffi::{
-    BN_bn2bin_padded, BN_clear_free, BN_new, CBB_flush, CBB_len, EC_KEY_free, EC_KEY_generate_key,
-    EC_KEY_get0_group, EC_KEY_get0_public_key, EC_KEY_marshal_private_key,
-    EC_KEY_new_by_curve_name, EC_POINT_get_affine_coordinates, NID_X9_62_prime256v1, BIGNUM,
-    EC_GROUP, EC_KEY, EC_POINT,
+    BN_bn2bin_padded, BN_clear_free, BN_new, CBB_flush, CBB_len, EC_GROUP_new_by_curve_name,
+    EC_KEY_check_key, EC_KEY_free, EC_KEY_generate_key, EC_KEY_get0_group, EC_KEY_get0_public_key,
+    EC_KEY_marshal_private_key, EC_KEY_new_by_curve_name, EC_KEY_parse_private_key,
+    EC_POINT_get_affine_coordinates, NID_X9_62_prime256v1, BIGNUM, EC_GROUP, EC_KEY, EC_POINT,
 };
 use core::ptr::{self, NonNull};
 use core::result;
@@ -59,6 +60,16 @@
         Ok(ec_key)
     }
 
+    /// Performs several checks on the key. See BoringSSL doc for more details:
+    ///
+    /// https://commondatastorage.googleapis.com/chromium-boringssl-docs/ec_key.h.html#EC_KEY_check_key
+    pub fn check_key(&self) -> Result<()> {
+        // SAFETY: This function only reads the `EC_KEY` pointer, the non-null check is performed
+        // within the function.
+        let ret = unsafe { EC_KEY_check_key(self.0.as_ptr()) };
+        check_int_result(ret, ApiName::EC_KEY_check_key)
+    }
+
     /// Generates a random, private key, calculates the corresponding public key and stores both
     /// in the `EC_KEY`.
     fn generate_key(&mut self) -> Result<()> {
@@ -124,10 +135,34 @@
         }
     }
 
+    /// Constructs an `EcKey` instance from the provided DER-encoded ECPrivateKey slice.
+    ///
+    /// Currently, only the EC P-256 curve is supported.
+    pub fn from_ec_private_key(der_encoded_ec_private_key: &[u8]) -> Result<Self> {
+        // SAFETY: This function only returns a pointer to a static object, and the
+        // return is checked below.
+        let ec_group = unsafe {
+            EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1) // EC P-256 CURVE Nid
+        };
+        if ec_group.is_null() {
+            return Err(to_call_failed_error(ApiName::EC_GROUP_new_by_curve_name));
+        }
+        let mut cbs = Cbs::new(der_encoded_ec_private_key);
+        // SAFETY: The function only reads bytes from the buffer managed by the valid `CBS`
+        // object, and the returned EC_KEY is checked.
+        let ec_key = unsafe { EC_KEY_parse_private_key(cbs.as_mut(), ec_group) };
+
+        let ec_key = NonNull::new(ec_key)
+            .map(Self)
+            .ok_or(to_call_failed_error(ApiName::EC_KEY_parse_private_key))?;
+        ec_key.check_key()?;
+        Ok(ec_key)
+    }
+
     /// Returns the DER-encoded ECPrivateKey structure described in RFC 5915 Section 3:
     ///
     /// https://datatracker.ietf.org/doc/html/rfc5915#section-3
-    pub fn private_key(&self) -> Result<ZVec> {
+    pub fn ec_private_key(&self) -> Result<ZVec> {
         const CAPACITY: usize = 256;
         let mut buf = Zeroizing::new([0u8; CAPACITY]);
         let mut cbb = CbbFixed::new(buf.as_mut());
diff --git a/libs/bssl/src/lib.rs b/libs/bssl/src/lib.rs
index 709e8ad..de81368 100644
--- a/libs/bssl/src/lib.rs
+++ b/libs/bssl/src/lib.rs
@@ -20,6 +20,7 @@
 
 mod aead;
 mod cbb;
+mod cbs;
 mod digest;
 mod ec_key;
 mod err;
@@ -32,6 +33,7 @@
 
 pub use aead::{Aead, AeadContext, AES_GCM_NONCE_LENGTH};
 pub use cbb::CbbFixed;
+pub use cbs::Cbs;
 pub use digest::Digester;
 pub use ec_key::{EcKey, ZVec};
 pub use hkdf::hkdf;
diff --git a/libs/bssl/tests/eckey_test.rs b/libs/bssl/tests/eckey_test.rs
new file mode 100644
index 0000000..a013fba
--- /dev/null
+++ b/libs/bssl/tests/eckey_test.rs
@@ -0,0 +1,25 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use bssl_avf::{EcKey, Result};
+
+#[test]
+fn ec_private_key_serialization() -> Result<()> {
+    let ec_key = EcKey::new_p256()?;
+    let der_encoded_ec_private_key = ec_key.ec_private_key()?;
+    let deserialized_ec_key = EcKey::from_ec_private_key(der_encoded_ec_private_key.as_slice())?;
+
+    assert_eq!(ec_key.cose_public_key()?, deserialized_ec_key.cose_public_key()?);
+    Ok(())
+}
diff --git a/libs/bssl/tests/tests.rs b/libs/bssl/tests/tests.rs
index 4c0b0b0..02666d8 100644
--- a/libs/bssl/tests/tests.rs
+++ b/libs/bssl/tests/tests.rs
@@ -15,5 +15,6 @@
 //! API tests of the crate `bssl_avf`.
 
 mod aead_test;
+mod eckey_test;
 mod hkdf_test;
 mod hmac_test;
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index b811730..b369390 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -504,6 +504,18 @@
 
         fdt_err_or_option(ret)?.map(|offset| FdtProperty::new(self.fdt, offset)).transpose()
     }
+
+    /// Returns the phandle of this node, if present.
+    pub fn get_phandle(&self) -> Result<Option<Phandle>> {
+        // Reimplements fdt_get_phandle() because it doesn't return an error code.
+        if let Some(prop) = self.getprop_u32(cstr!("phandle"))? {
+            Ok(Some(prop.try_into()?))
+        } else if let Some(prop) = self.getprop_u32(cstr!("linux,phandle"))? {
+            Ok(Some(prop.try_into()?))
+        } else {
+            Ok(None)
+        }
+    }
 }
 
 impl<'a> PartialEq for FdtNode<'a> {
@@ -1012,9 +1024,20 @@
 
     /// Returns a node with the phandle
     pub fn node_with_phandle(&self, phandle: Phandle) -> Result<Option<FdtNode>> {
-        // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+        let offset = self.node_offset_with_phandle(phandle)?;
+        Ok(offset.map(|offset| FdtNode { fdt: self, offset }))
+    }
+
+    /// Returns a mutable node with the phandle
+    pub fn node_mut_with_phandle(&mut self, phandle: Phandle) -> Result<Option<FdtNodeMut>> {
+        let offset = self.node_offset_with_phandle(phandle)?;
+        Ok(offset.map(|offset| FdtNodeMut { fdt: self, offset }))
+    }
+
+    fn node_offset_with_phandle(&self, phandle: Phandle) -> Result<Option<c_int>> {
+        // SAFETY: Accesses are constrained to the DT totalsize.
         let ret = unsafe { libfdt_bindgen::fdt_node_offset_by_phandle(self.as_ptr(), phandle.0) };
-        Ok(fdt_err_or_option(ret)?.map(|offset| FdtNode { fdt: self, offset }))
+        fdt_err_or_option(ret)
     }
 
     /// Returns the mutable root node of the tree.
diff --git a/libs/libfdt/tests/api_test.rs b/libs/libfdt/tests/api_test.rs
index bc306ad..d76b1a4 100644
--- a/libs/libfdt/tests/api_test.rs
+++ b/libs/libfdt/tests/api_test.rs
@@ -16,6 +16,7 @@
 
 //! Integration tests of the library libfdt.
 
+use core::ffi::CStr;
 use libfdt::{Fdt, FdtError, FdtNodeMut, Phandle};
 use std::ffi::CString;
 use std::fs;
@@ -106,11 +107,11 @@
     let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
     let fdt = Fdt::from_slice(&data).unwrap();
     let root = fdt.root().unwrap();
-    let expected = [cstr!("cpus"), cstr!("randomnode"), cstr!("chosen")];
+    let expected = [Ok(cstr!("cpus")), Ok(cstr!("randomnode")), Ok(cstr!("chosen"))];
 
-    for (node, name) in root.subnodes().unwrap().zip(expected) {
-        assert_eq!(node.name(), Ok(name));
-    }
+    let root_subnodes = root.subnodes().unwrap();
+    let subnode_names: Vec<_> = root_subnodes.map(|node| node.name()).collect();
+    assert_eq!(subnode_names, expected);
 }
 
 #[test]
@@ -119,18 +120,19 @@
     let fdt = Fdt::from_slice(&data).unwrap();
     let root = fdt.root().unwrap();
     let one_be = 0x1_u32.to_be_bytes();
-    let expected = [
-        (cstr!("model"), b"MyBoardName\0".as_ref()),
-        (cstr!("compatible"), b"MyBoardName\0MyBoardFamilyName\0".as_ref()),
-        (cstr!("#address-cells"), &one_be),
-        (cstr!("#size-cells"), &one_be),
-        (cstr!("empty_prop"), &[]),
+    type Result<T> = core::result::Result<T, FdtError>;
+    let expected: Vec<(Result<&CStr>, Result<&[u8]>)> = vec![
+        (Ok(cstr!("model")), Ok(b"MyBoardName\0".as_ref())),
+        (Ok(cstr!("compatible")), Ok(b"MyBoardName\0MyBoardFamilyName\0".as_ref())),
+        (Ok(cstr!("#address-cells")), Ok(&one_be)),
+        (Ok(cstr!("#size-cells")), Ok(&one_be)),
+        (Ok(cstr!("empty_prop")), Ok(&[])),
     ];
 
     let properties = root.properties().unwrap();
-    for (prop, (name, value)) in properties.zip(expected.into_iter()) {
-        assert_eq!((prop.name(), prop.value()), (Ok(name), Ok(value)));
-    }
+    let subnode_properties: Vec<_> = properties.map(|prop| (prop.name(), prop.value())).collect();
+
+    assert_eq!(subnode_properties, expected);
 }
 
 #[test]
@@ -138,12 +140,16 @@
     let data = fs::read(TEST_TREE_WITH_NO_MEMORY_NODE_PATH).unwrap();
     let fdt = Fdt::from_slice(&data).unwrap();
     let node = fdt.node(cstr!("/cpus/PowerPC,970@1")).unwrap().unwrap();
-    let expected = [cstr!(""), cstr!("cpus"), cstr!("PowerPC,970@1")];
+    let expected = vec![Ok(cstr!("")), Ok(cstr!("cpus")), Ok(cstr!("PowerPC,970@1"))];
 
-    for (depth, name) in expected.into_iter().enumerate() {
-        let supernode = node.supernode_at_depth(depth).unwrap();
-        assert_eq!(supernode.name(), Ok(name));
+    let mut supernode_names = vec![];
+    let mut depth = 0;
+    while let Ok(supernode) = node.supernode_at_depth(depth) {
+        supernode_names.push(supernode.name());
+        depth += 1;
     }
+
+    assert_eq!(supernode_names, expected);
 }
 
 #[test]
@@ -200,6 +206,40 @@
 }
 
 #[test]
+fn node_mut_with_phandle() {
+    let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+    let fdt = Fdt::from_mut_slice(&mut data).unwrap();
+
+    // Test linux,phandle
+    let phandle = Phandle::new(0xFF).unwrap();
+    let node: FdtNodeMut = fdt.node_mut_with_phandle(phandle).unwrap().unwrap();
+    assert_eq!(node.as_node().name(), Ok(cstr!("node_zz")));
+
+    // Test phandle
+    let phandle = Phandle::new(0x22).unwrap();
+    let node: FdtNodeMut = fdt.node_mut_with_phandle(phandle).unwrap().unwrap();
+    assert_eq!(node.as_node().name(), Ok(cstr!("node_abc")));
+}
+
+#[test]
+fn node_get_phandle() {
+    let data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+    let fdt = Fdt::from_slice(&data).unwrap();
+
+    // Test linux,phandle
+    let node = fdt.node(cstr!("/node_z/node_zz")).unwrap().unwrap();
+    assert_eq!(node.get_phandle(), Ok(Phandle::new(0xFF)));
+
+    // Test phandle
+    let node = fdt.node(cstr!("/node_a/node_ab/node_abc")).unwrap().unwrap();
+    assert_eq!(node.get_phandle(), Ok(Phandle::new(0x22)));
+
+    // Test no phandle
+    let node = fdt.node(cstr!("/node_b")).unwrap().unwrap();
+    assert_eq!(node.get_phandle(), Ok(None));
+}
+
+#[test]
 fn node_nop() {
     let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
     let fdt = Fdt::from_mut_slice(&mut data).unwrap();
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 42ff4b0..4e735e6 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -243,7 +243,35 @@
         "echo ro.product.cpu.abi=arm64-v8a) > $(out)",
 }
 
-logical_partition {
+// Need to keep microdroid_vendor for the release configurations that don't
+// have RELEASE_AVF_ENABLE_VENDOR_MODULES build flag enabled.
+android_filesystem {
+    name: "microdroid_vendor",
+    partition_name: "vendor",
+    use_avb: true,
+    avb_private_key: ":microdroid_sign_key",
+    avb_algorithm: "SHA256_RSA4096",
+    avb_hash_algorithm: "sha256",
+    file_contexts: ":microdroid_vendor_file_contexts.gen",
+    // For deterministic output, use fake_timestamp, hard-coded uuid
+    fake_timestamp: "1611569676",
+    // python -c "import uuid; print(uuid.uuid5(uuid.NAMESPACE_URL, 'www.android.com/avf/microdroid/vendor'))"
+    uuid: "156d40d7-8d8e-5c99-8913-ec82de549a70",
+}
+
+soong_config_module_type {
+    name: "flag_aware_microdroid_super_partition",
+    module_type: "logical_partition",
+    config_namespace: "ANDROID",
+    bool_variables: [
+        "release_avf_enable_vendor_modules",
+    ],
+    properties: [
+        "default_group",
+    ],
+}
+
+flag_aware_microdroid_super_partition {
     name: "microdroid_super",
     sparse: true,
     size: "auto",
@@ -253,6 +281,16 @@
             filesystem: ":microdroid",
         },
     ],
+    soong_config_variables: {
+        release_avf_enable_vendor_modules: {
+            default_group: [
+                {
+                    name: "vendor_a",
+                    filesystem: ":microdroid_vendor",
+                },
+            ],
+        },
+    },
 }
 
 android_filesystem {
@@ -330,13 +368,30 @@
     srcs: [":avb_testkey_rsa4096"],
 }
 
-vbmeta {
+soong_config_module_type {
+    name: "flag_aware_microdroid_vbmeta",
+    module_type: "vbmeta",
+    config_namespace: "ANDROID",
+    bool_variables: [
+        "release_avf_enable_vendor_modules",
+    ],
+    properties: [
+        "partitions",
+    ],
+}
+
+flag_aware_microdroid_vbmeta {
     name: "microdroid_vbmeta",
     partition_name: "vbmeta",
     private_key: ":microdroid_sign_key",
     partitions: [
         "microdroid",
     ],
+    soong_config_variables: {
+        release_avf_enable_vendor_modules: {
+            partitions: ["microdroid_vendor"],
+        },
+    },
 }
 
 prebuilt_etc {
diff --git a/microdroid/init.rc b/microdroid/init.rc
index f5f3f15..4cc0475 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -30,6 +30,11 @@
     # We don't directly exec the binary to specify stdio_to_kmsg.
     exec_start init_debug_policy
 
+    # Wait for ueventd to have finished cold boot.
+    # This is needed by prng-seeder (at least).
+    # (In Android this happens inside apexd-bootstrap.)
+    wait_for_prop ro.cold_boot_done true
+
 on init
     mkdir /mnt/apk 0755 root root
     mkdir /mnt/extra-apk 0755 root root
diff --git a/microdroid_manager/Android.bp b/microdroid_manager/Android.bp
index 93f49ef..8481edf 100644
--- a/microdroid_manager/Android.bp
+++ b/microdroid_manager/Android.bp
@@ -23,6 +23,7 @@
         "libbinder_rs",
         "libbyteorder",
         "libcap_rust",
+        "libclient_vm_csr",
         "libciborium",
         "libcoset",
         "libdiced_open_dice",
@@ -46,12 +47,10 @@
         "libserde",
         "libserde_cbor",
         "libserde_json",
-        "libservice_vm_comm",
         "libthiserror",
         "libuuid",
         "libvsock",
         "librand",
-        "libzeroize",
     ],
     init_rc: ["microdroid_manager.rc"],
     multilib: {
@@ -72,7 +71,6 @@
     defaults: ["microdroid_manager_defaults"],
     test_suites: ["general-tests"],
     rustlibs: [
-        "libhwtrust",
         "libtempfile",
     ],
     multilib: {
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index 7ba54f8..1b41e58 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -34,7 +34,7 @@
 
 use crate::dice::dice_derivation;
 use crate::dice_driver::DiceDriver;
-use crate::instance::{ApexData, InstanceDisk, MicrodroidData};
+use crate::instance::{InstanceDisk, MicrodroidData};
 use crate::verify::verify_payload;
 use crate::vm_payload_service::register_vm_payload_service;
 use anyhow::{anyhow, bail, ensure, Context, Error, Result};
@@ -42,10 +42,10 @@
 use keystore2_crypto::ZVec;
 use libc::VMADDR_CID_HOST;
 use log::{error, info};
-use microdroid_metadata::{write_metadata, PayloadMetadata};
+use microdroid_metadata::PayloadMetadata;
 use microdroid_payload_config::{OsConfig, Task, TaskType, VmPayloadConfig};
 use nix::sys::signal::Signal;
-use payload::{load_metadata, to_metadata};
+use payload::load_metadata;
 use rpcbinder::RpcSession;
 use rustutils::sockets::android_get_control_socket;
 use rustutils::system_properties;
@@ -143,15 +143,6 @@
     Ok(())
 }
 
-fn get_vms_rpc_binder() -> Result<Strong<dyn IVirtualMachineService>> {
-    // The host is running a VirtualMachineService for this VM on a port equal
-    // to the CID of this VM.
-    let port = vsock::get_local_cid().context("Could not determine local CID")?;
-    RpcSession::new()
-        .setup_vsock_client(VMADDR_CID_HOST, port)
-        .context("Could not connect to IVirtualMachineService")
-}
-
 fn main() -> Result<()> {
     // If debuggable, print full backtrace to console log with stdio_to_kmsg
     if is_debuggable()? {
@@ -174,25 +165,6 @@
     })
 }
 
-/// Prepares a socket file descriptor for the vm payload service.
-///
-/// # Safety
-///
-/// The caller must ensure that this function is the only place that claims ownership
-/// of the file descriptor and it is called only once.
-unsafe fn prepare_vm_payload_service_socket() -> Result<OwnedFd> {
-    let raw_fd = android_get_control_socket(VM_PAYLOAD_SERVICE_SOCKET_NAME)?;
-
-    // Creating OwnedFd for stdio FDs is not safe.
-    if [libc::STDIN_FILENO, libc::STDOUT_FILENO, libc::STDERR_FILENO].contains(&raw_fd) {
-        bail!("File descriptor {raw_fd} is standard I/O descriptor");
-    }
-    // SAFETY: Initializing OwnedFd for a RawFd created by the init.
-    // We checked that the integer value corresponds to a valid FD and that the caller
-    // ensures that this is the only place to claim its ownership.
-    Ok(unsafe { OwnedFd::from_raw_fd(raw_fd) })
-}
-
 fn try_main() -> Result<()> {
     android_logger::init_once(
         android_logger::Config::default()
@@ -245,71 +217,6 @@
     }
 }
 
-fn post_payload_work() -> Result<()> {
-    // Sync the encrypted storage filesystem (flushes the filesystem caches).
-    if Path::new(ENCRYPTEDSTORE_BACKING_DEVICE).exists() {
-        let mountpoint = CString::new(ENCRYPTEDSTORE_MOUNTPOINT).unwrap();
-
-        // SAFETY: `mountpoint` is a valid C string. `syncfs` and `close` are safe for any parameter
-        // values.
-        let ret = unsafe {
-            let dirfd = libc::open(
-                mountpoint.as_ptr(),
-                libc::O_DIRECTORY | libc::O_RDONLY | libc::O_CLOEXEC,
-            );
-            ensure!(dirfd >= 0, "Unable to open {:?}", mountpoint);
-            let ret = libc::syncfs(dirfd);
-            libc::close(dirfd);
-            ret
-        };
-        if ret != 0 {
-            error!("failed to sync encrypted storage.");
-            return Err(anyhow!(std::io::Error::last_os_error()));
-        }
-    }
-    Ok(())
-}
-
-fn is_strict_boot() -> bool {
-    Path::new(AVF_STRICT_BOOT).exists()
-}
-
-fn is_new_instance() -> bool {
-    Path::new(AVF_NEW_INSTANCE).exists()
-}
-
-fn is_verified_boot() -> bool {
-    !Path::new(DEBUG_MICRODROID_NO_VERIFIED_BOOT).exists()
-}
-
-fn is_debuggable() -> Result<bool> {
-    Ok(system_properties::read_bool(DEBUGGABLE_PROP, true)?)
-}
-
-fn should_export_tombstones(config: &VmPayloadConfig) -> bool {
-    match config.export_tombstones {
-        Some(b) => b,
-        None => is_debuggable().unwrap_or(false),
-    }
-}
-
-/// Get debug policy value in bool. It's true iff the value is explicitly set to <1>.
-fn get_debug_policy_bool(path: &'static str) -> Result<Option<bool>> {
-    let mut file = match File::open(path) {
-        Ok(dp) => dp,
-        Err(e) => {
-            info!(
-                "Assumes that debug policy is disabled because failed to read debug policy ({e:?})"
-            );
-            return Ok(Some(false));
-        }
-    };
-    let mut log: [u8; 4] = Default::default();
-    file.read_exact(&mut log).context("Malformed data in {path}")?;
-    // DT spec uses big endian although Android is always little endian.
-    Ok(Some(u32::from_be_bytes(log) == 1))
-}
-
 fn try_run_payload(
     service: &Strong<dyn IVirtualMachineService>,
     vm_payload_service_fd: OwnedFd,
@@ -377,6 +284,13 @@
     let dice_artifacts = dice_derivation(dice, &verified_data, &payload_metadata)?;
     let vm_secret = VmSecret::new(dice_artifacts).context("Failed to create VM secrets")?;
 
+    if cfg!(dice_changes) {
+        // Now that the DICE derivation is done, it's ok to allow payload code to run.
+
+        // Start apexd to activate APEXes. This may allow code within them to run.
+        system_properties::write("ctl.start", "apexd-vm")?;
+    }
+
     // Run encryptedstore binary to prepare the storage
     let encryptedstore_child = if Path::new(ENCRYPTEDSTORE_BACKING_DEVICE).exists() {
         info!("Preparing encryptedstore ...");
@@ -419,10 +333,12 @@
     );
     mount_extra_apks(&config, &mut zipfuse)?;
 
-    // Wait until apex config is done. (e.g. linker configuration for apexes)
-    wait_for_apex_config_done()?;
-
-    setup_config_sysprops(&config)?;
+    register_vm_payload_service(
+        allow_restricted_apis,
+        service.clone(),
+        vm_secret,
+        vm_payload_service_fd,
+    )?;
 
     // Set export_tombstones if enabled
     if should_export_tombstones(&config) {
@@ -431,16 +347,20 @@
             .context("set microdroid_manager.export_tombstones.enabled")?;
     }
 
+    // Wait until apex config is done. (e.g. linker configuration for apexes)
+    wait_for_property_true(APEX_CONFIG_DONE_PROP).context("Failed waiting for apex config done")?;
+
+    // Trigger init post-fs-data. This will start authfs if we ask it to.
+    if config.enable_authfs {
+        system_properties::write("microdroid_manager.authfs.enabled", "1")
+            .context("failed to write microdroid_manager.authfs.enabled")?;
+    }
+    system_properties::write("microdroid_manager.config_done", "1")
+        .context("failed to write microdroid_manager.config_done")?;
+
     // Wait until zipfuse has mounted the APKs so we can access the payload
     zipfuse.wait_until_done()?;
 
-    register_vm_payload_service(
-        allow_restricted_apis,
-        service.clone(),
-        vm_secret,
-        vm_payload_service_fd,
-    )?;
-
     // Wait for encryptedstore to finish mounting the storage (if enabled) before setting
     // microdroid_manager.init_done. The reason is that init stops ueventd after that.
     // Encryptedstore, however, requires ueventd.
@@ -449,7 +369,10 @@
         ensure!(exitcode.success(), "Unable to prepare encrypted storage. Exitcode={}", exitcode);
     }
 
+    // Wait for init to have finished booting.
     wait_for_property_true("dev.bootcomplete").context("failed waiting for dev.bootcomplete")?;
+
+    // And then tell it we're done so unnecessary services can be shut down.
     system_properties::write("microdroid_manager.init_done", "1")
         .context("set microdroid_manager.init_done")?;
 
@@ -457,6 +380,120 @@
     exec_task(task, service).context("Failed to run payload")
 }
 
+fn post_payload_work() -> Result<()> {
+    // Sync the encrypted storage filesystem (flushes the filesystem caches).
+    if Path::new(ENCRYPTEDSTORE_BACKING_DEVICE).exists() {
+        let mountpoint = CString::new(ENCRYPTEDSTORE_MOUNTPOINT).unwrap();
+
+        // SAFETY: `mountpoint` is a valid C string. `syncfs` and `close` are safe for any parameter
+        // values.
+        let ret = unsafe {
+            let dirfd = libc::open(
+                mountpoint.as_ptr(),
+                libc::O_DIRECTORY | libc::O_RDONLY | libc::O_CLOEXEC,
+            );
+            ensure!(dirfd >= 0, "Unable to open {:?}", mountpoint);
+            let ret = libc::syncfs(dirfd);
+            libc::close(dirfd);
+            ret
+        };
+        if ret != 0 {
+            error!("failed to sync encrypted storage.");
+            return Err(anyhow!(std::io::Error::last_os_error()));
+        }
+    }
+    Ok(())
+}
+
+fn mount_extra_apks(config: &VmPayloadConfig, zipfuse: &mut Zipfuse) -> Result<()> {
+    // For now, only the number of APKs is important, as the mount point and dm-verity name are fixed.
+    for i in 0..config.extra_apks.len() {
+        let mount_dir = format!("/mnt/extra-apk/{i}");
+        create_dir(Path::new(&mount_dir)).context("Failed to create mount dir for extra apks")?;
+
+        let mount_for_exec =
+            if cfg!(multi_tenant) { MountForExec::Allowed } else { MountForExec::Disallowed };
+        // These run asynchronously in parallel - we wait later for them to complete.
+        zipfuse.mount(
+            mount_for_exec,
+            "fscontext=u:object_r:zipfusefs:s0,context=u:object_r:extra_apk_file:s0",
+            Path::new(&format!("/dev/block/mapper/extra-apk-{i}")),
+            Path::new(&mount_dir),
+            format!("microdroid_manager.extra_apk.mounted.{i}"),
+        )?;
+    }
+
+    Ok(())
+}
+
+fn get_vms_rpc_binder() -> Result<Strong<dyn IVirtualMachineService>> {
+    // The host is running a VirtualMachineService for this VM on a port equal
+    // to the CID of this VM.
+    let port = vsock::get_local_cid().context("Could not determine local CID")?;
+    RpcSession::new()
+        .setup_vsock_client(VMADDR_CID_HOST, port)
+        .context("Could not connect to IVirtualMachineService")
+}
+
+/// Prepares a socket file descriptor for the vm payload service.
+///
+/// # Safety
+///
+/// The caller must ensure that this function is the only place that claims ownership
+/// of the file descriptor and it is called only once.
+unsafe fn prepare_vm_payload_service_socket() -> Result<OwnedFd> {
+    let raw_fd = android_get_control_socket(VM_PAYLOAD_SERVICE_SOCKET_NAME)?;
+
+    // Creating OwnedFd for stdio FDs is not safe.
+    if [libc::STDIN_FILENO, libc::STDOUT_FILENO, libc::STDERR_FILENO].contains(&raw_fd) {
+        bail!("File descriptor {raw_fd} is standard I/O descriptor");
+    }
+    // SAFETY: Initializing OwnedFd for a RawFd created by the init.
+    // We checked that the integer value corresponds to a valid FD and that the caller
+    // ensures that this is the only place to claim its ownership.
+    Ok(unsafe { OwnedFd::from_raw_fd(raw_fd) })
+}
+
+fn is_strict_boot() -> bool {
+    Path::new(AVF_STRICT_BOOT).exists()
+}
+
+fn is_new_instance() -> bool {
+    Path::new(AVF_NEW_INSTANCE).exists()
+}
+
+fn is_verified_boot() -> bool {
+    !Path::new(DEBUG_MICRODROID_NO_VERIFIED_BOOT).exists()
+}
+
+fn is_debuggable() -> Result<bool> {
+    Ok(system_properties::read_bool(DEBUGGABLE_PROP, true)?)
+}
+
+fn should_export_tombstones(config: &VmPayloadConfig) -> bool {
+    match config.export_tombstones {
+        Some(b) => b,
+        None => is_debuggable().unwrap_or(false),
+    }
+}
+
+/// Get debug policy value in bool. It's true iff the value is explicitly set to <1>.
+fn get_debug_policy_bool(path: &'static str) -> Result<Option<bool>> {
+    let mut file = match File::open(path) {
+        Ok(dp) => dp,
+        Err(e) => {
+            info!(
+                "Assumes that debug policy is disabled because failed to read debug policy ({e:?})"
+            );
+            return Ok(Some(false));
+        }
+    };
+    let mut log: [u8; 4] = Default::default();
+    file.read_exact(&mut log).context("Malformed data in {path}")?;
+    // DT spec uses big endian although Android is always little endian.
+    Ok(Some(u32::from_be_bytes(log) == 1))
+}
+
 enum MountForExec {
     Allowed,
     Disallowed,
@@ -504,65 +541,6 @@
     }
 }
 
-fn write_apex_payload_data(
-    saved_data: Option<&MicrodroidData>,
-    apex_data_from_payload: &[ApexData],
-) -> Result<()> {
-    if let Some(saved_apex_data) = saved_data.map(|d| &d.apex_data) {
-        // We don't support APEX updates. (assuming that update will change root digest)
-        ensure!(
-            saved_apex_data == apex_data_from_payload,
-            MicrodroidError::PayloadChanged(String::from("APEXes have changed."))
-        );
-        let apex_metadata = to_metadata(apex_data_from_payload);
-        // Pass metadata(with public keys and root digests) to apexd so that it uses the passed
-        // metadata instead of the default one (/dev/block/by-name/payload-metadata)
-        OpenOptions::new()
-            .create_new(true)
-            .write(true)
-            .open("/apex/vm-payload-metadata")
-            .context("Failed to open /apex/vm-payload-metadata")
-            .and_then(|f| write_metadata(&apex_metadata, f))?;
-    }
-    Ok(())
-}
-
-fn mount_extra_apks(config: &VmPayloadConfig, zipfuse: &mut Zipfuse) -> Result<()> {
-    // For now, only the number of apks is important, as the mount point and dm-verity name is fixed
-    for i in 0..config.extra_apks.len() {
-        let mount_dir = format!("/mnt/extra-apk/{i}");
-        create_dir(Path::new(&mount_dir)).context("Failed to create mount dir for extra apks")?;
-
-        let mount_for_exec =
-            if cfg!(multi_tenant) { MountForExec::Allowed } else { MountForExec::Disallowed };
-        // These run asynchronously in parallel - we wait later for them to complete.
-        zipfuse.mount(
-            mount_for_exec,
-            "fscontext=u:object_r:zipfusefs:s0,context=u:object_r:extra_apk_file:s0",
-            Path::new(&format!("/dev/block/mapper/extra-apk-{i}")),
-            Path::new(&mount_dir),
-            format!("microdroid_manager.extra_apk.mounted.{i}"),
-        )?;
-    }
-
-    Ok(())
-}
-
-fn setup_config_sysprops(config: &VmPayloadConfig) -> Result<()> {
-    if config.enable_authfs {
-        system_properties::write("microdroid_manager.authfs.enabled", "1")
-            .context("failed to write microdroid_manager.authfs.enabled")?;
-    }
-    system_properties::write("microdroid_manager.config_done", "1")
-        .context("failed to write microdroid_manager.config_done")?;
-    Ok(())
-}
-
-// Waits until linker config is generated
-fn wait_for_apex_config_done() -> Result<()> {
-    wait_for_property_true(APEX_CONFIG_DONE_PROP).context("Failed waiting for apex config done")
-}
-
 fn wait_for_property_true(property_name: &str) -> Result<()> {
     let mut prop = PropertyWatcher::new(property_name)?;
     loop {
diff --git a/microdroid_manager/src/verify.rs b/microdroid_manager/src/verify.rs
index 22f3414..e63530b 100644
--- a/microdroid_manager/src/verify.rs
+++ b/microdroid_manager/src/verify.rs
@@ -12,18 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::instance::{ApkData, MicrodroidData, RootHash};
-use crate::payload::get_apex_data_from_payload;
-use crate::{is_strict_boot, is_verified_boot, write_apex_payload_data, MicrodroidError};
+use crate::instance::{ApexData, ApkData, MicrodroidData, RootHash};
+use crate::payload::{get_apex_data_from_payload, to_metadata};
+use crate::{is_strict_boot, is_verified_boot, MicrodroidError};
 use anyhow::{anyhow, ensure, Context, Result};
 use apkmanifest::get_manifest_info;
 use apkverify::{get_public_key_der, verify, V4Signature};
 use glob::glob;
 use itertools::sorted;
 use log::{info, warn};
-use microdroid_metadata::Metadata;
+use microdroid_metadata::{write_metadata, Metadata};
 use rand::Fill;
 use rustutils::system_properties;
+use std::fs::OpenOptions;
 use std::path::Path;
 use std::process::{Child, Command};
 use std::str;
@@ -134,8 +135,10 @@
         write_apex_payload_data(saved_data, &apex_data_from_payload)?;
     }
 
-    // Start apexd to activate APEXes
-    system_properties::write("ctl.start", "apexd-vm")?;
+    if cfg!(not(dice_changes)) {
+        // Start apexd to activate APEXes
+        system_properties::write("ctl.start", "apexd-vm")?;
+    }
 
     // TODO(inseob): add timeout
     apkdmverity_child.wait()?;
@@ -207,6 +210,29 @@
     })
 }
 
+fn write_apex_payload_data(
+    saved_data: Option<&MicrodroidData>,
+    apex_data_from_payload: &[ApexData],
+) -> Result<()> {
+    if let Some(saved_apex_data) = saved_data.map(|d| &d.apex_data) {
+        // We don't support APEX updates. (assuming that update will change root digest)
+        ensure!(
+            saved_apex_data == apex_data_from_payload,
+            MicrodroidError::PayloadChanged(String::from("APEXes have changed."))
+        );
+        let apex_metadata = to_metadata(apex_data_from_payload);
+        // Pass metadata (with public keys and root digests) to apexd so that it uses the passed
+        // metadata instead of the default one (/dev/block/by-name/payload-metadata)
+        OpenOptions::new()
+            .create_new(true)
+            .write(true)
+            .open("/apex/vm-payload-metadata")
+            .context("Failed to open /apex/vm-payload-metadata")
+            .and_then(|f| write_metadata(&apex_metadata, f))?;
+    }
+    Ok(())
+}
+
 fn get_apk_root_hash_from_idsig<P: AsRef<Path>>(idsig_path: P) -> Result<Box<RootHash>> {
     Ok(V4Signature::from_idsig_path(idsig_path)?.hashing_info.raw_root_hash)
 }
diff --git a/microdroid_manager/src/vm_payload_service.rs b/microdroid_manager/src/vm_payload_service.rs
index 0661314..d3346d8 100644
--- a/microdroid_manager/src/vm_payload_service.rs
+++ b/microdroid_manager/src/vm_payload_service.rs
@@ -22,31 +22,12 @@
 use anyhow::{anyhow, Context, Result};
 use avflog::LogResult;
 use binder::{Interface, BinderFeatures, ExceptionCode, Strong, IntoBinderResult, Status};
-use diced_open_dice::{DiceArtifacts, derive_cdi_leaf_priv, PrivateKey, sign};
+use client_vm_csr::{generate_attestation_key_and_csr, ClientVmAttestationData};
+use diced_open_dice::DiceArtifacts;
 use log::info;
 use rpcbinder::RpcServer;
-
 use crate::vm_secret::VmSecret;
-use coset::{
-    iana, CborSerializable, CoseKey, CoseKeyBuilder, CoseSign, CoseSignBuilder, CoseSignature,
-    CoseSignatureBuilder, HeaderBuilder,
-};
-use openssl::{
-    bn::{BigNum, BigNumContext},
-    ec::{EcGroup, EcKey, EcKeyRef},
-    ecdsa::EcdsaSig,
-    nid::Nid,
-    pkey::Private,
-    sha::sha256,
-};
-use service_vm_comm::{Csr, CsrPayload};
 use std::os::unix::io::OwnedFd;
-use zeroize::Zeroizing;
-
-const ATTESTATION_KEY_NID: Nid = Nid::X9_62_PRIME256V1; // NIST P-256 curve
-const ATTESTATION_KEY_ALGO: iana::Algorithm = iana::Algorithm::ES256;
-const ATTESTATION_KEY_CURVE: iana::EllipticCurve = iana::EllipticCurve::P_256;
-const ATTESTATION_KEY_AFFINE_COORDINATE_SIZE: i32 = 32;
 
 /// Implementation of `IVmPayloadService`.
 struct VmPayloadService {
@@ -90,11 +71,21 @@
 
     fn requestAttestation(&self, challenge: &[u8]) -> binder::Result<AttestationResult> {
         self.check_restricted_apis_allowed()?;
-        let (private_key, csr) = generate_attestation_key_and_csr(challenge, self.secret.dice())
+        let ClientVmAttestationData { private_key, csr } =
+            generate_attestation_key_and_csr(challenge, self.secret.dice())
+                .map_err(|e| {
+                    Status::new_service_specific_error_str(
+                        STATUS_FAILED_TO_PREPARE_CSR_AND_KEY,
+                        Some(format!("Failed to prepare the CSR and key pair: {e:?}")),
+                    )
+                })
+                .with_log()?;
+        let csr = csr
+            .into_cbor_vec()
             .map_err(|e| {
                 Status::new_service_specific_error_str(
                     STATUS_FAILED_TO_PREPARE_CSR_AND_KEY,
-                    Some(format!("Failed to prepare the CSR and key pair: {e:?}")),
+                    Some(format!("Failed to serialize CSR into CBOR: {e:?}")),
                 )
             })
             .with_log()?;
@@ -106,93 +97,6 @@
     }
 }
 
-fn generate_attestation_key_and_csr(
-    challenge: &[u8],
-    dice_artifacts: &dyn DiceArtifacts,
-) -> Result<(Zeroizing<Vec<u8>>, Vec<u8>)> {
-    let group = EcGroup::from_curve_name(ATTESTATION_KEY_NID)?;
-    let attestation_key = EcKey::generate(&group)?;
-    let csr = build_csr(challenge, attestation_key.as_ref(), dice_artifacts)?;
-
-    let csr = csr.into_cbor_vec().context("Failed to serialize CSR")?;
-    let private_key = attestation_key.private_key_to_der()?;
-    Ok((Zeroizing::new(private_key), csr))
-}
-
-fn build_csr(
-    challenge: &[u8],
-    attestation_key: &EcKeyRef<Private>,
-    dice_artifacts: &dyn DiceArtifacts,
-) -> Result<Csr> {
-    // Builds CSR Payload to be signed.
-    let public_key =
-        to_cose_public_key(attestation_key)?.to_vec().context("Failed to serialize public key")?;
-    let csr_payload = CsrPayload { public_key, challenge: challenge.to_vec() };
-    let csr_payload = csr_payload.into_cbor_vec()?;
-
-    // Builds signed CSR Payload.
-    let cdi_leaf_priv = derive_cdi_leaf_priv(dice_artifacts)?;
-    let signed_csr_payload = build_signed_data(csr_payload, &cdi_leaf_priv, attestation_key)?
-        .to_vec()
-        .context("Failed to serialize signed CSR payload")?;
-
-    // Builds CSR.
-    let dice_cert_chain = dice_artifacts.bcc().ok_or(anyhow!("bcc is none"))?.to_vec();
-    Ok(Csr { dice_cert_chain, signed_csr_payload })
-}
-
-fn build_signed_data(
-    payload: Vec<u8>,
-    cdi_leaf_priv: &PrivateKey,
-    attestation_key: &EcKeyRef<Private>,
-) -> Result<CoseSign> {
-    let cdi_leaf_sig_headers = build_signature_headers(iana::Algorithm::EdDSA);
-    let attestation_key_sig_headers = build_signature_headers(ATTESTATION_KEY_ALGO);
-    let aad = &[];
-    let signed_data = CoseSignBuilder::new()
-        .payload(payload)
-        .try_add_created_signature(cdi_leaf_sig_headers, aad, |message| {
-            sign(message, cdi_leaf_priv.as_array()).map(|v| v.to_vec())
-        })?
-        .try_add_created_signature(attestation_key_sig_headers, aad, |message| {
-            ecdsa_sign(message, attestation_key)
-        })?
-        .build();
-    Ok(signed_data)
-}
-
-/// Builds a signature with headers filled with the provided algorithm.
-/// The signature data will be filled later when building the signed data.
-fn build_signature_headers(alg: iana::Algorithm) -> CoseSignature {
-    let protected = HeaderBuilder::new().algorithm(alg).build();
-    CoseSignatureBuilder::new().protected(protected).build()
-}
-
-fn ecdsa_sign(message: &[u8], key: &EcKeyRef<Private>) -> Result<Vec<u8>> {
-    let digest = sha256(message);
-    // Passes the digest to `ECDSA_do_sign` as recommended in the spec:
-    // https://commondatastorage.googleapis.com/chromium-boringssl-docs/ecdsa.h.html#ECDSA_do_sign
-    let sig = EcdsaSig::sign::<Private>(&digest, key)?;
-    Ok(sig.to_der()?)
-}
-
-fn get_affine_coordinates(key: &EcKeyRef<Private>) -> Result<(Vec<u8>, Vec<u8>)> {
-    let mut ctx = BigNumContext::new()?;
-    let mut x = BigNum::new()?;
-    let mut y = BigNum::new()?;
-    key.public_key().affine_coordinates_gfp(key.group(), &mut x, &mut y, &mut ctx)?;
-    let x = x.to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?;
-    let y = y.to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?;
-    Ok((x, y))
-}
-
-fn to_cose_public_key(key: &EcKeyRef<Private>) -> Result<CoseKey> {
-    let (x, y) = get_affine_coordinates(key)?;
-    Ok(CoseKeyBuilder::new_ec2_pub_key(ATTESTATION_KEY_CURVE, x, y)
-        .algorithm(ATTESTATION_KEY_ALGO)
-        .build())
-}
-
 impl Interface for VmPayloadService {}
 
 impl VmPayloadService {
@@ -237,106 +141,3 @@
     });
     Ok(())
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use anyhow::bail;
-    use ciborium::Value;
-    use coset::{iana::EnumI64, Label};
-    use hwtrust::{dice, session::Session};
-    use openssl::pkey::Public;
-
-    /// The following data is generated randomly with urandom.
-    const CHALLENGE: [u8; 16] = [
-        0xb3, 0x66, 0xfa, 0x72, 0x92, 0x32, 0x2c, 0xd4, 0x99, 0xcb, 0x00, 0x1f, 0x0e, 0xe0, 0xc7,
-        0x41,
-    ];
-
-    #[test]
-    fn csr_and_private_key_have_correct_format() -> Result<()> {
-        let dice_artifacts = diced_sample_inputs::make_sample_bcc_and_cdis()?;
-
-        let (private_key, csr) = generate_attestation_key_and_csr(&CHALLENGE, &dice_artifacts)?;
-        let ec_private_key = EcKey::private_key_from_der(&private_key)?;
-        let csr = Csr::from_cbor_slice(&csr).unwrap();
-        let cose_sign = CoseSign::from_slice(&csr.signed_csr_payload).unwrap();
-        let aad = &[];
-
-        // Checks CSR payload.
-        let csr_payload =
-            cose_sign.payload.as_ref().and_then(|v| CsrPayload::from_cbor_slice(v).ok()).unwrap();
-        let public_key = to_cose_public_key(&ec_private_key)?.to_vec().unwrap();
-        let expected_csr_payload = CsrPayload { challenge: CHALLENGE.to_vec(), public_key };
-        assert_eq!(expected_csr_payload, csr_payload);
-
-        // Checks the first signature is signed with CDI_Leaf_Priv.
-        let session = Session::default();
-        let chain = dice::Chain::from_cbor(&session, &csr.dice_cert_chain)?;
-        let public_key = chain.leaf().subject_public_key();
-        cose_sign
-            .verify_signature(0, aad, |signature, message| public_key.verify(signature, message))?;
-
-        // Checks the second signature is signed with attestation key.
-        let attestation_public_key = CoseKey::from_slice(&csr_payload.public_key).unwrap();
-        let ec_public_key = to_ec_public_key(&attestation_public_key)?;
-        cose_sign.verify_signature(1, aad, |signature, message| {
-            ecdsa_verify(signature, message, &ec_public_key)
-        })?;
-
-        // Verifies that private key and the public key form a valid key pair.
-        let message = b"test message";
-        let signature = ecdsa_sign(message, &ec_private_key)?;
-        ecdsa_verify(&signature, message, &ec_public_key)?;
-
-        Ok(())
-    }
-
-    fn ecdsa_verify(
-        signature: &[u8],
-        message: &[u8],
-        ec_public_key: &EcKeyRef<Public>,
-    ) -> Result<()> {
-        let sig = EcdsaSig::from_der(signature)?;
-        let digest = sha256(message);
-        if sig.verify(&digest, ec_public_key)? {
-            Ok(())
-        } else {
-            bail!("Signature does not match")
-        }
-    }
-
-    fn to_ec_public_key(cose_key: &CoseKey) -> Result<EcKey<Public>> {
-        check_ec_key_params(cose_key)?;
-        let group = EcGroup::from_curve_name(ATTESTATION_KEY_NID)?;
-        let x = get_label_value_as_bignum(cose_key, Label::Int(iana::Ec2KeyParameter::X.to_i64()))?;
-        let y = get_label_value_as_bignum(cose_key, Label::Int(iana::Ec2KeyParameter::Y.to_i64()))?;
-        let key = EcKey::from_public_key_affine_coordinates(&group, &x, &y)?;
-        key.check_key()?;
-        Ok(key)
-    }
-
-    fn check_ec_key_params(cose_key: &CoseKey) -> Result<()> {
-        assert_eq!(coset::KeyType::Assigned(iana::KeyType::EC2), cose_key.kty);
-        assert_eq!(Some(coset::Algorithm::Assigned(ATTESTATION_KEY_ALGO)), cose_key.alg);
-        let crv = get_label_value(cose_key, Label::Int(iana::Ec2KeyParameter::Crv.to_i64()))?;
-        assert_eq!(&Value::from(ATTESTATION_KEY_CURVE.to_i64()), crv);
-        Ok(())
-    }
-
-    fn get_label_value_as_bignum(key: &CoseKey, label: Label) -> Result<BigNum> {
-        get_label_value(key, label)?
-            .as_bytes()
-            .map(|v| BigNum::from_slice(&v[..]).unwrap())
-            .ok_or_else(|| anyhow!("Value not a bstr."))
-    }
-
-    fn get_label_value(key: &CoseKey, label: Label) -> Result<&Value> {
-        Ok(&key
-            .params
-            .iter()
-            .find(|(k, _)| k == &label)
-            .ok_or_else(|| anyhow!("Label {:?} not found", label))?
-            .1)
-    }
-}
diff --git a/pvmfw/Android.bp b/pvmfw/Android.bp
index 946ed85..b7b5900 100644
--- a/pvmfw/Android.bp
+++ b/pvmfw/Android.bp
@@ -224,7 +224,7 @@
     srcs: [":pvmfw_platform.dts.preprocessed"],
     out: ["lib.rs"],
     tools: ["dtc"],
-    cmd: "$(location dtc) -I dts -O dtb -o $(genDir)/compiled.dtbo $(in) && " +
+    cmd: "$(location dtc) -@ -I dts -O dtb -o $(genDir)/compiled.dtbo $(in) && " +
         "(" +
         "    echo '#![no_std]';" +
         "    echo '#![allow(missing_docs)]';" +
diff --git a/pvmfw/platform.dts b/pvmfw/platform.dts
index cb8e30d..4a269c3 100644
--- a/pvmfw/platform.dts
+++ b/pvmfw/platform.dts
@@ -261,4 +261,64 @@
 		clock-frequency = <10>;
 		timeout-sec = <8>;
 	};
+
+	pviommu_0: pviommu0 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
+
+	pviommu_1: pviommu1 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
+
+	pviommu_2: pviommu2 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
+
+	pviommu_3: pviommu3 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
+
+	pviommu_4: pviommu4 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
+
+	pviommu_5: pviommu5 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
+
+	pviommu_6: pviommu6 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
+
+	pviommu_7: pviommu7 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
+
+	pviommu_8: pviommu8 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
+
+	pviommu_9: pviommu9 {
+		compatible = "pkvm,pviommu";
+		id = <PLACEHOLDER>;
+		#iommu-cells = <0>;
+	};
 };
diff --git a/pvmfw/src/device_assignment.rs b/pvmfw/src/device_assignment.rs
index 7eae09f..a92b418 100644
--- a/pvmfw/src/device_assignment.rs
+++ b/pvmfw/src/device_assignment.rs
@@ -401,17 +401,28 @@
         unsafe {
             platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
         }
+        device_info.patch(platform_dt).unwrap();
 
-        let rng_node = platform_dt.node(cstr!("/rng")).unwrap().unwrap();
-        let expected: Vec<(&CStr, Vec<u8>)> = vec![
-            (cstr!("android,rng,ignore-gctrl-reset"), Vec::<u8>::new()),
-            (cstr!("compatible"), b"android,rng\0".to_vec()),
-            (cstr!("reg"), into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF])),
-            (cstr!("interrupts"), into_fdt_prop(vec![0x0, 0xF, 0x4])),
+        type FdtResult<T> = libfdt::Result<T>;
+        let expected: Vec<(FdtResult<&CStr>, FdtResult<Vec<u8>>)> = vec![
+            (Ok(cstr!("android,rng,ignore-gctrl-reset")), Ok(Vec::new())),
+            (Ok(cstr!("compatible")), Ok(Vec::from(*b"android,rng\0"))),
+            (Ok(cstr!("interrupts")), Ok(into_fdt_prop(vec![0x0, 0xF, 0x4]))),
+            (Ok(cstr!("reg")), Ok(into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]))),
         ];
 
-        for (prop, (prop_name, prop_value)) in rng_node.properties().unwrap().zip(expected) {
-            assert_eq!((prop.name(), prop.value()), (Ok(prop_name), Ok(prop_value.as_slice())));
-        }
+        let rng_node = platform_dt.node(cstr!("/rng")).unwrap().unwrap();
+        let mut properties: Vec<_> = rng_node
+            .properties()
+            .unwrap()
+            .map(|prop| (prop.name(), prop.value().map(|x| x.into())))
+            .collect();
+        properties.sort_by(|a, b| {
+            let lhs = a.0.unwrap_or_default();
+            let rhs = b.0.unwrap_or_default();
+            lhs.partial_cmp(rhs).unwrap()
+        });
+
+        assert_eq!(properties, expected);
     }
 }
diff --git a/rialto/Android.bp b/rialto/Android.bp
index 326f6fc..28c261e 100644
--- a/rialto/Android.bp
+++ b/rialto/Android.bp
@@ -109,6 +109,8 @@
         "libandroid_logger",
         "libanyhow",
         "libciborium",
+        "libclient_vm_csr",
+        "libdiced_sample_inputs",
         "liblibc",
         "liblog_rust",
         "libservice_vm_comm",
diff --git a/rialto/tests/test.rs b/rialto/tests/test.rs
index e13b7a1..0f59350 100644
--- a/rialto/tests/test.rs
+++ b/rialto/tests/test.rs
@@ -23,9 +23,11 @@
 };
 use anyhow::{bail, Context, Result};
 use ciborium::value::Value;
+use client_vm_csr::generate_attestation_key_and_csr;
 use log::info;
 use service_vm_comm::{
-    EcdsaP256KeyPair, GenerateCertificateRequestParams, Request, Response, VmType,
+    ClientVmAttestationParams, EcdsaP256KeyPair, GenerateCertificateRequestParams, Request,
+    RequestProcessingError, Response, VmType,
 };
 use service_vm_manager::ServiceVm;
 use std::fs::File;
@@ -51,8 +53,9 @@
     let mut vm = start_service_vm(vm_type)?;
 
     check_processing_reverse_request(&mut vm)?;
-    let maced_public_key = check_processing_generating_key_pair_request(&mut vm)?;
-    check_processing_generating_certificate_request(&mut vm, maced_public_key)?;
+    let key_pair = check_processing_generating_key_pair_request(&mut vm)?;
+    check_processing_generating_certificate_request(&mut vm, &key_pair.maced_public_key)?;
+    check_attestation_request(&mut vm, &key_pair.key_blob)?;
     Ok(())
 }
 
@@ -68,17 +71,17 @@
     Ok(())
 }
 
-fn check_processing_generating_key_pair_request(vm: &mut ServiceVm) -> Result<Vec<u8>> {
+fn check_processing_generating_key_pair_request(vm: &mut ServiceVm) -> Result<EcdsaP256KeyPair> {
     let request = Request::GenerateEcdsaP256KeyPair;
 
     let response = vm.process_request(request)?;
     info!("Received response: {response:?}.");
 
     match response {
-        Response::GenerateEcdsaP256KeyPair(EcdsaP256KeyPair { maced_public_key, key_blob }) => {
-            assert_array_has_nonzero(&maced_public_key);
-            assert_array_has_nonzero(&key_blob);
-            Ok(maced_public_key)
+        Response::GenerateEcdsaP256KeyPair(key_pair) => {
+            assert_array_has_nonzero(&key_pair.maced_public_key);
+            assert_array_has_nonzero(&key_pair.key_blob);
+            Ok(key_pair)
         }
         _ => bail!("Incorrect response type: {response:?}"),
     }
@@ -90,10 +93,10 @@
 
 fn check_processing_generating_certificate_request(
     vm: &mut ServiceVm,
-    maced_public_key: Vec<u8>,
+    maced_public_key: &[u8],
 ) -> Result<()> {
     let params = GenerateCertificateRequestParams {
-        keys_to_sign: vec![maced_public_key],
+        keys_to_sign: vec![maced_public_key.to_vec()],
         challenge: vec![],
     };
     let request = Request::GenerateCertificateRequest(params);
@@ -107,6 +110,31 @@
     }
 }
 
+fn check_attestation_request(vm: &mut ServiceVm, key_blob: &[u8]) -> Result<()> {
+    /// The following data was generated randomly with urandom.
+    const CHALLENGE: [u8; 16] = [
+        0x7d, 0x86, 0x58, 0x79, 0x3a, 0x09, 0xdf, 0x1c, 0xa5, 0x80, 0x80, 0x15, 0x2b, 0x13, 0x17,
+        0x5c,
+    ];
+    let dice_artifacts = diced_sample_inputs::make_sample_bcc_and_cdis()?;
+    let attestation_data = generate_attestation_key_and_csr(&CHALLENGE, &dice_artifacts)?;
+
+    let params = ClientVmAttestationParams {
+        csr: attestation_data.csr.into_cbor_vec()?,
+        remotely_provisioned_key_blob: key_blob.to_vec(),
+    };
+    let request = Request::RequestClientVmAttestation(params);
+
+    let response = vm.process_request(request)?;
+    info!("Received response: {response:?}.");
+
+    match response {
+        // TODO(b/309441500): Check the certificate once it is implemented.
+        Response::Err(RequestProcessingError::OperationUnimplemented) => Ok(()),
+        _ => bail!("Incorrect response type: {response:?}"),
+    }
+}
+
 /// TODO(b/300625792): Check the CSR with libhwtrust once the CSR is complete.
 fn check_csr(csr: Vec<u8>) -> Result<()> {
     let mut reader = io::Cursor::new(csr);
diff --git a/secretkeeper/dice_policy/Android.bp b/secretkeeper/dice_policy/Android.bp
index a7ac5b9..4f1e8b6 100644
--- a/secretkeeper/dice_policy/Android.bp
+++ b/secretkeeper/dice_policy/Android.bp
@@ -13,7 +13,9 @@
         "libanyhow",
         "libciborium",
         "libcoset",
+        "libnum_traits",
     ],
+    proc_macros: ["libnum_derive"],
 }
 
 rust_library {
diff --git a/secretkeeper/dice_policy/src/lib.rs b/secretkeeper/dice_policy/src/lib.rs
index 327b8a4..2e91305 100644
--- a/secretkeeper/dice_policy/src/lib.rs
+++ b/secretkeeper/dice_policy/src/lib.rs
@@ -57,16 +57,20 @@
 //!
 //! value = bool / int / tstr / bstr
 
-use anyhow::{anyhow, bail, Context, Result};
+use anyhow::{anyhow, bail, ensure, Context, Result};
 use ciborium::Value;
 use coset::{AsCborValue, CoseSign1};
+use num_derive::FromPrimitive;
+use num_traits::FromPrimitive;
 use std::borrow::Cow;
+use std::iter::zip;
 
 const DICE_POLICY_VERSION: u64 = 1;
 
 /// Constraint Types supported in Dice policy.
+#[repr(u16)]
 #[non_exhaustive]
-#[derive(Clone, Copy, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, FromPrimitive, PartialEq)]
 pub enum ConstraintType {
     /// Enforce exact match criteria, indicating the policy should match
     /// if the dice chain has exact same specified values.
@@ -133,6 +137,7 @@
     ///    ];
     ///
     /// 2. For hypothetical (and highly simplified) dice chain:
+    ///
     ///    [ROT_KEY, [{1 : 'a', 2 : {200 : 5, 201 : 'b'}}]]
     ///    The following can be used
     ///    constraint_spec =[
@@ -140,13 +145,7 @@
     ///     ConstraintSpec(ConstraintType::GreaterOrEqual, vec![2, 200]),// matches any value >= 5
     ///    ];
     pub fn from_dice_chain(dice_chain: &[u8], constraint_spec: &[ConstraintSpec]) -> Result<Self> {
-        // TODO(b/298217847): Check if the given dice chain adheres to Explicit-key DiceCertChain
-        // format and if not, convert it before policy construction.
-        let dice_chain = value_from_bytes(dice_chain).context("Unable to decode top-level CBOR")?;
-        let dice_chain = match dice_chain {
-            Value::Array(array) if array.len() >= 2 => array,
-            _ => bail!("Expected an array of at least length 2, found: {:?}", dice_chain),
-        };
+        let dice_chain = deserialize_dice_chain(dice_chain)?;
         let mut constraints_list: Vec<NodeConstraints> = Vec::with_capacity(dice_chain.len());
         let mut it = dice_chain.into_iter();
 
@@ -167,6 +166,61 @@
             node_constraints_list: constraints_list.into_boxed_slice(),
         })
     }
+
+    /// Dice chain policy verifier: compares the input dice chain against this dice policy.
+    /// Returns Ok(()) if the dice chain meets the constraints set in the dice policy,
+    /// otherwise returns an error describing the mismatch.
+    /// TODO(b/291238565) Create a separate error module for DicePolicy mismatches.
+    pub fn matches_dice_chain(&self, dice_chain: &[u8]) -> Result<()> {
+        let dice_chain = deserialize_dice_chain(dice_chain)?;
+        ensure!(
+            dice_chain.len() == self.node_constraints_list.len(),
+            format!(
+                "Dice chain size({}) does not match policy({})",
+                dice_chain.len(),
+                self.node_constraints_list.len()
+            )
+        );
+
+        for (n, (dice_node, node_constraints)) in
+            zip(dice_chain, self.node_constraints_list.iter()).enumerate()
+        {
+            let dice_node_payload = if n == 0 {
+                dice_node
+            } else {
+                cbor_value_from_cose_sign(dice_node)
+                    .with_context(|| format!("Unable to get Cose payload at: {}", n))?
+            };
+            check_constraints_on_node(node_constraints, &dice_node_payload)
+                .context(format!("Mismatch found at {}", n))?;
+        }
+        Ok(())
+    }
+}
+
+fn check_constraints_on_node(node_constraints: &NodeConstraints, dice_node: &Value) -> Result<()> {
+    for constraint in node_constraints.0.iter() {
+        check_constraint_on_node(constraint, dice_node)?;
+    }
+    Ok(())
+}
+
+fn check_constraint_on_node(constraint: &Constraint, dice_node: &Value) -> Result<()> {
+    let Constraint(cons_type, path, value_in_constraint) = constraint;
+    let value_in_node = lookup_value_in_nested_map(dice_node, path)?;
+    match ConstraintType::from_u16(*cons_type).ok_or(anyhow!("Unexpected Constraint type"))? {
+        ConstraintType::ExactMatch => ensure!(value_in_node == *value_in_constraint),
+        ConstraintType::GreaterOrEqual => {
+            let value_in_node = value_in_node
+                .as_integer()
+                .ok_or(anyhow!("Mismatch type: expected a cbor integer"))?;
+            let value_min = value_in_constraint
+                .as_integer()
+                .ok_or(anyhow!("Mismatch type: expected a cbor integer"))?;
+            ensure!(value_in_node >= value_min);
+        }
+    };
+    Ok(())
 }
 
 // Take the payload of a dice node & construct the constraints on it.
@@ -231,6 +285,17 @@
         Some(payload) => Ok(value_from_bytes(&payload)?),
     }
 }
+fn deserialize_dice_chain(dice_chain_bytes: &[u8]) -> Result<Vec<Value>> {
+    // TODO(b/298217847): Check if the given dice chain adheres to Explicit-key DiceCertChain
+    // format and if not, convert it.
+    let dice_chain =
+        value_from_bytes(dice_chain_bytes).context("Unable to decode top-level CBOR")?;
+    let dice_chain = match dice_chain {
+        Value::Array(array) if array.len() >= 2 => array,
+        _ => bail!("Expected an array of at least length 2, found: {:?}", dice_chain),
+    };
+    Ok(dice_chain)
+}
 
 /// Decodes the provided binary CBOR-encoded value and returns a
 /// ciborium::Value struct wrapped in Result.
@@ -266,38 +331,29 @@
         constraint_spec: Vec<ConstraintSpec>,
         // The expected dice policy if above constraint_spec is applied to input_dice.
         expected_dice_policy: DicePolicy,
+        // Another dice chain, almost the same as input_dice, but (roughly) imitating
+        // an 'updated' one, i.e., some int entries are higher than the corresponding
+        // entries in input_dice.
+        updated_input_dice: Vec<u8>,
     }
 
     impl TestArtifacts {
         // Get an example instance of TestArtifacts. This uses a hard coded, hypothetical
         // chain of certificates & a list of constraint_spec on this.
         fn get_example() -> Self {
-            const EXAMPLE_NUM: i64 = 59765;
+            const EXAMPLE_NUM_1: i64 = 59765;
+            const EXAMPLE_NUM_2: i64 = 59766;
             const EXAMPLE_STRING: &str = "testing_dice_policy";
+            const UNCONSTRAINED_STRING: &str = "unconstrained_string";
+            const ANOTHER_UNCONSTRAINED_STRING: &str = "another_unconstrained_string";
 
             let rot_key = CoseKey::default().to_cbor_value().unwrap();
-            let nested_payload = cbor!({
-                100 => EXAMPLE_NUM
-            })
-            .unwrap();
-            let payload = cbor!({
-                1 => EXAMPLE_STRING,
-                2 => "some_other_example_string",
-                3 => Value::Bytes(value_to_bytes(&nested_payload).unwrap()),
-            })
-            .unwrap();
-            let payload = value_to_bytes(&payload).unwrap();
-            let dice_node = CoseSign1 {
-                protected: ProtectedHeader::default(),
-                unprotected: Header::default(),
-                payload: Some(payload),
-                signature: b"ddef".to_vec(),
-            }
-            .to_cbor_value()
-            .unwrap();
-            let input_dice = Value::Array([rot_key.clone(), dice_node].to_vec());
-
-            let input_dice = value_to_bytes(&input_dice).unwrap();
+            let input_dice = Self::get_dice_chain_helper(
+                rot_key.clone(),
+                EXAMPLE_NUM_1,
+                EXAMPLE_STRING,
+                UNCONSTRAINED_STRING,
+            );
 
             // Now construct constraint_spec on the input dice, note this will use the keys
             // which are also hardcoded within the get_dice_chain_helper.
@@ -305,7 +361,7 @@
             let constraint_spec = vec![
                 ConstraintSpec::new(ConstraintType::ExactMatch, vec![1]).unwrap(),
                 // Notice how key "2" is (deliberately) absent in ConstraintSpec
-                // so policy should not constraint it.
+                // so policy should not constrain it.
                 ConstraintSpec::new(ConstraintType::GreaterOrEqual, vec![3, 100]).unwrap(),
             ];
             let expected_dice_policy = DicePolicy {
@@ -325,12 +381,53 @@
                         Constraint(
                             ConstraintType::GreaterOrEqual as u16,
                             vec![3, 100],
-                            Value::from(EXAMPLE_NUM),
+                            Value::from(EXAMPLE_NUM_1),
                         ),
                     ])),
                 ]),
             };
-            Self { input_dice, constraint_spec, expected_dice_policy }
+
+            let updated_input_dice = Self::get_dice_chain_helper(
+                rot_key.clone(),
+                EXAMPLE_NUM_2,
+                EXAMPLE_STRING,
+                ANOTHER_UNCONSTRAINED_STRING,
+            );
+            Self { input_dice, constraint_spec, expected_dice_policy, updated_input_dice }
+        }
+
+        // Helper method to generate a dice chain with a given rot_key.
+        // Other arguments are ad-hoc values in the nested map. Callers use these to
+        // construct appropriate constraints in dice policies.
+        fn get_dice_chain_helper(
+            rot_key: Value,
+            version: i64,
+            constrained_string: &str,
+            unconstrained_string: &str,
+        ) -> Vec<u8> {
+            let nested_payload = cbor!({
+                100 => version
+            })
+            .unwrap();
+
+            let payload = cbor!({
+                1 => constrained_string,
+                2 => unconstrained_string,
+                3 => Value::Bytes(value_to_bytes(&nested_payload).unwrap()),
+            })
+            .unwrap();
+            let payload = value_to_bytes(&payload).unwrap();
+            let dice_node = CoseSign1 {
+                protected: ProtectedHeader::default(),
+                unprotected: Header::default(),
+                payload: Some(payload),
+                signature: b"ddef".to_vec(),
+            }
+            .to_cbor_value()
+            .unwrap();
+            let input_dice = Value::Array([rot_key.clone(), dice_node].to_vec());
+
+            value_to_bytes(&input_dice).unwrap()
         }
     }
 
@@ -344,6 +441,43 @@
         assert_eq!(policy, example.expected_dice_policy);
     }
 
+    test!(policy_matches_original_dice_chain);
+    fn policy_matches_original_dice_chain() {
+        let example = TestArtifacts::get_example();
+        assert!(
+            DicePolicy::from_dice_chain(&example.input_dice, &example.constraint_spec)
+                .unwrap()
+                .matches_dice_chain(&example.input_dice)
+                .is_ok(),
+            "The dice chain did not match the policy constructed out of it!"
+        );
+    }
+
+    test!(policy_matches_updated_dice_chain);
+    fn policy_matches_updated_dice_chain() {
+        let example = TestArtifacts::get_example();
+        assert!(
+            DicePolicy::from_dice_chain(&example.input_dice, &example.constraint_spec)
+                .unwrap()
+                .matches_dice_chain(&example.updated_input_dice)
+                .is_ok(),
+            "The updated dice chain did not match the original policy!"
+        );
+    }
+
+    test!(policy_mismatch_downgraded_dice_chain);
+    fn policy_mismatch_downgraded_dice_chain() {
+        let example = TestArtifacts::get_example();
+        assert!(
+            DicePolicy::from_dice_chain(&example.updated_input_dice, &example.constraint_spec)
+                .unwrap()
+                .matches_dice_chain(&example.input_dice)
+                .is_err(),
+            "The (downgraded) dice chain matched the policy constructed out of the 'updated'\
+            dice chain!!"
+        );
+    }
+
     test!(policy_dice_size_is_same);
     fn policy_dice_size_is_same() {
         // This is the number of certs in compos bcc (including the first ROT)
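
For context, the new `matches_dice_chain` API is meant to pair with the existing `from_dice_chain`
constructor. The snippet below is a minimal sketch, assuming the crate is linked as `dice_policy`
and that the byte slices hold CBOR-encoded chains shaped like the test artifacts above; the helper
name is hypothetical.

```rust
use anyhow::Result;
use dice_policy::{ConstraintSpec, ConstraintType, DicePolicy};

/// Builds a policy from the chain observed at enrollment, then checks a later chain against it.
fn enroll_then_verify(enrolled_chain: &[u8], presented_chain: &[u8]) -> Result<()> {
    let constraint_spec = vec![
        // Payload key 1 must match exactly; the nested path [3, 100] must never decrease.
        ConstraintSpec::new(ConstraintType::ExactMatch, vec![1]).unwrap(),
        ConstraintSpec::new(ConstraintType::GreaterOrEqual, vec![3, 100]).unwrap(),
    ];
    let policy = DicePolicy::from_dice_chain(enrolled_chain, &constraint_spec)?;
    // Ok(()) only if every node of the presented chain satisfies its recorded constraints.
    policy.matches_dice_chain(presented_chain)
}
```
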
diff --git a/service_vm/client_vm_csr/Android.bp b/service_vm/client_vm_csr/Android.bp
new file mode 100644
index 0000000..8d738d8
--- /dev/null
+++ b/service_vm/client_vm_csr/Android.bp
@@ -0,0 +1,37 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+    name: "libclient_vm_csr_defaults",
+    crate_name: "client_vm_csr",
+    srcs: ["src/lib.rs"],
+    rustlibs: [
+        "libanyhow",
+        "libcoset",
+        "libdiced_open_dice",
+        "libopenssl",
+        "libservice_vm_comm",
+        "libzeroize",
+    ],
+}
+
+rust_library {
+    name: "libclient_vm_csr",
+    defaults: ["libclient_vm_csr_defaults"],
+    prefer_rlib: true,
+    apex_available: [
+        "com.android.virt",
+    ],
+}
+
+rust_test {
+    name: "libclient_vm_csr.test",
+    defaults: ["libclient_vm_csr_defaults"],
+    test_suites: ["general-tests"],
+    rustlibs: [
+        "libciborium",
+        "libdiced_sample_inputs",
+        "libhwtrust",
+    ],
+}
diff --git a/service_vm/client_vm_csr/TEST_MAPPING b/service_vm/client_vm_csr/TEST_MAPPING
new file mode 100644
index 0000000..5bc06c0
--- /dev/null
+++ b/service_vm/client_vm_csr/TEST_MAPPING
@@ -0,0 +1,9 @@
+// When adding or removing tests here, don't forget to amend _all_modules list in
+// wireless/android/busytown/ath_config/configs/prod/avf/tests.gcl
+{
+  "avf-presubmit" : [
+    {
+      "name" : "libclient_vm_csr.test"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/service_vm/client_vm_csr/src/lib.rs b/service_vm/client_vm_csr/src/lib.rs
new file mode 100644
index 0000000..512ecaf
--- /dev/null
+++ b/service_vm/client_vm_csr/src/lib.rs
@@ -0,0 +1,242 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Generate the attestation key and CSR for the client VM in remote
+//! attestation.
+
+use anyhow::{anyhow, Context, Result};
+use coset::{
+    iana, CborSerializable, CoseKey, CoseKeyBuilder, CoseSign, CoseSignBuilder, CoseSignature,
+    CoseSignatureBuilder, HeaderBuilder,
+};
+use diced_open_dice::{derive_cdi_leaf_priv, sign, DiceArtifacts, PrivateKey};
+use openssl::{
+    bn::{BigNum, BigNumContext},
+    ec::{EcGroup, EcKey, EcKeyRef},
+    ecdsa::EcdsaSig,
+    nid::Nid,
+    pkey::Private,
+    sha::sha256,
+};
+use service_vm_comm::{Csr, CsrPayload};
+use zeroize::Zeroizing;
+
+/// Key parameters for the attestation key.
+///
+/// See service_vm/comm/src/client_vm_csr.cddl for more information about the attestation key.
+const ATTESTATION_KEY_NID: Nid = Nid::X9_62_PRIME256V1; // NIST P-256 curve
+const ATTESTATION_KEY_ALGO: iana::Algorithm = iana::Algorithm::ES256;
+const ATTESTATION_KEY_CURVE: iana::EllipticCurve = iana::EllipticCurve::P_256;
+const ATTESTATION_KEY_AFFINE_COORDINATE_SIZE: i32 = 32;
+
+/// Represents the output of generating the attestation key and CSR for the client VM.
+pub struct ClientVmAttestationData {
+    /// DER-encoded ECPrivateKey to be attested.
+    pub private_key: Zeroizing<Vec<u8>>,
+
+    /// CSR containing client VM information and the public key corresponding to the
+    /// private key to be attested.
+    pub csr: Csr,
+}
+
+/// Generates the attestation key and CSR including the public key to be attested for the
+/// client VM in remote attestation.
+pub fn generate_attestation_key_and_csr(
+    challenge: &[u8],
+    dice_artifacts: &dyn DiceArtifacts,
+) -> Result<ClientVmAttestationData> {
+    let group = EcGroup::from_curve_name(ATTESTATION_KEY_NID)?;
+    let attestation_key = EcKey::generate(&group)?;
+
+    let csr = build_csr(challenge, attestation_key.as_ref(), dice_artifacts)?;
+    let private_key = attestation_key.private_key_to_der()?;
+    Ok(ClientVmAttestationData { private_key: Zeroizing::new(private_key), csr })
+}
+
+fn build_csr(
+    challenge: &[u8],
+    attestation_key: &EcKeyRef<Private>,
+    dice_artifacts: &dyn DiceArtifacts,
+) -> Result<Csr> {
+    // Builds CSR Payload to be signed.
+    let public_key =
+        to_cose_public_key(attestation_key)?.to_vec().context("Failed to serialize public key")?;
+    let csr_payload = CsrPayload { public_key, challenge: challenge.to_vec() };
+    let csr_payload = csr_payload.into_cbor_vec()?;
+
+    // Builds signed CSR Payload.
+    let cdi_leaf_priv = derive_cdi_leaf_priv(dice_artifacts)?;
+    let signed_csr_payload = build_signed_data(csr_payload, &cdi_leaf_priv, attestation_key)?
+        .to_vec()
+        .context("Failed to serialize signed CSR payload")?;
+
+    // Builds CSR.
+    let dice_cert_chain = dice_artifacts.bcc().ok_or(anyhow!("bcc is none"))?.to_vec();
+    Ok(Csr { dice_cert_chain, signed_csr_payload })
+}
+
+fn build_signed_data(
+    payload: Vec<u8>,
+    cdi_leaf_priv: &PrivateKey,
+    attestation_key: &EcKeyRef<Private>,
+) -> Result<CoseSign> {
+    let cdi_leaf_sig_headers = build_signature_headers(iana::Algorithm::EdDSA);
+    let attestation_key_sig_headers = build_signature_headers(ATTESTATION_KEY_ALGO);
+    let aad = &[];
+    let signed_data = CoseSignBuilder::new()
+        .payload(payload)
+        .try_add_created_signature(cdi_leaf_sig_headers, aad, |message| {
+            sign(message, cdi_leaf_priv.as_array()).map(|v| v.to_vec())
+        })?
+        .try_add_created_signature(attestation_key_sig_headers, aad, |message| {
+            ecdsa_sign(message, attestation_key)
+        })?
+        .build();
+    Ok(signed_data)
+}
+
+/// Builds a signature with headers filled with the provided algorithm.
+/// The signature data will be filled later when building the signed data.
+fn build_signature_headers(alg: iana::Algorithm) -> CoseSignature {
+    let protected = HeaderBuilder::new().algorithm(alg).build();
+    CoseSignatureBuilder::new().protected(protected).build()
+}
+
+fn ecdsa_sign(message: &[u8], key: &EcKeyRef<Private>) -> Result<Vec<u8>> {
+    let digest = sha256(message);
+    // Passes the digest to `ECDSA_do_sign` as recommended in the spec:
+    // https://commondatastorage.googleapis.com/chromium-boringssl-docs/ecdsa.h.html#ECDSA_do_sign
+    let sig = EcdsaSig::sign::<Private>(&digest, key)?;
+    Ok(sig.to_der()?)
+}
+
+fn get_affine_coordinates(key: &EcKeyRef<Private>) -> Result<(Vec<u8>, Vec<u8>)> {
+    let mut ctx = BigNumContext::new()?;
+    let mut x = BigNum::new()?;
+    let mut y = BigNum::new()?;
+    key.public_key().affine_coordinates_gfp(key.group(), &mut x, &mut y, &mut ctx)?;
+    let x = x.to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?;
+    let y = y.to_vec_padded(ATTESTATION_KEY_AFFINE_COORDINATE_SIZE)?;
+    Ok((x, y))
+}
+
+fn to_cose_public_key(key: &EcKeyRef<Private>) -> Result<CoseKey> {
+    let (x, y) = get_affine_coordinates(key)?;
+    Ok(CoseKeyBuilder::new_ec2_pub_key(ATTESTATION_KEY_CURVE, x, y)
+        .algorithm(ATTESTATION_KEY_ALGO)
+        .build())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use anyhow::bail;
+    use ciborium::Value;
+    use coset::{iana::EnumI64, Label};
+    use hwtrust::{dice, session::Session};
+    use openssl::pkey::Public;
+
+    /// The following data was generated randomly with urandom.
+    const CHALLENGE: [u8; 16] = [
+        0xb3, 0x66, 0xfa, 0x72, 0x92, 0x32, 0x2c, 0xd4, 0x99, 0xcb, 0x00, 0x1f, 0x0e, 0xe0, 0xc7,
+        0x41,
+    ];
+
+    #[test]
+    fn csr_and_private_key_have_correct_format() -> Result<()> {
+        let dice_artifacts = diced_sample_inputs::make_sample_bcc_and_cdis()?;
+
+        let ClientVmAttestationData { private_key, csr } =
+            generate_attestation_key_and_csr(&CHALLENGE, &dice_artifacts)?;
+        let ec_private_key = EcKey::private_key_from_der(&private_key)?;
+        let cose_sign = CoseSign::from_slice(&csr.signed_csr_payload).unwrap();
+        let aad = &[];
+
+        // Checks CSR payload.
+        let csr_payload =
+            cose_sign.payload.as_ref().and_then(|v| CsrPayload::from_cbor_slice(v).ok()).unwrap();
+        let public_key = to_cose_public_key(&ec_private_key)?.to_vec().unwrap();
+        let expected_csr_payload = CsrPayload { challenge: CHALLENGE.to_vec(), public_key };
+        assert_eq!(expected_csr_payload, csr_payload);
+
+        // Checks the first signature is signed with CDI_Leaf_Priv.
+        let session = Session::default();
+        let chain = dice::Chain::from_cbor(&session, &csr.dice_cert_chain)?;
+        let public_key = chain.leaf().subject_public_key();
+        cose_sign
+            .verify_signature(0, aad, |signature, message| public_key.verify(signature, message))?;
+
+        // Checks the second signature is signed with attestation key.
+        let attestation_public_key = CoseKey::from_slice(&csr_payload.public_key).unwrap();
+        let ec_public_key = to_ec_public_key(&attestation_public_key)?;
+        cose_sign.verify_signature(1, aad, |signature, message| {
+            ecdsa_verify(signature, message, &ec_public_key)
+        })?;
+
+        // Verifies that the private key and the public key form a valid key pair.
+        let message = b"test message";
+        let signature = ecdsa_sign(message, &ec_private_key)?;
+        ecdsa_verify(&signature, message, &ec_public_key)?;
+
+        Ok(())
+    }
+
+    fn ecdsa_verify(
+        signature: &[u8],
+        message: &[u8],
+        ec_public_key: &EcKeyRef<Public>,
+    ) -> Result<()> {
+        let sig = EcdsaSig::from_der(signature)?;
+        let digest = sha256(message);
+        if sig.verify(&digest, ec_public_key)? {
+            Ok(())
+        } else {
+            bail!("Signature does not match")
+        }
+    }
+
+    fn to_ec_public_key(cose_key: &CoseKey) -> Result<EcKey<Public>> {
+        check_ec_key_params(cose_key)?;
+        let group = EcGroup::from_curve_name(ATTESTATION_KEY_NID)?;
+        let x = get_label_value_as_bignum(cose_key, Label::Int(iana::Ec2KeyParameter::X.to_i64()))?;
+        let y = get_label_value_as_bignum(cose_key, Label::Int(iana::Ec2KeyParameter::Y.to_i64()))?;
+        let key = EcKey::from_public_key_affine_coordinates(&group, &x, &y)?;
+        key.check_key()?;
+        Ok(key)
+    }
+
+    fn check_ec_key_params(cose_key: &CoseKey) -> Result<()> {
+        assert_eq!(coset::KeyType::Assigned(iana::KeyType::EC2), cose_key.kty);
+        assert_eq!(Some(coset::Algorithm::Assigned(ATTESTATION_KEY_ALGO)), cose_key.alg);
+        let crv = get_label_value(cose_key, Label::Int(iana::Ec2KeyParameter::Crv.to_i64()))?;
+        assert_eq!(&Value::from(ATTESTATION_KEY_CURVE.to_i64()), crv);
+        Ok(())
+    }
+
+    fn get_label_value_as_bignum(key: &CoseKey, label: Label) -> Result<BigNum> {
+        get_label_value(key, label)?
+            .as_bytes()
+            .map(|v| BigNum::from_slice(&v[..]).unwrap())
+            .ok_or_else(|| anyhow!("Value not a bstr."))
+    }
+
+    fn get_label_value(key: &CoseKey, label: Label) -> Result<&Value> {
+        Ok(&key
+            .params
+            .iter()
+            .find(|(k, _)| k == &label)
+            .ok_or_else(|| anyhow!("Label {:?} not found", label))?
+            .1)
+    }
+}
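
As a usage note, a client VM payload would typically feed the generated CSR into the new
`RequestClientVmAttestation` request, mirroring the rialto test earlier in this change. The sketch
below is illustrative only; the helper name and the choice to hand the private key back to the
caller are assumptions.

```rust
use anyhow::Result;
use client_vm_csr::{generate_attestation_key_and_csr, ClientVmAttestationData};
use diced_open_dice::DiceArtifacts;
use service_vm_comm::{ClientVmAttestationParams, Request};

/// Hypothetical helper: turns a challenge plus DICE artifacts into an attestation request.
fn build_attestation_request(
    challenge: &[u8],
    dice_artifacts: &dyn DiceArtifacts,
    remotely_provisioned_key_blob: Vec<u8>,
) -> Result<(zeroize::Zeroizing<Vec<u8>>, Request)> {
    let ClientVmAttestationData { private_key, csr } =
        generate_attestation_key_and_csr(challenge, dice_artifacts)?;
    let params = ClientVmAttestationParams {
        // The CSR travels as CBOR, matching client_vm_csr.cddl.
        csr: csr.into_cbor_vec()?,
        remotely_provisioned_key_blob,
    };
    // The private key stays with the caller; only the request is sent to the service VM.
    Ok((private_key, Request::RequestClientVmAttestation(params)))
}
```
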
diff --git a/service_vm/comm/src/client_vm_csr.cddl b/service_vm/comm/src/client_vm_csr.cddl
new file mode 100644
index 0000000..bbc709a
--- /dev/null
+++ b/service_vm/comm/src/client_vm_csr.cddl
@@ -0,0 +1,62 @@
+; CDDL for the CSR sent from the client VM to the RKP VM for pVM remote attestation.
+
+Csr = [
+    DiceCertChain,      ; The DICE chain containing measurement of the client VM. See
+                        ; keymint/generateCertificateRequestV2.cddl for the DiceCertChain
+                        ; definition.
+    SignedData,
+]
+
+; COSE_Sign [RFC9052 s4.1]
+SignedData = [
+    protected: {},            ; The signing algorithms are specified in each signature
+                              ; separately.
+    unprotected: {},
+    payload: bstr .cbor CsrPayload,
+    Signatures,
+]
+
+CsrPayload = [                    ; CBOR Array defining the payload for CSR
+   challenge: bstr .size (0..64), ; The challenge is provided by the client server.
+                                  ; It will be included in the certificate chain in the
+                                  ; attestation result, serving as proof of the freshness
+                                  ; of the result.
+   PublicKey,                     ; COSE_Key encoded EC P-256 public key [ RFC9053 s7.1.1 ]
+                                  ; to be attested. See keymint/PublicKey.cddl for the
+                                  ; definition, the test flag `-70000` is never used.
+]
+
+Signatures = [
+    dice_cdi_leaf_signature: COSE_Signature_Dice_Cdi_Leaf,
+    attestation_key_signature: COSE_Signature_Attestation_Key,
+]
+
+; COSE_Signature [RFC9052 s4.1]
+COSE_Signature_Dice_Cdi_Leaf = [
+    protected: bstr .cbor { 1: AlgorithmEdDSA },
+    unprotected: {},
+    signature: bstr,                         ; Ed25519(CDI_Leaf_Priv, SigStruct)
+]
+
+; COSE_Signature [RFC9052 s4.1]
+COSE_Signature_Attestation_Key = [
+    protected: bstr .cbor { 1: AlgorithmES256 },
+    unprotected: {},
+    signature: bstr,                         ; ECDSA(PrivateKey, SigStruct)
+]
+
+; Sig_structure for SignedData [ RFC9052 s4.4 ]
+SigStruct = {
+    context: "Signature",
+    external_aad: bstr .size 0,
+    payload: bstr .cbor CsrPayload,
+}
+
+; ASN.1 DER-encoded EC P-256 ECPrivateKey [ RFC 5915 s3 ]:
+; ECPrivateKey ::= SEQUENCE {
+;     version        INTEGER { ecPrivkeyVer1(1) } (ecPrivkeyVer1),
+;     privateKey     OCTET STRING,
+;     parameters [0] ECParameters {{ NamedCurve }} OPTIONAL,
+;     publicKey  [1] BIT STRING OPTIONAL
+;}
+PrivateKey = bstr
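
To relate the CDDL above to the Rust types in this change: the service VM peels the layers in the
opposite order, roughly as sketched below. This is a minimal sketch using the `from_cbor_slice`
helpers shown elsewhere in this change; the error mapping and helper name are illustrative, and no
signature verification is performed here.

```rust
use anyhow::{anyhow, Result};
use coset::{CborSerializable, CoseSign};
use service_vm_comm::{Csr, CsrPayload};

/// Unpacks Csr -> SignedData (COSE_Sign) -> CsrPayload, without verifying the signatures.
fn unpack_csr(csr_bytes: &[u8]) -> Result<(Vec<u8>, CsrPayload)> {
    let csr = Csr::from_cbor_slice(csr_bytes).map_err(|e| anyhow!("Invalid Csr: {e:?}"))?;
    let signed_data = CoseSign::from_slice(&csr.signed_csr_payload)
        .map_err(|e| anyhow!("Invalid SignedData: {e:?}"))?;
    // Signature 0 is made with CDI_Leaf_Priv, signature 1 with the attestation key.
    let payload = signed_data.payload.ok_or_else(|| anyhow!("Missing CsrPayload"))?;
    let csr_payload = CsrPayload::from_cbor_slice(&payload)
        .map_err(|e| anyhow!("Invalid CsrPayload: {e:?}"))?;
    Ok((csr.dice_cert_chain, csr_payload))
}
```
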
diff --git a/service_vm/comm/src/csr.rs b/service_vm/comm/src/csr.rs
index 5e1cbad..757d080 100644
--- a/service_vm/comm/src/csr.rs
+++ b/service_vm/comm/src/csr.rs
@@ -19,8 +19,11 @@
 use alloc::vec::Vec;
 use ciborium::Value;
 use coset::{self, CborSerializable, CoseError};
+use log::error;
 
 /// Represents a CSR sent from the client VM to the service VM for attestation.
+///
+/// See client_vm_csr.cddl for the definition of the CSR.
 #[derive(Clone, Debug, Eq, PartialEq)]
 pub struct Csr {
     /// The DICE certificate chain of the client VM.
@@ -52,8 +55,8 @@
             return Err(CoseError::UnexpectedItem("array", "array with 2 items"));
         }
         Ok(Self {
-            signed_csr_payload: try_as_bytes(arr.remove(1))?,
-            dice_cert_chain: try_as_bytes(arr.remove(0))?,
+            signed_csr_payload: try_as_bytes(arr.remove(1), "signed_csr_payload")?,
+            dice_cert_chain: try_as_bytes(arr.remove(0), "dice_cert_chain")?,
         })
     }
 }
@@ -91,17 +94,19 @@
             return Err(CoseError::UnexpectedItem("array", "array with 2 items"));
         }
         Ok(Self {
-            challenge: try_as_bytes(arr.remove(1))?,
-            public_key: try_as_bytes(arr.remove(0))?,
+            challenge: try_as_bytes(arr.remove(1), "challenge")?,
+            public_key: try_as_bytes(arr.remove(0), "public_key")?,
         })
     }
 }
 
-fn try_as_bytes(v: Value) -> coset::Result<Vec<u8>> {
+fn try_as_bytes(v: Value, context: &str) -> coset::Result<Vec<u8>> {
     if let Value::Bytes(data) = v {
         Ok(data)
     } else {
-        Err(CoseError::UnexpectedItem(cbor_value_type(&v), "bytes"))
+        let v_type = cbor_value_type(&v);
+        error!("The provided value type '{v_type}' is not of type 'bytes': {context}");
+        Err(CoseError::UnexpectedItem(v_type, "bytes"))
     }
 }
 
diff --git a/service_vm/comm/src/lib.rs b/service_vm/comm/src/lib.rs
index 0818f24..bb85a26 100644
--- a/service_vm/comm/src/lib.rs
+++ b/service_vm/comm/src/lib.rs
@@ -25,7 +25,7 @@
 
 pub use csr::{Csr, CsrPayload};
 pub use message::{
-    EcdsaP256KeyPair, GenerateCertificateRequestParams, Request, RequestProcessingError, Response,
-    ServiceVmRequest,
+    ClientVmAttestationParams, EcdsaP256KeyPair, GenerateCertificateRequestParams, Request,
+    RequestProcessingError, Response, ServiceVmRequest,
 };
 pub use vsock::VmType;
diff --git a/service_vm/comm/src/message.rs b/service_vm/comm/src/message.rs
index f8d7420..6dd0ccd 100644
--- a/service_vm/comm/src/message.rs
+++ b/service_vm/comm/src/message.rs
@@ -50,6 +50,22 @@
     /// Creates a certificate signing request to be sent to the
     /// provisioning server.
     GenerateCertificateRequest(GenerateCertificateRequestParams),
+
+    /// Requests the service VM to attest the client VM and issue a certificate
+    /// if the attestation succeeds.
+    RequestClientVmAttestation(ClientVmAttestationParams),
+}
+
+/// Represents the params passed to `Request::RequestClientVmAttestation`.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ClientVmAttestationParams {
+    /// The CBOR-encoded CSR signed by the CDI_Leaf_Priv of the client VM's DICE chain
+    /// and the private key to be attested.
+    /// See client_vm_csr.cddl for the definition of the CSR.
+    pub csr: Vec<u8>,
+
+    /// The key blob retrieved from RKPD by virtualizationservice.
+    pub remotely_provisioned_key_blob: Vec<u8>,
 }
 
 /// Represents a response to a request sent to the service VM.
@@ -66,6 +82,11 @@
     /// Returns a CBOR Certificate Signing Request (Csr) serialized into a byte array.
     GenerateCertificateRequest(Vec<u8>),
 
+    /// Returns a certificate covering the public key to be attested in the provided CSR.
+    /// The certificate is signed by the remotely provisioned private key and also
+    /// includes an extension that describes the attested client VM.
+    RequestClientVmAttestation(Vec<u8>),
+
     /// Encountered an error during the request processing.
     Err(RequestProcessingError),
 }
@@ -93,6 +114,12 @@
 
     /// The DICE chain of the service VM is missing.
     MissingDiceChain,
+
+    /// Failed to decrypt the remotely provisioned key blob.
+    FailedToDecryptKeyBlob,
+
+    /// The requested operation has not been implemented.
+    OperationUnimplemented,
 }
 
 impl fmt::Display for RequestProcessingError {
@@ -109,6 +136,12 @@
                 write!(f, "An error happened when serializing to/from a CBOR Value.")
             }
             Self::MissingDiceChain => write!(f, "The DICE chain of the service VM is missing"),
+            Self::FailedToDecryptKeyBlob => {
+                write!(f, "Failed to decrypt the remotely provisioned key blob")
+            }
+            Self::OperationUnimplemented => {
+                write!(f, "The requested operation has not been implemented")
+            }
         }
     }
 }
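
Callers of `process_request` need to handle both the eventual success variant and the interim
`OperationUnimplemented` error returned until the certificate issuance lands. A hedged sketch, with
a hypothetical helper name:

```rust
use anyhow::{bail, Result};
use service_vm_comm::{RequestProcessingError, Response};

/// Extracts the issued certificate, treating the not-yet-implemented case as "no certificate yet".
fn certificate_from_response(response: Response) -> Result<Option<Vec<u8>>> {
    match response {
        Response::RequestClientVmAttestation(certificate) => Ok(Some(certificate)),
        // Returned until b/309441500 is implemented; see the rialto test above.
        Response::Err(RequestProcessingError::OperationUnimplemented) => Ok(None),
        other => bail!("Unexpected response: {other:?}"),
    }
}
```
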
diff --git a/service_vm/requests/src/api.rs b/service_vm/requests/src/api.rs
index eae0370..315d2af 100644
--- a/service_vm/requests/src/api.rs
+++ b/service_vm/requests/src/api.rs
@@ -14,6 +14,7 @@
 
 //! This module contains the main API for the request processing module.
 
+use crate::client_vm;
 use crate::rkp;
 use alloc::vec::Vec;
 use diced_open_dice::DiceArtifacts;
@@ -31,6 +32,8 @@
             rkp::generate_certificate_request(p, dice_artifacts)
                 .map_or_else(Response::Err, Response::GenerateCertificateRequest)
         }
+        Request::RequestClientVmAttestation(p) => client_vm::request_attestation(p, dice_artifacts)
+            .map_or_else(Response::Err, Response::RequestClientVmAttestation),
     }
 }
 
diff --git a/service_vm/requests/src/client_vm.rs b/service_vm/requests/src/client_vm.rs
new file mode 100644
index 0000000..74c26d3
--- /dev/null
+++ b/service_vm/requests/src/client_vm.rs
@@ -0,0 +1,51 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module contains functions related to the attestation of the
+//! client VM.
+
+use crate::keyblob::decrypt_private_key;
+use alloc::vec::Vec;
+use bssl_avf::EcKey;
+use core::result;
+use coset::{CborSerializable, CoseSign};
+use diced_open_dice::DiceArtifacts;
+use log::error;
+use service_vm_comm::{ClientVmAttestationParams, Csr, RequestProcessingError};
+
+type Result<T> = result::Result<T, RequestProcessingError>;
+
+pub(super) fn request_attestation(
+    params: ClientVmAttestationParams,
+    dice_artifacts: &dyn DiceArtifacts,
+) -> Result<Vec<u8>> {
+    let csr = Csr::from_cbor_slice(&params.csr)?;
+    let _cose_sign = CoseSign::from_slice(&csr.signed_csr_payload)?;
+    // TODO(b/309440321): Verify the signatures in the `_cose_sign`.
+
+    // TODO(b/278717513): Compare client VM's DICE chain in the `csr` up to pvmfw
+    // cert with RKP VM's DICE chain.
+
+    let private_key =
+        decrypt_private_key(&params.remotely_provisioned_key_blob, dice_artifacts.cdi_seal())
+            .map_err(|e| {
+                error!("Failed to decrypt the remotely provisioned key blob: {e}");
+                RequestProcessingError::FailedToDecryptKeyBlob
+            })?;
+    let _ec_private_key = EcKey::from_ec_private_key(private_key.as_slice())?;
+
+    // TODO(b/309441500): Build a new certificate signed with the remotely provisioned
+    // `private_key`.
+    Err(RequestProcessingError::OperationUnimplemented)
+}
diff --git a/service_vm/requests/src/keyblob.rs b/service_vm/requests/src/keyblob.rs
index 456c879..1fb7a67 100644
--- a/service_vm/requests/src/keyblob.rs
+++ b/service_vm/requests/src/keyblob.rs
@@ -20,8 +20,6 @@
 use core::result;
 use serde::{Deserialize, Serialize};
 use service_vm_comm::RequestProcessingError;
-// TODO(b/241428146): This will be used once the retrieval mechanism is available.
-#[cfg(test)]
 use zeroize::Zeroizing;
 
 type Result<T> = result::Result<T, RequestProcessingError>;
@@ -61,9 +59,6 @@
         EncryptedKeyBlobV1::new(private_key, kek_secret).map(Self::V1)
     }
 
-    // TODO(b/241428146): Use this function to decrypt the retrieved keyblob once the retrieval
-    // mechanism is available.
-    #[cfg(test)]
     pub(crate) fn decrypt_private_key(&self, kek_secret: &[u8]) -> Result<Zeroizing<Vec<u8>>> {
         match self {
             Self::V1(blob) => blob.decrypt_private_key(kek_secret),
@@ -85,7 +80,6 @@
         Ok(Self { kek_salt, encrypted_private_key: ciphertext.to_vec() })
     }
 
-    #[cfg(test)]
     fn decrypt_private_key(&self, kek_secret: &[u8]) -> Result<Zeroizing<Vec<u8>>> {
         let kek = hkdf::<32>(kek_secret, &self.kek_salt, KEK_INFO, Digester::sha512())?;
         let mut out = Zeroizing::new(vec![0u8; self.encrypted_private_key.len()]);
@@ -101,6 +95,15 @@
     }
 }
 
+pub(crate) fn decrypt_private_key(
+    encrypted_key_blob: &[u8],
+    kek_secret: &[u8],
+) -> Result<Zeroizing<Vec<u8>>> {
+    let key_blob: EncryptedKeyBlob = cbor_util::deserialize(encrypted_key_blob)?;
+    let private_key = key_blob.decrypt_private_key(kek_secret)?;
+    Ok(private_key)
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -127,8 +130,7 @@
     fn decrypting_keyblob_succeeds_with_the_same_kek() -> Result<()> {
         let encrypted_key_blob =
             cbor_util::serialize(&EncryptedKeyBlob::new(&TEST_KEY, &TEST_SECRET1)?)?;
-        let encrypted_key_blob: EncryptedKeyBlob = cbor_util::deserialize(&encrypted_key_blob)?;
-        let decrypted_key = encrypted_key_blob.decrypt_private_key(&TEST_SECRET1)?;
+        let decrypted_key = decrypt_private_key(&encrypted_key_blob, &TEST_SECRET1)?;
 
         assert_eq!(TEST_KEY, decrypted_key.as_slice());
         Ok(())
@@ -138,8 +140,7 @@
     fn decrypting_keyblob_fails_with_a_different_kek() -> Result<()> {
         let encrypted_key_blob =
             cbor_util::serialize(&EncryptedKeyBlob::new(&TEST_KEY, &TEST_SECRET1)?)?;
-        let encrypted_key_blob: EncryptedKeyBlob = cbor_util::deserialize(&encrypted_key_blob)?;
-        let err = encrypted_key_blob.decrypt_private_key(&TEST_SECRET2).unwrap_err();
+        let err = decrypt_private_key(&encrypted_key_blob, &TEST_SECRET2).unwrap_err();
 
         let expected_err: RequestProcessingError =
             Error::CallFailed(ApiName::EVP_AEAD_CTX_open, CipherError::BadDecrypt.into()).into();
diff --git a/service_vm/requests/src/lib.rs b/service_vm/requests/src/lib.rs
index e3c5794..b2db298 100644
--- a/service_vm/requests/src/lib.rs
+++ b/service_vm/requests/src/lib.rs
@@ -19,6 +19,7 @@
 extern crate alloc;
 
 mod api;
+mod client_vm;
 mod keyblob;
 mod pub_key;
 mod rkp;
diff --git a/service_vm/requests/src/rkp.rs b/service_vm/requests/src/rkp.rs
index 8d7d771..c2c13b3 100644
--- a/service_vm/requests/src/rkp.rs
+++ b/service_vm/requests/src/rkp.rs
@@ -48,7 +48,7 @@
 
     let maced_public_key = build_maced_public_key(ec_key.cose_public_key()?, hmac_key.as_ref())?;
     let key_blob =
-        EncryptedKeyBlob::new(ec_key.private_key()?.as_slice(), dice_artifacts.cdi_seal())?;
+        EncryptedKeyBlob::new(ec_key.ec_private_key()?.as_slice(), dice_artifacts.cdi_seal())?;
 
     let key_pair =
         EcdsaP256KeyPair { maced_public_key, key_blob: cbor_util::serialize(&key_blob)? };
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index 70e9be9..c6a30aa 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -168,23 +168,23 @@
 }
 
 impl Interface for VirtualizationService {
-    fn dump(&self, mut file: &File, _args: &[&CStr]) -> Result<(), StatusCode> {
+    fn dump(&self, writer: &mut dyn Write, _args: &[&CStr]) -> Result<(), StatusCode> {
         check_permission("android.permission.DUMP").or(Err(StatusCode::PERMISSION_DENIED))?;
         let state = &mut *self.state.lock().unwrap();
         let vms = state.vms();
-        writeln!(file, "Running {0} VMs:", vms.len()).or(Err(StatusCode::UNKNOWN_ERROR))?;
+        writeln!(writer, "Running {0} VMs:", vms.len()).or(Err(StatusCode::UNKNOWN_ERROR))?;
         for vm in vms {
-            writeln!(file, "VM CID: {}", vm.cid).or(Err(StatusCode::UNKNOWN_ERROR))?;
-            writeln!(file, "\tState: {:?}", vm.vm_state.lock().unwrap())
+            writeln!(writer, "VM CID: {}", vm.cid).or(Err(StatusCode::UNKNOWN_ERROR))?;
+            writeln!(writer, "\tState: {:?}", vm.vm_state.lock().unwrap())
                 .or(Err(StatusCode::UNKNOWN_ERROR))?;
-            writeln!(file, "\tPayload state {:?}", vm.payload_state())
+            writeln!(writer, "\tPayload state {:?}", vm.payload_state())
                 .or(Err(StatusCode::UNKNOWN_ERROR))?;
-            writeln!(file, "\tProtected: {}", vm.protected).or(Err(StatusCode::UNKNOWN_ERROR))?;
-            writeln!(file, "\ttemporary_directory: {}", vm.temporary_directory.to_string_lossy())
+            writeln!(writer, "\tProtected: {}", vm.protected).or(Err(StatusCode::UNKNOWN_ERROR))?;
+            writeln!(writer, "\ttemporary_directory: {}", vm.temporary_directory.to_string_lossy())
                 .or(Err(StatusCode::UNKNOWN_ERROR))?;
-            writeln!(file, "\trequester_uid: {}", vm.requester_uid)
+            writeln!(writer, "\trequester_uid: {}", vm.requester_uid)
                 .or(Err(StatusCode::UNKNOWN_ERROR))?;
-            writeln!(file, "\trequester_debug_pid: {}", vm.requester_debug_pid)
+            writeln!(writer, "\trequester_debug_pid: {}", vm.requester_debug_pid)
                 .or(Err(StatusCode::UNKNOWN_ERROR))?;
         }
         Ok(())
@@ -1114,6 +1114,7 @@
         .try_clone()
         .context("Failed to clone File from ParcelFileDescriptor")
         .or_binder_exception(ExceptionCode::BAD_PARCELABLE)
+        .map(File::from)
 }
 
 /// Converts an `&Option<ParcelFileDescriptor>` to an `Option<File>` by cloning the file.
diff --git a/virtualizationmanager/src/composite.rs b/virtualizationmanager/src/composite.rs
index fe17ff4..a4b7eae 100644
--- a/virtualizationmanager/src/composite.rs
+++ b/virtualizationmanager/src/composite.rs
@@ -93,7 +93,8 @@
                 .context("Invalid partition image file descriptor")?
                 .as_ref()
                 .try_clone()
-                .context("Failed to clone partition image file descriptor")?;
+                .context("Failed to clone partition image file descriptor")?
+                .into();
             let path = fd_path_for_file(&file);
             let size = get_partition_size(&file, &path)?;
             files.push(file);
diff --git a/virtualizationmanager/src/selinux.rs b/virtualizationmanager/src/selinux.rs
index 0485943..ba62b7f 100644
--- a/virtualizationmanager/src/selinux.rs
+++ b/virtualizationmanager/src/selinux.rs
@@ -17,11 +17,10 @@
 use anyhow::{anyhow, bail, Context, Result};
 use std::ffi::{CStr, CString};
 use std::fmt;
-use std::fs::File;
 use std::io;
 use std::ops::Deref;
+use std::os::fd::AsRawFd;
 use std::os::raw::c_char;
-use std::os::unix::io::AsRawFd;
 use std::ptr;
 
 // Partially copied from system/security/keystore2/selinux/src/lib.rs
@@ -102,7 +101,7 @@
     }
 }
 
-pub fn getfilecon(file: &File) -> Result<SeContext> {
+pub fn getfilecon<F: AsRawFd>(file: &F) -> Result<SeContext> {
     let fd = file.as_raw_fd();
     let mut con: *mut c_char = ptr::null_mut();
     // SAFETY: the returned pointer `con` is wrapped in SeContext::Raw which is freed with
diff --git a/virtualizationservice/vfio_handler/src/aidl.rs b/virtualizationservice/vfio_handler/src/aidl.rs
index 2968ff9..63f19c6 100644
--- a/virtualizationservice/vfio_handler/src/aidl.rs
+++ b/virtualizationservice/vfio_handler/src/aidl.rs
@@ -282,11 +282,13 @@
         .or_binder_exception(ExceptionCode::ILLEGAL_STATE)?;
     let buffer = read_values(dtbo_img_file, dt_size, entry.dt_offset.get().into())?;
 
-    let mut dtbo_fd = dtbo_fd
-        .as_ref()
-        .try_clone()
-        .context("Failed to clone File from ParcelFileDescriptor")
-        .or_binder_exception(ExceptionCode::BAD_PARCELABLE)?;
+    let mut dtbo_fd = File::from(
+        dtbo_fd
+            .as_ref()
+            .try_clone()
+            .context("Failed to create File from ParcelFileDescriptor")
+            .or_binder_exception(ExceptionCode::BAD_PARCELABLE)?,
+    );
 
     dtbo_fd
         .write_all(&buffer)
diff --git a/vm_payload/Android.bp b/vm_payload/Android.bp
index 7f2b9df..286612c 100644
--- a/vm_payload/Android.bp
+++ b/vm_payload/Android.bp
@@ -8,7 +8,7 @@
     crate_name: "vm_payload",
     defaults: ["avf_build_flags_rust"],
     visibility: ["//visibility:private"],
-    srcs: ["src/*.rs"],
+    srcs: ["src/lib.rs"],
     include_dirs: ["include"],
     prefer_rlib: true,
     rustlibs: [
diff --git a/vm_payload/include/vm_payload.h b/vm_payload/include/vm_payload.h
index 2dfa2cb..78cd80d 100644
--- a/vm_payload/include/vm_payload.h
+++ b/vm_payload/include/vm_payload.h
@@ -19,7 +19,6 @@
 #include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
-#include <stdnoreturn.h>
 #include <sys/cdefs.h>
 
 #include "vm_main.h"
@@ -43,11 +42,14 @@
     /** The remote attestation completes successfully. */
     ATTESTATION_OK = 0,
 
-    /** The remote attestation has failed due to an unspecified cause. */
-    ATTESTATION_UNKNOWN_ERROR = -10000,
-
     /** The challenge size is not between 0 and 64. */
     ATTESTATION_ERROR_INVALID_CHALLENGE = -10001,
+
+    /** Failed to attest the VM. Please retry at a later time. */
+    ATTESTATION_ERROR_ATTESTATION_FAILED = -10002,
+
+    /** Remote attestation is not supported in the current environment. */
+    ATTESTATION_ERROR_UNSUPPORTED = -10003,
 } attestation_status_t;
 
 /**
@@ -78,9 +80,9 @@
  * callback will be called at most once.
  * \param param parameter to be passed to the `on_ready` callback.
  */
-noreturn void AVmPayload_runVsockRpcServer(AIBinder* _Nonnull service, uint32_t port,
-                                           void (*_Nullable on_ready)(void* _Nullable param),
-                                           void* _Nullable param);
+__attribute__((noreturn)) void AVmPayload_runVsockRpcServer(
+        AIBinder* _Nonnull service, uint32_t port,
+        void (*_Nullable on_ready)(void* _Nullable param), void* _Nullable param);
 
 /**
  * Returns all or part of a 32-byte secret that is bound to this unique VM
diff --git a/vm_payload/src/api.rs b/vm_payload/src/api.rs
index 64f8d6a..c76f2d3 100644
--- a/vm_payload/src/api.rs
+++ b/vm_payload/src/api.rs
@@ -21,7 +21,7 @@
 use anyhow::{bail, ensure, Context, Result};
 use binder::{
     unstable_api::{new_spibinder, AIBinder},
-    Strong,
+    Strong, ExceptionCode,
 };
 use lazy_static::lazy_static;
 use log::{error, info, Level};
@@ -296,15 +296,24 @@
         // `challenge_size` bytes and `challenge_size` is not zero.
         unsafe { std::slice::from_raw_parts(challenge, challenge_size) }
     };
-    let attestation_res = unwrap_or_abort(try_request_attestation(challenge));
-    *res = Box::into_raw(Box::new(attestation_res));
-    attestation_status_t::ATTESTATION_OK
+    let service = unwrap_or_abort(get_vm_payload_service());
+    match service.requestAttestation(challenge) {
+        Ok(attestation_res) => {
+            *res = Box::into_raw(Box::new(attestation_res));
+            attestation_status_t::ATTESTATION_OK
+        }
+        Err(e) => {
+            error!("Remote attestation failed: {e:?}");
+            binder_status_to_attestation_status(e)
+        }
+    }
 }
 
-fn try_request_attestation(public_key: &[u8]) -> Result<AttestationResult> {
-    get_vm_payload_service()?
-        .requestAttestation(public_key)
-        .context("Failed to request attestation")
+fn binder_status_to_attestation_status(status: binder::Status) -> attestation_status_t {
+    match status.exception_code() {
+        ExceptionCode::UNSUPPORTED_OPERATION => attestation_status_t::ATTESTATION_ERROR_UNSUPPORTED,
+        _ => attestation_status_t::ATTESTATION_ERROR_ATTESTATION_FAILED,
+    }
 }
 
 /// Converts the return value from `AVmPayload_requestAttestation` to a text string
@@ -320,8 +329,12 @@
         attestation_status_t::ATTESTATION_ERROR_INVALID_CHALLENGE => {
             CStr::from_bytes_with_nul(b"The challenge size is not between 0 and 64.\0").unwrap()
         }
-        _ => CStr::from_bytes_with_nul(
-            b"The remote attestation has failed due to an unspecified cause.\0",
+        attestation_status_t::ATTESTATION_ERROR_ATTESTATION_FAILED => {
+            CStr::from_bytes_with_nul(b"Failed to attest the VM. Please retry at a later time.\0")
+                .unwrap()
+        }
+        attestation_status_t::ATTESTATION_ERROR_UNSUPPORTED => CStr::from_bytes_with_nul(
+            b"Remote attestation is not supported in the current environment.\0",
         )
         .unwrap(),
     };
diff --git a/vm_payload/src/lib.rs b/vm_payload/src/lib.rs
index e305769..9e10895 100644
--- a/vm_payload/src/lib.rs
+++ b/vm_payload/src/lib.rs
@@ -18,7 +18,7 @@
 
 pub use api::{
     AVmAttestationResult_free, AVmAttestationResult_getCertificateAt,
-    AVmAttestationResult_getCertificatesCount, AVmAttestationResult_getPrivateKey,
+    AVmAttestationResult_getCertificateCount, AVmAttestationResult_getPrivateKey,
     AVmAttestationResult_resultToString, AVmAttestationResult_sign,
     AVmPayload_getDiceAttestationCdi, AVmPayload_getDiceAttestationChain,
     AVmPayload_getVmInstanceSecret, AVmPayload_notifyPayloadReady, AVmPayload_requestAttestation,
diff --git a/vmbase/src/heap.rs b/vmbase/src/heap.rs
index ec03d38..99c06aa 100644
--- a/vmbase/src/heap.rs
+++ b/vmbase/src/heap.rs
@@ -86,6 +86,21 @@
 }
 
 #[no_mangle]
+unsafe extern "C" fn __memset_chk(
+    dest: *mut c_void,
+    val: u8,
+    len: usize,
+    destlen: usize,
+) -> *mut c_void {
+    assert!(len <= destlen, "memset buffer overflow detected");
+    // SAFETY: the caller guarantees that `dest` is valid for writes of `len` bytes.
+    unsafe {
+        ptr::write_bytes(dest, val, len);
+    }
+    dest
+}
+
+#[no_mangle]
 /// SAFETY: ptr must be null or point to a currently-allocated block returned by allocate (either
 /// directly or via malloc or calloc). Note that this function is called directly from C, so we have
 /// to trust that the C code is doing the right thing; there are checks below which will catch some
diff --git a/vmbase/src/memory/dbm.rs b/vmbase/src/memory/dbm.rs
index 401022e..108cd5d 100644
--- a/vmbase/src/memory/dbm.rs
+++ b/vmbase/src/memory/dbm.rs
@@ -14,7 +14,7 @@
 
 //! Hardware management of the access flag and dirty state.
 
-use super::page_table::{is_leaf_pte, PageTable};
+use super::page_table::PageTable;
 use super::util::flush_region;
 use crate::{dsb, isb, read_sysreg, tlbi, write_sysreg};
 use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion};
@@ -52,14 +52,10 @@
 /// Flushes a memory range the descriptor refers to, if the descriptor is in writable-dirty state.
 pub(super) fn flush_dirty_range(
     va_range: &MemoryRegion,
-    desc: &mut Descriptor,
-    level: usize,
+    desc: &Descriptor,
+    _level: usize,
 ) -> Result<(), ()> {
-    // Only flush ranges corresponding to dirty leaf PTEs.
     let flags = desc.flags().ok_or(())?;
-    if !is_leaf_pte(&flags, level) {
-        return Ok(());
-    }
     if !flags.contains(Attributes::READ_ONLY) {
         flush_region(va_range.start().0, va_range.len());
     }
@@ -71,12 +67,9 @@
 pub(super) fn mark_dirty_block(
     va_range: &MemoryRegion,
     desc: &mut Descriptor,
-    level: usize,
+    _level: usize,
 ) -> Result<(), ()> {
     let flags = desc.flags().ok_or(())?;
-    if !is_leaf_pte(&flags, level) {
-        return Ok(());
-    }
     if flags.contains(Attributes::DBM) {
         assert!(flags.contains(Attributes::READ_ONLY), "unexpected PTE writable state");
         desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index e067e96..dc346e7 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -16,7 +16,7 @@
 
 use crate::read_sysreg;
 use aarch64_paging::idmap::IdMap;
-use aarch64_paging::paging::{Attributes, MemoryRegion, PteUpdater};
+use aarch64_paging::paging::{Attributes, Constraints, Descriptor, MemoryRegion};
 use aarch64_paging::MapError;
 use core::result;
 
@@ -83,7 +83,9 @@
     /// code being currently executed. Otherwise, the Rust execution model (on which the borrow
     /// checker relies) would be violated.
     pub unsafe fn activate(&mut self) {
-        self.idmap.activate()
+        // SAFETY: the caller of this unsafe function asserts that switching to a different
+        // translation is safe
+        unsafe { self.idmap.activate() }
     }
 
     /// Maps the given range of virtual addresses to the physical addresses as lazily mapped
@@ -107,7 +109,15 @@
     /// Maps the given range of virtual addresses to the physical addresses as non-executable,
     /// read-only and writable-clean normal memory.
     pub fn map_data_dbm(&mut self, range: &MemoryRegion) -> Result<()> {
-        self.idmap.map_range(range, DATA_DBM)
+        // Map the region down to pages to minimize the size of the regions that will be marked
+        // dirty once a store hits them, but also to ensure that we can clear the read-only
+        // attribute while the mapping is live without causing break-before-make (BBM) violations.
+        // The latter implies that we must avoid the use of the contiguous hint as well.
+        self.idmap.map_range_with_constraints(
+            range,
+            DATA_DBM,
+            Constraints::NO_BLOCK_MAPPINGS | Constraints::NO_CONTIGUOUS_HINT,
+        )
     }
 
     /// Maps the given range of virtual addresses to the physical addresses as read-only
@@ -124,18 +134,20 @@
 
     /// Applies the provided updater function to a number of PTEs corresponding to a given memory
     /// range.
-    pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<()> {
+    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<()>
+    where
+        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> result::Result<(), ()>,
+    {
         self.idmap.modify_range(range, f)
     }
-}
 
-/// Checks whether a PTE at given level is a page or block descriptor.
-#[inline]
-pub(super) fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
-    const LEAF_PTE_LEVEL: usize = 3;
-    if flags.contains(Attributes::TABLE_OR_PAGE) {
-        level == LEAF_PTE_LEVEL
-    } else {
-        level < LEAF_PTE_LEVEL
+    /// Applies the provided callback function to a number of PTEs corresponding to a given memory
+    /// range.
+    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &F) -> Result<()>
+    where
+        F: Fn(&MemoryRegion, &Descriptor, usize) -> result::Result<(), ()>,
+    {
+        let mut callback = |mr: &MemoryRegion, d: &Descriptor, l: usize| f(mr, d, l);
+        self.idmap.walk_range(range, &mut callback)
     }
 }
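
To make the new split concrete, here is a sketch (not taken from the change) of how callers might use the two entry points: walk_range borrows the table shared and hands the callback an immutable Descriptor, which suits inspection passes such as flush_dirty_range, while modify_range keeps the mutable Descriptor for in-place updates. The helper names, the vmbase::memory::PageTable import path and the error mapping are assumptions made only to keep the example self-contained.

```rust
use core::cell::Cell;

use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion};
// Path assumed for the sketch; `PageTable` is the wrapper defined in this file.
use vmbase::memory::PageTable;

/// Read-only pass: count descriptors in `range` that currently have VALID set.
fn count_valid(page_table: &PageTable, range: &MemoryRegion) -> Result<usize, ()> {
    let valid = Cell::new(0usize);
    page_table
        .walk_range(
            range,
            &|_va: &MemoryRegion, desc: &Descriptor, _level: usize| -> Result<(), ()> {
                if desc.flags().ok_or(())?.contains(Attributes::VALID) {
                    valid.set(valid.get() + 1);
                }
                Ok(())
            },
        )
        .map_err(|_| ())?;
    Ok(valid.get())
}

/// Mutating pass: return dirty DBM-tracked descriptors in `range` to the
/// writable-clean state, e.g. after their contents have been flushed.
fn set_clean(page_table: &mut PageTable, range: &MemoryRegion) -> Result<(), ()> {
    page_table
        .modify_range(
            range,
            &|_va: &MemoryRegion, desc: &mut Descriptor, _level: usize| -> Result<(), ()> {
                let flags = desc.flags().ok_or(())?;
                if flags.contains(Attributes::DBM) && !flags.contains(Attributes::READ_ONLY) {
                    // Attribute-only change to a live entry: no break-before-make
                    // sequence is needed, though a TLB invalidation is still
                    // required before the stricter permission takes effect.
                    desc.modify_flags(Attributes::READ_ONLY, Attributes::empty());
                }
                Ok(())
            },
        )
        .map_err(|_| ())
}
```

Note that walk_range itself wraps the shared Fn callback in a local FnMut closure before handing it to aarch64_paging, which expects a mutable callback; callers only ever see the immutable signature shown above.
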
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 6c8a844..dd433d4 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -16,12 +16,14 @@
 
 use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
 use super::error::MemoryTrackerError;
-use super::page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
+use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
 use super::util::{page_4kb_of, virt_to_phys};
 use crate::dsb;
 use crate::exceptions::HandleExceptionError;
 use crate::util::RangeExt as _;
-use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
+use aarch64_paging::paging::{
+    Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress, BITS_PER_LEVEL, PAGE_SIZE,
+};
 use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
 use alloc::boxed::Box;
 use alloc::vec::Vec;
@@ -253,7 +255,7 @@
         if get_mmio_guard().is_some() {
             for range in &self.mmio_regions {
                 self.page_table
-                    .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
+                    .walk_range(&get_va_range(range), &mmio_guard_unmap_page)
                     .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
             }
         }
@@ -319,14 +321,24 @@
     /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
     fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
         let page_start = VirtualAddress(page_4kb_of(addr.0));
+        assert_eq!(page_start.0 % MMIO_GUARD_GRANULE_SIZE, 0);
         let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
         let mmio_guard = get_mmio_guard().unwrap();
+        // This must be safe and free from break-before-make (BBM) violations, given that the
+        // initial lazy mapping has the valid bit cleared, and each valid descriptor subsequently
+        // created inside the mapping has the same size and alignment.
         self.page_table
-            .modify_range(&page_range, &verify_lazy_mapped_block)
+            .modify_range(&page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
+                let flags = desc.flags().expect("Unsupported PTE flags set");
+                if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
+                    desc.modify_flags(Attributes::VALID, Attributes::empty());
+                    Ok(())
+                } else {
+                    Err(())
+                }
+            })
             .map_err(|_| MemoryTrackerError::InvalidPte)?;
-        mmio_guard.map(page_start.0)?;
-        // Maps a single device page, breaking up block mappings if necessary.
-        self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
+        Ok(mmio_guard.map(page_start.0)?)
     }
 
     /// Flush all memory regions marked as writable-dirty.
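
For orientation on handle_mmio_fault above: a page in a lazily mapped MMIO region is staged as an invalid descriptor tagged with MMIO_LAZY_MAP_FLAG, and the first access moves it to a valid, MMIO-guard-mapped state without ever changing the entry's size, which is what keeps the update free of break-before-make concerns. A hypothetical classifier of those states follows (illustration only; the software flag is passed in as a parameter because MMIO_LAZY_MAP_FLAG is internal to this module).

```rust
use aarch64_paging::paging::Attributes;

/// States of a page inside a lazily mapped MMIO region (illustration only).
#[derive(Debug, PartialEq, Eq)]
enum LazyMmioState {
    /// Staged but never accessed: lazy-map flag set, VALID clear, not yet
    /// MMIO-guard mapped. handle_mmio_fault moves this to GuardMapped.
    NotAccessed,
    /// Accessed at least once: VALID set and MMIO-guard mapped; the only state
    /// that mmio_guard_unmap_page has to undo.
    GuardMapped,
    /// Not part of a lazily mapped MMIO region at all.
    NotLazyMmio,
}

fn classify(flags: Attributes, lazy_flag: Attributes) -> LazyMmioState {
    if !flags.contains(lazy_flag) {
        LazyMmioState::NotLazyMmio
    } else if flags.contains(Attributes::VALID) {
        LazyMmioState::GuardMapped
    } else {
        LazyMmioState::NotAccessed
    }
}
```

handle_mmio_fault only accepts the NotAccessed state and reports InvalidPte otherwise; mmio_guard_unmap_page, in the hunk further down, only needs to undo GuardMapped entries.
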
@@ -340,7 +352,7 @@
         // Now flush writable-dirty pages in those regions.
         for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
             self.page_table
-                .modify_range(&get_va_range(range), &flush_dirty_range)
+                .walk_range(&get_va_range(range), &flush_dirty_range)
                 .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
         }
         Ok(())
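
Ahead of the hunk below, which swaps the granule-size assertion in mmio_guard_unmap_page for a level check: the expected level follows from how many address bits one MMIO guard granule covers beyond a page. A worked version of that arithmetic, where the 4 KiB granule value is an assumption and PAGE_SIZE and BITS_PER_LEVEL mirror the aarch64_paging constants:

```rust
// Illustrative constants mirroring aarch64_paging's 4 KiB translation granule.
const PAGE_SIZE: usize = 4 << 10;
const BITS_PER_LEVEL: usize = 9;
// Assumed value: the MMIO guard granule is taken to be one 4 KiB page.
const MMIO_GUARD_GRANULE_SIZE: usize = PAGE_SIZE;

// Same computation as MMIO_GUARD_GRANULE_LEVEL in mmio_guard_unmap_page below:
// a 4 KiB granule gives SHIFT = 0, so the descriptor must sit at level 3 (a
// page); a hypothetical 2 MiB granule would give SHIFT = 9 and level 2 (a block).
const SHIFT: u32 = MMIO_GUARD_GRANULE_SIZE.ilog2() - PAGE_SIZE.ilog2();
const LEVEL: usize = 3 - (SHIFT as usize / BITS_PER_LEVEL);
const _: () = assert!(LEVEL == 3);
```
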
@@ -467,33 +479,13 @@
     }
 }
 
-/// Checks whether block flags indicate it should be MMIO guard mapped.
-fn verify_lazy_mapped_block(
-    _range: &VaRange,
-    desc: &mut Descriptor,
-    level: usize,
-) -> result::Result<(), ()> {
-    let flags = desc.flags().expect("Unsupported PTE flags set");
-    if !is_leaf_pte(&flags, level) {
-        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
-    }
-    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
-        Ok(())
-    } else {
-        Err(())
-    }
-}
-
 /// MMIO guard unmaps page
 fn mmio_guard_unmap_page(
     va_range: &VaRange,
-    desc: &mut Descriptor,
+    desc: &Descriptor,
     level: usize,
 ) -> result::Result<(), ()> {
     let flags = desc.flags().expect("Unsupported PTE flags set");
-    if !is_leaf_pte(&flags, level) {
-        return Ok(());
-    }
     // This function will be called on an address range that corresponds to a device. Only if a
     // page has been accessed (written to or read from) will it contain the VALID flag and be MMIO
     // guard mapped. Therefore, we can skip unmapping invalid pages; they were never MMIO guard
@@ -503,9 +495,11 @@
             flags.contains(MMIO_LAZY_MAP_FLAG),
             "Attempting MMIO guard unmap for non-device pages"
         );
+        const MMIO_GUARD_GRANULE_SHIFT: u32 = MMIO_GUARD_GRANULE_SIZE.ilog2() - PAGE_SIZE.ilog2();
+        const MMIO_GUARD_GRANULE_LEVEL: usize =
+            3 - (MMIO_GUARD_GRANULE_SHIFT as usize / BITS_PER_LEVEL);
         assert_eq!(
-            va_range.len(),
-            MMIO_GUARD_GRANULE_SIZE,
+            level, MMIO_GUARD_GRANULE_LEVEL,
             "Failed to break down block mapping before MMIO guard mapping"
         );
         let page_base = va_range.start().0;