Merge "Use VM reference DT instead of VM base DTBO in pvmfw config v1.2" into main
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
index ffc1697..0b6137b 100644
--- a/apex/sign_virt_apex.py
+++ b/apex/sign_virt_apex.py
@@ -27,6 +27,7 @@
 - lpmake, lpunpack, simg2img, img2simg, initrd_bootconfig
 """
 import argparse
+import binascii
 import builtins
 import hashlib
 import os
@@ -207,56 +208,116 @@
     return info, descriptors
 
 
-# Look up a list of (key, value) with a key. Returns the list of value(s) with the matching key.
-# The order of those values is maintained.
-def LookUp(pairs, key):
+def find_all_values_by_key(pairs, key):
+    """Find all the values of the key in the pairs."""
     return [v for (k, v) in pairs if k == key]
 
 # Extract properties from the descriptors of original vbmeta image,
 # append to command as parameter.
 def AppendPropArgument(cmd, descriptors):
-    for prop in LookUp(descriptors, 'Prop'):
+    for prop in find_all_values_by_key(descriptors, 'Prop'):
         cmd.append('--prop')
         result = re.match(r"(.+) -> '(.+)'", prop)
         cmd.append(result.group(1) + ":" + result.group(2))
 
-def AddHashFooter(args, key, image_path, partition_name, additional_descriptors=None):
+
+def check_resigned_image_avb_info(image_path, original_info, original_descriptors, args):
+    updated_info, updated_descriptors = AvbInfo(args, image_path)
+    assert original_info is not None, f'no avbinfo on original image: {image_path}'
+    assert updated_info is not None, f'no avbinfo on resigned image: {image_path}'
+    assert_different_value(original_info, updated_info, "Public key (sha1)", image_path)
+    updated_public_key = updated_info.pop("Public key (sha1)")
+    if not hasattr(check_resigned_image_avb_info, "new_public_key"):
+        check_resigned_image_avb_info.new_public_key = updated_public_key
+    else:
+        assert check_resigned_image_avb_info.new_public_key == updated_public_key, \
+            "All images should be resigned with the same public key. Expected public key (sha1):" \
+            f" {check_resigned_image_avb_info.new_public_key}, actual public key (sha1): " \
+            f"{updated_public_key}, Path: {image_path}"
+    original_info.pop("Public key (sha1)")
+    assert original_info == updated_info, \
+        f"Original info and updated info should be the same for {image_path}. " \
+        f"Original info: {original_info}, updated info: {updated_info}"
+
+    # Verify the descriptors of the original and updated images.
+    assert len(original_descriptors) == len(updated_descriptors), \
+        f"Number of descriptors should be the same for {image_path}. " \
+        f"Original descriptors: {original_descriptors}, updated descriptors: {updated_descriptors}"
+    original_prop_descriptors = sorted(find_all_values_by_key(original_descriptors, "Prop"))
+    updated_prop_descriptors = sorted(find_all_values_by_key(updated_descriptors, "Prop"))
+    assert original_prop_descriptors == updated_prop_descriptors, \
+        f"Prop descriptors should be the same for {image_path}. " \
+        f"Original prop descriptors: {original_prop_descriptors}, " \
+        f"updated prop descriptors: {updated_prop_descriptors}"
+
+    # Remove digest from hash descriptors before comparing, since some digests should change.
+    original_hash_descriptors = extract_hash_descriptors(original_descriptors, drop_digest)
+    updated_hash_descriptors = extract_hash_descriptors(updated_descriptors, drop_digest)
+    assert original_hash_descriptors == updated_hash_descriptors, \
+        f"Hash descriptors' parameters should be the same for {image_path}. " \
+        f"Original hash descriptors: {original_hash_descriptors}, " \
+        f"updated hash descriptors: {updated_hash_descriptors}"
+
+def drop_digest(descriptor):
+    return {k: v for k, v in descriptor.items() if k != "Digest"}
+
+def AddHashFooter(args, key, image_path, additional_images=()):
     if os.path.basename(image_path) in args.key_overrides:
         key = args.key_overrides[os.path.basename(image_path)]
     info, descriptors = AvbInfo(args, image_path)
-    if info:
-        image_size = ReadBytesSize(info['Image size'])
-        algorithm = info['Algorithm']
-        partition_size = str(image_size)
+    assert info is not None, f'no avbinfo: {image_path}'
 
-        cmd = ['avbtool', 'add_hash_footer',
-               '--key', key,
-               '--algorithm', algorithm,
-               '--partition_name', partition_name,
-               '--partition_size', partition_size,
-               '--image', image_path]
-        AppendPropArgument(cmd, descriptors)
-        if args.signing_args:
-            cmd.extend(shlex.split(args.signing_args))
-        if additional_descriptors:
-            for image in additional_descriptors:
-                cmd.extend(['--include_descriptors_from_image', image])
+    # Extract hash descriptor of original image.
+    hash_descriptors_original = extract_hash_descriptors(descriptors, drop_digest)
+    for additional_image in additional_images:
+        _, additional_desc = AvbInfo(args, additional_image)
+        hash_descriptors = extract_hash_descriptors(additional_desc, drop_digest)
+        for k, v in hash_descriptors.items():
+            assert v == hash_descriptors_original[k], \
+                f"Hash descriptor of {k} in {additional_image} and {image_path} should be " \
+                f"the same. {additional_image}: {v}, {image_path}: {hash_descriptors_original[k]}"
+            del hash_descriptors_original[k]
+    assert len(hash_descriptors_original) == 1, \
+        f"Only one hash descriptor is expected for {image_path} after removing " \
+        f"additional images. Hash descriptors: {hash_descriptors_original}"
+    [(original_image_partition_name, original_image_descriptor)] = hash_descriptors_original.items()
+    assert info["Original image size"] == original_image_descriptor["Image Size"], \
+        f"Original image size should be the same as the image size in the hash descriptor " \
+        f"for {image_path}. Original image size: {info['Original image size']}, " \
+        f"image size in the hash descriptor: {original_image_descriptor['Image Size']}"
 
-        if 'Rollback Index' in info:
-            cmd.extend(['--rollback_index', info['Rollback Index']])
-        RunCommand(args, cmd)
+    partition_size = str(ReadBytesSize(info['Image size']))
+    algorithm = info['Algorithm']
+    original_image_salt = original_image_descriptor['Salt']
 
+    cmd = ['avbtool', 'add_hash_footer',
+           '--key', key,
+           '--algorithm', algorithm,
+           '--partition_name', original_image_partition_name,
+           '--salt', original_image_salt,
+           '--partition_size', partition_size,
+           '--image', image_path]
+    AppendPropArgument(cmd, descriptors)
+    if args.signing_args:
+        cmd.extend(shlex.split(args.signing_args))
+    for additional_image in additional_images:
+        cmd.extend(['--include_descriptors_from_image', additional_image])
+    cmd.extend(['--rollback_index', info['Rollback Index']])
+
+    RunCommand(args, cmd)
+    check_resigned_image_avb_info(image_path, info, descriptors, args)
 
 def AddHashTreeFooter(args, key, image_path):
     if os.path.basename(image_path) in args.key_overrides:
         key = args.key_overrides[os.path.basename(image_path)]
     info, descriptors = AvbInfo(args, image_path)
     if info:
-        descriptor = LookUp(descriptors, 'Hashtree descriptor')[0]
+        descriptor = find_all_values_by_key(descriptors, 'Hashtree descriptor')[0]
         image_size = ReadBytesSize(info['Image size'])
         algorithm = info['Algorithm']
         partition_name = descriptor['Partition Name']
         hash_algorithm = descriptor['Hash Algorithm']
+        salt = descriptor['Salt']
         partition_size = str(image_size)
         cmd = ['avbtool', 'add_hashtree_footer',
                '--key', key,
@@ -265,11 +326,13 @@
                '--partition_size', partition_size,
                '--do_not_generate_fec',
                '--hash_algorithm', hash_algorithm,
+               '--salt', salt,
                '--image', image_path]
         AppendPropArgument(cmd, descriptors)
         if args.signing_args:
             cmd.extend(shlex.split(args.signing_args))
         RunCommand(args, cmd)
+        check_resigned_image_avb_info(image_path, info, descriptors, args)
 
 
 def UpdateVbmetaBootconfig(args, initrds, vbmeta_img):
@@ -385,6 +448,7 @@
             cmd.extend(shlex.split(args.signing_args))
 
         RunCommand(args, cmd)
+        check_resigned_image_avb_info(vbmeta_img, info, descriptors, args)
         # libavb expects to be able to read the maximum vbmeta size, so we must provide a partition
         # which matches this or the read will fail.
         with open(vbmeta_img, 'a', encoding='utf8') as f:
@@ -413,10 +477,11 @@
         RunCommand(args, cmd)
 
 
-def GenVbmetaImage(args, image, output, partition_name):
+def GenVbmetaImage(args, image, output, partition_name, salt):
     cmd = ['avbtool', 'add_hash_footer', '--dynamic_partition_size',
            '--do_not_append_vbmeta_image',
            '--partition_name', partition_name,
+           '--salt', salt,
            '--image', image,
            '--output_vbmeta_image', output]
     RunCommand(args, cmd)
@@ -500,20 +565,23 @@
         initrd_normal_file = files[initrd_normal]
         initrd_debug_file = files[initrd_debug]
 
+        _, kernel_image_descriptors = AvbInfo(args, kernel_file)
+        salts = extract_hash_descriptors(
+            kernel_image_descriptors, lambda descriptor: descriptor['Salt'])
         initrd_normal_hashdesc = tempfile.NamedTemporaryFile(delete=False).name
         initrd_debug_hashdesc = tempfile.NamedTemporaryFile(delete=False).name
         initrd_n_f = Async(GenVbmetaImage, args, initrd_normal_file,
-                           initrd_normal_hashdesc, "initrd_normal",
+                           initrd_normal_hashdesc, "initrd_normal", salts["initrd_normal"],
                            wait=[vbmeta_bc_f] if vbmeta_bc_f is not None else [])
         initrd_d_f = Async(GenVbmetaImage, args, initrd_debug_file,
-                           initrd_debug_hashdesc, "initrd_debug",
+                           initrd_debug_hashdesc, "initrd_debug", salts["initrd_debug"],
                            wait=[vbmeta_bc_f] if vbmeta_bc_f is not None else [])
-        Async(AddHashFooter, args, key, kernel_file, partition_name="boot",
-              additional_descriptors=[
-                  initrd_normal_hashdesc, initrd_debug_hashdesc],
+        return Async(AddHashFooter, args, key, kernel_file,
+              additional_images=[initrd_normal_hashdesc, initrd_debug_hashdesc],
               wait=[initrd_n_f, initrd_d_f])
 
-    resign_kernel('kernel', 'initrd_normal.img', 'initrd_debuggable.img')
+    _, original_kernel_descriptors = AvbInfo(args, files['kernel'])
+    resign_kernel_task = resign_kernel('kernel', 'initrd_normal.img', 'initrd_debuggable.img')
 
     for ver in gki_versions:
         if f'gki-{ver}_kernel' in files:
@@ -524,8 +592,91 @@
 
     # Re-sign rialto if it exists. Rialto only exists in arm64 environment.
     if os.path.exists(files['rialto']):
-        Async(AddHashFooter, args, key, files['rialto'], partition_name='boot')
+        update_initrd_digests_task = Async(
+            update_initrd_digests_in_rialto, original_kernel_descriptors, args,
+            files, wait=[resign_kernel_task])
+        Async(resign_rialto, args, key, files['rialto'], wait=[update_initrd_digests_task])
 
+def resign_rialto(args, key, rialto_path):
+    _, original_descriptors = AvbInfo(args, rialto_path)
+    AddHashFooter(args, key, rialto_path)
+
+    # Verify the new AVB footer.
+    updated_info, updated_descriptors = AvbInfo(args, rialto_path)
+    assert len(updated_descriptors) == 2, \
+        f"There should be two descriptors for rialto. Updated descriptors: {updated_descriptors}"
+    updated_prop = find_all_values_by_key(updated_descriptors, "Prop")
+    assert len(updated_prop) == 1, "There should be only one Prop descriptor for rialto. " \
+        f"Updated descriptors: {updated_descriptors}"
+    assert updated_info["Rollback Index"] != "0", "Rollback index should not be zero for rialto."
+
+    # Verify the only hash descriptor of rialto.
+    updated_hash_descriptors = extract_hash_descriptors(updated_descriptors)
+    assert len(updated_hash_descriptors) == 1, \
+        f"There should be only one hash descriptor for rialto. " \
+        f"Updated hash descriptors: {updated_hash_descriptors}"
+    # Since salt is not updated, the change of digest reflects the change of content of rialto
+    # kernel.
+    if not args.do_not_update_bootconfigs:
+        [(_, original_descriptor)] = extract_hash_descriptors(original_descriptors).items()
+        [(_, updated_descriptor)] = updated_hash_descriptors.items()
+        assert_different_value(original_descriptor, updated_descriptor, "Digest",
+                               "rialto_hash_descriptor")
+
+def assert_different_value(original, updated, key, context):
+    assert original[key] != updated[key], \
+        f"Value of '{key}' should change for '{context}'. " \
+        f"Original value: {original[key]}, updated value: {updated[key]}"
+
+def update_initrd_digests_in_rialto(original_descriptors, args, files):
+    _, updated_descriptors = AvbInfo(args, files['kernel'])
+
+    original_digests = extract_hash_descriptors(
+        original_descriptors, lambda x: binascii.unhexlify(x['Digest']))
+    updated_digests = extract_hash_descriptors(
+        updated_descriptors, lambda x: binascii.unhexlify(x['Digest']))
+    assert original_digests.pop("boot") == updated_digests.pop("boot"), \
+        "Hash descriptor of boot should not change for kernel. " \
+        f"Original descriptors: {original_descriptors}, " \
+        f"updated descriptors: {updated_descriptors}"
+
+    # Update the hashes of initrd_normal and initrd_debug in rialto if the
+    # bootconfigs in them are updated.
+    if args.do_not_update_bootconfigs:
+        return
+
+    with open(files['rialto'], "rb") as file:
+        content = file.read()
+
+    # Check that the original and updated digests are different before updating rialto.
+    partition_names = {'initrd_normal', 'initrd_debug'}
+    assert set(original_digests.keys()) == set(updated_digests.keys()) == partition_names, \
+        f"Original digests' partitions should be {partition_names}. " \
+        f"Original digests: {original_digests}. Updated digests: {updated_digests}"
+    assert set(original_digests.values()).isdisjoint(updated_digests.values()), \
+        "Digests of initrd_normal and initrd_debug should change. " \
+        f"Original descriptors: {original_descriptors}, " \
+        f"updated descriptors: {updated_descriptors}"
+
+    for partition_name, original_digest in original_digests.items():
+        updated_digest = updated_digests[partition_name]
+        assert len(original_digest) == len(updated_digest), \
+            f"Length of original_digest and updated_digest must be the same for {partition_name}." \
+            f" Original digest: {original_digest}, updated digest: {updated_digest}"
+
+        new_content = content.replace(original_digest, updated_digest)
+        assert len(new_content) == len(content), \
+            "Length of new_content and content must be the same."
+        assert new_content != content, \
+            f"original digest of the partition {partition_name} not found."
+        content = new_content
+
+    with open(files['rialto'], "wb") as file:
+        file.write(content)
+
+def extract_hash_descriptors(descriptors, f=lambda x: x):
+    return {desc["Partition Name"]: f(desc) for desc in
+            find_all_values_by_key(descriptors, "Hash descriptor")}
 
 def VerifyVirtApex(args):
     key = args.key
diff --git a/apkdmverity/src/main.rs b/apkdmverity/src/main.rs
index 0ecb0ea..d2f88ae 100644
--- a/apkdmverity/src/main.rs
+++ b/apkdmverity/src/main.rs
@@ -161,7 +161,7 @@
 #[cfg(test)]
 mod tests {
     use crate::*;
-    use rdroidtest::test;
+    use rdroidtest::{ignore_if, rdroidtest};
     use std::fs::{File, OpenOptions};
     use std::io::Write;
     use std::ops::Deref;
@@ -232,7 +232,8 @@
         });
     }
 
-    test!(correct_inputs, ignore_if: should_skip());
+    #[rdroidtest]
+    #[ignore_if(should_skip())]
     fn correct_inputs() {
         let apk = include_bytes!("../testdata/test.apk");
         let idsig = include_bytes!("../testdata/test.apk.idsig");
@@ -245,7 +246,8 @@
     }
 
     // A single byte change in the APK file causes an IO error
-    test!(incorrect_apk, ignore_if: should_skip());
+    #[rdroidtest]
+    #[ignore_if(should_skip())]
     fn incorrect_apk() {
         let apk = include_bytes!("../testdata/test.apk");
         let idsig = include_bytes!("../testdata/test.apk.idsig");
@@ -262,7 +264,8 @@
     }
 
     // A single byte change in the merkle tree also causes an IO error
-    test!(incorrect_merkle_tree, ignore_if: should_skip());
+    #[rdroidtest]
+    #[ignore_if(should_skip())]
     fn incorrect_merkle_tree() {
         let apk = include_bytes!("../testdata/test.apk");
         let idsig = include_bytes!("../testdata/test.apk.idsig");
@@ -286,7 +289,8 @@
     // APK is not altered when the verity device is created, but later modified. IO error should
     // occur when trying to read the data around the modified location. This is the main scenario
     // that we'd like to protect.
-    test!(tampered_apk, ignore_if: should_skip());
+    #[rdroidtest]
+    #[ignore_if(should_skip())]
     fn tampered_apk() {
         let apk = include_bytes!("../testdata/test.apk");
         let idsig = include_bytes!("../testdata/test.apk.idsig");
@@ -307,7 +311,8 @@
 
     // idsig file is not alread when the verity device is created, but later modified. Unlike to
     // the APK case, this doesn't occur IO error because the merkle tree is already cached.
-    test!(tampered_idsig, ignore_if: should_skip());
+    #[rdroidtest]
+    #[ignore_if(should_skip())]
     fn tampered_idsig() {
         let apk = include_bytes!("../testdata/test.apk");
         let idsig = include_bytes!("../testdata/test.apk.idsig");
@@ -324,7 +329,8 @@
     }
 
     // test if both files are already block devices
-    test!(inputs_are_block_devices, ignore_if: should_skip());
+    #[rdroidtest]
+    #[ignore_if(should_skip())]
     fn inputs_are_block_devices() {
         let apk = include_bytes!("../testdata/test.apk");
         let idsig = include_bytes!("../testdata/test.apk.idsig");
@@ -374,7 +380,8 @@
     }
 
     // test with custom roothash
-    test!(correct_custom_roothash, ignore_if: should_skip());
+    #[rdroidtest]
+    #[ignore_if(should_skip())]
     fn correct_custom_roothash() {
         let apk = include_bytes!("../testdata/test.apk");
         let idsig = include_bytes!("../testdata/test.apk.idsig");
@@ -396,7 +403,7 @@
         );
     }
 
-    test!(verify_command);
+    #[rdroidtest]
     fn verify_command() {
         // Check that the command parsing has been configured in a valid way.
         clap_command().debug_assert();
diff --git a/authfs/tests/common/src/java/com/android/fs/common/AuthFsTestRule.java b/authfs/tests/common/src/java/com/android/fs/common/AuthFsTestRule.java
index 7c85797..9c0fd72 100644
--- a/authfs/tests/common/src/java/com/android/fs/common/AuthFsTestRule.java
+++ b/authfs/tests/common/src/java/com/android/fs/common/AuthFsTestRule.java
@@ -52,7 +52,7 @@
     public static final String FUSE_SUPER_MAGIC_HEX = "65735546";
 
     /** VM config entry path in the test APK */
-    private static final String VM_CONFIG_PATH_IN_APK = "assets/vm_config.json";
+    private static final String VM_CONFIG_PATH_IN_APK = "assets/microdroid/vm_config.json";
 
     /** Test directory on Android where data are located */
     public static final String TEST_DIR = "/data/local/tmp/authfs";
diff --git a/compos/composd/src/odrefresh_task.rs b/compos/composd/src/odrefresh_task.rs
index a98f50d..b3fe846 100644
--- a/compos/composd/src/odrefresh_task.rs
+++ b/compos/composd/src/odrefresh_task.rs
@@ -228,7 +228,7 @@
     let odrefresh_current_dir = Path::new(ODREFRESH_OUTPUT_ROOT_DIR).join(CURRENT_ARTIFACTS_SUBDIR);
     let pending_dir = Path::new(ODREFRESH_OUTPUT_ROOT_DIR).join(PENDING_ARTIFACTS_SUBDIR);
     let mut reader =
-        File::open(&pending_dir.join("compos.info")).context("Failed to open compos.info")?;
+        File::open(pending_dir.join("compos.info")).context("Failed to open compos.info")?;
     let compos_info = OdsignInfo::parse_from_reader(&mut reader).context("Failed to parse")?;
 
     for path_str in compos_info.file_hashes.keys() {
diff --git a/javalib/api/test-current.txt b/javalib/api/test-current.txt
index 12c099d..34837a3 100644
--- a/javalib/api/test-current.txt
+++ b/javalib/api/test-current.txt
@@ -7,17 +7,20 @@
   }
 
   public final class VirtualMachineConfig {
+    method @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES") @NonNull public String getOs();
     method @Nullable public String getPayloadConfigPath();
     method public boolean isVmConsoleInputSupported();
   }
 
   public static final class VirtualMachineConfig.Builder {
+    method @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES") @NonNull public android.system.virtualmachine.VirtualMachineConfig.Builder setOs(@NonNull String);
     method @NonNull @RequiresPermission(android.system.virtualmachine.VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION) public android.system.virtualmachine.VirtualMachineConfig.Builder setPayloadConfigPath(@NonNull String);
     method @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES") @NonNull @RequiresPermission(android.system.virtualmachine.VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION) public android.system.virtualmachine.VirtualMachineConfig.Builder setVendorDiskImage(@NonNull java.io.File);
     method @NonNull public android.system.virtualmachine.VirtualMachineConfig.Builder setVmConsoleInputSupported(boolean);
   }
 
   public class VirtualMachineManager {
+    method @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES") @NonNull public java.util.List<java.lang.String> getSupportedOSList() throws android.system.virtualmachine.VirtualMachineException;
     method @RequiresPermission(android.system.virtualmachine.VirtualMachine.MANAGE_VIRTUAL_MACHINE_PERMISSION) public boolean isFeatureEnabled(String) throws android.system.virtualmachine.VirtualMachineException;
     field public static final String FEATURE_DICE_CHANGES = "com.android.kvm.DICE_CHANGES";
     field public static final String FEATURE_MULTI_TENANT = "com.android.kvm.MULTI_TENANT";
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
index cc8f65b..cdc8f02 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
@@ -66,7 +66,7 @@
     private static String[] EMPTY_STRING_ARRAY = {};
 
     // These define the schema of the config file persisted on disk.
-    private static final int VERSION = 6;
+    private static final int VERSION = 7;
     private static final String KEY_VERSION = "version";
     private static final String KEY_PACKAGENAME = "packageName";
     private static final String KEY_APKPATH = "apkPath";
@@ -80,6 +80,7 @@
     private static final String KEY_VM_OUTPUT_CAPTURED = "vmOutputCaptured";
     private static final String KEY_VM_CONSOLE_INPUT_SUPPORTED = "vmConsoleInputSupported";
     private static final String KEY_VENDOR_DISK_IMAGE_PATH = "vendorDiskImagePath";
+    private static final String KEY_OS = "os";
 
     /** @hide */
     @Retention(RetentionPolicy.SOURCE)
@@ -173,6 +174,8 @@
 
     @Nullable private final File mVendorDiskImage;
 
+    private final String mOs;
+
     private VirtualMachineConfig(
             @Nullable String packageName,
             @Nullable String apkPath,
@@ -185,7 +188,8 @@
             long encryptedStorageBytes,
             boolean vmOutputCaptured,
             boolean vmConsoleInputSupported,
-            @Nullable File vendorDiskImage) {
+            @Nullable File vendorDiskImage,
+            @NonNull String os) {
         // This is only called from Builder.build(); the builder handles parameter validation.
         mPackageName = packageName;
         mApkPath = apkPath;
@@ -199,6 +203,7 @@
         mVmOutputCaptured = vmOutputCaptured;
         mVmConsoleInputSupported = vmConsoleInputSupported;
         mVendorDiskImage = vendorDiskImage;
+        mOs = os;
     }
 
     /** Loads a config from a file. */
@@ -280,6 +285,11 @@
             builder.setVendorDiskImage(new File(vendorDiskImagePath));
         }
 
+        String os = b.getString(KEY_OS);
+        if (os != null) {
+            builder.setOs(os);
+        }
+
         return builder.build();
     }
 
@@ -318,6 +328,7 @@
         if (mVendorDiskImage != null) {
             b.putString(KEY_VENDOR_DISK_IMAGE_PATH, mVendorDiskImage.getAbsolutePath());
         }
+        b.putString(KEY_OS, mOs);
         b.writeToStream(output);
     }
 
@@ -447,6 +458,19 @@
     }
 
     /**
+     * Returns the OS of the VM.
+     *
+     * @see Builder#setOs
+     * @hide
+     */
+    @TestApi
+    @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES")
+    @NonNull
+    public String getOs() {
+        return mOs;
+    }
+
+    /**
      * Tests if this config is compatible with other config. Being compatible means that the configs
      * can be interchangeably used for the same virtual machine; they do not change the VM identity
      * or secrets. Such changes include varying the number of CPUs or the size of the RAM. Changes
@@ -469,7 +493,8 @@
                 && Objects.equals(this.mPayloadConfigPath, other.mPayloadConfigPath)
                 && Objects.equals(this.mPayloadBinaryName, other.mPayloadBinaryName)
                 && Objects.equals(this.mPackageName, other.mPackageName)
-                && Objects.equals(this.mApkPath, other.mApkPath);
+                && Objects.equals(this.mApkPath, other.mApkPath)
+                && Objects.equals(this.mOs, other.mOs);
     }
 
     /**
@@ -493,6 +518,7 @@
         if (mPayloadBinaryName != null) {
             VirtualMachinePayloadConfig payloadConfig = new VirtualMachinePayloadConfig();
             payloadConfig.payloadBinaryName = mPayloadBinaryName;
+            payloadConfig.osName = mOs;
             vsConfig.payload =
                     VirtualMachineAppConfig.Payload.payloadConfig(payloadConfig);
         } else {
@@ -591,6 +617,8 @@
      */
     @SystemApi
     public static final class Builder {
+        private final String DEFAULT_OS = "microdroid";
+
         @Nullable private final String mPackageName;
         @Nullable private String mApkPath;
         @Nullable private String mPayloadConfigPath;
@@ -604,6 +632,7 @@
         private boolean mVmOutputCaptured = false;
         private boolean mVmConsoleInputSupported = false;
         @Nullable private File mVendorDiskImage;
+        private String mOs = DEFAULT_OS;
 
         /**
          * Creates a builder for the given context.
@@ -678,7 +707,8 @@
                     mEncryptedStorageBytes,
                     mVmOutputCaptured,
                     mVmConsoleInputSupported,
-                    mVendorDiskImage);
+                    mVendorDiskImage,
+                    mOs);
         }
 
         /**
@@ -910,5 +940,20 @@
             mVendorDiskImage = vendorDiskImage;
             return this;
         }
+
+        /**
+         * Sets an OS for the VM. Defaults to {@code "microdroid"}.
+         *
+         * <p>See {@link VirtualMachineManager#getSupportedOSList} for available OS names.
+         *
+         * @hide
+         */
+        @TestApi
+        @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES")
+        @NonNull
+        public Builder setOs(@NonNull String os) {
+            mOs = requireNonNull(os, "os must not be null");
+            return this;
+        }
     }
 }
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineManager.java b/javalib/src/android/system/virtualmachine/VirtualMachineManager.java
index a4927db..2802659 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineManager.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineManager.java
@@ -18,6 +18,7 @@
 
 import static java.util.Objects.requireNonNull;
 
+import android.annotation.FlaggedApi;
 import android.annotation.IntDef;
 import android.annotation.NonNull;
 import android.annotation.Nullable;
@@ -39,6 +40,8 @@
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.ref.WeakReference;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -318,6 +321,25 @@
     }
 
     /**
+     * Returns a list of supported OS names.
+     *
+     * @hide
+     */
+    @TestApi
+    @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES")
+    @NonNull
+    public List<String> getSupportedOSList() throws VirtualMachineException {
+        synchronized (sCreateLock) {
+            VirtualizationService service = VirtualizationService.getInstance();
+            try {
+                return Arrays.asList(service.getBinder().getSupportedOSList());
+            } catch (RemoteException e) {
+                throw e.rethrowAsRuntimeException();
+            }
+        }
+    }
+
+    /**
      * Returns {@code true} if given {@code featureName} is enabled.
      *
      * @hide
diff --git a/libs/devicemapper/src/lib.rs b/libs/devicemapper/src/lib.rs
index 8d004f9..5656743 100644
--- a/libs/devicemapper/src/lib.rs
+++ b/libs/devicemapper/src/lib.rs
@@ -235,7 +235,7 @@
 mod tests {
     use super::*;
     use crypt::{CipherType, DmCryptTargetBuilder};
-    use rdroidtest::test;
+    use rdroidtest::{ignore_if, rdroidtest};
     use rustutils::system_properties;
     use std::fs::{read, File, OpenOptions};
     use std::io::Write;
@@ -292,22 +292,24 @@
         }
     }
 
-    test!(mapping_again_keeps_data_xts);
+    #[rdroidtest]
     fn mapping_again_keeps_data_xts() {
         mapping_again_keeps_data(&KEY_SET_XTS, "name1");
     }
 
-    test!(mapping_again_keeps_data_hctr2, ignore_if: !is_hctr2_supported());
+    #[rdroidtest]
+    #[ignore_if(!is_hctr2_supported())]
     fn mapping_again_keeps_data_hctr2() {
         mapping_again_keeps_data(&KEY_SET_HCTR2, "name2");
     }
 
-    test!(data_inaccessible_with_diff_key_xts);
+    #[rdroidtest]
     fn data_inaccessible_with_diff_key_xts() {
         data_inaccessible_with_diff_key(&KEY_SET_XTS, "name3");
     }
 
-    test!(data_inaccessible_with_diff_key_hctr2, ignore_if: !is_hctr2_supported());
+    #[rdroidtest]
+    #[ignore_if(!is_hctr2_supported())]
     fn data_inaccessible_with_diff_key_hctr2() {
         data_inaccessible_with_diff_key(&KEY_SET_HCTR2, "name4");
     }
@@ -325,8 +327,8 @@
             backing_file,
             0,
             sz,
-            /*direct_io*/ true,
-            /*writable*/ true,
+            /* direct_io */ true,
+            /* writable */ true,
         )
         .unwrap();
         let device_diff = device.to_owned() + "_diff";
@@ -357,7 +359,8 @@
 
     fn data_inaccessible_with_diff_key(keyset: &KeySet, device: &str) {
         // This test creates 2 different crypt devices using different keys backed
-        // by same data_device -> Write data on dev1 -> Check the data is visible but not the same on dev2
+        // by same data_device -> Write data on dev1 -> Check the data is visible but not the same
+        // on dev2
         let dm = DeviceMapper::new().unwrap();
         let inputimg = include_bytes!("../testdata/rand8k");
         let sz = inputimg.len() as u64;
@@ -368,8 +371,8 @@
             backing_file,
             0,
             sz,
-            /*direct_io*/ true,
-            /*writable*/ true,
+            /* direct_io */ true,
+            /* writable */ true,
         )
         .unwrap();
         let device_diff = device.to_owned() + "_diff";
diff --git a/libs/dice/open_dice/Android.bp b/libs/dice/open_dice/Android.bp
index 2d0f52c..79d0b96 100644
--- a/libs/dice/open_dice/Android.bp
+++ b/libs/dice/open_dice/Android.bp
@@ -55,8 +55,10 @@
         "libopen_dice_android",
     ],
     visibility: [
+        "//hardware/interfaces/security/secretkeeper/aidl/vts",
         "//packages/modules/Virtualization:__subpackages__",
         "//system/authgraph/tests:__subpackages__",
+        "//system/secretkeeper/client:__subpackages__",
     ],
     apex_available: [
         "//apex_available:platform",
diff --git a/microdroid_manager/Android.bp b/microdroid_manager/Android.bp
index cb3b2aa..1696aae 100644
--- a/microdroid_manager/Android.bp
+++ b/microdroid_manager/Android.bp
@@ -29,6 +29,7 @@
         "libclient_vm_csr",
         "libciborium",
         "libcoset",
+        "libdice_policy",
         "libdiced_open_dice",
         "libdiced_sample_inputs",
         "libglob",
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index c94a937..8888feb 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -45,11 +45,14 @@
 use microdroid_metadata::PayloadMetadata;
 use microdroid_payload_config::{OsConfig, Task, TaskType, VmPayloadConfig};
 use nix::sys::signal::Signal;
+use openssl::hkdf::hkdf;
+use openssl::md::Md;
 use payload::load_metadata;
 use rpcbinder::RpcSession;
 use rustutils::sockets::android_get_control_socket;
 use rustutils::system_properties;
 use rustutils::system_properties::PropertyWatcher;
+use secretkeeper_comm::data_types::ID_SIZE;
 use std::borrow::Cow::{Borrowed, Owned};
 use std::env;
 use std::ffi::CString;
@@ -281,8 +284,19 @@
     // To minimize the exposure to untrusted data, derive dice profile as soon as possible.
     info!("DICE derivation for payload");
     let dice_artifacts = dice_derivation(dice, &instance_data, &payload_metadata)?;
+    // TODO(b/291213394): This will be the Id for non-pVM only; instance_data.salt is all 0
+    // for protected VMs. Implement a mechanism for pVM!
+    let mut vm_id = [0u8; ID_SIZE];
+    hkdf(
+        &mut vm_id,
+        Md::sha256(),
+        &instance_data.salt,
+        /* salt */ b"",
+        /* info */ b"VM_ID",
+    )
+    .context("hkdf failed")?;
     let vm_secret =
-        VmSecret::new(dice_artifacts, service).context("Failed to create VM secrets")?;
+        VmSecret::new(vm_id, dice_artifacts, service).context("Failed to create VM secrets")?;
 
     if cfg!(dice_changes) {
         // Now that the DICE derivation is done, it's ok to allow payload code to run.
diff --git a/microdroid_manager/src/vm_payload_service.rs b/microdroid_manager/src/vm_payload_service.rs
index d3346d8..20a1b89 100644
--- a/microdroid_manager/src/vm_payload_service.rs
+++ b/microdroid_manager/src/vm_payload_service.rs
@@ -23,7 +23,6 @@
 use avflog::LogResult;
 use binder::{Interface, BinderFeatures, ExceptionCode, Strong, IntoBinderResult, Status};
 use client_vm_csr::{generate_attestation_key_and_csr, ClientVmAttestationData};
-use diced_open_dice::DiceArtifacts;
 use log::info;
 use rpcbinder::RpcServer;
 use crate::vm_secret::VmSecret;
@@ -57,7 +56,7 @@
 
     fn getDiceAttestationChain(&self) -> binder::Result<Vec<u8>> {
         self.check_restricted_apis_allowed()?;
-        if let Some(bcc) = self.secret.dice().bcc() {
+        if let Some(bcc) = self.secret.dice_artifacts().bcc() {
             Ok(bcc.to_vec())
         } else {
             Err(anyhow!("bcc is none")).or_binder_exception(ExceptionCode::ILLEGAL_STATE)
@@ -66,13 +65,13 @@
 
     fn getDiceAttestationCdi(&self) -> binder::Result<Vec<u8>> {
         self.check_restricted_apis_allowed()?;
-        Ok(self.secret.dice().cdi_attest().to_vec())
+        Ok(self.secret.dice_artifacts().cdi_attest().to_vec())
     }
 
     fn requestAttestation(&self, challenge: &[u8]) -> binder::Result<AttestationResult> {
         self.check_restricted_apis_allowed()?;
         let ClientVmAttestationData { private_key, csr } =
-            generate_attestation_key_and_csr(challenge, self.secret.dice())
+            generate_attestation_key_and_csr(challenge, self.secret.dice_artifacts())
                 .map_err(|e| {
                     Status::new_service_specific_error_str(
                         STATUS_FAILED_TO_PREPARE_CSR_AND_KEY,
diff --git a/microdroid_manager/src/vm_secret.rs b/microdroid_manager/src/vm_secret.rs
index 89c27c9..9b7d4f1 100644
--- a/microdroid_manager/src/vm_secret.rs
+++ b/microdroid_manager/src/vm_secret.rs
@@ -20,11 +20,13 @@
 use secretkeeper_comm::data_types::request::Request;
 use binder::{Strong};
 use coset::CborSerializable;
+use dice_policy::{ConstraintSpec, ConstraintType, DicePolicy, MissingAction};
 use diced_open_dice::{DiceArtifacts, OwnedDiceArtifacts};
 use keystore2_crypto::ZVec;
 use openssl::hkdf::hkdf;
 use openssl::md::Md;
 use openssl::sha;
+use secretkeeper_client::dice::OwnedDiceArtifactsWithExplicitKey;
 use secretkeeper_client::SkSession;
 use secretkeeper_comm::data_types::{Id, ID_SIZE, Secret, SECRET_SIZE};
 use secretkeeper_comm::data_types::response::Response;
@@ -35,6 +37,10 @@
 use zeroize::Zeroizing;
 
 const ENCRYPTEDSTORE_KEY_IDENTIFIER: &str = "encryptedstore_key";
+const AUTHORITY_HASH: i64 = -4670549;
+const MODE: i64 = -4670551;
+const CONFIG_DESC: i64 = -4670548;
+const SECURITY_VERSION: i64 = -70005;
 
 // Generated using hexdump -vn32 -e'14/1 "0x%02X, " 1 "\n"' /dev/urandom
 const SALT_ENCRYPTED_STORE: &[u8] = &[
@@ -46,19 +52,6 @@
     0x55, 0xF8, 0x08, 0x23, 0x81, 0x5F, 0xF5, 0x16, 0x20, 0x3E, 0xBE, 0xBA, 0xB7, 0xA8, 0x43, 0x92,
 ];
 
-// TODO(b/291213394): Remove this once policy is generated from dice_chain
-const HYPOTHETICAL_DICE_POLICY: [u8; 43] = [
-    0x83, 0x01, 0x81, 0x83, 0x01, 0x80, 0xA1, 0x01, 0x00, 0x82, 0x83, 0x01, 0x81, 0x01, 0x73, 0x74,
-    0x65, 0x73, 0x74, 0x69, 0x6E, 0x67, 0x5F, 0x64, 0x69, 0x63, 0x65, 0x5F, 0x70, 0x6F, 0x6C, 0x69,
-    0x63, 0x79, 0x83, 0x02, 0x82, 0x03, 0x18, 0x64, 0x19, 0xE9, 0x75,
-];
-// TODO(b/291213394): Differentiate the Id of nPVM based on 'salt'
-const ID_NP_VM: [u8; ID_SIZE] = [
-    0xF1, 0xB2, 0xED, 0x3B, 0xD1, 0xBD, 0xF0, 0x7D, 0xE1, 0xF0, 0x01, 0xFC, 0x61, 0x71, 0xD3, 0x42,
-    0xE5, 0x8A, 0xAF, 0x33, 0x6C, 0x11, 0xDC, 0xC8, 0x6F, 0xAE, 0x12, 0x5C, 0x26, 0x44, 0x6B, 0x86,
-    0xCC, 0x24, 0xFD, 0xBF, 0x91, 0x4A, 0x54, 0x84, 0xF9, 0x01, 0x59, 0x25, 0x70, 0x89, 0x38, 0x8D,
-    0x5E, 0xE6, 0x91, 0xDF, 0x68, 0x60, 0x69, 0x26, 0xBE, 0xFE, 0x79, 0x58, 0xF7, 0xEA, 0x81, 0x7D,
-];
 const SKP_SECRET_NP_VM: [u8; SECRET_SIZE] = [
     0xA9, 0x89, 0x97, 0xFE, 0xAE, 0x97, 0x55, 0x4B, 0x32, 0x35, 0xF0, 0xE8, 0x93, 0xDA, 0xEA, 0x24,
     0x06, 0xAC, 0x36, 0x8B, 0x3C, 0x95, 0x50, 0x16, 0x67, 0x71, 0x65, 0x26, 0xEB, 0xD0, 0xC3, 0x98,
@@ -73,72 +66,75 @@
     // with downgraded images will not have access to VM's secret.
     // V2 secrets require hardware support - Secretkeeper HAL, which (among other things)
     // is backed by tamper-evident storage, providing rollback protection to these secrets.
-    V2 { dice: OwnedDiceArtifacts, skp_secret: ZVec },
+    V2 { dice_artifacts: OwnedDiceArtifactsWithExplicitKey, skp_secret: ZVec },
     // V1 secrets are not protected against rollback of boot images.
     // They are reliable only if rollback of images was prevented by verified boot ie,
     // each stage (including pvmfw/Microdroid/Microdroid Manager) prevents downgrade of next
     // stage. These are now legacy secrets & used only when Secretkeeper HAL is not supported
     // by device.
-    V1 { dice: OwnedDiceArtifacts },
-}
-
-fn get_id() -> [u8; ID_SIZE] {
-    if super::is_strict_boot() {
-        todo!("Id for protected VM is not implemented");
-    } else {
-        ID_NP_VM
-    }
+    V1 { dice_artifacts: OwnedDiceArtifacts },
 }
 
 impl VmSecret {
     pub fn new(
+        id: [u8; ID_SIZE],
         dice_artifacts: OwnedDiceArtifacts,
         vm_service: &Strong<dyn IVirtualMachineService>,
-    ) -> Result<VmSecret> {
+    ) -> Result<Self> {
         ensure!(dice_artifacts.bcc().is_some(), "Dice chain missing");
 
-        if let Some(sk_service) = is_sk_supported(vm_service)? {
-            let id = get_id();
-            let mut skp_secret = Zeroizing::new([0u8; SECRET_SIZE]);
-            if super::is_strict_boot() {
-                if super::is_new_instance() {
-                    *skp_secret = rand::random();
-                    store_secret(sk_service.clone(), id, skp_secret.clone(), &dice_artifacts)?;
-                } else {
-                    // Subsequent run of the pVM -> get the secret stored in Secretkeeper.
-                    *skp_secret = get_secret(sk_service.clone(), id, &dice_artifacts)?;
-                }
+        let Some(sk_service) = is_sk_supported(vm_service)? else {
+            // Use V1 secrets if Secretkeeper is not supported.
+            return Ok(Self::V1 { dice_artifacts });
+        };
+        let explicit_dice =
+            OwnedDiceArtifactsWithExplicitKey::from_owned_artifacts(dice_artifacts)?;
+        let explicit_dice_chain = explicit_dice
+            .explicit_key_dice_chain()
+            .ok_or(anyhow!("Missing explicit dice chain, this is unusual"))?;
+        let policy = sealing_policy(explicit_dice_chain).map_err(anyhow_err)?;
+
+        // Start a new session with Secretkeeper!
+        let mut session = SkSession::new(sk_service, &explicit_dice)?;
+        let mut skp_secret = Zeroizing::new([0u8; SECRET_SIZE]);
+        if super::is_strict_boot() {
+            if super::is_new_instance() {
+                *skp_secret = rand::random();
+                store_secret(&mut session, id, skp_secret.clone(), policy)?;
             } else {
-                // TODO(b/291213394): Non protected VM don't need to use Secretkeeper, remove this
-                // once we have sufficient testing on protected VM.
-                store_secret(sk_service.clone(), id, SKP_SECRET_NP_VM.into(), &dice_artifacts)?;
-                *skp_secret = get_secret(sk_service.clone(), id, &dice_artifacts)?;
+                // Subsequent run of the pVM -> get the secret stored in Secretkeeper.
+                *skp_secret = get_secret(&mut session, id, Some(policy))?;
             }
-            return Ok(Self::V2 {
-                dice: dice_artifacts,
-                skp_secret: ZVec::try_from(skp_secret.to_vec())?,
-            });
+        } else {
+            // TODO(b/291213394): Non-protected VMs don't need to use Secretkeeper; remove this
+            // once we have sufficient testing on protected VM.
+            store_secret(&mut session, id, SKP_SECRET_NP_VM.into(), policy)?;
+            *skp_secret = get_secret(&mut session, id, None)?;
         }
-        //  Use V1 secrets if Secretkeeper is not supported.
-        Ok(Self::V1 { dice: dice_artifacts })
+        Ok(Self::V2 {
+            dice_artifacts: explicit_dice,
+            skp_secret: ZVec::try_from(skp_secret.to_vec())?,
+        })
     }
 
-    pub fn dice(&self) -> &OwnedDiceArtifacts {
+    pub fn dice_artifacts(&self) -> &dyn DiceArtifacts {
         match self {
-            Self::V2 { dice, .. } => dice,
-            Self::V1 { dice } => dice,
+            Self::V2 { dice_artifacts, .. } => dice_artifacts,
+            Self::V1 { dice_artifacts } => dice_artifacts,
         }
     }
 
     fn get_vm_secret(&self, salt: &[u8], identifier: &[u8], key: &mut [u8]) -> Result<()> {
         match self {
-            Self::V2 { dice, skp_secret } => {
+            Self::V2 { dice_artifacts, skp_secret } => {
                 let mut hasher = sha::Sha256::new();
-                hasher.update(dice.cdi_seal());
+                hasher.update(dice_artifacts.cdi_seal());
                 hasher.update(skp_secret);
                 hkdf(key, Md::sha256(), &hasher.finish(), salt, identifier)?
             }
-            Self::V1 { dice } => hkdf(key, Md::sha256(), dice.cdi_seal(), salt, identifier)?,
+            Self::V1 { dice_artifacts } => {
+                hkdf(key, Md::sha256(), dice_artifacts.cdi_seal(), salt, identifier)?
+            }
         }
         Ok(())
     }
@@ -154,24 +150,45 @@
     }
 }
 
+// Construct a sealing policy on the dice chain. VMs use the following set of constraints for
+// protecting secrets against rollback of boot images.
+// 1. ExactMatch on AUTHORITY_HASH (Required, i.e. each DiceChainEntry must have it).
+// 2. ExactMatch on MODE (Required) - Secret should be inaccessible if any of the runtime
+//    configuration changes. For example, the secrets stored with a boot stage being in Normal mode
+//    should be inaccessible when the same stage is booted in Debug mode.
+// 3. GreaterOrEqual on SECURITY_VERSION (Optional): The secrets will be accessible if the version
+//    of any image is greater than or equal to the set version. This is an optional field; certain
+//    components may choose to prevent booting of rollback images; for example, ABL is expected to
+//    provide rollback protection of pvmfw. Such components may choose to not put SECURITY_VERSION
+//    in the corresponding DiceChainEntry.
+// TODO(b/291219197): Add constraints on extra APKs as well!
+fn sealing_policy(dice: &[u8]) -> Result<Vec<u8>, String> {
+    let constraint_spec = [
+        ConstraintSpec::new(ConstraintType::ExactMatch, vec![AUTHORITY_HASH], MissingAction::Fail),
+        ConstraintSpec::new(ConstraintType::ExactMatch, vec![MODE], MissingAction::Fail),
+        ConstraintSpec::new(
+            ConstraintType::GreaterOrEqual,
+            vec![CONFIG_DESC, SECURITY_VERSION],
+            MissingAction::Ignore,
+        ),
+    ];
+
+    DicePolicy::from_dice_chain(dice, &constraint_spec)?
+        .to_vec()
+        .map_err(|e| format!("DicePolicy construction failed {e:?}"))
+}
+
 fn store_secret(
-    secretkeeper: binder::Strong<dyn ISecretkeeper>,
+    session: &mut SkSession,
     id: [u8; ID_SIZE],
     secret: Zeroizing<[u8; SECRET_SIZE]>,
-    _dice_chain: &OwnedDiceArtifacts,
+    sealing_policy: Vec<u8>,
 ) -> Result<()> {
-    // Start a new secretkeeper session!
-    let mut session = SkSession::new(secretkeeper).map_err(anyhow_err)?;
-    let store_request = StoreSecretRequest {
-        id: Id(id),
-        secret: Secret(*secret),
-        // TODO(b/291233371): Construct policy out of dice_chain.
-        sealing_policy: HYPOTHETICAL_DICE_POLICY.to_vec(),
-    };
+    let store_request = StoreSecretRequest { id: Id(id), secret: Secret(*secret), sealing_policy };
     log::info!("Secretkeeper operation: {:?}", store_request);
 
     let store_request = store_request.serialize_to_packet().to_vec().map_err(anyhow_err)?;
-    let store_response = session.secret_management_request(&store_request).map_err(anyhow_err)?;
+    let store_response = session.secret_management_request(&store_request)?;
     let store_response = ResponsePacket::from_slice(&store_response).map_err(anyhow_err)?;
     let response_type = store_response.response_type().map_err(anyhow_err)?;
     ensure!(
@@ -183,21 +200,14 @@
 }
 
 fn get_secret(
-    secretkeeper: binder::Strong<dyn ISecretkeeper>,
+    session: &mut SkSession,
     id: [u8; ID_SIZE],
-    _dice_chain: &OwnedDiceArtifacts,
+    updated_sealing_policy: Option<Vec<u8>>,
 ) -> Result<[u8; SECRET_SIZE]> {
-    // Start a new secretkeeper session!
-    let mut session = SkSession::new(secretkeeper).map_err(anyhow_err)?;
-    let get_request = GetSecretRequest {
-        id: Id(id),
-        // TODO(b/291233371): Construct policy out of dice_chain.
-        updated_sealing_policy: None,
-    };
+    let get_request = GetSecretRequest { id: Id(id), updated_sealing_policy };
     log::info!("Secretkeeper operation: {:?}", get_request);
-
     let get_request = get_request.serialize_to_packet().to_vec().map_err(anyhow_err)?;
-    let get_response = session.secret_management_request(&get_request).map_err(anyhow_err)?;
+    let get_response = session.secret_management_request(&get_request)?;
     let get_response = ResponsePacket::from_slice(&get_response).map_err(anyhow_err)?;
     let response_type = get_response.response_type().map_err(anyhow_err)?;
     ensure!(
diff --git a/pvmfw/README.md b/pvmfw/README.md
index 124ef89..5d347b2 100644
--- a/pvmfw/README.md
+++ b/pvmfw/README.md
@@ -141,7 +141,11 @@
 +-------------------------------+
 |           [Entry 2]           | <-- Entry 2 is present since version 1.1
 |  offset = (THIRD - HEAD)      |
-|  size = (THIRD_END - SECOND)  |
+|  size = (THIRD_END - THIRD)   |
++-------------------------------+
+|           [Entry 3]           | <-- Entry 3 is present since version 1.2
+|  offset = (FOURTH - HEAD)     |
+|  size = (FOURTH_END - FOURTH) |
 +-------------------------------+
 |              ...              |
 +-------------------------------+
@@ -149,17 +153,21 @@
 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
 | (Padding to 8-byte alignment) |
 +===============================+ <-- FIRST
-|        {First blob: BCC}      |
+|       {First blob: BCC}       |
 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ <-- FIRST_END
 | (Padding to 8-byte alignment) |
 +===============================+ <-- SECOND
-|        {Second blob: DP}      |
+|       {Second blob: DP}       |
 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ <-- SECOND_END
 | (Padding to 8-byte alignment) |
 +===============================+ <-- THIRD
-|        {Third blob: VM DTBO}  |
+|     {Third blob: VM DTBO}     |
 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ <-- THIRD_END
 | (Padding to 8-byte alignment) |
++===============================+ <-- FOURTH
+| {Fourth blob: VM reference DT}|
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ <-- FOURTH_END
+| (Padding to 8-byte alignment) |
 +===============================+
 |              ...              |
 +===============================+ <-- TAIL
@@ -185,11 +193,32 @@
 - entry 1 may point to a [DTBO] to be applied to the pVM device tree. See
   [debug policy][debug_policy] for an example.
 
-In version 1.1, new blob is added.
+In version 1.1, a third blob is added.
 
 - entry 2 may point to a [DTBO] that describes VM DTBO for device assignment.
   pvmfw will provision assigned devices with the VM DTBO.
 
+In version 1.2, a fourth blob is added.
+
+- entry 3, if present, contains the VM reference DT. This defines properties that
+  may be included in the device tree passed to a protected VM. pvmfw validates
+  that if any of these properties is included in the VM's device tree, the
+  property value exactly matches what is in the VM reference DT.
+
+  The bootloader should ensure that the same properties, with the same values,
+  are added under the "/avf/reference" node in the host Android device tree.
+
+  This provides a mechanism to allow configuration information to be securely
+  passed to the VM via the host. pvmfw does not interpret the content of VM
+  reference DT, nor does it apply it to the VM's device tree; it just ensures
+  that if matching properties are present in the VM device tree they contain the
+  correct values.
+
+<!--
+  TODO(b/319192461): Attach link explaining about Microdroid vendor partition
+  TODO(b/291232226): Attach link explaining about Secretkeeper
+-->
+
 [header]: src/config.rs
 [DTBO]: https://android.googlesource.com/platform/external/dtc/+/refs/heads/main/Documentation/dt-object-internal.txt
 [debug_policy]: ../docs/debug/README.md#debug-policy
diff --git a/pvmfw/src/device_assignment.rs b/pvmfw/src/device_assignment.rs
index 3d060ac..54b5a47 100644
--- a/pvmfw/src/device_assignment.rs
+++ b/pvmfw/src/device_assignment.rs
@@ -601,7 +601,7 @@
 }
 
 impl DeviceAssignmentInfo {
-    const PVIOMMU_COMPATIBLE: &CStr = cstr!("pkvm,pviommu");
+    const PVIOMMU_COMPATIBLE: &'static CStr = cstr!("pkvm,pviommu");
 
     /// Parses pvIOMMUs in fdt
     // Note: This will validate pvIOMMU ids' uniqueness, even when unassigned.
diff --git a/pvmfw/src/instance.rs b/pvmfw/src/instance.rs
index e98f663..6daadd9 100644
--- a/pvmfw/src/instance.rs
+++ b/pvmfw/src/instance.rs
@@ -177,7 +177,7 @@
 }
 
 impl Header {
-    const MAGIC: &[u8] = b"Android-VM-instance";
+    const MAGIC: &'static [u8] = b"Android-VM-instance";
     const VERSION_1: u16 = 1;
 
     pub fn is_valid(&self) -> bool {
diff --git a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
index e31a55d..6314c25 100644
--- a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
+++ b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
@@ -119,7 +119,7 @@
     public void setup() throws IOException {
         grantPermission(VirtualMachine.MANAGE_VIRTUAL_MACHINE_PERMISSION);
         grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
-        prepareTestSetup(mProtectedVm);
+        prepareTestSetup(mProtectedVm, null /* gki */);
         setMaxPerformanceTaskProfile();
         mInstrumentation = getInstrumentation();
     }
diff --git a/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java b/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
index 817bd85..c2244bc 100644
--- a/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
+++ b/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
@@ -48,7 +48,11 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.OptionalLong;
+import java.util.Set;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -58,6 +62,9 @@
     private static final String TAG = "MicrodroidDeviceTestBase";
     private final String MAX_PERFORMANCE_TASK_PROFILE = "CPUSET_SP_TOP_APP";
 
+    protected static final Set<String> SUPPORTED_GKI_VERSIONS =
+            Collections.unmodifiableSet(new HashSet(Arrays.asList("android14-6.1")));
+
     public static boolean isCuttlefish() {
         return getDeviceProperties().isCuttlefish();
     }
@@ -105,6 +112,7 @@
 
     private Context mCtx;
     private boolean mProtectedVm;
+    private String mOs;
 
     protected Context getContext() {
         return mCtx;
@@ -115,13 +123,17 @@
     }
 
     public VirtualMachineConfig.Builder newVmConfigBuilder() {
-        return new VirtualMachineConfig.Builder(mCtx).setProtectedVm(mProtectedVm);
+        return new VirtualMachineConfig.Builder(mCtx).setProtectedVm(mProtectedVm).setOs(mOs);
     }
 
     protected final boolean isProtectedVm() {
         return mProtectedVm;
     }
 
+    protected final String os() {
+        return mOs;
+    }
+
     /**
      * Creates a new virtual machine, potentially removing an existing virtual machine with given
      * name.
@@ -136,13 +148,14 @@
         return vmm.create(name, config);
     }
 
-    public void prepareTestSetup(boolean protectedVm) {
+    public void prepareTestSetup(boolean protectedVm, String gki) {
         mCtx = ApplicationProvider.getApplicationContext();
         assume().withMessage("Device doesn't support AVF")
                 .that(mCtx.getPackageManager().hasSystemFeature(FEATURE_VIRTUALIZATION_FRAMEWORK))
                 .isTrue();
 
         mProtectedVm = protectedVm;
+        mOs = gki != null ? "microdroid_gki-" + gki : "microdroid";
 
         int capabilities = getVirtualMachineManager().getCapabilities();
         if (protectedVm) {
@@ -154,6 +167,15 @@
                     .that(capabilities & VirtualMachineManager.CAPABILITY_NON_PROTECTED_VM)
                     .isNotEqualTo(0);
         }
+
+        try {
+            assume().withMessage("Skip where requested OS \"" + mOs + "\" isn't supported")
+                    .that(mOs)
+                    .isIn(getVirtualMachineManager().getSupportedOSList());
+        } catch (VirtualMachineException e) {
+            Log.e(TAG, "Error getting supported OS list", e);
+            throw new RuntimeException("Failed to get supported OS list.", e);
+        }
     }
 
     public abstract static class VmEventListener implements VirtualMachineCallback {
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
index be13196..848b43b 100644
--- a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
+++ b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
@@ -36,6 +36,7 @@
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -55,7 +56,7 @@
                 / MICRODROID_COMMAND_RETRY_INTERVAL_MILLIS);
 
     protected static final Set<String> SUPPORTED_GKI_VERSIONS =
-            new HashSet(Arrays.asList("android14-6.1"));
+            Collections.unmodifiableSet(new HashSet(Arrays.asList("android14-6.1")));
 
     public static void prepareVirtualizationTestSetup(ITestDevice androidDevice)
             throws DeviceNotAvailableException {
diff --git a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
index 1fa0976..21abdaa 100644
--- a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
@@ -90,7 +90,7 @@
     private static final String SHELL_PACKAGE_NAME = "com.android.shell";
     private static final String VIRT_APEX = "/apex/com.android.virt/";
 
-    private static final int MIN_MEM_ARM64 = 145;
+    private static final int MIN_MEM_ARM64 = 160;
     private static final int MIN_MEM_X86_64 = 196;
 
     private static final int BOOT_COMPLETE_TIMEOUT = 30000; // 30 seconds
@@ -108,6 +108,7 @@
         List<Object[]> ret = new ArrayList<>();
         ret.add(new Object[] {true /* protectedVm */, null /* use microdroid kernel */});
         ret.add(new Object[] {false /* protectedVm */, null /* use microdroid kernel */});
+        // TODO(b/302465542): run only the latest GKI on presubmit to reduce running time
         for (String gki : SUPPORTED_GKI_VERSIONS) {
             ret.add(new Object[] {true /* protectedVm */, gki});
             ret.add(new Object[] {false /* protectedVm */, gki});
@@ -121,6 +122,8 @@
     @Parameterized.Parameter(1)
     public String mGki;
 
+    private String mOs;
+
     @Rule public TestLogData mTestLogs = new TestLogData();
     @Rule public TestName mTestName = new TestName();
     @Rule public TestMetrics mMetrics = new TestMetrics();
@@ -149,7 +152,7 @@
             throws Exception {
         PayloadMetadata.write(
                 PayloadMetadata.metadata(
-                        "/mnt/apk/assets/vm_config.json",
+                        "/mnt/apk/assets/" + mOs + "/vm_config.json",
                         PayloadMetadata.apk("microdroid-apk"),
                         apexes.stream()
                                 .map(apex -> PayloadMetadata.apex(apex.name))
@@ -333,8 +336,7 @@
         //   - its idsig
 
         // Load etc/microdroid.json
-        String os = mGki != null ? "microdroid_gki-" + mGki : "microdroid";
-        File microdroidConfigFile = new File(virtApexEtcDir, os + ".json");
+        File microdroidConfigFile = new File(virtApexEtcDir, mOs + ".json");
         JSONObject config = new JSONObject(FileUtil.readStringFromFile(microdroidConfigFile));
 
         // Replace paths so that the config uses re-signed images from TEST_ROOT
@@ -350,7 +352,7 @@
         }
 
         // Add partitions to the second disk
-        final String initrdPath = TEST_ROOT + "etc/" + os + "_initrd_debuggable.img";
+        final String initrdPath = TEST_ROOT + "etc/" + mOs + "_initrd_debuggable.img";
         config.put("initrd", initrdPath);
         // Add instance image as a partition in disks[1]
         disks.put(
@@ -409,7 +411,7 @@
     public void protectedVmRunsPvmfw() throws Exception {
         // Arrange
         assumeProtectedVm();
-        final String configPath = "assets/vm_config_apex.json";
+        final String configPath = "assets/" + mOs + "/vm_config_apex.json";
 
         // Act
         mMicrodroidDevice =
@@ -418,7 +420,6 @@
                         .memoryMib(minMemorySize())
                         .cpuTopology("match_host")
                         .protectedVm(true)
-                        .gki(mGki)
                         .build(getAndroidDevice());
 
         // Assert
@@ -546,7 +547,6 @@
                         .memoryMib(minMemorySize())
                         .cpuTopology("match_host")
                         .protectedVm(protectedVm)
-                        .gki(mGki)
                         .build(getAndroidDevice());
         mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
         mMicrodroidDevice.enableAdbRoot();
@@ -567,7 +567,7 @@
         assertThat(
                         isTombstoneGeneratedWithCmd(
                                 mProtectedVm,
-                                "assets/vm_config.json",
+                                "assets/" + mOs + "/vm_config.json",
                                 "kill",
                                 "-SIGSEGV",
                                 "$(pidof microdroid_launcher)"))
@@ -581,7 +581,7 @@
         assertThat(
                         isTombstoneGeneratedWithCmd(
                                 mProtectedVm,
-                                "assets/vm_config_no_tombstone.json",
+                                "assets/" + mOs + "/vm_config_no_tombstone.json",
                                 "kill",
                                 "-SIGSEGV",
                                 "$(pidof microdroid_launcher)"))
@@ -595,7 +595,7 @@
         assertThat(
                         isTombstoneGeneratedWithCmd(
                                 mProtectedVm,
-                                "assets/vm_config.json",
+                                "assets/" + mOs + "/vm_config.json",
                                 "echo",
                                 "c",
                                 ">",
@@ -659,7 +659,10 @@
     private boolean isTombstoneGeneratedWithCrashConfig(boolean protectedVm, boolean debuggable)
             throws Exception {
         return isTombstoneGeneratedWithVmRunApp(
-                protectedVm, debuggable, "--config-path", "assets/vm_config_crash.json");
+                protectedVm,
+                debuggable,
+                "--config-path",
+                "assets/" + mOs + "/vm_config_crash.json");
     }
 
     @Test
@@ -694,14 +697,13 @@
 
         // Create VM with microdroid
         TestDevice device = getAndroidDevice();
-        final String configPath = "assets/vm_config_apex.json"; // path inside the APK
+        final String configPath = "assets/" + mOs + "/vm_config_apex.json"; // path inside the APK
         ITestDevice microdroid =
                 MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
                         .debugLevel("full")
                         .memoryMib(minMemorySize())
                         .cpuTopology("match_host")
                         .protectedVm(mProtectedVm)
-                        .gki(mGki)
                         .build(device);
         microdroid.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
         device.shutdownMicrodroid(microdroid);
@@ -824,26 +826,24 @@
     @Test
     @CddTest(requirements = {"9.17/C-1-1", "9.17/C-1-2", "9.17/C/1-3"})
     public void testMicrodroidBoots() throws Exception {
-        final String configPath = "assets/vm_config.json"; // path inside the APK
+        final String configPath = "assets/" + mOs + "/vm_config.json"; // path inside the APK
         testMicrodroidBootsWithBuilder(
                 MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
                         .debugLevel("full")
                         .memoryMib(minMemorySize())
                         .cpuTopology("match_host")
-                        .protectedVm(mProtectedVm)
-                        .gki(mGki));
+                        .protectedVm(mProtectedVm));
     }
 
     @Test
     public void testMicrodroidRamUsage() throws Exception {
-        final String configPath = "assets/vm_config.json";
+        final String configPath = "assets/" + mOs + "/vm_config.json";
         mMicrodroidDevice =
                 MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
                         .debugLevel("full")
                         .memoryMib(minMemorySize())
                         .cpuTopology("match_host")
                         .protectedVm(mProtectedVm)
-                        .gki(mGki)
                         .build(getAndroidDevice());
         mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
         mMicrodroidDevice.enableAdbRoot();
@@ -991,7 +991,7 @@
         List<String> devices = getAssignableDevices();
         assumeFalse("no assignable devices", devices.isEmpty());
 
-        final String configPath = "assets/vm_config.json";
+        final String configPath = "assets/" + mOs + "/vm_config.json";
         mMicrodroidDevice =
                 MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
                         .debugLevel("full")
@@ -999,7 +999,6 @@
                         .cpuTopology("match_host")
                         .protectedVm(true)
                         .addAssignableDevice(devices.get(0))
-                        .gki(mGki)
                         .build(getAndroidDevice());
 
         mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
@@ -1034,6 +1033,8 @@
                     "GKI version \"" + mGki + "\" is not supported on this device",
                     getSupportedGKIVersions().contains(mGki));
         }
+
+        mOs = (mGki != null) ? "microdroid_gki-" + mGki : "microdroid";
     }
 
     @After
@@ -1094,8 +1095,15 @@
         return parseStringArrayFieldsFromVmInfo("Assignable devices: ");
     }
 
+    private List<String> getSupportedOSList() throws Exception {
+        return parseStringArrayFieldsFromVmInfo("Available OS list: ");
+    }
+
     private List<String> getSupportedGKIVersions() throws Exception {
-        return parseStringArrayFieldsFromVmInfo("Available gki versions: ");
+        return getSupportedOSList().stream()
+                .filter(os -> os.startsWith("microdroid_gki-"))
+                .map(os -> os.replaceFirst("^microdroid_gki-", ""))
+                .collect(Collectors.toList());
     }
 
     private TestDevice getAndroidDevice() {
diff --git a/tests/pvmfw/java/com/android/pvmfw/test/DebugPolicyHostTests.java b/tests/pvmfw/java/com/android/pvmfw/test/DebugPolicyHostTests.java
index 7d0faa4..b84d7dc 100644
--- a/tests/pvmfw/java/com/android/pvmfw/test/DebugPolicyHostTests.java
+++ b/tests/pvmfw/java/com/android/pvmfw/test/DebugPolicyHostTests.java
@@ -57,7 +57,10 @@
     @NonNull private static final String PACKAGE_NAME = "com.android.microdroid.test";
     @NonNull private static final String MICRODROID_DEBUG_FULL = "full";
     @NonNull private static final String MICRODROID_DEBUG_NONE = "none";
-    @NonNull private static final String MICRODROID_CONFIG_PATH = "assets/vm_config_apex.json";
+
+    @NonNull
+    private static final String MICRODROID_CONFIG_PATH = "assets/microdroid/vm_config_apex.json";
+
     @NonNull private static final String MICRODROID_LOG_PATH = TEST_ROOT + "log.txt";
     private static final int BOOT_COMPLETE_TIMEOUT_MS = 30000; // 30 seconds
     private static final int BOOT_FAILURE_WAIT_TIME_MS = 10000; // 10 seconds
diff --git a/tests/pvmfw/java/com/android/pvmfw/test/PvmfwImgTest.java b/tests/pvmfw/java/com/android/pvmfw/test/PvmfwImgTest.java
index 95c1c4e..9fbbd87 100644
--- a/tests/pvmfw/java/com/android/pvmfw/test/PvmfwImgTest.java
+++ b/tests/pvmfw/java/com/android/pvmfw/test/PvmfwImgTest.java
@@ -53,7 +53,10 @@
     @NonNull private static final String PACKAGE_FILE_NAME = "MicrodroidTestApp.apk";
     @NonNull private static final String PACKAGE_NAME = "com.android.microdroid.test";
     @NonNull private static final String MICRODROID_DEBUG_FULL = "full";
-    @NonNull private static final String MICRODROID_CONFIG_PATH = "assets/vm_config_apex.json";
+
+    @NonNull
+    private static final String MICRODROID_CONFIG_PATH = "assets/microdroid/vm_config_apex.json";
+
     private static final int BOOT_COMPLETE_TIMEOUT_MS = 30000; // 30 seconds
     private static final int BOOT_FAILURE_WAIT_TIME_MS = 10000; // 10 seconds
 
diff --git a/tests/testapk/assets/vm_config.json b/tests/testapk/assets/microdroid/vm_config.json
similarity index 100%
rename from tests/testapk/assets/vm_config.json
rename to tests/testapk/assets/microdroid/vm_config.json
diff --git a/tests/testapk/assets/vm_config_apex.json b/tests/testapk/assets/microdroid/vm_config_apex.json
similarity index 100%
rename from tests/testapk/assets/vm_config_apex.json
rename to tests/testapk/assets/microdroid/vm_config_apex.json
diff --git a/tests/testapk/assets/vm_config_crash.json b/tests/testapk/assets/microdroid/vm_config_crash.json
similarity index 100%
rename from tests/testapk/assets/vm_config_crash.json
rename to tests/testapk/assets/microdroid/vm_config_crash.json
diff --git a/tests/testapk/assets/vm_config_extra_apk.json b/tests/testapk/assets/microdroid/vm_config_extra_apk.json
similarity index 100%
rename from tests/testapk/assets/vm_config_extra_apk.json
rename to tests/testapk/assets/microdroid/vm_config_extra_apk.json
diff --git a/tests/testapk/assets/vm_config_no_task.json b/tests/testapk/assets/microdroid/vm_config_no_task.json
similarity index 100%
rename from tests/testapk/assets/vm_config_no_task.json
rename to tests/testapk/assets/microdroid/vm_config_no_task.json
diff --git a/tests/testapk/assets/vm_config_no_tombstone.json b/tests/testapk/assets/microdroid/vm_config_no_tombstone.json
similarity index 100%
rename from tests/testapk/assets/vm_config_no_tombstone.json
rename to tests/testapk/assets/microdroid/vm_config_no_tombstone.json
diff --git a/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config.json b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config.json
new file mode 100644
index 0000000..2022127
--- /dev/null
+++ b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config.json
@@ -0,0 +1,10 @@
+{
+  "os": {
+    "name": "microdroid_gki-android14-6.1"
+  },
+  "task": {
+    "type": "microdroid_launcher",
+    "command": "MicrodroidTestNativeLib.so"
+  },
+  "export_tombstones": true
+}
diff --git a/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_apex.json b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_apex.json
new file mode 100644
index 0000000..bd3998d
--- /dev/null
+++ b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_apex.json
@@ -0,0 +1,21 @@
+{
+  "os": {
+    "name": "microdroid_gki-android14-6.1"
+  },
+  "task": {
+    "type": "microdroid_launcher",
+    "command": "MicrodroidTestNativeLib.so"
+  },
+  "apexes": [
+    {
+      "name": "com.android.art"
+    },
+    {
+      "name": "com.android.compos"
+    },
+    {
+      "name": "com.android.sdkext"
+    }
+  ],
+  "export_tombstones": true
+}
diff --git a/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_crash.json b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_crash.json
new file mode 100644
index 0000000..4692258
--- /dev/null
+++ b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_crash.json
@@ -0,0 +1,9 @@
+{
+  "os": {
+    "name": "microdroid_gki-android14-6.1"
+  },
+  "task": {
+    "type": "microdroid_launcher",
+    "command": "MicrodroidCrashNativeLib.so"
+  }
+}
diff --git a/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_extra_apk.json b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_extra_apk.json
new file mode 100644
index 0000000..1602294
--- /dev/null
+++ b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_extra_apk.json
@@ -0,0 +1,15 @@
+{
+  "os": {
+    "name": "microdroid_gki-android14-6.1"
+  },
+  "task": {
+    "type": "microdroid_launcher",
+    "command": "MicrodroidTestNativeLib.so"
+  },
+  "extra_apks": [
+    {
+      "path": "/system/etc/security/fsverity/BuildManifest.apk"
+    }
+  ],
+  "export_tombstones": true
+}
diff --git a/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_no_task.json b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_no_task.json
new file mode 100644
index 0000000..8282f99
--- /dev/null
+++ b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_no_task.json
@@ -0,0 +1,6 @@
+{
+  "os": {
+    "name": "microdroid_gki-android14-6.1"
+  },
+  "export_tombstones": true
+}
diff --git a/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_no_tombstone.json b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_no_tombstone.json
new file mode 100644
index 0000000..6e8a136
--- /dev/null
+++ b/tests/testapk/assets/microdroid_gki-android14-6.1/vm_config_no_tombstone.json
@@ -0,0 +1,10 @@
+{
+  "os": {
+    "name": "microdroid_gki-android14-6.1"
+  },
+  "task": {
+    "type": "microdroid_launcher",
+    "command": "MicrodroidTestNativeLib.so"
+  },
+  "export_tombstones": false
+}
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index 2367707..6605f89 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -31,8 +31,8 @@
 
 import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.junit.Assume.assumeFalse;
+import static org.junit.Assume.assumeTrue;
 
 import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
 
@@ -96,7 +96,9 @@
 import java.nio.file.Paths;
 import java.time.LocalDateTime;
 import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
 import java.util.OptionalLong;
 import java.util.UUID;
@@ -118,17 +120,29 @@
 
     private static final String KERNEL_VERSION = SystemProperties.get("ro.kernel.version");
 
-    @Parameterized.Parameters(name = "protectedVm={0}")
-    public static Object[] protectedVmConfigs() {
-        return new Object[] { false, true };
+    @Parameterized.Parameters(name = "protectedVm={0},gki={1}")
+    public static Collection<Object[]> params() {
+        List<Object[]> ret = new ArrayList<>();
+        ret.add(new Object[] {true /* protectedVm */, null /* use microdroid kernel */});
+        ret.add(new Object[] {false /* protectedVm */, null /* use microdroid kernel */});
+        // TODO(b/302465542): run only the latest GKI on presubmit to reduce running time
+        for (String gki : SUPPORTED_GKI_VERSIONS) {
+            ret.add(new Object[] {true /* protectedVm */, gki});
+            ret.add(new Object[] {false /* protectedVm */, gki});
+        }
+        return ret;
     }
 
-    @Parameterized.Parameter public boolean mProtectedVm;
+    @Parameterized.Parameter(0)
+    public boolean mProtectedVm;
+
+    @Parameterized.Parameter(1)
+    public String mGki;
 
     @Before
     public void setup() {
         grantPermission(VirtualMachine.MANAGE_VIRTUAL_MACHINE_PERMISSION);
-        prepareTestSetup(mProtectedVm);
+        prepareTestSetup(mProtectedVm, mGki);
         // USE_CUSTOM_VIRTUAL_MACHINE permission has protection level signature|development, meaning
         // that it will be automatically granted when test apk is installed. We have some tests
         // checking the behavior when caller doesn't have this permission (e.g.
@@ -146,7 +160,7 @@
 
     private static final long ONE_MEBI = 1024 * 1024;
 
-    private static final long MIN_MEM_ARM64 = 150 * ONE_MEBI;
+    private static final long MIN_MEM_ARM64 = 160 * ONE_MEBI;
     private static final long MIN_MEM_X86_64 = 196 * ONE_MEBI;
     private static final String EXAMPLE_STRING = "Literally any string!! :)";
 
@@ -473,8 +487,11 @@
     @CddTest(requirements = {"9.17/C-1-1"})
     public void vmConfigGetAndSetTests() {
         // Minimal has as little as specified as possible; everything that can be is defaulted.
-        VirtualMachineConfig.Builder minimalBuilder = newVmConfigBuilder();
-        VirtualMachineConfig minimal = minimalBuilder.setPayloadBinaryName("binary.so").build();
+        VirtualMachineConfig.Builder minimalBuilder =
+                new VirtualMachineConfig.Builder(getContext())
+                        .setPayloadBinaryName("binary.so")
+                        .setProtectedVm(isProtectedVm());
+        VirtualMachineConfig minimal = minimalBuilder.build();
 
         assertThat(minimal.getApkPath()).isNull();
         assertThat(minimal.getDebugLevel()).isEqualTo(DEBUG_LEVEL_NONE);
@@ -486,6 +503,7 @@
         assertThat(minimal.isEncryptedStorageEnabled()).isFalse();
         assertThat(minimal.getEncryptedStorageBytes()).isEqualTo(0);
         assertThat(minimal.isVmOutputCaptured()).isEqualTo(false);
+        assertThat(minimal.getOs()).isEqualTo("microdroid");
 
         // Maximal has everything that can be set to some non-default value. (And has different
         // values than minimal for the required fields.)
@@ -511,10 +529,16 @@
         assertThat(maximal.isEncryptedStorageEnabled()).isTrue();
         assertThat(maximal.getEncryptedStorageBytes()).isEqualTo(1_000_000);
         assertThat(maximal.isVmOutputCaptured()).isEqualTo(true);
+        assertThat(maximal.getOs()).isEqualTo("microdroid");
 
         assertThat(minimal.isCompatibleWith(maximal)).isFalse();
         assertThat(minimal.isCompatibleWith(minimal)).isTrue();
         assertThat(maximal.isCompatibleWith(maximal)).isTrue();
+
+        VirtualMachineConfig os = minimalBuilder.setOs("microdroid_gki-android14-6.1").build();
+        assertThat(os.getPayloadBinaryName()).isEqualTo("binary.so");
+        assertThat(os.getOs()).isEqualTo("microdroid_gki-android14-6.1");
+        assertThat(os.isCompatibleWith(minimal)).isFalse();
     }
 
     @Test
@@ -626,6 +650,11 @@
                         .setProtectedVm(isProtectedVm())
                         .setPayloadBinaryName("binary.so");
         assertConfigCompatible(currentContextConfig, otherContextBuilder).isFalse();
+
+        VirtualMachineConfig microdroidOsConfig = newBaselineBuilder().setOs("microdroid").build();
+        VirtualMachineConfig.Builder otherOsBuilder =
+                newBaselineBuilder().setOs("microdroid_gki-android14-6.1");
+        assertConfigCompatible(microdroidOsConfig, otherOsBuilder).isFalse();
     }
 
     private VirtualMachineConfig.Builder newBaselineBuilder() {
@@ -772,7 +801,7 @@
 
         VirtualMachineConfig config =
                 newVmConfigBuilder()
-                        .setPayloadConfigPath("assets/vm_config.json")
+                        .setPayloadConfigPath("assets/" + os() + "/vm_config.json")
                         .setMemoryBytes(minMemoryRequired())
                         .build();
 
@@ -894,7 +923,7 @@
         grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
         VirtualMachineConfig config =
                 newVmConfigBuilder()
-                        .setPayloadConfigPath("assets/vm_config_extra_apk.json")
+                        .setPayloadConfigPath("assets/" + os() + "/vm_config_extra_apk.json")
                         .setMemoryBytes(minMemoryRequired())
                         .setDebugLevel(DEBUG_LEVEL_FULL)
                         .build();
@@ -1039,7 +1068,7 @@
         grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
         VirtualMachineConfig normalConfig =
                 newVmConfigBuilder()
-                        .setPayloadConfigPath("assets/vm_config.json")
+                        .setPayloadConfigPath("assets/" + os() + "/vm_config.json")
                         .setDebugLevel(DEBUG_LEVEL_FULL)
                         .build();
         forceCreateNewVirtualMachine("test_vm_a", normalConfig);
@@ -1066,7 +1095,7 @@
         grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
         VirtualMachineConfig normalConfig =
                 newVmConfigBuilder()
-                        .setPayloadConfigPath("assets/vm_config.json")
+                        .setPayloadConfigPath("assets/" + os() + "/vm_config.json")
                         .setDebugLevel(DEBUG_LEVEL_FULL)
                         .build();
         forceCreateNewVirtualMachine("test_vm", normalConfig);
@@ -1090,7 +1119,7 @@
         grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
         VirtualMachineConfig normalConfig =
                 newVmConfigBuilder()
-                        .setPayloadConfigPath("assets/vm_config.json")
+                        .setPayloadConfigPath("assets/" + os() + "/vm_config.json")
                         .setDebugLevel(DEBUG_LEVEL_FULL)
                         .build();
         VirtualMachine vm = forceCreateNewVirtualMachine("bcc_vm", normalConfig);
@@ -1249,7 +1278,7 @@
         grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
         VirtualMachineConfig normalConfig =
                 newVmConfigBuilder()
-                        .setPayloadConfigPath("assets/vm_config_no_task.json")
+                        .setPayloadConfigPath("assets/" + os() + "/vm_config_no_task.json")
                         .setDebugLevel(DEBUG_LEVEL_FULL)
                         .build();
         forceCreateNewVirtualMachine("test_vm_invalid_config", normalConfig);
@@ -1269,8 +1298,8 @@
 
         BootResult bootResult = tryBootVm(TAG, "test_vm_invalid_binary_path");
         assertThat(bootResult.payloadStarted).isFalse();
-        assertThat(bootResult.deathReason).isEqualTo(
-                VirtualMachineCallback.STOP_REASON_MICRODROID_UNKNOWN_RUNTIME_ERROR);
+        assertThat(bootResult.deathReason)
+                .isEqualTo(VirtualMachineCallback.STOP_REASON_MICRODROID_UNKNOWN_RUNTIME_ERROR);
     }
 
     // Checks whether microdroid_launcher started but payload failed. reason must be recorded in the
@@ -1346,7 +1375,7 @@
         grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
         VirtualMachineConfig config =
                 newVmConfigBuilder()
-                        .setPayloadConfigPath("assets/vm_config.json")
+                        .setPayloadConfigPath("assets/" + os() + "/vm_config.json")
                         .setDebugLevel(DEBUG_LEVEL_FULL)
                         .build();
         String vmNameOrig = "test_vm_orig";
@@ -1744,6 +1773,7 @@
     @Test
     public void testConsoleInputSupported() throws Exception {
         assumeSupportedDevice();
+        assumeTrue("Not supported on GKI kernels", mGki == null);
 
         VirtualMachineConfig config =
                 newVmConfigBuilder()
diff --git a/virtualizationmanager/Android.bp b/virtualizationmanager/Android.bp
index 60c94fc..f58e999 100644
--- a/virtualizationmanager/Android.bp
+++ b/virtualizationmanager/Android.bp
@@ -38,6 +38,7 @@
         "libclap",
         "libcommand_fds",
         "libdisk",
+        "libglob",
         "libhex",
         "libhypervisor_props",
         "liblazy_static",
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index 12b8f88..2603e77 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -69,12 +69,12 @@
     IntoBinderResult,
 };
 use disk::QcowFile;
+use glob::glob;
 use lazy_static::lazy_static;
 use libfdt::Fdt;
 use log::{debug, error, info, warn};
 use microdroid_payload_config::{OsConfig, Task, TaskType, VmPayloadConfig};
 use nix::unistd::pipe;
-use regex::Regex;
 use rpcbinder::RpcServer;
 use rustutils::system_properties;
 use semver::VersionReq;
@@ -83,6 +83,7 @@
 use std::ffi::{CStr, CString};
 use std::fs::{canonicalize, read_dir, remove_file, File, OpenOptions};
 use std::io::{BufRead, BufReader, Error, ErrorKind, Write};
+use std::iter;
 use std::num::{NonZeroU16, NonZeroU32};
 use std::os::unix::io::{FromRawFd, IntoRawFd};
 use std::os::unix::raw::pid_t;
@@ -126,8 +127,8 @@
     pub static ref GLOBAL_SERVICE: Strong<dyn IVirtualizationServiceInternal> =
         wait_for_interface(BINDER_SERVICE_IDENTIFIER)
             .expect("Could not connect to VirtualizationServiceInternal");
-    static ref MICRODROID_GKI_OS_NAME_PATTERN: Regex =
-        Regex::new(r"^microdroid_gki-android\d+-\d+\.\d+$").expect("Failed to construct Regex");
+    static ref SUPPORTED_OS_NAMES: HashSet<String> =
+        get_supported_os_names().expect("Failed to get list of supported os names");
 }
 
 fn create_or_update_idsig_file(
@@ -289,6 +290,11 @@
         GLOBAL_SERVICE.getAssignableDevices()
     }
 
+    /// Get a list of supported OSes.
+    fn getSupportedOSList(&self) -> binder::Result<Vec<String>> {
+        Ok(Vec::from_iter(SUPPORTED_OS_NAMES.iter().cloned()))
+    }
+
     /// Returns whether given feature is enabled
     fn isFeatureEnabled(&self, feature: &str) -> binder::Result<bool> {
         check_manage_access()?;
@@ -728,14 +734,32 @@
     }
 }
 
-fn is_valid_os(os_name: &str) -> bool {
-    if os_name == MICRODROID_OS_NAME {
-        true
-    } else if cfg!(vendor_modules) && MICRODROID_GKI_OS_NAME_PATTERN.is_match(os_name) {
-        PathBuf::from(format!("/apex/com.android.virt/etc/{}.json", os_name)).exists()
-    } else {
-        false
+fn extract_os_name_from_config_path(config: &Path) -> Option<String> {
+    if config.extension()?.to_str()? != "json" {
+        return None;
     }
+
+    Some(config.with_extension("").file_name()?.to_str()?.to_owned())
+}
+
+fn extract_os_names_from_configs(config_glob_pattern: &str) -> Result<HashSet<String>> {
+    let configs = glob(config_glob_pattern)?.collect::<Result<Vec<_>, _>>()?;
+    let os_names =
+        configs.iter().filter_map(|x| extract_os_name_from_config_path(x)).collect::<HashSet<_>>();
+
+    Ok(os_names)
+}
+
+fn get_supported_os_names() -> Result<HashSet<String>> {
+    if !cfg!(vendor_modules) {
+        return Ok(iter::once(MICRODROID_OS_NAME.to_owned()).collect());
+    }
+
+    extract_os_names_from_configs("/apex/com.android.virt/etc/microdroid*.json")
+}
+
+fn is_valid_os(os_name: &str) -> bool {
+    SUPPORTED_OS_NAMES.contains(os_name)
 }
 
 fn load_app_config(
@@ -1593,6 +1617,72 @@
         tmp_dir.close()?;
         Ok(())
     }
+
+    fn test_extract_os_name_from_config_path(
+        path: &Path,
+        expected_result: Option<&str>,
+    ) -> Result<()> {
+        let result = extract_os_name_from_config_path(path);
+        if result.as_deref() != expected_result {
+            bail!("Expected {:?} but was {:?}", expected_result, &result)
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn test_extract_os_name_from_microdroid_config() -> Result<()> {
+        test_extract_os_name_from_config_path(
+            Path::new("/apex/com.android.virt/etc/microdroid.json"),
+            Some("microdroid"),
+        )
+    }
+
+    #[test]
+    fn test_extract_os_name_from_microdroid_gki_config() -> Result<()> {
+        test_extract_os_name_from_config_path(
+            Path::new("/apex/com.android.virt/etc/microdroid_gki-android14-6.1.json"),
+            Some("microdroid_gki-android14-6.1"),
+        )
+    }
+
+    #[test]
+    fn test_extract_os_name_from_invalid_path() -> Result<()> {
+        test_extract_os_name_from_config_path(
+            Path::new("/apex/com.android.virt/etc/microdroid.img"),
+            None,
+        )
+    }
+
+    #[test]
+    fn test_extract_os_name_from_configs() -> Result<()> {
+        let tmp_dir = tempfile::TempDir::new()?;
+        let tmp_dir_path = tmp_dir.path().to_owned();
+
+        let mut os_names: HashSet<String> = HashSet::new();
+        os_names.insert("microdroid".to_owned());
+        os_names.insert("microdroid_gki-android14-6.1".to_owned());
+        os_names.insert("microdroid_gki-android15-6.1".to_owned());
+
+        // config files
+        for os_name in &os_names {
+            std::fs::write(tmp_dir_path.join(os_name.to_owned() + ".json"), b"")?;
+        }
+
+        // fake files not related to configs
+        std::fs::write(tmp_dir_path.join("microdroid_super.img"), b"")?;
+        std::fs::write(tmp_dir_path.join("microdroid_foobar.apk"), b"")?;
+
+        let glob_pattern = match tmp_dir_path.join("microdroid*.json").to_str() {
+            Some(s) => s.to_owned(),
+            None => bail!("tmp_dir_path {:?} is not UTF-8", tmp_dir_path),
+        };
+
+        let result = extract_os_names_from_configs(&glob_pattern)?;
+        if result != os_names {
+            bail!("Expected {:?} but was {:?}", os_names, result);
+        }
+        Ok(())
+    }
 }
 
 struct SecretkeeperProxy(Strong<dyn ISecretkeeper>);
diff --git a/virtualizationmanager/src/debug_config.rs b/virtualizationmanager/src/debug_config.rs
index 5d22f59..003a7d4 100644
--- a/virtualizationmanager/src/debug_config.rs
+++ b/virtualizationmanager/src/debug_config.rs
@@ -84,9 +84,9 @@
     let (node_path, prop_name) = (&path.node_path, &path.prop_name);
     let node = match fdt.node(node_path) {
         Ok(Some(node)) => node,
-        Err(error) if error != FdtError::NotFound => Err(error)
-            .map_err(Error::msg)
-            .with_context(|| format!("Failed to get node {node_path:?}"))?,
+        Err(error) if error != FdtError::NotFound => {
+            Err(Error::msg(error)).with_context(|| format!("Failed to get node {node_path:?}"))?
+        }
         _ => return Ok(false),
     };
 
@@ -94,9 +94,9 @@
         Ok(Some(0)) => Ok(false),
         Ok(Some(1)) => Ok(true),
         Ok(Some(_)) => Err(anyhow!("Invalid prop value {prop_name:?} in node {node_path:?}")),
-        Err(error) if error != FdtError::NotFound => Err(error)
-            .map_err(Error::msg)
-            .with_context(|| format!("Failed to get prop {prop_name:?}")),
+        Err(error) if error != FdtError::NotFound => {
+            Err(Error::msg(error)).with_context(|| format!("Failed to get prop {prop_name:?}"))
+        }
         _ => Ok(false),
     }
 }
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
index d6a1299..92a5812 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
@@ -66,6 +66,11 @@
      */
     AssignableDevice[] getAssignableDevices();
 
+    /**
+     * Get a list of supported OSes.
+     */
+    String[] getSupportedOSList();
+
     /** Returns whether given feature is enabled. */
     boolean isFeatureEnabled(in String feature);
 }
diff --git a/vm/src/main.rs b/vm/src/main.rs
index 9a92f13..5c07eed 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -27,7 +27,6 @@
 use clap::{Args, Parser};
 use create_idsig::command_create_idsig;
 use create_partition::command_create_partition;
-use glob::glob;
 use run::{command_run, command_run_app, command_run_microdroid};
 use std::num::NonZeroU16;
 use std::path::{Path, PathBuf};
@@ -316,12 +315,6 @@
     Ok(())
 }
 
-fn extract_gki_version(gki_config: &Path) -> Option<&str> {
-    let name = gki_config.file_name()?;
-    let name_str = name.to_str()?;
-    name_str.strip_prefix("microdroid_gki-")?.strip_suffix(".json")
-}
-
 /// Print information about supported VM types.
 fn command_info() -> Result<(), Error> {
     let non_protected_vm_supported = hypervisor_props::is_vm_supported()?;
@@ -361,11 +354,8 @@
     let devices = devices.into_iter().map(|x| x.node).collect::<Vec<_>>();
     println!("Assignable devices: {}", serde_json::to_string(&devices)?);
 
-    let gki_configs =
-        glob("/apex/com.android.virt/etc/microdroid_gki-*.json")?.collect::<Result<Vec<_>, _>>()?;
-    let gki_versions =
-        gki_configs.iter().filter_map(|x| extract_gki_version(x)).collect::<Vec<_>>();
-    println!("Available gki versions: {}", serde_json::to_string(&gki_versions)?);
+    let os_list = get_service()?.getSupportedOSList()?;
+    println!("Available OS list: {}", serde_json::to_string(&os_list)?);
 
     Ok(())
 }