Merge "[cts] Add CTS for VM Attestation when Attestation is unsupported" into main
diff --git a/libs/vmconfig/src/lib.rs b/libs/vmconfig/src/lib.rs
index 50f3c8e..907e0d3 100644
--- a/libs/vmconfig/src/lib.rs
+++ b/libs/vmconfig/src/lib.rs
@@ -15,6 +15,7 @@
//! Struct for VM configuration with JSON (de)serialization and AIDL parcelables
use android_system_virtualizationservice::{
+ aidl::android::system::virtualizationservice::CpuTopology::CpuTopology,
aidl::android::system::virtualizationservice::DiskImage::DiskImage as AidlDiskImage,
aidl::android::system::virtualizationservice::Partition::Partition as AidlPartition,
aidl::android::system::virtualizationservice::VirtualMachineRawConfig::VirtualMachineRawConfig,
@@ -54,6 +55,8 @@
/// The amount of RAM to give the VM, in MiB.
#[serde(default)]
pub memory_mib: Option<NonZeroU32>,
+ /// The CPU topology: either "one_cpu" (default) or "match_host".
+ pub cpu_topology: Option<String>,
/// Version or range of versions of the virtual platform that this config is compatible with.
/// The format follows SemVer (https://semver.org).
pub platform_version: VersionReq,
@@ -96,7 +99,12 @@
} else {
0
};
-
+ let cpu_topology = match self.cpu_topology.as_deref() {
+ None => CpuTopology::ONE_CPU,
+ Some("one_cpu") => CpuTopology::ONE_CPU,
+ Some("match_host") => CpuTopology::MATCH_HOST,
+ Some(cpu_topology) => bail!("Invalid cpu topology {}", cpu_topology),
+ };
Ok(VirtualMachineRawConfig {
kernel: maybe_open_parcel_file(&self.kernel, false)?,
initrd: maybe_open_parcel_file(&self.initrd, false)?,
@@ -105,6 +113,7 @@
disks: self.disks.iter().map(DiskImage::to_parcelable).collect::<Result<_, Error>>()?,
protectedVm: self.protected,
memoryMib: memory_mib,
+ cpuTopology: cpu_topology,
platformVersion: self.platform_version.to_string(),
devices: self
.devices
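To see the new field end to end, here is a minimal standalone sketch of the parsing logic (assuming serde/serde_json; the local `CpuTopology` enum and `VmConfig` struct are simplified stand-ins for the AIDL parcelable and the real config type):

```rust
use serde::Deserialize;

/// Simplified stand-in for the AIDL CpuTopology parcelable.
#[derive(Debug, PartialEq)]
enum CpuTopology {
    OneCpu,
    MatchHost,
}

/// Simplified stand-in for the JSON VM config.
#[derive(Deserialize)]
struct VmConfig {
    cpu_topology: Option<String>,
}

/// Mirrors the match in `to_parcelable` above: an absent field or "one_cpu"
/// maps to ONE_CPU, "match_host" to MATCH_HOST, anything else is rejected.
fn parse_cpu_topology(config: &VmConfig) -> Result<CpuTopology, String> {
    match config.cpu_topology.as_deref() {
        None | Some("one_cpu") => Ok(CpuTopology::OneCpu),
        Some("match_host") => Ok(CpuTopology::MatchHost),
        Some(other) => Err(format!("Invalid cpu topology {other}")),
    }
}

fn main() {
    let config: VmConfig =
        serde_json::from_str(r#"{ "cpu_topology": "match_host" }"#).unwrap();
    assert_eq!(parse_cpu_topology(&config), Ok(CpuTopology::MatchHost));
}
```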
diff --git a/microdroid_manager/src/vm_secret.rs b/microdroid_manager/src/vm_secret.rs
index ed8ab1d..ec40b45 100644
--- a/microdroid_manager/src/vm_secret.rs
+++ b/microdroid_manager/src/vm_secret.rs
@@ -109,15 +109,14 @@
.ok_or(anyhow!("Missing explicit dice chain, this is unusual"))?;
let policy = sealing_policy(explicit_dice_chain)
.map_err(|e| anyhow!("Failed to build a sealing_policy: {e}"))?;
- if super::is_new_instance() {
- // New instance -> create a secret & store in Secretkeeper.
- *skp_secret = rand::random();
- store_secret(&mut session, id, skp_secret.clone(), policy)
- .context("Failed to store secret in Secretkeeper")?;
+ if let Some(secret) = get_secret(&mut session, id, Some(policy.clone()))? {
+ *skp_secret = secret;
} else {
- // Subsequent run of the pVM -> get the secret stored in Secretkeeper.
- *skp_secret = get_secret(&mut session, id, Some(policy))
- .context("Failed to get secret from Secretkeeper")?;
+ log::warn!(
+ "No entry found in Secretkeeper for this VM instance, creating new secret."
+ );
+ *skp_secret = rand::random();
+ store_secret(&mut session, id, skp_secret.clone(), policy)?;
}
}
Ok(Self::V2 {
@@ -248,21 +247,24 @@
session: &mut SkSession,
id: [u8; ID_SIZE],
updated_sealing_policy: Option<Vec<u8>>,
-) -> Result<[u8; SECRET_SIZE]> {
+) -> Result<Option<[u8; SECRET_SIZE]>> {
let get_request = GetSecretRequest { id: Id(id), updated_sealing_policy };
log::info!("Secretkeeper operation: {:?}", get_request);
let get_request = get_request.serialize_to_packet().to_vec().map_err(anyhow_err)?;
let get_response = session.secret_management_request(&get_request)?;
let get_response = ResponsePacket::from_slice(&get_response).map_err(anyhow_err)?;
let response_type = get_response.response_type().map_err(anyhow_err)?;
- ensure!(
- response_type == ResponseType::Success,
- "Secretkeeper get failed with error: {:?}",
- *SecretkeeperError::deserialize_from_packet(get_response).map_err(anyhow_err)?
- );
- let get_response =
- *GetSecretResponse::deserialize_from_packet(get_response).map_err(anyhow_err)?;
- Ok(get_response.secret.0)
+ if response_type == ResponseType::Success {
+ let get_response =
+ *GetSecretResponse::deserialize_from_packet(get_response).map_err(anyhow_err)?;
+ Ok(Some(get_response.secret.0))
+ } else {
+ let error = SecretkeeperError::deserialize_from_packet(get_response).map_err(anyhow_err)?;
+ if *error == SecretkeeperError::EntryNotFound {
+ return Ok(None);
+ }
+ Err(anyhow!("Secretkeeper get failed: {error:?}"))
+ }
}
#[inline]
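The control flow above turns the old is-new-instance branch into a get-or-create pattern: probe Secretkeeper first, and only mint and store a fresh secret when the entry is missing. A minimal sketch of that pattern, with an in-memory stand-in for the Secretkeeper session:

```rust
use std::collections::HashMap;

type Id = u64;
type Secret = [u8; 32];

/// In-memory stand-in for an SkSession talking to Secretkeeper.
#[derive(Default)]
struct MockSession {
    store: HashMap<Id, Secret>,
}

impl MockSession {
    /// Like the patched `get_secret`: Ok(None) plays the role of
    /// SecretkeeperError::EntryNotFound rather than a hard error.
    fn get_secret(&self, id: Id) -> Result<Option<Secret>, String> {
        Ok(self.store.get(&id).copied())
    }

    fn store_secret(&mut self, id: Id, secret: Secret) -> Result<(), String> {
        self.store.insert(id, secret);
        Ok(())
    }
}

fn get_or_create_secret(session: &mut MockSession, id: Id) -> Result<Secret, String> {
    if let Some(secret) = session.get_secret(id)? {
        // Subsequent boot: recover the stored secret.
        Ok(secret)
    } else {
        // First boot: no entry yet, so create and persist a fresh secret.
        let secret = [0x42u8; 32]; // stand-in for rand::random()
        session.store_secret(id, secret)?;
        Ok(secret)
    }
}

fn main() {
    let mut session = MockSession::default();
    let first = get_or_create_secret(&mut session, 7).unwrap();
    let second = get_or_create_secret(&mut session, 7).unwrap();
    assert_eq!(first, second); // The same VM instance sees a stable secret.
}
```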
diff --git a/pvmfw/src/dice.rs b/pvmfw/src/dice.rs
index 540fd03..67865e5 100644
--- a/pvmfw/src/dice.rs
+++ b/pvmfw/src/dice.rs
@@ -93,7 +93,8 @@
rkp_vm_marker: bool,
salt: [u8; HIDDEN_SIZE],
}
-
+ // TODO(b/291213394): Include `defer_rollback_protection` flag in the Hidden Input to
+ // differentiate the secrets in both cases.
hash(HiddenInput { rkp_vm_marker: self.rkp_vm_marker, salt: *salt }.as_bytes())
}
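For context, the value hashed here folds the salt and the RKP-VM marker into the single DICE `Hidden` input. A rough standalone equivalent, using the `sha2` crate in place of pvmfw's hash() and a hand-rolled byte layout in place of zerocopy's `as_bytes` (both substitutions are assumptions made for the sake of a runnable sketch):

```rust
use sha2::{Digest, Sha512};

const HIDDEN_SIZE: usize = 64;

/// Rough equivalent of hashing `HiddenInput { rkp_vm_marker, salt }`.
fn hidden_input(rkp_vm_marker: bool, salt: &[u8; HIDDEN_SIZE]) -> [u8; HIDDEN_SIZE] {
    let mut hasher = Sha512::new();
    hasher.update([rkp_vm_marker as u8]);
    hasher.update(salt);
    let mut out = [0u8; HIDDEN_SIZE];
    out.copy_from_slice(&hasher.finalize());
    out
}

fn main() {
    let salt = [0u8; HIDDEN_SIZE];
    // Different marker values yield different hidden inputs, and therefore
    // different sealed secrets for RKP and non-RKP VMs with the same salt.
    assert_ne!(hidden_input(false, &salt), hidden_input(true, &salt));
}
```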
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index 12d63d5..2af19c4 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -42,10 +42,12 @@
use crate::instance::{get_recorded_entry, record_instance_entry};
use alloc::borrow::Cow;
use alloc::boxed::Box;
+use bssl_avf::Digester;
use core::ops::Range;
-use diced_open_dice::{bcc_handover_parse, DiceArtifacts};
+use cstr::cstr;
+use diced_open_dice::{bcc_handover_parse, DiceArtifacts, Hidden};
use fdtpci::{PciError, PciInfo};
-use libfdt::Fdt;
+use libfdt::{Fdt, FdtNode};
use log::{debug, error, info, trace, warn};
use pvmfw_avb::verify_payload;
use pvmfw_avb::Capability;
@@ -129,18 +131,6 @@
}
}
- if verified_boot_data.has_capability(Capability::SecretkeeperProtection) {
- info!("Guest OS is capable of Secretkeeper protection");
- // For Secretkeeper based Antirollback protection, rollback_index of the image > 0
- if verified_boot_data.rollback_index == 0 {
- error!(
- "Expected positive rollback_index, found {:?}",
- verified_boot_data.rollback_index
- );
- return Err(RebootReason::InvalidPayload);
- };
- }
-
let next_bcc = heap::aligned_boxed_slice(NEXT_BCC_SIZE, GUEST_PAGE_SIZE).ok_or_else(|| {
error!("Failed to allocate the next-stage BCC");
RebootReason::InternalError
@@ -153,43 +143,51 @@
RebootReason::InternalError
})?;
- let (recorded_entry, mut instance_img, header_index) =
- get_recorded_entry(&mut pci_root, cdi_seal).map_err(|e| {
- error!("Failed to get entry from instance.img: {e}");
- RebootReason::InternalError
- })?;
- let (new_instance, salt) = if let Some(entry) = recorded_entry {
- // The RKP VM is allowed to run if it has passed the verified boot check and
- // contains the expected version in its AVB footer.
- // The comparison below with the previous boot information is skipped to enable the
- // simultaneous update of the pvmfw and RKP VM.
- // For instance, when both the pvmfw and RKP VM are updated, the code hash of the
- // RKP VM will differ from the one stored in the instance image. In this case, the
- // RKP VM is still allowed to run.
- // This ensures that the updated RKP VM will retain the same CDIs in the next stage.
- if !dice_inputs.rkp_vm_marker {
- ensure_dice_measurements_match_entry(&dice_inputs, &entry).map_err(|e| {
- error!(
- "Dice measurements do not match recorded entry.
- This may be because of update: {e}"
- );
+ let (new_instance, salt) = if cfg!(llpvm_changes)
+ && should_defer_rollback_protection(fdt)?
+ && verified_boot_data.has_capability(Capability::SecretkeeperProtection)
+ {
+ info!("Guest OS is capable of Secretkeeper protection, deferring rollback protection");
+ // rollback_index of the image is used as security_version and is expected to be > 0 to
+ // discourage implicit allocation.
+ if verified_boot_data.rollback_index == 0 {
+ error!("Expected positive rollback_index, found 0");
+ return Err(RebootReason::InvalidPayload);
+ };
+ // `new_instance` cannot be known to pvmfw
+ (false, salt_from_instance_id(fdt)?)
+ } else {
+ let (recorded_entry, mut instance_img, header_index) =
+ get_recorded_entry(&mut pci_root, cdi_seal).map_err(|e| {
+ error!("Failed to get entry from instance.img: {e}");
RebootReason::InternalError
})?;
- }
- (false, entry.salt)
- } else {
- let salt = rand::random_array().map_err(|e| {
- error!("Failed to generated instance.img salt: {e}");
- RebootReason::InternalError
- })?;
- let entry = EntryBody::new(&dice_inputs, &salt);
- record_instance_entry(&entry, cdi_seal, &mut instance_img, header_index).map_err(|e| {
- error!("Failed to get recorded entry in instance.img: {e}");
- RebootReason::InternalError
- })?;
- (true, salt)
+ let (new_instance, salt) = if let Some(entry) = recorded_entry {
+ maybe_check_dice_measurements_match_entry(&dice_inputs, &entry)?;
+ let salt = if cfg!(llpvm_changes) { salt_from_instance_id(fdt)? } else { entry.salt };
+ (false, salt)
+ } else {
+ // New instance!
+ let salt = if cfg!(llpvm_changes) {
+ salt_from_instance_id(fdt)?
+ } else {
+ rand::random_array().map_err(|e| {
+ error!("Failed to generated instance.img salt: {e}");
+ RebootReason::InternalError
+ })?
+ };
+ let entry = EntryBody::new(&dice_inputs, &salt);
+ record_instance_entry(&entry, cdi_seal, &mut instance_img, header_index).map_err(
+ |e| {
+ error!("Failed to get recorded entry in instance.img: {e}");
+ RebootReason::InternalError
+ },
+ )?;
+ (true, salt)
+ };
+ (new_instance, salt)
};
- trace!("Got salt from instance.img: {salt:x?}");
+ trace!("Got salt for instance: {salt:x?}");
let new_bcc_handover = if cfg!(dice_changes) {
Cow::Borrowed(current_bcc_handover)
@@ -241,6 +239,32 @@
Ok(bcc_range)
}
+fn maybe_check_dice_measurements_match_entry(
+ dice_inputs: &PartialInputs,
+ entry: &EntryBody,
+) -> Result<(), RebootReason> {
+ // The RKP VM is allowed to run if it has passed the verified boot check and
+ // contains the expected version in its AVB footer.
+ // The comparison below with the previous boot information is skipped to enable the
+ // simultaneous update of the pvmfw and RKP VM.
+ // For instance, when both the pvmfw and RKP VM are updated, the code hash of the
+ // RKP VM will differ from the one stored in the instance image. In this case, the
+ // RKP VM is still allowed to run.
+ // This ensures that the updated RKP VM will retain the same CDIs in the next stage.
+ if dice_inputs.rkp_vm_marker {
+ return Ok(());
+ }
+ ensure_dice_measurements_match_entry(dice_inputs, entry).map_err(|e| {
+ error!(
+ "Dice measurements do not match recorded entry. \
+ This may be because of update: {e}"
+ );
+ RebootReason::InternalError
+ })?;
+
+ Ok(())
+}
+
fn ensure_dice_measurements_match_entry(
dice_inputs: &PartialInputs,
entry: &EntryBody,
@@ -256,6 +280,56 @@
}
}
+// Get the "salt" which is one of the input for DICE derivation.
+// This provides differentiation of secrets for different VM instances with same payloads.
+fn salt_from_instance_id(fdt: &Fdt) -> Result<Hidden, RebootReason> {
+ let id = instance_id(fdt)?;
+ let salt = Digester::sha512()
+ .digest(&[&b"InstanceId:"[..], id].concat())
+ .map_err(|e| {
+ error!("Failed to get digest of instance-id: {e}");
+ RebootReason::InternalError
+ })?
+ .try_into()
+ .map_err(|_| RebootReason::InternalError)?;
+ Ok(salt)
+}
+
+fn instance_id(fdt: &Fdt) -> Result<&[u8], RebootReason> {
+ let node = avf_untrusted_node(fdt)?;
+ let id = node.getprop(cstr!("instance-id")).map_err(|e| {
+ error!("Failed to get instance-id in DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ id.ok_or_else(|| {
+ error!("Missing instance-id");
+ RebootReason::InvalidFdt
+ })
+}
+
+fn should_defer_rollback_protection(fdt: &Fdt) -> Result<bool, RebootReason> {
+ let node = avf_untrusted_node(fdt)?;
+ let defer_rbp = node
+ .getprop(cstr!("defer-rollback-protection"))
+ .map_err(|e| {
+ error!("Failed to get defer-rollback-protection property in DT: {e}");
+ RebootReason::InvalidFdt
+ })?
+ .is_some();
+ Ok(defer_rbp)
+}
+
+fn avf_untrusted_node(fdt: &Fdt) -> Result<FdtNode, RebootReason> {
+ let node = fdt.node(cstr!("/avf/untrusted")).map_err(|e| {
+ error!("Failed to get /avf/untrusted node: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ node.ok_or_else(|| {
+ error!("/avf/untrusted node is missing in DT");
+ RebootReason::InvalidFdt
+ })
+}
+
/// Logs the given PCI error and returns the appropriate `RebootReason`.
fn handle_pci_error(e: PciError) -> RebootReason {
error!("{}", e);
diff --git a/service_vm/manager/src/lib.rs b/service_vm/manager/src/lib.rs
index 8dedec5..3f2550c 100644
--- a/service_vm/manager/src/lib.rs
+++ b/service_vm/manager/src/lib.rs
@@ -32,7 +32,7 @@
use std::io::{self, BufRead, BufReader, BufWriter, Write};
use std::os::unix::io::FromRawFd;
use std::path::{Path, PathBuf};
-use std::sync::{Condvar, Mutex, MutexGuard};
+use std::sync::{Condvar, Mutex};
use std::thread;
use std::time::Duration;
use vmclient::{DeathReason, VmInstance};
@@ -48,40 +48,78 @@
const WRITE_TIMEOUT: Duration = Duration::from_secs(10);
lazy_static! {
- static ref SERVICE_VM_STATE: State = State::default();
+ static ref PENDING_REQUESTS: AtomicCounter = AtomicCounter::default();
+ static ref SERVICE_VM: Mutex<Option<ServiceVm>> = Mutex::new(None);
+ static ref SERVICE_VM_SHUTDOWN: Condvar = Condvar::new();
}
-/// The running state of the Service VM.
+/// Atomic counter with a condition variable that is used to wait for the counter
+/// to become positive within a timeout.
#[derive(Debug, Default)]
-struct State {
- is_running: Mutex<bool>,
- stopped: Condvar,
+struct AtomicCounter {
+ num: Mutex<usize>,
+ num_increased: Condvar,
}
-impl State {
- fn wait_until_no_service_vm_running(&self) -> Result<MutexGuard<'_, bool>> {
- // The real timeout can be longer than 10 seconds since the time to acquire
- // is_running mutex is not counted in the 10 seconds.
- let (guard, wait_result) = self
- .stopped
- .wait_timeout_while(
- self.is_running.lock().unwrap(),
- Duration::from_secs(10),
- |&mut is_running| is_running,
- )
+impl AtomicCounter {
+ /// Checks if the counter becomes positive within the given timeout.
+ fn is_positive_within_timeout(&self, timeout: Duration) -> bool {
+ let (guard, _wait_result) = self
+ .num_increased
+ .wait_timeout_while(self.num.lock().unwrap(), timeout, |&mut x| x == 0)
.unwrap();
- ensure!(
- !wait_result.timed_out(),
- "Timed out while waiting for the running service VM to stop."
- );
- Ok(guard)
+ *guard > 0
}
- fn notify_service_vm_shutdown(&self) {
- let mut is_running_guard = self.is_running.lock().unwrap();
- *is_running_guard = false;
- self.stopped.notify_one();
+ fn increment(&self) {
+ let mut num = self.num.lock().unwrap();
+ *num = num.checked_add(1).unwrap();
+ self.num_increased.notify_all();
}
+
+ fn decrement(&self) {
+ let mut num = self.num.lock().unwrap();
+ *num = num.checked_sub(1).unwrap();
+ }
+}
+
+/// Processes the request in the service VM.
+pub fn process_request(request: Request) -> Result<Response> {
+ PENDING_REQUESTS.increment();
+ let result = process_request_in_service_vm(request);
+ PENDING_REQUESTS.decrement();
+ thread::spawn(stop_service_vm_if_idle);
+ result
+}
+
+fn process_request_in_service_vm(request: Request) -> Result<Response> {
+ let mut service_vm = SERVICE_VM.lock().unwrap();
+ if service_vm.is_none() {
+ *service_vm = Some(ServiceVm::start()?);
+ }
+ service_vm.as_mut().unwrap().process_request(request)
+}
+
+fn stop_service_vm_if_idle() {
+ if PENDING_REQUESTS.is_positive_within_timeout(Duration::from_secs(1)) {
+ info!("Service VM has pending requests, keeping it running.");
+ } else {
+ info!("Service VM is idle, shutting it down.");
+ *SERVICE_VM.lock().unwrap() = None;
+ SERVICE_VM_SHUTDOWN.notify_all();
+ }
+}
+
+/// Waits until the service VM shuts down.
+/// This function is only used in tests.
+pub fn wait_until_service_vm_shuts_down() -> Result<()> {
+ const WAIT_FOR_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(5);
+
+ let (_guard, wait_result) = SERVICE_VM_SHUTDOWN
+ .wait_timeout_while(SERVICE_VM.lock().unwrap(), WAIT_FOR_SHUTDOWN_TIMEOUT, |x| x.is_some())
+ .unwrap();
+ ensure!(!wait_result.timed_out(), "Service VM didn't shut down within the timeout");
+ Ok(())
}
/// Service VM.
@@ -94,17 +132,12 @@
impl ServiceVm {
/// Starts the service VM and returns its instance.
/// The same instance image is used for different VMs.
- /// At any given time, only one service should be running. If a service VM is
- /// already running, this function will start the service VM once the running one
- /// shuts down.
+ /// TODO(b/27593612): Remove instance image usage for Service VM.
pub fn start() -> Result<Self> {
- let mut is_running_guard = SERVICE_VM_STATE.wait_until_no_service_vm_running()?;
-
let instance_img_path = Path::new(VIRT_DATA_DIR).join(INSTANCE_IMG_NAME);
let vm = protected_vm_instance(instance_img_path)?;
let vm = Self::start_vm(vm, VmType::ProtectedVm)?;
- *is_running_guard = true;
Ok(vm)
}
@@ -174,7 +207,6 @@
Ok(reason) => info!("Exit the service VM successfully: {reason:?}"),
Err(e) => warn!("Service VM shutdown request failed '{e:?}', killing it."),
}
- SERVICE_VM_STATE.notify_service_vm_shutdown();
}
}
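The new lifecycle is easiest to see in isolation: every request bumps `PENDING_REQUESTS` for its duration, and a detached thread tears the shared VM down only if the counter stays at zero for a grace period. A VM-free sketch of that flow (a shortened grace period stands in for the one second used in `stop_service_vm_if_idle`):

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

#[derive(Default)]
struct AtomicCounter {
    num: Mutex<usize>,
    num_increased: Condvar,
}

impl AtomicCounter {
    /// Blocks until the counter is positive or the timeout elapses.
    fn is_positive_within_timeout(&self, timeout: Duration) -> bool {
        let (guard, _) = self
            .num_increased
            .wait_timeout_while(self.num.lock().unwrap(), timeout, |&mut n| n == 0)
            .unwrap();
        *guard > 0
    }

    fn increment(&self) {
        *self.num.lock().unwrap() += 1;
        self.num_increased.notify_all();
    }

    fn decrement(&self) {
        *self.num.lock().unwrap() -= 1;
    }
}

fn main() {
    let pending = Arc::new(AtomicCounter::default());

    // A "request" holds the counter for the duration of its processing.
    pending.increment();
    // ... the request would be handled by the shared service VM here ...
    pending.decrement();

    // The idle check: if no request arrives within the grace period, the
    // shared VM would be dropped (and the shutdown condvar notified).
    let checker = Arc::clone(&pending);
    let idle = thread::spawn(move || {
        !checker.is_positive_within_timeout(Duration::from_millis(50))
    });
    assert!(idle.join().unwrap()); // idle -> shut the service VM down
}
```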
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index 737c6fc..aae1068 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -1068,23 +1068,43 @@
VirtualMachineConfig normalConfig = builder.build();
assertThat(tryBootVmWithConfig(normalConfig, "test_vm").payloadStarted).isTrue();
- // Try to run the VM again with the previous instance.img
+ // Try to run the VM again with the previous instance data.
// We need to make sure that no change to the config invalidates the identity, so that we
// can compare the result with the "different debug level" test below.
+ File vmInstanceBackup = null, vmIdBackup = null;
File vmInstance = getVmFile("test_vm", "instance.img");
- File vmInstanceBackup = File.createTempFile("instance", ".img");
- Files.copy(vmInstance.toPath(), vmInstanceBackup.toPath(), REPLACE_EXISTING);
+ File vmId = getVmFile("test_vm", "instance_id");
+ if (vmInstance.exists()) {
+ vmInstanceBackup = File.createTempFile("instance", ".img");
+ Files.copy(vmInstance.toPath(), vmInstanceBackup.toPath(), REPLACE_EXISTING);
+ }
+ if (vmId.exists()) {
+ vmIdBackup = File.createTempFile("instance_id", "backup");
+ Files.copy(vmId.toPath(), vmIdBackup.toPath(), REPLACE_EXISTING);
+ }
+
forceCreateNewVirtualMachine("test_vm", normalConfig);
- Files.copy(vmInstanceBackup.toPath(), vmInstance.toPath(), REPLACE_EXISTING);
+
+ if (vmInstanceBackup != null) {
+ Files.copy(vmInstanceBackup.toPath(), vmInstance.toPath(), REPLACE_EXISTING);
+ }
+ if (vmIdBackup != null) {
+ Files.copy(vmIdBackup.toPath(), vmId.toPath(), REPLACE_EXISTING);
+ }
assertThat(tryBootVm(TAG, "test_vm").payloadStarted).isTrue();
// Launch the same VM with a different debug level. The Java API prohibits this
// (thankfully).
- // For testing, we do that by creating a new VM with debug level, and copy the old instance
- // image to the new VM instance image.
+ // For testing, we do that by creating a new VM with the debug level, and copying the old
+ // instance data over the new VM's instance data.
VirtualMachineConfig debugConfig = builder.setDebugLevel(toLevel).build();
forceCreateNewVirtualMachine("test_vm", debugConfig);
- Files.copy(vmInstanceBackup.toPath(), vmInstance.toPath(), REPLACE_EXISTING);
+ if (vmInstanceBackup != null) {
+ Files.copy(vmInstanceBackup.toPath(), vmInstance.toPath(), REPLACE_EXISTING);
+ }
+ if (vmIdBackup != null) {
+ Files.copy(vmIdBackup.toPath(), vmId.toPath(), REPLACE_EXISTING);
+ }
assertThat(tryBootVm(TAG, "test_vm").payloadStarted).isFalse();
}
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index 5ddb8c3..05f3cf6 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -269,6 +269,13 @@
.context("Failed to generate ECDSA P-256 key pair for testing")
.with_log()
.or_service_specific_exception(-1)?;
+ // Wait until the service VM shuts down, so that the service VM is restarted when the
+ // key generated in the current session is later used for attestation.
+ // This ensures that different service VM sessions have the same KEK for the key blob.
+ service_vm_manager::wait_until_service_vm_shuts_down()
+ .context("Failed to wait until the service VM shuts down")
+ .with_log()
+ .or_service_specific_exception(-1)?;
match res {
Response::GenerateEcdsaP256KeyPair(key_pair) => {
FAKE_PROVISIONED_KEY_BLOB_FOR_TESTING
diff --git a/virtualizationservice/src/rkpvm.rs b/virtualizationservice/src/rkpvm.rs
index 67ba740..6898921 100644
--- a/virtualizationservice/src/rkpvm.rs
+++ b/virtualizationservice/src/rkpvm.rs
@@ -21,28 +21,25 @@
use service_vm_comm::{
ClientVmAttestationParams, GenerateCertificateRequestParams, Request, Response,
};
-use service_vm_manager::ServiceVm;
+use service_vm_manager::process_request;
pub(crate) fn request_attestation(
csr: Vec<u8>,
remotely_provisioned_key_blob: Vec<u8>,
remotely_provisioned_cert: Vec<u8>,
) -> Result<Vec<u8>> {
- let mut vm = ServiceVm::start()?;
-
let params =
ClientVmAttestationParams { csr, remotely_provisioned_key_blob, remotely_provisioned_cert };
let request = Request::RequestClientVmAttestation(params);
- match vm.process_request(request).context("Failed to process request")? {
+ match process_request(request).context("Failed to process request")? {
Response::RequestClientVmAttestation(cert) => Ok(cert),
other => bail!("Incorrect response type {other:?}"),
}
}
pub(crate) fn generate_ecdsa_p256_key_pair() -> Result<Response> {
- let mut vm = ServiceVm::start()?;
let request = Request::GenerateEcdsaP256KeyPair;
- vm.process_request(request).context("Failed to process request")
+ process_request(request).context("Failed to process request")
}
pub(crate) fn generate_certificate_request(
@@ -55,6 +52,5 @@
};
let request = Request::GenerateCertificateRequest(params);
- let mut vm = ServiceVm::start()?;
- vm.process_request(request).context("Failed to process request")
+ process_request(request).context("Failed to process request")
}