Clean up instance tracking
We used to hold a Weak<CompOsInstance> so that clients could retrieve
the currently running instance. But we haven't supported that since
commit 616f822f8a4186f9d29edc6a75cf9d31350ecc7d, and it confuses the
ownership semantics.
We still need to track whether an instance is running, but we can do
that via a separate Arc<()> that is kept alive by the instance.
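For illustration, a minimal sketch of that pattern (the names here are
hypothetical stand-ins, not the actual composd types): the instance owns
an Arc<()>, and an observer holding a Weak<()> to it can tell from the
strong count whether the instance is still alive.

    use std::sync::{Arc, Weak};

    struct Instance {
        // Kept alive for exactly as long as the instance exists.
        tracker: Arc<()>,
    }

    struct Manager {
        // Observes the instance without owning it.
        tracker: Option<Weak<()>>,
    }

    impl Manager {
        fn is_running(&self) -> bool {
            // A strong count of zero means the Arc, and hence the
            // instance that held it, has been dropped.
            self.tracker.as_ref().map_or(false, |w| w.strong_count() != 0)
        }
    }

    fn main() {
        let instance = Instance { tracker: Arc::new(()) };
        let manager = Manager { tracker: Some(Arc::downgrade(&instance.tracker)) };
        assert!(manager.is_running());
        drop(instance);
        assert!(!manager.is_running());
    }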
Bug: 236581575
Test: composd_cmd test-compile
Test: Try to start a second instance, observe failure
Change-Id: Id02a831b249f6432b22741dbf5a5dc3919c3df03
diff --git a/compos/composd/src/instance_manager.rs b/compos/composd/src/instance_manager.rs
index c45f6e7..9a0935c 100644
--- a/compos/composd/src/instance_manager.rs
+++ b/compos/composd/src/instance_manager.rs
@@ -45,13 +45,13 @@
Self { service, state: Default::default() }
}
- pub fn start_current_instance(&self) -> Result<Arc<CompOsInstance>> {
+ pub fn start_current_instance(&self) -> Result<CompOsInstance> {
let mut vm_parameters = new_vm_parameters()?;
vm_parameters.config_path = Some(PREFER_STAGED_VM_CONFIG_PATH.to_owned());
self.start_instance(CURRENT_INSTANCE_DIR, vm_parameters)
}
- pub fn start_test_instance(&self, prefer_staged: bool) -> Result<Arc<CompOsInstance>> {
+ pub fn start_test_instance(&self, prefer_staged: bool) -> Result<CompOsInstance> {
let mut vm_parameters = new_vm_parameters()?;
vm_parameters.debug_mode = true;
if prefer_staged {
@@ -64,28 +64,23 @@
&self,
instance_name: &str,
vm_parameters: VmParameters,
- ) -> Result<Arc<CompOsInstance>> {
+ ) -> Result<CompOsInstance> {
let mut state = self.state.lock().unwrap();
state.mark_starting()?;
// Don't hold the lock while we start the instance to avoid blocking other callers.
drop(state);
let instance_starter = InstanceStarter::new(instance_name, vm_parameters);
- let instance = self.try_start_instance(instance_starter);
+ let instance = instance_starter.start_new_instance(&*self.service);
let mut state = self.state.lock().unwrap();
if let Ok(ref instance) = instance {
- state.mark_started(instance)?;
+ state.mark_started(instance.get_instance_tracker())?;
} else {
state.mark_stopped();
}
instance
}
-
- fn try_start_instance(&self, instance_starter: InstanceStarter) -> Result<Arc<CompOsInstance>> {
- let compos_instance = instance_starter.start_new_instance(&*self.service)?;
- Ok(Arc::new(compos_instance))
- }
}
fn new_vm_parameters() -> Result<VmParameters> {
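A sketch of the locking discipline start_instance follows above,
reduced to a bare Mutex<bool>, with slow_vm_start as a stand-in for the
real VM start: reserve the starting slot under the lock, release the
lock for the slow part, then re-acquire it to record the outcome.

    use std::sync::Mutex;

    // Stand-in for the real VM start; runs without the lock held.
    fn slow_vm_start() -> Result<(), String> {
        Ok(())
    }

    fn start(is_starting: &Mutex<bool>) -> Result<(), String> {
        {
            let mut starting = is_starting.lock().unwrap();
            if *starting {
                return Err("An instance is already starting".to_string());
            }
            *starting = true; // mark_starting
        } // The lock is released here, so other callers are not blocked.

        let outcome = slow_vm_start();

        // Re-acquire the lock only to record the outcome (mark_started or
        // mark_stopped in the real code).
        *is_starting.lock().unwrap() = false;
        outcome
    }

    fn main() {
        let state = Mutex::new(false);
        assert!(start(&state).is_ok());
    }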
@@ -110,14 +105,14 @@
// Ensures we only run one instance at a time.
// Valid states:
-// Starting: is_starting is true, running_instance is None.
-// Started: is_starting is false, running_instance is Some(x) and there is a strong ref to x.
-// Stopped: is_starting is false and running_instance is None or a weak ref to a dropped instance.
+// Starting: is_starting is true, instance_tracker is None.
+// Started: is_starting is false, instance_tracker is Some(x) and there is a strong ref to x.
+// Stopped: is_starting is false and instance_tracker is None or a weak ref to a dropped instance.
// The panic calls here should never happen, unless the code above in InstanceManager is buggy.
// In particular nothing the client does should be able to trigger them.
#[derive(Default)]
struct State {
- running_instance: Option<Weak<CompOsInstance>>,
+ instance_tracker: Option<Weak<()>>,
is_starting: bool,
}
@@ -127,34 +122,34 @@
if self.is_starting {
bail!("An instance is already starting");
}
- if let Some(weak) = &self.running_instance {
+ if let Some(weak) = &self.instance_tracker {
if weak.strong_count() != 0 {
bail!("An instance is already running");
}
}
- self.running_instance = None;
+ self.instance_tracker = None;
self.is_starting = true;
Ok(())
}
// Move from Starting to Stopped.
fn mark_stopped(&mut self) {
- if !self.is_starting || self.running_instance.is_some() {
+ if !self.is_starting || self.instance_tracker.is_some() {
panic!("Tried to mark stopped when not starting");
}
self.is_starting = false;
}
// Move from Starting to Started.
- fn mark_started(&mut self, instance: &Arc<CompOsInstance>) -> Result<()> {
+ fn mark_started(&mut self, instance_tracker: &Arc<()>) -> Result<()> {
if !self.is_starting {
panic!("Tried to mark started when not starting")
}
- if self.running_instance.is_some() {
+ if self.instance_tracker.is_some() {
panic!("Attempted to mark started when already started");
}
self.is_starting = false;
- self.running_instance = Some(Arc::downgrade(instance));
+ self.instance_tracker = Some(Arc::downgrade(instance_tracker));
Ok(())
}
}
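The state comments above map directly onto the tracker's strong count;
a standalone sketch of the Started-to-Stopped observation (hypothetical
test code, not composd's):

    use std::sync::Arc;

    fn main() {
        // Started: the instance holds the one strong reference.
        let tracker: Arc<()> = Arc::default();
        let weak = Arc::downgrade(&tracker);
        assert_eq!(weak.strong_count(), 1); // mark_starting would bail here

        // Stopped: dropping the instance drops its Arc; the Weak observes it.
        drop(tracker);
        assert_eq!(weak.strong_count(), 0); // mark_starting now succeeds
    }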
diff --git a/compos/composd/src/instance_starter.rs b/compos/composd/src/instance_starter.rs
index e51db13..111c719 100644
--- a/compos/composd/src/instance_starter.rs
+++ b/compos/composd/src/instance_starter.rs
@@ -29,6 +29,7 @@
use log::info;
use std::fs;
use std::path::{Path, PathBuf};
+use std::sync::Arc;
pub struct CompOsInstance {
service: Strong<dyn ICompOsService>,
@@ -36,6 +37,8 @@
vm_instance: ComposClient,
#[allow(dead_code)] // Keeps composd process alive
lazy_service_guard: LazyServiceGuard,
+ // Kept alive for as long as this instance exists
+ instance_tracker: Arc<()>,
}
impl CompOsInstance {
@@ -43,7 +46,13 @@
self.service.clone()
}
- // Attempt to shut down the VM cleanly, giving time for any relevant logs to be written.
+ /// Returns an Arc that this instance holds a strong reference to as long as it exists. This
+ /// can be used to determine when the instance has been dropped.
+ pub fn get_instance_tracker(&self) -> &Arc<()> {
+ &self.instance_tracker
+ }
+
+ /// Attempt to shut down the VM cleanly, giving time for any relevant logs to be written.
pub fn shutdown(self) -> LazyServiceGuard {
self.vm_instance.shutdown(self.service);
// Return the guard to the caller, since we might be terminated at any point after it is
@@ -122,7 +131,12 @@
)
.context("Starting VM")?;
let service = vm_instance.connect_service().context("Connecting to CompOS")?;
- Ok(CompOsInstance { vm_instance, service, lazy_service_guard: Default::default() })
+ Ok(CompOsInstance {
+ vm_instance,
+ service,
+ lazy_service_guard: Default::default(),
+ instance_tracker: Default::default(),
+ })
}
fn create_instance_image(
diff --git a/compos/composd/src/odrefresh_task.rs b/compos/composd/src/odrefresh_task.rs
index 07ede3c..8fd574c 100644
--- a/compos/composd/src/odrefresh_task.rs
+++ b/compos/composd/src/odrefresh_task.rs
@@ -60,7 +60,7 @@
struct RunningTask {
callback: Strong<dyn ICompilationTaskCallback>,
#[allow(dead_code)] // Keeps the CompOS VM alive
- comp_os: Arc<CompOsInstance>,
+ comp_os: CompOsInstance,
}
impl OdrefreshTask {
@@ -72,7 +72,7 @@
}
pub fn start(
- comp_os: Arc<CompOsInstance>,
+ comp_os: CompOsInstance,
compilation_mode: CompilationMode,
target_dir_name: String,
callback: &Strong<dyn ICompilationTaskCallback>,
@@ -98,11 +98,8 @@
let task = self.take();
// We don't do the callback if cancel has already happened.
if let Some(RunningTask { callback, comp_os }) = task {
- // If we are the last owners of the instance (and we probably are), then we
- // shut it down now, so that logs get written.
- let comp_os = Arc::try_unwrap(comp_os);
// Make sure we keep our service alive until we have called the callback.
- let lazy_service_guard = comp_os.map(CompOsInstance::shutdown);
+ let lazy_service_guard = comp_os.shutdown();
let result = match exit_code {
Ok(ExitCode::CompilationSuccess) => {
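A minimal sketch of the ownership simplification this hunk makes
(stand-in types, not the real composd API): once RunningTask owns the
instance by value, shutdown can consume it directly, where shared
ownership previously forced a fallible Arc::try_unwrap first.

    use std::sync::Arc;

    struct Guard;
    struct Instance;

    impl Instance {
        // Consumes the instance, which requires sole ownership.
        fn shutdown(self) -> Guard {
            Guard
        }
    }

    fn main() {
        // Before: shared ownership meant unwrapping the Arc could fail.
        let shared = Arc::new(Instance);
        let _guard = Arc::try_unwrap(shared).map(Instance::shutdown);

        // After: by-value ownership makes shutdown direct and infallible.
        let owned = Instance;
        let _guard = owned.shutdown();
    }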