vmbase: Only share if hyp has DYNAMIC_MEM_SHARE

For correctness, always check that DYNAMIC_MEM_SHARE is supported by the
hypervisor before calling its backend functions to share/unshare memory
in MemorySharer.
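
Concretely, every share hypercall in MemorySharer is now gated on that
capability, along these lines (a condensed sketch of the change in the
diff below; identifiers match the hyp and vmbase code):

  if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
      // Notify the hypervisor, one granule at a time, that the region
      // is being shared with the host.
      for vaddr in (base..end).step_by(self.granule) {
          let vaddr = NonNull::new(vaddr as *mut _).unwrap();
          get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
      }
  }
  // The region is recorded and handed to the pool in either case.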

Note that Gunyah (the only hypervisor without DYNAMIC_MEM_SHARE) already
defines those functions as unimplemented!(), so the existing logic around
the newly-introduced checks can be assumed to have already prevented them
from being called; this patch should therefore not result in any
noticeable change in behavior.
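
For reference, the Gunyah backend currently stubs those functions out
along these lines (simplified; the exact signatures in libs/hyp may
differ):

  // Gunyah does not advertise DYNAMIC_MEM_SHARE, so its share/unshare
  // backends are left unimplemented and would panic if ever reached.
  fn mem_share(&self, _base_ipa: u64) -> Result<()> {
      unimplemented!();
  }

  fn mem_unshare(&self, _base_ipa: u64) -> Result<()> {
      unimplemented!();
  }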

This also adds support for using MemorySharer (and therefore the VirtIO
devices) on platforms that don't support DYNAMIC_MEM_SHARE but also don't
provide a static swiotlb, e.g. non-protected KVM.
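
On such a platform, MemorySharer degenerates into a plain granule-aligned
allocator: no hypercalls are issued when refilling the pool, and on drop
the memory is simply returned to the global allocator (drop-side sketch,
condensed from the diff below; dealloc() stands in for the existing
cleanup code that the hunk does not show):

  while let Some((base, layout)) = self.frames.pop() {
      if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
          // mem_unshare() each granule, mirroring the refill path.
      }
      // SAFETY: The region was obtained from alloc_zeroed() with the
      // recorded layout, so it is valid to deallocate it here.
      unsafe { dealloc(base as *mut u8, layout) };
  }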

Test: atest DebugPolicyHostTests#testNoAdbInDebugPolicy_withDebugLevelNone_boots
Test: atest rialto_test vmbase_example.integration_test
Change-Id: I327000188faa3b8dd74c7029999729ef7516ef66
diff --git a/libs/hyp/src/lib.rs b/libs/hyp/src/lib.rs
index 32a59d1..3b3b30a 100644
--- a/libs/hyp/src/lib.rs
+++ b/libs/hyp/src/lib.rs
@@ -21,6 +21,8 @@
 mod util;
 
 pub use error::{Error, Result};
-pub use hypervisor::{get_hypervisor, Hypervisor, HypervisorCap, KvmError, MMIO_GUARD_GRANULE_SIZE};
+pub use hypervisor::{
+    get_hypervisor, Hypervisor, HypervisorCap, KvmError, MMIO_GUARD_GRANULE_SIZE,
+};
 
 use hypervisor::GeniezoneError;
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 3faee5b..1e29c79 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -30,7 +30,7 @@
 use core::ops::Range;
 use core::ptr::NonNull;
 use core::result;
-use hyp::{get_hypervisor, MMIO_GUARD_GRANULE_SIZE};
+use hyp::{get_hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
 use log::{debug, error, trace};
 use once_cell::race::OnceBox;
 use spin::mutex::SpinMutex;
@@ -361,7 +361,7 @@
 /// Unshares all pages when dropped.
 struct MemorySharer {
     granule: usize,
-    shared_regions: Vec<(usize, Layout)>,
+    frames: Vec<(usize, Layout)>,
 }
 
 impl MemorySharer {
@@ -369,7 +369,7 @@
     /// `granule` must be a power of 2.
     fn new(granule: usize, capacity: usize) -> Self {
         assert!(granule.is_power_of_two());
-        Self { granule, shared_regions: Vec::with_capacity(capacity) }
+        Self { granule, frames: Vec::with_capacity(capacity) }
     }
 
     /// Gets from the global allocator a granule-aligned region that suits `hint` and share it.
@@ -383,25 +383,30 @@
 
         let base = shared.as_ptr() as usize;
         let end = base.checked_add(layout.size()).unwrap();
-        trace!("Sharing memory region {:#x?}", base..end);
-        for vaddr in (base..end).step_by(self.granule) {
-            let vaddr = NonNull::new(vaddr as *mut _).unwrap();
-            get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
-        }
-        self.shared_regions.push((base, layout));
 
+        if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
+            trace!("Sharing memory region {:#x?}", base..end);
+            for vaddr in (base..end).step_by(self.granule) {
+                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+                get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+            }
+        }
+
+        self.frames.push((base, layout));
         pool.add_frame(base, end);
     }
 }
 
 impl Drop for MemorySharer {
     fn drop(&mut self) {
-        while let Some((base, layout)) = self.shared_regions.pop() {
-            let end = base.checked_add(layout.size()).unwrap();
-            trace!("Unsharing memory region {:#x?}", base..end);
-            for vaddr in (base..end).step_by(self.granule) {
-                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
-                get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+        while let Some((base, layout)) = self.frames.pop() {
+            if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
+                let end = base.checked_add(layout.size()).unwrap();
+                trace!("Unsharing memory region {:#x?}", base..end);
+                for vaddr in (base..end).step_by(self.granule) {
+                    let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+                    get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+                }
             }
 
             // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.