// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Shared memory management.

use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
use super::error::MemoryTrackerError;
use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::virt_to_phys;
use crate::dsb;
use crate::exceptions::HandleExceptionError;
use crate::hyp::{self, get_mem_sharer, get_mmio_guard, MMIO_GUARD_GRANULE_SIZE};
use crate::util::unchecked_align_down;
use crate::util::RangeExt as _;
use aarch64_paging::paging::{
    Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress, PAGE_SIZE,
};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::boxed::Box;
use alloc::collections::BTreeSet;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
use core::cmp::max;
use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use static_assertions::const_assert_eq;
use tinyvec::ArrayVec;

/// A global static variable representing the system memory tracker, protected by a spin mutex.
pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);

static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

/// Memory range.
pub type MemoryRange = Range<usize>;

fn get_va_range(range: &MemoryRange) -> VaRange {
    VaRange::new(range.start, range.end)
}

type Result<T> = result::Result<T, MemoryTrackerError>;

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: Option<MemoryRange>,
    mmio_sharer: MmioSharer,
}

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance from an active page table, covering the maximum RAM size.
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: Option<Range<VirtualAddress>>,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults
        // immediately after activating the new page table. This has no effect before the new page
        // table is activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range: payload_range.map(|r| r.start.0..r.end.0),
            mmio_sharer: MmioSharer::new().unwrap(),
        }
    }

    /// Resizes the total tracked RAM range.
    ///
    /// This function fails if the tracker contains regions that are not fully included within the
    /// new range.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_allocatable(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice.
    ///
    /// # Safety
    ///
    /// Callers of this method need to ensure that the `range` is valid for mapping as read-only
    /// data.
    pub unsafe fn alloc_range_outside_main_memory(
        &mut self,
        range: &MemoryRange,
    ) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_no_overlap(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check_allocatable(&region)?;
        self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
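    ///
    /// # Example
    ///
    /// A minimal usage sketch; the base address below is a made-up illustrative value and error
    /// handling is elided:
    ///
    /// ```ignore
    /// let size = NonZeroUsize::new(PAGE_SIZE).unwrap();
    /// // Maps [0x8000_0000, 0x8000_0000 + PAGE_SIZE) as writable data with dirty-bit tracking.
    /// let range = MEMORY.lock().as_mut().unwrap().alloc_mut(0x8000_0000, size)?;
    /// assert_eq!(range, 0x8000_0000..0x8000_0000 + size.get());
    /// ```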
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
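    ///
    /// # Example
    ///
    /// An illustrative sketch, assuming `UART_PAGE` is a page-aligned device address within the
    /// MMIO window that was passed to [`MemoryTracker::new`]:
    ///
    /// ```ignore
    /// MEMORY.lock().as_mut().unwrap().map_mmio_range(UART_PAGE..UART_PAGE + PAGE_SIZE)?;
    /// // With an MMIO guard present, the mapping stays lazy (invalid) until the first access
    /// // faults and handle_translation_fault() shares and validates the touched page.
    /// ```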
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        if get_mmio_guard().is_some() {
            self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
                error!("Error during lazy MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        } else {
            self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
                error!("Error during MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the memory region meets the following criteria:
    /// - It is within the range of the `MemoryTracker`.
    /// - It does not overlap with any previously allocated regions.
    /// - The `regions` ArrayVec has sufficient capacity to add it.
    fn check_allocatable(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        self.check_no_overlap(region)
    }

    /// Checks that the given region doesn't overlap with any other previously allocated regions,
    /// and that the regions ArrayVec has capacity to add it.
    fn check_no_overlap(&self, region: &MemoryRegion) -> Result<()> {
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unshares any MMIO region previously shared with the MMIO guard.
    pub fn unshare_all_mmio(&mut self) -> Result<()> {
        self.mmio_sharer.unshare_all();

        Ok(())
    }

    /// Initialize the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
        const INIT_CAP: usize = 10;

        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share its
    /// memory with the host. Instead, they allow the host to designate part of guest memory as
    /// "shared" ahead of the guest starting its execution. The shared memory region is indicated
    /// in the swiotlb node. On such platforms, use a separate heap to allocate buffers that can be
    /// shared with the host.
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap to use heap memory directly.
    ///
    /// When running on "non-protected" hypervisors which permit host direct accesses to guest
    /// memory, there is no need to perform any memory sharing and/or allocate buffers from a
    /// dedicated region so this function instructs the shared pool to use the global allocator.
    pub fn init_heap_shared_pool(&mut self) -> Result<()> {
        // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
        // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
        // without any actual "dynamic memory sharing" taking place and, as such, the granule may
        // be set to that of the global_allocator, i.e. a byte.
        self.init_dynamic_shared_pool(size_of::<u8>())
    }

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles translation fault for blocks flagged for lazy MMIO mapping by enabling the page
    /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
    fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        let shared_range = self.mmio_sharer.share(addr)?;
        self.map_lazy_mmio_as_valid(&shared_range)?;

        Ok(())
    }

    /// Modify the PTEs corresponding to a given range from (invalid) "lazy MMIO" to valid MMIO.
    ///
    /// Returns an error if any PTE in the range is not an invalid lazy MMIO mapping.
    fn map_lazy_mmio_as_valid(&mut self, page_range: &VaRange) -> Result<()> {
        // This must be safe and free from break-before-make (BBM) violations, given that the
        // initial lazy mapping has the valid bit cleared, and each newly created valid descriptor
        // inside the mapping has the same size and alignment.
        self.page_table
            .modify_range(page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
                let flags = desc.flags().expect("Unsupported PTE flags set");
                if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
                    desc.modify_flags(Attributes::VALID, Attributes::empty());
                    Ok(())
                } else {
                    Err(())
                }
            })
            .map_err(|_| MemoryTrackerError::InvalidPte)
    }

    /// Flush all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
            self.page_table
                .walk_range(&get_va_range(range), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles permission fault for read-only blocks by setting writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

struct MmioSharer {
    granule: usize,
    frames: BTreeSet<usize>,
}

impl MmioSharer {
    fn new() -> Result<Self> {
        let granule = MMIO_GUARD_GRANULE_SIZE;
        const_assert_eq!(MMIO_GUARD_GRANULE_SIZE, PAGE_SIZE); // For good measure.
        let frames = BTreeSet::new();

        // Allows safely calling util::unchecked_align_down().
        assert!(granule.is_power_of_two());

        Ok(Self { granule, frames })
    }

    /// Shares the granule-aligned MMIO region containing `addr` (which is not validated as MMIO).
    fn share(&mut self, addr: VirtualAddress) -> Result<VaRange> {
        // This can't use virt_to_phys() since 0x0 is a valid MMIO address and we are ID-mapped.
        let phys = addr.0;
        let base = unchecked_align_down(phys, self.granule);

        if self.frames.contains(&base) {
            return Err(MemoryTrackerError::DuplicateMmioShare(base));
        }

        if let Some(mmio_guard) = get_mmio_guard() {
            mmio_guard.map(base)?;
        }

        let inserted = self.frames.insert(base);
        assert!(inserted);

        let base_va = VirtualAddress(base);
        Ok((base_va..base_va + self.granule).into())
    }

    fn unshare_all(&mut self) {
        let Some(mmio_guard) = get_mmio_guard() else {
            return self.frames.clear();
        };

        while let Some(base) = self.frames.pop_first() {
            mmio_guard.unmap(base).unwrap();
        }
    }
}

impl Drop for MmioSharer {
    fn drop(&mut self) {
        self.unshare_all();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
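///
/// # Example
///
/// A minimal sketch of a paired allocation and deallocation; the size and alignment below are
/// arbitrary illustrative values and error handling is simplified:
///
/// ```ignore
/// let layout = Layout::from_size_align(512, 8).unwrap();
/// let buffer = alloc_shared(layout)?;
/// // ... let the host access the buffer (e.g. for DMA or virtio) ...
/// // SAFETY: `buffer` was returned by `alloc_shared` with this exact `layout` and has not been
/// // deallocated yet.
/// unsafe { dealloc_shared(buffer, layout)? };
/// ```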
pub(crate) fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        // Adjusts the layout size to the max of the next power of two and the alignment,
        // as this is the actual size of the memory allocated in `alloc_aligned()`.
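        // For example, an 800-byte, 8-aligned request would refill the pool with a 1024-byte
        // chunk, since 800usize.next_power_of_two() == 1024 (illustrative values only).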
        let size = max(layout.size().next_power_of_two(), layout.align());
        let refill_layout = Layout::from_size_align(size, layout.align()).unwrap();
        shared_memory.refill(&mut shared_pool, refill_layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub(crate) unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}

/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
struct MemorySharer {
    granule: usize,
    frames: Vec<(usize, Layout)>,
}

impl MemorySharer {
    /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
    /// `granule` must be a power of 2.
    fn new(granule: usize, capacity: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, frames: Vec::with_capacity(capacity) }
    }

    /// Gets a granule-aligned region from the global allocator that suits `hint` and shares it.
    fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
        assert_ne!(layout.size(), 0);
        // SAFETY: layout has non-zero size.
        let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
            handle_alloc_error(layout);
        };

        let base = shared.as_ptr() as usize;
        let end = base.checked_add(layout.size()).unwrap();

        if let Some(mem_sharer) = get_mem_sharer() {
            trace!("Sharing memory region {:#x?}", base..end);
            for vaddr in (base..end).step_by(self.granule) {
                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
            }
        }

        self.frames.push((base, layout));
        pool.add_frame(base, end);
    }
}

impl Drop for MemorySharer {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.frames.pop() {
            if let Some(mem_sharer) = get_mem_sharer() {
                let end = base.checked_add(layout.size()).unwrap();
                trace!("Unsharing memory region {:#x?}", base..end);
                for vaddr in (base..end).step_by(self.granule) {
                    let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                    mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
                }
            }

            // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
            unsafe { dealloc(base as *mut _, layout) };
        }
    }
}

/// Handles a translation fault with the given fault address register (FAR).
#[inline]
pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_mmio_fault(far)?)
}

/// Handles a permission fault with the given fault address register (FAR).
#[inline]
pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_permission_fault(far)?)
}