// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Shared memory management.

use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
use super::error::MemoryTrackerError;
use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::{page_4kb_of, virt_to_phys};
use crate::console;
use crate::dsb;
use crate::exceptions::HandleExceptionError;
use crate::hyp::{self, get_mem_sharer, get_mmio_guard, MMIO_GUARD_GRANULE_SIZE};
use crate::util::unchecked_align_down;
use crate::util::RangeExt as _;
use aarch64_paging::paging::{
    Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress, PAGE_SIZE,
};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::boxed::Box;
use alloc::collections::BTreeSet;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
use core::cmp::max;
use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use static_assertions::const_assert_eq;
use tinyvec::ArrayVec;

/// A global static variable representing the system memory tracker, protected by a spin mutex.
pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);

static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

/// Memory range.
pub type MemoryRange = Range<usize>;

fn get_va_range(range: &MemoryRange) -> VaRange {
    VaRange::new(range.start, range.end)
}

type Result<T> = result::Result<T, MemoryTrackerError>;

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: Option<MemoryRange>,
    mmio_sharer: MmioSharer,
}

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance from an active page table, covering the maximum RAM size.
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: Option<Range<VirtualAddress>>,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults immediately
        // after activating the new page table. This has no effect before the new page table is
        // activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range: payload_range.map(|r| r.start.0..r.end.0),
            mmio_sharer: MmioSharer::new().unwrap(),
        }
    }

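    // Illustrative sketch (editor's note, assumed usage rather than code from this file): a
    // typical caller builds the page table elsewhere, constructs the tracker and installs it in
    // the global `MEMORY` slot. `page_table`, `ram_range` and `mmio_range` are placeholders:
    //
    //     let tracker = MemoryTracker::new(page_table, ram_range, mmio_range, None);
    //     MEMORY.lock().replace(tracker);
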
    /// Shrinks the total RAM size.
    ///
    /// This function fails if the tracker contains regions that are not included within the new
    /// size.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error if it fails.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_allocatable(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice.
    ///
    /// # Safety
    ///
    /// Callers of this method need to ensure that the `range` is valid for mapping as read-only
    /// data.
    pub unsafe fn alloc_range_outside_main_memory(
        &mut self,
        range: &MemoryRange,
    ) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_no_overlap(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check_allocatable(&region)?;
        self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error if it fails.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        if get_mmio_guard().is_some() {
            self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
                error!("Error during lazy MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        } else {
            self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
                error!("Error during MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the memory region meets the following criteria:
    /// - It is within the range of the `MemoryTracker`.
    /// - It does not overlap with any previously allocated regions.
    /// - The `regions` ArrayVec has sufficient capacity to add it.
    fn check_allocatable(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        self.check_no_overlap(region)
    }

    /// Checks that the given region doesn't overlap with any other previously allocated regions,
    /// and that the regions ArrayVec has capacity to add it.
    fn check_no_overlap(&self, region: &MemoryRegion) -> Result<()> {
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unshares any MMIO region previously shared with the MMIO guard.
    pub fn unshare_all_mmio(&mut self) -> Result<()> {
        self.mmio_sharer.unshare_all();

        Ok(())
    }

    /// Initialize the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
        const INIT_CAP: usize = 10;

        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest
    /// to share its memory with the host. Instead, they let the host designate part
    /// of the guest memory as "shared" before the guest starts executing. The
    /// shared memory region is indicated in the swiotlb node. On such platforms, use
    /// a separate heap to allocate buffers that can be shared with the host.
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap to use heap memory directly.
    ///
    /// When running on "non-protected" hypervisors, which permit the host direct access to guest
    /// memory, there is no need to perform any memory sharing or to allocate buffers from a
    /// dedicated region, so this function instructs the shared pool to use the global allocator.
    pub fn init_heap_shared_pool(&mut self) -> Result<()> {
        // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
        // using init_dynamic_shared_pool() on a non-protected platform makes use of the heap
        // without any actual "dynamic memory sharing" taking place and, as such, the granule may
        // be set to that of the global_allocator, i.e. a byte.
        self.init_dynamic_shared_pool(size_of::<u8>())
    }

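    // Illustrative sketch (editor's note, assumed usage rather than code from this file): a
    // caller typically picks one of the three `init_*_shared_pool()` methods based on what the
    // platform offers. `memory`, `granule` and `swiotlb_range` are placeholders:
    //
    //     if get_mem_sharer().is_some() {
    //         memory.init_dynamic_shared_pool(granule)?;
    //     } else if let Some(swiotlb_range) = swiotlb_range {
    //         memory.init_static_shared_pool(swiotlb_range)?;
    //     } else {
    //         memory.init_heap_shared_pool()?;
    //     }
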
    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault for blocks flagged for lazy MMIO mapping by enabling the page
    /// table entry and mapping the block with the MMIO guard. Breaks apart a block entry if
    /// required.
    fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        let shared_range = self.mmio_sharer.share(addr)?;
        self.map_lazy_mmio_as_valid(&shared_range)?;

        Ok(())
    }

    /// Modifies the PTEs corresponding to a given range from (invalid) "lazy MMIO" to valid MMIO.
    ///
    /// Returns an error if any PTE in the range is not an invalid lazy MMIO mapping.
    fn map_lazy_mmio_as_valid(&mut self, page_range: &VaRange) -> Result<()> {
        // This must be safe and free from break-before-make (BBM) violations, given that the
        // initial lazy mapping has the valid bit cleared, and each newly created valid descriptor
        // inside the mapping has the same size and alignment.
        self.page_table
            .modify_range(page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
                let flags = desc.flags().expect("Unsupported PTE flags set");
                if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
                    desc.modify_flags(Attributes::VALID, Attributes::empty());
                    Ok(())
                } else {
                    Err(())
                }
            })
            .map_err(|_| MemoryTrackerError::InvalidPte)
    }

    /// Flushes all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
            self.page_table
                .walk_range(&get_va_range(range), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault for read-only blocks by setting the writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

struct MmioSharer {
    granule: usize,
    frames: BTreeSet<usize>,
}

impl MmioSharer {
    fn new() -> Result<Self> {
        let granule = Self::get_granule()?;
        let frames = BTreeSet::new();

        // Allows safely calling util::unchecked_align_down().
        assert!(granule.is_power_of_two());

        Ok(Self { granule, frames })
    }

    fn get_granule() -> Result<usize> {
        const_assert_eq!(MMIO_GUARD_GRANULE_SIZE, PAGE_SIZE); // For good measure.
        let Some(mmio_guard) = get_mmio_guard() else {
            return Ok(PAGE_SIZE);
        };
        match mmio_guard.granule()? {
            MMIO_GUARD_GRANULE_SIZE => Ok(MMIO_GUARD_GRANULE_SIZE),
            granule => Err(MemoryTrackerError::UnsupportedMmioGuardGranule(granule)),
        }
    }

    /// Shares the granule-aligned MMIO region containing `addr` (the address is not validated as
    /// MMIO).
    fn share(&mut self, addr: VirtualAddress) -> Result<VaRange> {
        // This can't use virt_to_phys() since 0x0 is a valid MMIO address and we are ID-mapped.
        let phys = addr.0;
        let base = unchecked_align_down(phys, self.granule);

        // TODO(ptosi): Share the UART using this method and remove the hardcoded check.
        if self.frames.contains(&base) || base == page_4kb_of(console::BASE_ADDRESS) {
            return Err(MemoryTrackerError::DuplicateMmioShare(base));
        }

        if let Some(mmio_guard) = get_mmio_guard() {
            mmio_guard.map(base)?;
        }

        let inserted = self.frames.insert(base);
        assert!(inserted);

        let base_va = VirtualAddress(base);
        Ok((base_va..base_va + self.granule).into())
    }

    fn unshare_all(&mut self) {
        let Some(mmio_guard) = get_mmio_guard() else {
            return self.frames.clear();
        };

        while let Some(base) = self.frames.pop_first() {
            mmio_guard.unmap(base).unwrap();
        }
    }
}

impl Drop for MmioSharer {
    fn drop(&mut self) {
        self.unshare_all();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
pub(crate) fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        // Adjusts the layout size to the max of the next power of two and the alignment,
        // as this is the actual size of the memory allocated in `alloc_aligned()`.
        let size = max(layout.size().next_power_of_two(), layout.align());
        let refill_layout = Layout::from_size_align(size, layout.align()).unwrap();
        shared_memory.refill(&mut shared_pool, refill_layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub(crate) unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}

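// Illustrative sketch (editor's note, assumed usage rather than code from this file):
// `alloc_shared` and `dealloc_shared` are meant to be used as a pair with the same `Layout`:
//
//     let layout = Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap();
//     let buf = alloc_shared(layout)?;
//     // ... hand `buf` to the host (e.g. as a DMA or virtio buffer) ...
//     // SAFETY: `buf` was returned by `alloc_shared(layout)` and hasn't been freed yet.
//     unsafe { dealloc_shared(buf, layout)? };
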
/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
struct MemorySharer {
    granule: usize,
    frames: Vec<(usize, Layout)>,
}

impl MemorySharer {
    /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
    /// `granule` must be a power of 2.
    fn new(granule: usize, capacity: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, frames: Vec::with_capacity(capacity) }
    }

    /// Gets a granule-aligned region that suits `hint` from the global allocator and shares it.
    fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
        assert_ne!(layout.size(), 0);
        // SAFETY: layout has non-zero size.
        let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
            handle_alloc_error(layout);
        };

        let base = shared.as_ptr() as usize;
        let end = base.checked_add(layout.size()).unwrap();

        if let Some(mem_sharer) = get_mem_sharer() {
            trace!("Sharing memory region {:#x?}", base..end);
            for vaddr in (base..end).step_by(self.granule) {
                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
            }
        }

        self.frames.push((base, layout));
        pool.add_frame(base, end);
    }
}

impl Drop for MemorySharer {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.frames.pop() {
            if let Some(mem_sharer) = get_mem_sharer() {
                let end = base.checked_add(layout.size()).unwrap();
                trace!("Unsharing memory region {:#x?}", base..end);
                for vaddr in (base..end).step_by(self.granule) {
                    let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                    mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
                }
            }

            // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
            unsafe { dealloc(base as *mut _, layout) };
        }
    }
}

/// Handles a translation fault with the given fault address register (FAR).
#[inline]
pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_mmio_fault(far)?)
}

/// Handles a permission fault with the given fault address register (FAR).
#[inline]
pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_permission_fault(far)?)
}
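
// Illustrative sketch (editor's note, assumed usage rather than code from this file): these two
// helpers are intended to be called from the synchronous exception handler once the fault class
// and FAR have been decoded. `fault_kind`, `FaultKind` and `far` are placeholders:
//
//     match fault_kind {
//         FaultKind::Translation => handle_translation_fault(far)?,
//         FaultKind::Permission => handle_permission_fault(far)?,
//         _ => { /* handle other exception classes */ }
//     }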