// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Shared memory management.
16
Alice Wang93ee98a2023-06-08 08:20:39 +000017use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
18use super::error::MemoryTrackerError;
Ard Biesheuvela8dc46f2023-10-20 15:10:38 +020019use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
Alice Wang93ee98a2023-06-08 08:20:39 +000020use super::util::{page_4kb_of, virt_to_phys};
21use crate::dsb;
Alice Wanga9fe1fb2023-07-04 09:10:35 +000022use crate::exceptions::HandleExceptionError;
Pierre-Clément Tosia9b345f2024-04-27 01:01:42 +010023use crate::hyp::{self, get_mem_sharer, get_mmio_guard, MMIO_GUARD_GRANULE_SIZE};
Alice Wang93ee98a2023-06-08 08:20:39 +000024use crate::util::RangeExt as _;
Ard Biesheuvela8dc46f2023-10-20 15:10:38 +020025use aarch64_paging::paging::{
26 Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress, BITS_PER_LEVEL, PAGE_SIZE,
27};
Alice Wangf47b2342023-06-02 11:51:57 +000028use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
Alice Wang93ee98a2023-06-08 08:20:39 +000029use alloc::boxed::Box;
Alice Wangf47b2342023-06-02 11:51:57 +000030use alloc::vec::Vec;
Alice Wang93ee98a2023-06-08 08:20:39 +000031use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
Alice Wangf47b2342023-06-02 11:51:57 +000032use core::alloc::Layout;
Alice Wangdf6bacc2023-07-17 14:30:57 +000033use core::cmp::max;
Pierre-Clément Tosi8937cb82023-07-06 15:07:38 +000034use core::mem::size_of;
Alice Wang93ee98a2023-06-08 08:20:39 +000035use core::num::NonZeroUsize;
36use core::ops::Range;
Alice Wangf47b2342023-06-02 11:51:57 +000037use core::ptr::NonNull;
Alice Wangb73a81b2023-06-07 13:05:09 +000038use core::result;
Alice Wang93ee98a2023-06-08 08:20:39 +000039use log::{debug, error, trace};
40use once_cell::race::OnceBox;
41use spin::mutex::SpinMutex;
Pierre-Clément Tosi6b867532024-04-29 02:29:42 +010042use static_assertions::const_assert_eq;
Alice Wang93ee98a2023-06-08 08:20:39 +000043use tinyvec::ArrayVec;
44
/// The global system memory tracker, protected by a spin mutex.
pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);

static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

/// Memory range.
pub type MemoryRange = Range<usize>;

fn get_va_range(range: &MemoryRange) -> VaRange {
    VaRange::new(range.start, range.end)
}

type Result<T> = result::Result<T, MemoryTrackerError>;

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: Option<MemoryRange>,
}

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance from an active page table, covering the maximum RAM size.
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: Option<Range<VirtualAddress>>,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults immediately
        // after activating the new page table. This has no effect before the new page table is
        // activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
        // aware of, so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range: payload_range.map(|r| r.start.0..r.end.0),
        }
    }
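
    // Illustrative usage sketch (not part of the original code): the tracker is typically built
    // once during early boot and installed in the MEMORY global. `page_table` is assumed to be an
    // already-populated PageTable, and the address ranges below are placeholders.
    //
    //     let tracker = MemoryTracker::new(
    //         page_table,
    //         0x8000_0000..0x1_0000_0000, // Main memory (placeholder).
    //         0x0000_0000..0x4000_0000,   // MMIO space (placeholder).
    //         None,                       // No separately-tracked payload range.
    //     );
    //     MEMORY.lock().replace(tracker);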

    /// Resizes the tracked total RAM range.
    ///
    /// This function fails if the tracker contains regions that do not fit within the new range.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }
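
    // For example (a sketch with assumed names): after parsing the real RAM size from the device
    // tree, the caller can shrink the initially assumed maximum range; regions already allocated
    // outside the new range cause the call to fail.
    //
    //     let real_ram = dt_ram_base..dt_ram_base + dt_ram_size; // Values from the DT (assumed).
    //     MEMORY.lock().as_mut().unwrap().shrink(&real_ram)?;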

    /// Allocates the address range for a const slice; returns an error if it fails.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_allocatable(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice, without requiring it to be within main
    /// memory.
    ///
    /// # Safety
    ///
    /// Callers of this method need to ensure that the `range` is valid for mapping as read-only
    /// data.
    pub unsafe fn alloc_range_outside_main_memory(
        &mut self,
        range: &MemoryRange,
    ) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_no_overlap(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check_allocatable(&region)?;
        self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error if it fails.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }
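
    // Illustrative sketch (placeholder addresses; MEMORY assumed initialized): allocating tracked
    // read-only and read-write ranges. Both calls map the range in the page table and record it,
    // so later allocations cannot overlap it.
    //
    //     let mut guard = MEMORY.lock();
    //     let memory = guard.as_mut().unwrap();
    //     let ro = memory.alloc(base, NonZeroUsize::new(len).unwrap())?; // Mapped read-only.
    //     let rw = memory.alloc_range_mut(&(start..end))?;               // Mapped writable, DBM-tracked.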

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        if get_mmio_guard().is_some() {
            self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
                error!("Error during lazy MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        } else {
            self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
                error!("Error during MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }
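
    // Illustrative sketch (placeholder address): mapping a device's register page before touching
    // it. With an MMIO guard present the mapping is lazy, so the guard call only happens on the
    // first access, via handle_mmio_fault().
    //
    //     const UART_BASE: usize = 0x3f8_0000; // Placeholder MMIO address.
    //     MEMORY.lock().as_mut().unwrap().map_mmio_range(UART_BASE..UART_BASE + PAGE_SIZE)?;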

    /// Checks that the memory region meets the following criteria:
    /// - It is within the range of the `MemoryTracker`.
    /// - It does not overlap with any previously allocated regions.
    /// - The `regions` ArrayVec has sufficient capacity to add it.
    fn check_allocatable(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        self.check_no_overlap(region)
    }

    /// Checks that the given region doesn't overlap with any other previously allocated regions,
    /// and that the regions ArrayVec has capacity to add it.
    fn check_no_overlap(&self, region: &MemoryRegion) -> Result<()> {
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unshares any MMIO region previously shared with the MMIO guard.
    pub fn unshare_all_mmio(&mut self) -> Result<()> {
        if get_mmio_guard().is_some() {
            for range in &self.mmio_regions {
                self.page_table
                    .walk_range(&get_va_range(range), &mmio_guard_unmap_page)
                    .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
            }
        }
        Ok(())
    }

    /// Initializes the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
        const INIT_CAP: usize = 10;

        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initializes the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share its
    /// memory with the host. Instead, they allow the host to designate part of the guest's memory
    /// as "shared" ahead of the guest starting its execution. The shared memory region is
    /// indicated in the swiotlb node. On such platforms, use a separate heap to allocate buffers
    /// that can be shared with the host.
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initializes the shared heap to use heap memory directly.
    ///
    /// When running on "non-protected" hypervisors which permit host direct accesses to guest
    /// memory, there is no need to perform any memory sharing and/or allocate buffers from a
    /// dedicated region, so this function instructs the shared pool to use the global allocator.
    pub fn init_heap_shared_pool(&mut self) -> Result<()> {
        // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
        // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
        // without any actual "dynamic memory sharing" taking place and, as such, the granule may
        // be set to that of the global_allocator, i.e. a byte.
        self.init_dynamic_shared_pool(size_of::<u8>())
    }
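
    // Illustrative sketch of picking a shared-pool strategy at boot. `tracker`, `swiotlb_range`
    // and `granule` are placeholders: the tracker held in MEMORY, a range parsed from the device
    // tree, and the hypervisor's share granule, respectively; the APIs used to obtain the latter
    // two are not shown here.
    //
    //     if get_mem_sharer().is_some() {
    //         tracker.init_dynamic_shared_pool(granule)?;      // Protected VM with MEM_SHARE.
    //     } else if let Some(swiotlb_range) = swiotlb_range {
    //         tracker.init_static_shared_pool(swiotlb_range)?; // E.g. Gunyah: pre-shared region.
    //     } else {
    //         tracker.init_heap_shared_pool()?;                // Non-protected VM.
    //     }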

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault for blocks flagged for lazy MMIO mapping by enabling the page
    /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
    fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        let page_start = VirtualAddress(page_4kb_of(addr.0));
        assert_eq!(page_start.0 % MMIO_GUARD_GRANULE_SIZE, 0);
        const_assert_eq!(MMIO_GUARD_GRANULE_SIZE, PAGE_SIZE); // For good measure.
        let page_range: VaRange = (page_start..page_start + PAGE_SIZE).into();

        let mmio_guard = get_mmio_guard().unwrap();
        mmio_guard.map(page_start.0)?;
        self.map_lazy_mmio_as_valid(&page_range)?;

        Ok(())
    }

    /// Modifies the PTEs corresponding to a given range from (invalid) "lazy MMIO" to valid MMIO.
    ///
    /// Returns an error if any PTE in the range is not an invalid lazy MMIO mapping.
    fn map_lazy_mmio_as_valid(&mut self, page_range: &VaRange) -> Result<()> {
        // This must be safe and free from break-before-make (BBM) violations, given that the
        // initial lazy mapping has the valid bit cleared, and each valid descriptor created
        // inside the mapping has the same size and alignment.
        self.page_table
            .modify_range(page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
                let flags = desc.flags().expect("Unsupported PTE flags set");
                if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
                    desc.modify_flags(Attributes::VALID, Attributes::empty());
                    Ok(())
                } else {
                    Err(())
                }
            })
            .map_err(|_| MemoryTrackerError::InvalidPte)
    }

    /// Flushes all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
            self.page_table
                .walk_range(&get_va_range(range), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault on a read-only block by setting its writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
pub(crate) fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        // Adjusts the layout size to the max of the next power of two and the alignment,
        // as this is the actual size of the memory allocated in `alloc_aligned()`.
        let size = max(layout.size().next_power_of_two(), layout.align());
        let refill_layout = Layout::from_size_align(size, layout.align()).unwrap();
        shared_memory.refill(&mut shared_pool, refill_layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub(crate) unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}
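
// Illustrative round trip (a sketch, assuming one of the init_*_shared_pool() methods has already
// run so that SHARED_POOL is set):
//
//     let layout = Layout::from_size_align(512, 8).unwrap();
//     let buf = alloc_shared(layout)?;
//     // ... let the host access the buffer, e.g. as a virtio queue or bounce buffer ...
//     // SAFETY: `buf` came from alloc_shared() with the same layout and is freed exactly once.
//     unsafe { dealloc_shared(buf, layout)? };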

/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
struct MemorySharer {
    granule: usize,
    frames: Vec<(usize, Layout)>,
}

impl MemorySharer {
    /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
    /// `granule` must be a power of 2.
    fn new(granule: usize, capacity: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, frames: Vec::with_capacity(capacity) }
    }

    /// Gets from the global allocator a granule-aligned region that suits `hint` and shares it.
    fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
        assert_ne!(layout.size(), 0);
        // SAFETY: layout has non-zero size.
        let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
            handle_alloc_error(layout);
        };

        let base = shared.as_ptr() as usize;
        let end = base.checked_add(layout.size()).unwrap();

        if let Some(mem_sharer) = get_mem_sharer() {
            trace!("Sharing memory region {:#x?}", base..end);
            for vaddr in (base..end).step_by(self.granule) {
                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
            }
        }

        self.frames.push((base, layout));
        pool.add_frame(base, end);
    }
}
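
// Worked example of the rounding in refill() (illustrative values): with a 4 KiB share granule, a
// hint of Layout::from_size_align(512, 8) is aligned and padded to one 4 KiB zeroed allocation;
// each granule-sized chunk of that allocation is shared with the host (when a MEM_SHARE API is
// available) before the whole region is handed to the frame allocator.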

impl Drop for MemorySharer {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.frames.pop() {
            if let Some(mem_sharer) = get_mem_sharer() {
                let end = base.checked_add(layout.size()).unwrap();
                trace!("Unsharing memory region {:#x?}", base..end);
                for vaddr in (base..end).step_by(self.granule) {
                    let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                    mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
                }
            }

            // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
            unsafe { dealloc(base as *mut _, layout) };
        }
    }
}

/// MMIO guard unmapping callback: unmaps a page that was MMIO guard mapped on first access.
fn mmio_guard_unmap_page(
    va_range: &VaRange,
    desc: &Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    // This function will be called on an address range that corresponds to a device. Only if a
    // page has been accessed (written to or read from) will it contain the VALID flag and be MMIO
    // guard mapped. Therefore, we can skip unmapping invalid pages; they were never MMIO guard
    // mapped anyway.
    if flags.contains(Attributes::VALID) {
        assert!(
            flags.contains(MMIO_LAZY_MAP_FLAG),
            "Attempting MMIO guard unmap for non-device pages"
        );
        const MMIO_GUARD_GRANULE_SHIFT: u32 = MMIO_GUARD_GRANULE_SIZE.ilog2() - PAGE_SIZE.ilog2();
        const MMIO_GUARD_GRANULE_LEVEL: usize =
            3 - (MMIO_GUARD_GRANULE_SHIFT as usize / BITS_PER_LEVEL);
        assert_eq!(
            level, MMIO_GUARD_GRANULE_LEVEL,
            "Failed to break down block mapping before MMIO guard mapping"
        );
        let page_base = va_range.start().0;
        assert_eq!(page_base % MMIO_GUARD_GRANULE_SIZE, 0);
        // Since mmio_guard_map takes IPAs, if pvmfw moves to a non-ID address mapping, page_base
        // should be converted to IPA. However, since 0x0 is a valid MMIO address, we don't use
        // virt_to_phys here, and just pass page_base instead.
        get_mmio_guard().unwrap().unmap(page_base).map_err(|e| {
            error!("Error MMIO guard unmapping: {e}");
        })?;
    }
    Ok(())
}

/// Handles a translation fault with the given fault address register (FAR).
#[inline]
pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_mmio_fault(far)?)
}

/// Handles a permission fault with the given fault address register (FAR).
#[inline]
pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_permission_fault(far)?)
}
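
// Illustrative sketch (not part of this module): the synchronous exception handler is expected to
// decode the fault and dispatch to the handlers above; `fault_kind`, `FaultKind`, `esr` and `far`
// are placeholders for whatever the exception code actually uses.
//
//     match fault_kind(esr) {
//         FaultKind::Translation => handle_translation_fault(far)?,
//         FaultKind::Permission => handle_permission_fault(far)?,
//         _ => { /* other fault kinds are handled elsewhere */ }
//     }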