// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Shared memory management.

use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
use super::error::MemoryTrackerError;
use super::page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::{page_4kb_of, virt_to_phys};
use crate::dsb;
use crate::exceptions::HandleExceptionError;
use crate::util::RangeExt as _;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::boxed::Box;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::{get_hypervisor, MMIO_GUARD_GRANULE_SIZE};
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;

/// A global static variable representing the system memory tracker, protected by a spin mutex.
pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);

static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

/// Memory range.
pub type MemoryRange = Range<usize>;

fn get_va_range(range: &MemoryRange) -> VaRange {
    VaRange::new(range.start, range.end)
}

type Result<T> = result::Result<T, MemoryTrackerError>;

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: Option<MemoryRange>,
}

// TODO: Remove this once aarch64-paging crate is updated.
// SAFETY: Only `PageTable` doesn't implement Send, but it should.
unsafe impl Send for MemoryTracker {}

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance from an active page table, covering the maximum RAM size.
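    ///
    /// A minimal usage sketch; the address ranges below are hypothetical, not taken from any
    /// real platform configuration:
    ///
    /// ```ignore
    /// let tracker = MemoryTracker::new(
    ///     page_table,                   // Page table already holding the static mappings.
    ///     0x8000_0000..0xc000_0000,     // Main memory range (hypothetical).
    ///     0x0000_0000..0x4000_0000,     // MMIO range (hypothetical); must not overlap memory.
    ///     None,                         // No separately tracked payload range.
    /// );
    /// *MEMORY.lock() = Some(tracker);
    /// ```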
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: Option<Range<VirtualAddress>>,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults immediately
        // after activating the new page table. This has no effect before the new page table is
        // activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range: payload_range.map(|r| r.start.0..r.end.0),
        }
    }

    /// Resizes the total RAM size.
    ///
    /// This function fails if the new range would exclude any region that is already tracked.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error on failure.
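    ///
    /// Illustrative sketch only; the base address and size below are hypothetical:
    ///
    /// ```ignore
    /// let size = NonZeroUsize::new(0x1000).unwrap();
    /// let range = MEMORY.lock().as_mut().unwrap().alloc(0x8020_0000, size)?;
    /// ```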
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't
    /// overlap with any other previously allocated regions, and that the regions ArrayVec has
    /// capacity to add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&mut self) -> Result<()> {
        for range in &self.mmio_regions {
            self.page_table
                .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
                .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
        }
        Ok(())
    }

    /// Initialize the shared heap to dynamically share memory from the global allocator.
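    ///
    /// A hedged sketch of the expected call; the 4 KiB granule shown here is illustrative and
    /// would normally be the granule reported by the hypervisor:
    ///
    /// ```ignore
    /// MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(0x1000)?;
    /// ```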
    pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
        const INIT_CAP: usize = 10;

        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share
    /// its memory with the host. Instead, they allow the host to designate part of the guest
    /// memory as "shared" ahead of the guest starting its execution. The shared memory region is
    /// indicated in the swiotlb node. On such platforms, use a separate heap to allocate buffers
    /// that can be shared with the host.
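    ///
    /// Illustrative call with a hypothetical swiotlb range read from the device tree:
    ///
    /// ```ignore
    /// MEMORY.lock().as_mut().unwrap().init_static_shared_pool(0x8060_0000..0x8068_0000)?;
    /// ```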
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault on a block flagged for lazy MMIO mapping by enabling the page
    /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
    fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        let page_start = VirtualAddress(page_4kb_of(addr.0));
        let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
        self.page_table
            .modify_range(&page_range, &verify_lazy_mapped_block)
            .map_err(|_| MemoryTrackerError::InvalidPte)?;
        get_hypervisor().mmio_guard_map(page_start.0)?;
        // Maps a single device page, breaking up block mappings if necessary.
        self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
    }

    /// Flush all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
            self.page_table
                .modify_range(&get_va_range(range), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault on a read-only block by setting the writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

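// Dropping the tracker disables hardware dirty-bit management, flushes any remaining
// writable-dirty pages, and finally unshares all memory previously shared with the host.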
impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
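///
/// A hedged usage sketch (error handling elided); the layout values below are arbitrary:
///
/// ```ignore
/// let layout = Layout::from_size_align(512, 64).unwrap();
/// let buf = alloc_shared(layout)?;
/// // ... let the host read from or write to the buffer ...
/// // SAFETY: `buf` was returned by `alloc_shared` with `layout` and has not yet been freed.
/// unsafe { dealloc_shared(buf, layout)? };
/// ```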
pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        shared_memory.refill(&mut shared_pool, layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}

/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
struct MemorySharer {
    granule: usize,
    shared_regions: Vec<(usize, Layout)>,
}

impl MemorySharer {
    /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
    /// `granule` must be a power of 2.
    fn new(granule: usize, capacity: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, shared_regions: Vec::with_capacity(capacity) }
    }

    /// Gets a granule-aligned region that fits `hint` from the global allocator and shares it.
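    ///
    /// For example, with a 4 KiB granule, a `hint` of `Layout::from_size_align(5000, 8)` is
    /// aligned and padded up to an 8192-byte, 4096-aligned allocation before being shared page
    /// by page.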
    fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
        assert_ne!(layout.size(), 0);
        // SAFETY: layout has non-zero size.
        let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
            handle_alloc_error(layout);
        };

        let base = shared.as_ptr() as usize;
        let end = base.checked_add(layout.size()).unwrap();
        trace!("Sharing memory region {:#x?}", base..end);
        for vaddr in (base..end).step_by(self.granule) {
            let vaddr = NonNull::new(vaddr as *mut _).unwrap();
            get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
        }
        self.shared_regions.push((base, layout));

        pool.add_frame(base, end);
    }
}

impl Drop for MemorySharer {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.shared_regions.pop() {
            let end = base.checked_add(layout.size()).unwrap();
            trace!("Unsharing memory region {:#x?}", base..end);
            for vaddr in (base..end).step_by(self.granule) {
                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
            }

            // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
            unsafe { dealloc(base as *mut _, layout) };
        }
    }
}

/// Checks whether block flags indicate it should be MMIO guard mapped.
fn verify_lazy_mapped_block(
    _range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
    }
    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
        Ok(())
    } else {
        Err(())
    }
}

/// Unmaps the given page from the MMIO guard, if it was MMIO guard mapped.
fn mmio_guard_unmap_page(
    va_range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(());
    }
    // This function will be called on an address range that corresponds to a device. Only if a
    // page has been accessed (written to or read from) will it contain the VALID flag and be MMIO
    // guard mapped. Therefore, we can skip unmapping invalid pages; they were never MMIO guard
    // mapped anyway.
    if flags.contains(Attributes::VALID) {
        assert!(
            flags.contains(MMIO_LAZY_MAP_FLAG),
            "Attempting MMIO guard unmap for non-device pages"
        );
        assert_eq!(
            va_range.len(),
            MMIO_GUARD_GRANULE_SIZE,
            "Failed to break down block mapping before MMIO guard mapping"
        );
        let page_base = va_range.start().0;
        assert_eq!(page_base % MMIO_GUARD_GRANULE_SIZE, 0);
        // Since mmio_guard_map takes IPAs, if pvmfw moves to a non-ID address mapping, page_base
        // should be converted to an IPA. However, since 0x0 is a valid MMIO address, we don't use
        // virt_to_phys here, and just pass page_base instead.
        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
            error!("Error MMIO guard unmapping: {e}");
        })?;
    }
    Ok(())
}

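// The fault handlers below are intended to be called from the synchronous exception handler with
// the faulting address read from the FAR. They use `try_lock()` so that a fault taken while
// `MEMORY` is already held fails with `PageTableUnavailable` instead of deadlocking.
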
/// Handles a translation fault with the given fault address register (FAR).
#[inline]
pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_mmio_fault(far)?)
}

/// Handles a permission fault with the given fault address register (FAR).
#[inline]
pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_permission_fault(far)?)
}