// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Shared memory management.

use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
use super::error::MemoryTrackerError;
use super::page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::{page_4kb_of, virt_to_phys};
use crate::dsb;
use crate::exceptions::HandleExceptionError;
use crate::util::RangeExt as _;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::boxed::Box;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
use core::cmp::max;
use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::{get_mem_sharer, get_mmio_guard, MMIO_GUARD_GRANULE_SIZE};
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;

/// A global static variable representing the system memory tracker, protected by a spin mutex.
pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);

static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

/// Memory range.
pub type MemoryRange = Range<usize>;

fn get_va_range(range: &MemoryRange) -> VaRange {
    VaRange::new(range.start, range.end)
}

type Result<T> = result::Result<T, MemoryTrackerError>;

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: Option<MemoryRange>,
}

// TODO: Remove this once aarch64-paging crate is updated.
// SAFETY: Only `PageTable` doesn't implement Send, but it should.
unsafe impl Send for MemoryTracker {}

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance from an active page table, covering the maximum RAM size.
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: Option<Range<VirtualAddress>>,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults immediately
        // after activating the new page table. This has no effect before the new page table is
        // activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range: payload_range.map(|r| r.start.0..r.end.0),
        }
    }
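
    // Illustrative sketch (assumed caller, not part of this file): early boot code typically
    // builds the page table and then installs the tracker in the global `MEMORY` mutex, e.g.
    //
    //     MEMORY.lock().replace(MemoryTracker::new(page_table, 0..max_ram, mmio, None));
    //
    // The exact call site and argument values are assumptions for illustration only.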

    /// Shrinks the total tracked RAM range.
    ///
    /// This function fails if any previously allocated region does not fit within the new range.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }
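
    // Worked example (illustrative addresses only): with `total == 0x8000_0000..0x9000_0000`,
    // shrinking to `0x8000_0000..0x8800_0000` succeeds provided every tracked region already
    // fits below the new end; a range with a different start is rejected with
    // `DifferentBaseAddress`, and one with a larger end with `SizeTooLarge`.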

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }
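
    // Minimal usage sketch (assumed caller, not from this file): reserving a loader-provided
    // device tree blob as read-only and a scratch buffer as read-write.
    //
    //     let fdt_region = tracker.alloc(fdt_addr, fdt_size)?;
    //     let scratch = tracker.alloc_range_mut(&(scratch_start..scratch_end))?;
    //
    // `fdt_addr`, `fdt_size`, `scratch_start` and `scratch_end` are hypothetical values.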

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        if get_mmio_guard().is_some() {
            self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
                error!("Error during lazy MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        } else {
            self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
                error!("Error during MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }
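
    // Illustrative sketch (assumed caller, not from this file): a UART driver would typically
    // request its register page before first use, e.g.
    //
    //     tracker.map_mmio_range(UART_BASE..UART_BASE + UART_SIZE)?;
    //
    // With an MMIO guard present, the page stays unmapped until the first access faults and
    // `handle_mmio_fault()` below maps it lazily. `UART_BASE`/`UART_SIZE` are hypothetical.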

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't overlap
    /// with any other previously allocated regions, and that the regions ArrayVec has capacity to
    /// add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&mut self) -> Result<()> {
        if get_mmio_guard().is_some() {
            for range in &self.mmio_regions {
                self.page_table
                    .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
                    .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
            }
        }
        Ok(())
    }

    /// Initialize the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
        const INIT_CAP: usize = 10;

        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share its
    /// memory with the host. Instead, they allow the host to designate part of guest memory as
    /// "shared" ahead of the guest starting its execution. The shared memory region is indicated
    /// in the swiotlb node. On such platforms, use a separate heap to allocate buffers that can
    /// be shared with the host.
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }
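
    // Illustrative call sequence (assumed caller, not from this file):
    //
    //     let range = swiotlb_range_from_dt(fdt)?; // hypothetical helper reading the DT node
    //     MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range)?;
    //
    // `swiotlb_range_from_dt` is a made-up name; the real lookup depends on the platform's DT.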

    /// Initialize the shared heap to use heap memory directly.
    ///
    /// When running on "non-protected" hypervisors which permit host direct accesses to guest
    /// memory, there is no need to perform any memory sharing and/or allocate buffers from a
    /// dedicated region so this function instructs the shared pool to use the global allocator.
    pub fn init_heap_shared_pool(&mut self) -> Result<()> {
        // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
        // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
        // without any actual "dynamic memory sharing" taking place and, as such, the granule may
        // be set to the one of the global_allocator i.e. a byte.
        self.init_dynamic_shared_pool(size_of::<u8>())
    }

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault on a block flagged for lazy MMIO mapping by enabling the page
    /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
    fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        let page_start = VirtualAddress(page_4kb_of(addr.0));
        let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
        let mmio_guard = get_mmio_guard().unwrap();
        self.page_table
            .modify_range(&page_range, &verify_lazy_mapped_block)
            .map_err(|_| MemoryTrackerError::InvalidPte)?;
        mmio_guard.map(page_start.0)?;
        // Maps a single device page, breaking up block mappings if necessary.
        self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
    }

    /// Flush all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
            self.page_table
                .modify_range(&get_va_range(range), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault on a read-only block by setting the writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
pub(crate) fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        // Adjusts the layout size to the max of the next power of two and the alignment,
        // as this is the actual size of the memory allocated in `alloc_aligned()`.
        let size = max(layout.size().next_power_of_two(), layout.align());
        let refill_layout = Layout::from_size_align(size, layout.align()).unwrap();
        shared_memory.refill(&mut shared_pool, refill_layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}
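
// Worked example of the refill sizing above (numbers chosen purely for illustration): for
// `Layout::from_size_align(24, 16)`, the next power of two of the size is 32 and `max(32, 16)`
// keeps 32, so the pool is refilled for a 32-byte, 16-aligned layout, matching the power-of-two
// block that `alloc_aligned()` actually carves out.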

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub(crate) unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}
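
// Minimal usage sketch (assumed caller, not from this file), e.g. for a bounce buffer that the
// host reads from or writes to:
//
//     let layout = Layout::from_size_align(512, 64).unwrap();
//     let buf = alloc_shared(layout)?;
//     // ... let the host access the buffer ...
//     // SAFETY: `buf` was returned by `alloc_shared` with this exact `layout` and has not yet
//     // been deallocated.
//     unsafe { dealloc_shared(buf, layout)? };
//
// The size and alignment values are illustrative only.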

/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
struct MemorySharer {
    granule: usize,
    frames: Vec<(usize, Layout)>,
}

impl MemorySharer {
    /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
    /// `granule` must be a power of 2.
    fn new(granule: usize, capacity: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, frames: Vec::with_capacity(capacity) }
    }

    /// Gets a granule-aligned region from the global allocator that suits `hint` and shares it.
    fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
        assert_ne!(layout.size(), 0);
        // SAFETY: layout has non-zero size.
        let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
            handle_alloc_error(layout);
        };

        let base = shared.as_ptr() as usize;
        let end = base.checked_add(layout.size()).unwrap();

        if let Some(mem_sharer) = get_mem_sharer() {
            trace!("Sharing memory region {:#x?}", base..end);
            for vaddr in (base..end).step_by(self.granule) {
                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
            }
        }

        self.frames.push((base, layout));
        pool.add_frame(base, end);
    }
}
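
// For example (illustrative numbers), with a 4 KiB share granule a refill hint of
// `Layout::from_size_align(32, 8)` is first re-aligned to 4096 and then padded to a 4096-byte
// size, so `refill()` always allocates and shares whole granules with the host.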

impl Drop for MemorySharer {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.frames.pop() {
            if let Some(mem_sharer) = get_mem_sharer() {
                let end = base.checked_add(layout.size()).unwrap();
                trace!("Unsharing memory region {:#x?}", base..end);
                for vaddr in (base..end).step_by(self.granule) {
                    let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                    mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
                }
            }

            // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
            unsafe { dealloc(base as *mut _, layout) };
        }
    }
}

/// Checks whether block flags indicate it should be MMIO guard mapped.
fn verify_lazy_mapped_block(
    _range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
    }
    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
        Ok(())
    } else {
        Err(())
    }
}

/// Unmaps a page from the MMIO guard, if it had been MMIO guard mapped.
fn mmio_guard_unmap_page(
    va_range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(());
    }
    // This function will be called on an address range that corresponds to a device. Only if a
    // page has been accessed (written to or read from), will it contain the VALID flag and be MMIO
    // guard mapped. Therefore, we can skip unmapping invalid pages, they were never MMIO guard
    // mapped anyway.
    if flags.contains(Attributes::VALID) {
        assert!(
            flags.contains(MMIO_LAZY_MAP_FLAG),
            "Attempting MMIO guard unmap for non-device pages"
        );
        assert_eq!(
            va_range.len(),
            MMIO_GUARD_GRANULE_SIZE,
            "Failed to break down block mapping before MMIO guard mapping"
        );
        let page_base = va_range.start().0;
        assert_eq!(page_base % MMIO_GUARD_GRANULE_SIZE, 0);
        // Since mmio_guard_map takes IPAs, if pvmfw moves to a non-identity address mapping,
        // page_base should be converted to an IPA. However, since 0x0 is a valid MMIO address,
        // we don't use virt_to_phys here, and just pass page_base instead.
        get_mmio_guard().unwrap().unmap(page_base).map_err(|e| {
            error!("Error MMIO guard unmapping: {e}");
        })?;
    }
    Ok(())
}

/// Handles a translation fault with the given fault address register (FAR).
#[inline]
pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_mmio_fault(far)?)
}

/// Handles a permission fault with the given fault address register (FAR).
#[inline]
pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_permission_fault(far)?)
}
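
// Illustrative sketch (assumed caller, not from this file): the synchronous exception handler is
// expected to decode the ESR and forward the FAR to one of the handlers above, e.g.
//
//     match fault_kind_from_esr(esr) {
//         Fault::Translation => handle_translation_fault(far)?,
//         Fault::Permission => handle_permission_fault(far)?,
//         ...
//     }
//
// `fault_kind_from_esr` and `Fault` are hypothetical names used only for illustration; the real
// dispatch lives in the exception vector code.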