// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level allocation and tracking of main memory.

#![deny(unsafe_op_in_unsafe_fn)]

use crate::helpers::PVMFW_PAGE_SIZE;
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use aarch64_paging::MapError;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use buddy_system_allocator::LockedFrameAllocator;
use core::alloc::Layout;
use core::fmt;
use core::iter::once;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::trace;
use log::{debug, error};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;
use vmbase::{
    dsb, isb, layout,
    memory::{
        flush_dirty_range, is_leaf_pte, page_4kb_of, set_dbm_enabled, MemorySharer, PageTable,
        MMIO_LAZY_MAP_FLAG, SIZE_2MB, SIZE_4KB,
    },
    tlbi,
    util::{align_up, RangeExt as _},
};

/// First address that can't be translated by a level 1 TTBR0_EL1.
pub const MAX_ADDR: usize = 1 << 40;

const PT_ROOT_LEVEL: usize = 1;
const PT_ASID: usize = 1;

pub type MemoryRange = Range<usize>;

/// Global shared instance of [`MemoryTracker`].
pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
// SAFETY: `MemoryTracker` is only ever accessed behind the `MEMORY` spin lock, so it is never
// used concurrently.
unsafe impl Send for MemoryTracker {}

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: MemoryRange,
}

/// Errors for MemoryTracker operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in memory size.
    SizeTooSmall,
    /// Reached limit number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Region couldn't be unmapped.
    FailedToUnmap,
    /// Error from the interaction with the hypervisor.
    Hypervisor(hyp::Error),
    /// Failure to set `SHARED_MEMORY`.
    SharedMemorySetFailure,
    /// Failure to set `SHARED_POOL`.
    SharedPoolSetFailure,
    /// Invalid page table entry.
    InvalidPte,
    /// Failed to flush memory region.
    FlushRegionFailed,
    /// Failed to set PTE dirty state.
    SetPteDirtyFailed,
}

impl fmt::Display for MemoryTrackerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::DifferentBaseAddress => write!(f, "Received different base address"),
            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
            Self::Full => write!(f, "Reached limit number of tracked regions"),
            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
            Self::FailedToMap => write!(f, "Failed to map the new region"),
            Self::FailedToUnmap => write!(f, "Failed to unmap the new region"),
            Self::Hypervisor(e) => e.fmt(f),
            Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
            Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
            Self::InvalidPte => write!(f, "Page table entry is not valid"),
            Self::FlushRegionFailed => write!(f, "Failed to flush memory region"),
            Self::SetPteDirtyFailed => write!(f, "Failed to set PTE dirty state"),
        }
    }
}

impl From<hyp::Error> for MemoryTrackerError {
    fn from(e: hyp::Error) -> Self {
        Self::Hypervisor(e)
    }
}

type Result<T> = result::Result<T, MemoryTrackerError>;

static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance from the given page table, activating it and covering the maximum
    /// RAM size.
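    ///
    /// # Example
    ///
    /// A sketch of boot-time construction; the RAM base and MMIO window below are hypothetical
    /// placeholders, only `MAX_ADDR` and the helper functions come from this module:
    ///
    /// ```ignore
    /// let page_table = init_page_table().unwrap();
    /// let main_memory = 0x8000_0000..MAX_ADDR; // hypothetical guest RAM base
    /// let mmio_window = 0x0..0x8000_0000; // hypothetical, must not overlap main memory
    /// let tracker =
    ///     MemoryTracker::new(page_table, main_memory, mmio_window, appended_payload_range());
    /// MEMORY.lock().replace(tracker);
    /// ```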
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: MemoryRange,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults
        // immediately after activating the new page table. This has no effect before the new
        // page table is activated because none of the entries in the initial idmap have the DBM
        // flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY - page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range,
        }
    }

    /// Resizes the total tracked RAM range.
    ///
    /// This function fails if the tracker contains regions that are not included within the new
    /// size.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error if it fails.
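    ///
    /// # Example
    ///
    /// A minimal sketch with a hypothetical address range (not compiled as a doc-test in this
    /// `no_std` crate):
    ///
    /// ```ignore
    /// // Track the region holding the device tree as read-only.
    /// let fdt_region = 0x8000_0000..0x8020_0000; // hypothetical
    /// let tracked = MEMORY.lock().as_mut().unwrap().alloc_range(&fdt_region)?;
    /// assert_eq!(tracked, fdt_region);
    /// ```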
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data_dbm(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error if it fails.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
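    ///
    /// # Example
    ///
    /// A sketch of lazily mapping a device page; the base address is a hypothetical placeholder:
    ///
    /// ```ignore
    /// const DEVICE_BASE: usize = 0x900_0000; // hypothetical MMIO base, page-aligned
    /// let mut memory = MEMORY.lock();
    /// memory.as_mut().unwrap().map_mmio_range(DEVICE_BASE..DEVICE_BASE + PVMFW_PAGE_SIZE)?;
    /// // The page is only MMIO-guard-mapped on first access, via handle_mmio_fault().
    /// ```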
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device_lazy(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't overlap
    /// with any other previously allocated regions, and that the regions ArrayVec has capacity to
    /// add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&mut self) -> Result<()> {
        for range in &self.mmio_regions {
            self.page_table
                .modify_range(range, &mmio_guard_unmap_page)
                .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
        }
        Ok(())
    }

    /// Initialize the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
        const INIT_CAP: usize = 10;

        let granule = get_hypervisor().memory_protection_granule()?;
        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share its
    /// memory with the host. Instead, they allow the host to designate part of guest memory as
    /// "shared" ahead of the guest starting its execution. The shared memory region is indicated
    /// in the swiotlb node. On such platforms, use a separate heap to allocate buffers that can
    /// be shared with the host.
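    ///
    /// # Example
    ///
    /// A sketch using a hypothetical swiotlb range that would normally be read from the DT:
    ///
    /// ```ignore
    /// let swiotlb_range = 0xbfc0_0000..0xbfe0_0000; // hypothetical
    /// MEMORY.lock().as_mut().unwrap().init_static_shared_pool(swiotlb_range)?;
    /// // alloc_shared() now carves buffers out of that pre-shared region.
    /// ```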
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault for a block flagged for lazy MMIO mapping by enabling the page
    /// table entry and MMIO-guard-mapping the block. Breaks apart a block entry if required.
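    ///
    /// # Example
    ///
    /// A sketch of the expected call site, a synchronous exception handler; the predicate and
    /// `faulting_address` (read from `FAR_EL1`) are illustrative stand-ins:
    ///
    /// ```ignore
    /// if is_translation_fault_on_lazy_mmio(esr) { // hypothetical predicate
    ///     MEMORY.lock().as_mut().unwrap().handle_mmio_fault(faulting_address)?;
    ///     // Return from the exception so the faulting access is retried.
    /// }
    /// ```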
    pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
        let page_range = page_4kb_of(addr)..page_4kb_of(addr) + PVMFW_PAGE_SIZE;
        self.page_table
            .modify_range(&page_range, &verify_lazy_mapped_block)
            .map_err(|_| MemoryTrackerError::InvalidPte)?;
        get_hypervisor().mmio_guard_map(page_range.start)?;
        // Maps a single device page, breaking up block mappings if necessary.
        self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
    }

    /// Flushes all pages marked writable-dirty in the tracked writable regions and the payload.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(once(&self.payload_range)) {
            self.page_table
                .modify_range(range, &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault for a read-only block by setting the writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
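    ///
    /// # Example
    ///
    /// A sketch of the expected call site; `faulting_address` is an illustrative stand-in for the
    /// value read from `FAR_EL1`:
    ///
    /// ```ignore
    /// // On a permission fault caused by a write to a DBM-tracked, read-only page:
    /// MEMORY.lock().as_mut().unwrap().handle_permission_fault(faulting_address)?;
    /// // The PTE is now writable-dirty and the write can be retried.
    /// ```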
    pub fn handle_permission_fault(&mut self, addr: usize) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
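///
/// # Example
///
/// A minimal sketch of allocating and releasing a host-shared buffer (not compiled as a doc-test
/// in this `no_std` crate):
///
/// ```ignore
/// let layout = Layout::from_size_align(PVMFW_PAGE_SIZE, PVMFW_PAGE_SIZE).unwrap();
/// let buffer = alloc_shared(layout)?;
/// // ... exchange data with the host through `buffer` ...
/// // SAFETY: `buffer` was allocated above with the same layout and is not yet deallocated.
/// unsafe { dealloc_shared(buffer, layout)? };
/// ```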
pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        shared_memory.refill(&mut shared_pool, layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}

/// Checks whether block flags indicate it should be MMIO guard mapped.
fn verify_lazy_mapped_block(
    _range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
    }
    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
        Ok(())
    } else {
        Err(())
    }
}

/// Unmaps a page from the MMIO guard, if the page was accessed and hence MMIO-guard-mapped.
fn mmio_guard_unmap_page(
    va_range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(());
    }
    // This function will be called on an address range that corresponds to a device. Only if a
    // page has been accessed (written to or read from), will it contain the VALID flag and be MMIO
    // guard mapped. Therefore, we can skip unmapping invalid pages, they were never MMIO guard
    // mapped anyway.
    if flags.contains(Attributes::VALID) {
        assert!(
            flags.contains(MMIO_LAZY_MAP_FLAG),
            "Attempting MMIO guard unmap for non-device pages"
        );
        assert_eq!(
            va_range.len(),
            PVMFW_PAGE_SIZE,
            "Failed to break down block mapping before MMIO guard mapping"
        );
        let page_base = va_range.start().0;
        assert_eq!(page_base % PVMFW_PAGE_SIZE, 0);
        // Since mmio_guard_map takes IPAs, if pvmfw moves non-ID address mapping, page_base
        // should be converted to IPA. However, since 0x0 is a valid MMIO address, we don't use
        // virt_to_phys here, and just pass page_base instead.
        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
            error!("Error MMIO guard unmapping: {e}");
        })?;
    }
    Ok(())
}

/// Clears the read-only flag on a PTE, making it writable-dirty. Used when dirty state is managed
/// in software to handle permission faults on read-only descriptors.
fn mark_dirty_block(
    va_range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().ok_or(())?;
    if !is_leaf_pte(&flags, level) {
        return Ok(());
    }
    if flags.contains(Attributes::DBM) {
        assert!(flags.contains(Attributes::READ_ONLY), "unexpected PTE writable state");
        desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
        // Updating the read-only bit of a PTE requires TLB invalidation.
        // A TLB maintenance instruction is only guaranteed to be complete after a DSB instruction.
        // An ISB instruction is required to ensure the effects of completed TLB maintenance
        // instructions are visible to instructions fetched afterwards.
        // See ARM ARM E2.3.10, and G5.9.
        tlbi!("vale1", PT_ASID, va_range.start().0);
        dsb!("ish");
        isb!();
        Ok(())
    } else {
        Err(())
    }
}

/// Returns the memory range reserved for the appended payload.
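///
/// # Example
///
/// With a hypothetical `binary_end()` of 0x7fa0_1234, `start` becomes 0x7fa0_2000 (the next 4KiB
/// boundary) and `end` becomes 0x7fc0_0000 (the next 2MiB boundary), so the payload may occupy
/// 0x7fa0_2000..0x7fc0_0000.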
pub fn appended_payload_range() -> MemoryRange {
    let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
    // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
    let end = align_up(start, SIZE_2MB).unwrap();
    start..end
}

/// Region allocated for the stack.
pub fn stack_range() -> MemoryRange {
    const STACK_PAGES: usize = 8;

    layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
}

/// Initializes the dynamic page table with mappings for the pvmfw image, the appended payload and
/// the console UART.
pub fn init_page_table() -> result::Result<PageTable, MapError> {
    let mut page_table: PageTable = IdMap::new(PT_ASID, PT_ROOT_LEVEL).into();

    // Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
    // so dirty state management can be omitted.
    page_table.map_data(&layout::scratch_range())?;
    page_table.map_data(&stack_range())?;
    page_table.map_code(&layout::text_range())?;
    page_table.map_rodata(&layout::rodata_range())?;
    page_table.map_data_dbm(&appended_payload_range())?;
    if let Err(e) = page_table.map_device(&layout::console_uart_range()) {
        error!("Failed to remap the UART as a dynamic page table entry: {e}");
        return Err(e);
    }
    Ok(page_table)
}