// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level allocation and tracking of main memory.

#![deny(unsafe_op_in_unsafe_fn)]

use crate::helpers::PVMFW_PAGE_SIZE;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use aarch64_paging::MapError;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use buddy_system_allocator::LockedFrameAllocator;
use core::alloc::Layout;
use core::fmt;
use core::iter::once;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;
use vmbase::{
    dsb, isb, layout,
    memory::{
        flush_dirty_range, is_leaf_pte, page_4kb_of, set_dbm_enabled, MemorySharer, PageTable,
        MMIO_LAZY_MAP_FLAG, PT_ASID, SIZE_2MB, SIZE_4KB,
    },
    tlbi,
    util::{align_up, RangeExt as _},
};

/// First address that can't be translated by a level 1 TTBR0_EL1.
pub const MAX_ADDR: usize = 1 << 40;

pub type MemoryRange = Range<usize>;

pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
unsafe impl Send for MemoryTracker {}
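
// A minimal sketch of how the global tracker is expected to be used from a call site
// elsewhere in pvmfw (the surrounding names are assumptions, not part of this file):
// the tracker is installed into `MEMORY` once and then accessed under its spinlock.
//
//     MEMORY.lock().replace(MemoryTracker::new(page_table, total, mmio, payload));
//     ...
//     MEMORY.lock().as_mut().unwrap().map_mmio_range(uart_mmio_range)?;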

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: MemoryRange,
}

/// Errors for MemoryTracker operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in memory size.
    SizeTooSmall,
    /// Reached limit number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Region couldn't be unmapped.
    FailedToUnmap,
    /// Error from the interaction with the hypervisor.
    Hypervisor(hyp::Error),
    /// Failure to set `SHARED_MEMORY`.
    SharedMemorySetFailure,
    /// Failure to set `SHARED_POOL`.
    SharedPoolSetFailure,
    /// Invalid page table entry.
    InvalidPte,
    /// Failed to flush memory region.
    FlushRegionFailed,
    /// Failed to set PTE dirty state.
    SetPteDirtyFailed,
}

impl fmt::Display for MemoryTrackerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::DifferentBaseAddress => write!(f, "Received different base address"),
            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
            Self::Full => write!(f, "Reached limit number of tracked regions"),
            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
            Self::FailedToMap => write!(f, "Failed to map the new region"),
            Self::FailedToUnmap => write!(f, "Failed to unmap the new region"),
            Self::Hypervisor(e) => e.fmt(f),
            Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
            Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
            Self::InvalidPte => write!(f, "Page table entry is not valid"),
            Self::FlushRegionFailed => write!(f, "Failed to flush memory region"),
            Self::SetPteDirtyFailed => write!(f, "Failed to set PTE dirty state"),
        }
    }
}

impl From<hyp::Error> for MemoryTrackerError {
    fn from(e: hyp::Error) -> Self {
        Self::Hypervisor(e)
    }
}

type Result<T> = result::Result<T, MemoryTrackerError>;

static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance, activating the given page table and covering the maximum RAM size.
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: MemoryRange,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults immediately
        // after activating the new page table. This has no effect before the new page table is
        // activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY - page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range,
        }
    }

    /// Resizes the total RAM size.
    ///
    /// This function fails if the tracker contains regions not included within the new size.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data_dbm(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }
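
    // Example (sketch only; `fdt_addr` and `fdt_size` are hypothetical values supplied by
    // the caller): reserving the device tree region read-only before parsing it would be
    //
    //     let range = tracker.alloc(fdt_addr, NonZeroUsize::new(fdt_size).unwrap())?;
    //
    // which maps `fdt_addr..fdt_addr + fdt_size` as read-only data and records the region
    // so that later allocations cannot overlap it.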

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device_lazy(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't overlap
    /// with any other previously allocated regions, and that the `regions` ArrayVec has capacity to
    /// add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&mut self) -> Result<()> {
        for range in &self.mmio_regions {
            self.page_table
                .modify_range(range, &mmio_guard_unmap_page)
                .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
        }
        Ok(())
    }

    /// Initializes the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
        const INIT_CAP: usize = 10;

        let granule = get_hypervisor().memory_protection_granule()?;
        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initializes the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest
    /// to share its memory with the host. Instead, they let the host designate part
    /// of guest memory as "shared" ahead of the guest starting its execution. The
    /// shared memory region is indicated in the swiotlb node. On such platforms, use
    /// a separate heap to allocate buffers that can be shared with the host.
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }
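
    // Which of the two initializers to call is a platform decision made by the caller;
    // a rough sketch (the `swiotlb_range` accessor is an assumption, not part of this file):
    //
    //     match swiotlb_range(fdt) {
    //         Some(range) => tracker.init_static_shared_pool(range)?,
    //         None => tracker.init_dynamic_shared_pool()?,
    //     }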

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault on a block flagged for lazy MMIO mapping by enabling the page
    /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
    pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
        let page_range = page_4kb_of(addr)..page_4kb_of(addr) + PVMFW_PAGE_SIZE;
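        // For example (assuming PVMFW_PAGE_SIZE is 4 KiB, as the use of page_4kb_of implies),
        // a fault at 0x9000_0123 yields page_range == 0x9000_0000..0x9000_1000, i.e. exactly
        // the one device page that gets MMIO guard mapped and remapped below.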
        self.page_table
            .modify_range(&page_range, &verify_lazy_mapped_block)
            .map_err(|_| MemoryTrackerError::InvalidPte)?;
        get_hypervisor().mmio_guard_map(page_range.start)?;
        // Maps a single device page, breaking up block mappings if necessary.
        self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
    }

    /// Flushes all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(once(&self.payload_range)) {
            self.page_table
                .modify_range(range, &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault on a read-only block by setting its writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    pub fn handle_permission_fault(&mut self, addr: usize) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        shared_memory.refill(&mut shared_pool, layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}
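
// A minimal usage sketch for the pair above (error handling elided). `SIZE_4KB` is the
// vmbase constant already imported in this file; everything else is illustrative:
//
//     let layout = Layout::from_size_align(SIZE_4KB, SIZE_4KB).unwrap();
//     let buf = alloc_shared(layout)?;
//     // ... hand `buf` to the host, e.g. as a bounce buffer ...
//     // SAFETY: `buf` was returned by alloc_shared with this same layout and has not
//     // been deallocated yet.
//     unsafe { dealloc_shared(buf, layout)? };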

/// Checks whether block flags indicate it should be MMIO guard mapped.
fn verify_lazy_mapped_block(
    _range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
    }
    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
        Ok(())
    } else {
        Err(())
    }
}

/// MMIO guard unmaps a page, if it had been lazily mapped and accessed (i.e. its PTE is valid).
fn mmio_guard_unmap_page(
    va_range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(());
    }
    // This function will be called on an address range that corresponds to a device. Only if a
    // page has been accessed (written to or read from) will it contain the VALID flag and be MMIO
    // guard mapped. Therefore, we can skip unmapping invalid pages; they were never MMIO guard
    // mapped anyway.
    if flags.contains(Attributes::VALID) {
        assert!(
            flags.contains(MMIO_LAZY_MAP_FLAG),
            "Attempting MMIO guard unmap for non-device pages"
        );
        assert_eq!(
            va_range.len(),
            PVMFW_PAGE_SIZE,
            "Failed to break down block mapping before MMIO guard mapping"
        );
        let page_base = va_range.start().0;
        assert_eq!(page_base % PVMFW_PAGE_SIZE, 0);
        // Since mmio_guard_map takes IPAs, if pvmfw ever moves to a non-identity address mapping,
        // page_base should be converted to an IPA. However, since 0x0 is a valid MMIO address, we
        // don't use virt_to_phys here, and just pass page_base instead.
        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
            error!("Error MMIO guard unmapping: {e}");
        })?;
    }
    Ok(())
}

/// Clears the read-only flag on a PTE, making it writable-dirty. Used when dirty state is managed
/// in software to handle permission faults on read-only descriptors.
fn mark_dirty_block(
    va_range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().ok_or(())?;
    if !is_leaf_pte(&flags, level) {
        return Ok(());
    }
    if flags.contains(Attributes::DBM) {
        assert!(flags.contains(Attributes::READ_ONLY), "unexpected PTE writable state");
        desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
        // Updating the read-only bit of a PTE requires TLB invalidation.
        // A TLB maintenance instruction is only guaranteed to be complete after a DSB instruction.
        // An ISB instruction is required to ensure the effects of completed TLB maintenance
        // instructions are visible to instructions fetched afterwards.
        // See ARM ARM E2.3.10, and G5.9.
        tlbi!("vale1", PT_ASID, va_range.start().0);
        dsb!("ish");
        isb!();
        Ok(())
    } else {
        Err(())
    }
}

/// Returns the memory range reserved for the appended payload.
pub fn appended_payload_range() -> MemoryRange {
    let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
    // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
    let end = align_up(start, SIZE_2MB).unwrap();
    start..end
}
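
// A worked example with a hypothetical link-time layout: if layout::binary_end() were
// 0x7fe0_1234, the payload range would be 0x7fe0_2000..0x8000_0000, i.e. from the next
// 4 KiB boundary after the pvmfw binary up to the end of its 2 MiB region.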

/// Region allocated for the stack.
pub fn stack_range() -> MemoryRange {
    const STACK_PAGES: usize = 8;

    layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
}

pub fn init_page_table() -> result::Result<PageTable, MapError> {
    let mut page_table = PageTable::default();

    // Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
    // so dirty state management can be omitted.
    page_table.map_data(&layout::scratch_range())?;
    page_table.map_data(&stack_range())?;
    page_table.map_code(&layout::text_range())?;
    page_table.map_rodata(&layout::rodata_range())?;
    page_table.map_data_dbm(&appended_payload_range())?;
    if let Err(e) = page_table.map_device(&layout::console_uart_range()) {
        error!("Failed to remap the UART as a dynamic page table entry: {e}");
        return Err(e);
    }
    Ok(page_table)
}