// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level allocation and tracking of main memory.

#![deny(unsafe_op_in_unsafe_fn)]

use crate::helpers::PVMFW_PAGE_SIZE;
use aarch64_paging::MapError;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use buddy_system_allocator::LockedFrameAllocator;
use core::alloc::Layout;
use core::fmt;
use core::iter::once;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;
use vmbase::{
    dsb, layout,
    memory::{
        flush_dirty_range, mark_dirty_block, mmio_guard_unmap_page, page_4kb_of, set_dbm_enabled,
        verify_lazy_mapped_block, MemorySharer, PageTable, SIZE_2MB, SIZE_4KB,
    },
    util::{align_up, RangeExt as _},
};

/// First address that can't be translated by a level 1 TTBR0_EL1.
pub const MAX_ADDR: usize = 1 << 40;

type MemoryRange = Range<usize>;

pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
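// SAFETY: `MemoryTracker` is only prevented from being auto-`Send` by the raw pointers held
// inside `PageTable`; those page tables are exclusively owned by the tracker (an assumption
// made explicit here), so sending it to another thread is sound.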
unsafe impl Send for MemoryTracker {}

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: MemoryRange,
}

/// Errors for `MemoryTracker` operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in memory size.
    SizeTooSmall,
    /// Reached limit number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Region couldn't be unmapped.
    FailedToUnmap,
    /// Error from the interaction with the hypervisor.
    Hypervisor(hyp::Error),
    /// Failure to set `SHARED_MEMORY`.
    SharedMemorySetFailure,
    /// Failure to set `SHARED_POOL`.
    SharedPoolSetFailure,
    /// Invalid page table entry.
    InvalidPte,
    /// Failed to flush memory region.
    FlushRegionFailed,
    /// Failed to set PTE dirty state.
    SetPteDirtyFailed,
}

impl fmt::Display for MemoryTrackerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::DifferentBaseAddress => write!(f, "Received different base address"),
            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
            Self::Full => write!(f, "Reached limit number of tracked regions"),
            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
            Self::FailedToMap => write!(f, "Failed to map the new region"),
            Self::FailedToUnmap => write!(f, "Failed to unmap the new region"),
            Self::Hypervisor(e) => e.fmt(f),
            Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
            Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
            Self::InvalidPte => write!(f, "Page table entry is not valid"),
            Self::FlushRegionFailed => write!(f, "Failed to flush memory region"),
            Self::SetPteDirtyFailed => write!(f, "Failed to set PTE dirty state"),
        }
    }
}

impl From<hyp::Error> for MemoryTrackerError {
    fn from(e: hyp::Error) -> Self {
        Self::Hypervisor(e)
    }
}

type Result<T> = result::Result<T, MemoryTrackerError>;

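// Shared-memory globals: `SHARED_POOL` is the allocator that `alloc_shared` draws from, while
// `SHARED_MEMORY`, only set when memory is shared dynamically with the host, owns the shared
// pages and unshares them when dropped.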
static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance from the given page table, which it activates, covering the
    /// maximum RAM size.
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: MemoryRange,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults
        // immediately after activating the new page table. This has no effect before the new
        // page table is activated because none of the entries in the initial idmap have the
        // DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY - page_table duplicates the static mappings for everything that the Rust code
        // is aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range,
        }
    }

    /// Resizes the total RAM size.
    ///
    /// This function fails if the tracker contains regions that do not fit within the new size.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data_dbm(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device_lazy(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

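        // Note: the range is now only lazily mapped: its PTEs are flagged but left invalid, so
        // the first access to a page traps, and the fault is expected to be resolved by
        // `handle_mmio_fault()` below, which MMIO-guard maps the page and makes it valid.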
        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't
    /// overlap with any other previously allocated regions, and that the `regions` `ArrayVec`
    /// has capacity to add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&mut self) -> Result<()> {
        for range in &self.mmio_regions {
            self.page_table
                .modify_range(range, &mmio_guard_unmap_page)
                .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
        }
        Ok(())
    }

    /// Initializes the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
        const INIT_CAP: usize = 10;

        let granule = get_hypervisor().memory_protection_granule()?;
        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initializes the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share
    /// its memory with the host. Instead, they allow the host to designate part of the guest's
    /// memory as "shared" ahead of the guest starting its execution. The shared memory region
    /// is indicated in the swiotlb node. On such platforms, use a separate heap to allocate
    /// buffers that can be shared with the host.
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

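    // A sketch of the expected call site (assumed; the exception handler lives elsewhere):
    //
    //     MEMORY.lock().as_mut().unwrap().handle_mmio_fault(fault_address)?;
    //
    // i.e. on a translation fault in the MMIO range, resolve the lazy mapping and retry the
    // faulting instruction.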
    /// Handles a translation fault for blocks flagged for lazy MMIO mapping by enabling the
    /// page table entry and MMIO-guard mapping the block. Breaks apart a block entry if
    /// required.
    pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
        let page_range = page_4kb_of(addr)..page_4kb_of(addr) + PVMFW_PAGE_SIZE;
        self.page_table
            .modify_range(&page_range, &verify_lazy_mapped_block)
            .map_err(|_| MemoryTrackerError::InvalidPte)?;
        get_hypervisor().mmio_guard_map(page_range.start)?;
        // Maps a single device page, breaking up block mappings if necessary.
        self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
    }

    /// Flushes all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have
        // been observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(once(&self.payload_range)) {
            self.page_table
                .modify_range(range, &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault on read-only blocks by setting the writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    pub fn handle_permission_fault(&mut self, addr: usize) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
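        // The ordering below is assumed deliberate: disable hardware dirty-bit management
        // first so the PTE dirty state stays stable while it is read and flushed, and only
        // unshare memory once the flush is done.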
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the
/// host. Returns a pointer to the buffer.
pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

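/// Tries to allocate from `SHARED_POOL`; if that fails and `SHARED_MEMORY` is set (i.e. memory
/// is shared dynamically), refills the pool from the global allocator and tries once more.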
fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        shared_memory.refill(&mut shared_pool, layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}


/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}

/// Returns the memory range reserved for the appended payload.
pub fn appended_payload_range() -> MemoryRange {
    let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
    // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
    let end = align_up(start, SIZE_2MB).unwrap();
    start..end
}

/// Region allocated for the stack.
pub fn stack_range() -> MemoryRange {
    const STACK_PAGES: usize = 8;

    layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
}

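/// Builds the dynamic page table, mapping the scratch, stack, code, rodata, payload, and UART
/// regions.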
pub fn init_page_table() -> result::Result<PageTable, MapError> {
    let mut page_table = PageTable::default();

    // Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
    // so dirty state management can be omitted.
    page_table.map_data(&layout::scratch_range())?;
    page_table.map_data(&stack_range())?;
    page_table.map_code(&layout::text_range())?;
    page_table.map_rodata(&layout::rodata_range())?;
    page_table.map_data_dbm(&appended_payload_range())?;
    if let Err(e) = page_table.map_device(&layout::console_uart_range()) {
        error!("Failed to remap the UART as a dynamic page table entry: {e}");
        return Err(e);
    }
    Ok(page_table)
}
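
// Expected wiring, as a sketch (variable names are illustrative; the real call site lives
// elsewhere, presumably in the crate's entry point):
//
//     let page_table = init_page_table()?;
//     MEMORY.lock().replace(MemoryTracker::new(page_table, total, mmio_range, payload_range));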