// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level allocation and tracking of main memory.

#![deny(unsafe_op_in_unsafe_fn)]

use crate::helpers::PVMFW_PAGE_SIZE;
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use aarch64_paging::MapError;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use buddy_system_allocator::LockedFrameAllocator;
use core::alloc::Layout;
use core::fmt;
use core::iter::once;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;
use vmbase::{
    dsb, isb,
    layout::{self, crosvm::MEM_START},
    memory::{
        flush_dirty_range, is_leaf_pte, page_4kb_of, set_dbm_enabled, MemorySharer, PageTable,
        MMIO_LAZY_MAP_FLAG, SIZE_2MB, SIZE_4KB, SIZE_4MB,
    },
    tlbi,
    util::{align_up, RangeExt as _},
};

/// First address that can't be translated by a level 1 TTBR0_EL1.
pub const MAX_ADDR: usize = 1 << 40;

const PT_ROOT_LEVEL: usize = 1;
const PT_ASID: usize = 1;

pub type MemoryRange = Range<usize>;

pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
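// SAFETY (assumption) - `MemoryTracker` is only ever accessed through the `MEMORY` spinlock,
// which serializes all uses, so sending it to another CPU cannot introduce data races.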
unsafe impl Send for MemoryTracker {}

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
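///
/// # Example
///
/// A minimal sketch of typical use during early boot; `fdt_addr` and `fdt_size` are hypothetical
/// placeholders for a region to be tracked:
///
/// ```ignore
/// MEMORY.lock().replace(MemoryTracker::new(init_page_table().unwrap()));
/// let range = MEMORY.lock().as_mut().unwrap().alloc(fdt_addr, fdt_size)?;
/// ```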
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
}

/// Errors for MemoryTracker operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in the new memory size.
    SizeTooSmall,
    /// Reached the maximum number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Region couldn't be unmapped.
    FailedToUnmap,
    /// Error from the interaction with the hypervisor.
    Hypervisor(hyp::Error),
    /// Failure to set `SHARED_MEMORY`.
    SharedMemorySetFailure,
    /// Failure to set `SHARED_POOL`.
    SharedPoolSetFailure,
    /// Invalid page table entry.
    InvalidPte,
    /// Failed to flush memory region.
    FlushRegionFailed,
    /// Failed to set PTE dirty state.
    SetPteDirtyFailed,
}

impl fmt::Display for MemoryTrackerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::DifferentBaseAddress => write!(f, "Received different base address"),
            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
            Self::Full => write!(f, "Reached the maximum number of tracked regions"),
            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
            Self::FailedToMap => write!(f, "Failed to map the new region"),
            Self::FailedToUnmap => write!(f, "Failed to unmap the new region"),
            Self::Hypervisor(e) => e.fmt(f),
            Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
            Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
            Self::InvalidPte => write!(f, "Page table entry is not valid"),
            Self::FlushRegionFailed => write!(f, "Failed to flush memory region"),
            Self::SetPteDirtyFailed => write!(f, "Failed to set PTE dirty state"),
        }
    }
}

impl From<hyp::Error> for MemoryTrackerError {
    fn from(e: hyp::Error) -> Self {
        Self::Hypervisor(e)
    }
}

type Result<T> = result::Result<T, MemoryTrackerError>;

static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;
    const PVMFW_RANGE: MemoryRange = (MEM_START - SIZE_4MB)..MEM_START;

    /// Creates a new instance from an active page table, covering the maximum RAM size.
    pub fn new(mut page_table: PageTable) -> Self {
        // Activate dirty state management first, otherwise we may get permission faults
        // immediately after activating the new page table. This has no effect before the new
        // page table is activated because none of the entries in the initial idmap have the DBM
        // flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY - page_table duplicates the static mappings for everything that the Rust code
        // is aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total: MEM_START..MAX_ADDR,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
        }
    }

    /// Resizes the total RAM size.
    ///
    /// This function fails if any currently tracked region would not fit within the new size.
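    ///
    /// # Example
    ///
    /// A sketch, assuming `ram_size` was read from the device tree (hypothetical variable) and
    /// `tracker` is the installed `MemoryTracker`:
    ///
    /// ```ignore
    /// tracker.shrink(&(MEM_START..MEM_START + ram_size))?;
    /// ```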
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data_dbm(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
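    ///
    /// # Example
    ///
    /// A sketch of lazily mapping a device page (`UART_BASE` is a hypothetical MMIO address
    /// below the main memory region):
    ///
    /// ```ignore
    /// tracker.map_mmio_range(UART_BASE..UART_BASE + PVMFW_PAGE_SIZE)?;
    /// ```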
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        // MMIO space is below the main memory region.
        if range.end > self.total.start || range.overlaps(&Self::PVMFW_RANGE) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device_lazy(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't
    /// overlap with any other previously allocated regions, and that the `regions` ArrayVec has
    /// capacity to add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&mut self) -> Result<()> {
        for range in &self.mmio_regions {
            self.page_table
                .modify_range(range, &mmio_guard_unmap_page)
                .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
        }
        Ok(())
    }

    /// Initializes the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
        const INIT_CAP: usize = 10;

        let granule = get_hypervisor().memory_protection_granule()?;
        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initializes the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share
    /// its memory with the host. Instead, they allow the host to designate part of the guest's
    /// memory as "shared" ahead of the guest starting its execution. The shared memory region is
    /// indicated by the swiotlb node. On such platforms, use a separate heap to allocate buffers
    /// that can be shared with the host.
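    ///
    /// # Example
    ///
    /// A sketch of choosing between the two pool initializers, where `swiotlb_range` would come
    /// from the device tree and is hypothetical here:
    ///
    /// ```ignore
    /// match swiotlb_range {
    ///     Some(range) => tracker.init_static_shared_pool(range)?,
    ///     None => tracker.init_dynamic_shared_pool()?,
    /// }
    /// ```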
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault on a block flagged for lazy MMIO mapping by enabling the page
    /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
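    ///
    /// # Example
    ///
    /// A sketch of use from the synchronous exception handler, where `far` would hold the
    /// faulting address read from `FAR_EL1` (hypothetical handler code):
    ///
    /// ```ignore
    /// MEMORY.lock().as_mut().unwrap().handle_mmio_fault(far)?;
    /// ```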
    pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
        let page_range = page_4kb_of(addr)..page_4kb_of(addr) + PVMFW_PAGE_SIZE;
        self.page_table
            .modify_range(&page_range, &verify_lazy_mapped_block)
            .map_err(|_| MemoryTrackerError::InvalidPte)?;
        get_hypervisor().mmio_guard_map(page_range.start)?;
        // Maps a single device page, breaking up block mappings if necessary.
        self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
    }

    /// Flushes all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        let payload_range = appended_payload_range();
        // Execute a barrier instruction to ensure all hardware updates to the page table have
        // been observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(once(&payload_range)) {
            self.page_table
                .modify_range(range, &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault on a read-only block by setting its writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
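    ///
    /// # Example
    ///
    /// A sketch, assuming the exception handler has decoded a permission fault at `far`
    /// (hypothetical handler code):
    ///
    /// ```ignore
    /// MEMORY.lock().as_mut().unwrap().handle_permission_fault(far)?;
    /// ```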
    pub fn handle_permission_fault(&mut self, addr: usize) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the
/// host. Returns a pointer to the buffer.
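///
/// # Example
///
/// A minimal sketch (the size and alignment are placeholders; the shared pool must have been
/// initialized first):
///
/// ```ignore
/// let layout = Layout::from_size_align(512, 64).unwrap();
/// let buffer = alloc_shared(layout)?;
/// // ... exchange data with the host through `buffer` ...
/// // SAFETY - `buffer` came from `alloc_shared` with this exact `layout`.
/// unsafe { dealloc_shared(buffer, layout)? };
/// ```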
pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        shared_memory.refill(&mut shared_pool, layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}

/// Checks whether the block's flags indicate that it should be MMIO guard mapped.
fn verify_lazy_mapped_block(
    _range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
    }
    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
        Ok(())
    } else {
        Err(())
    }
}

/// Unmaps the given page from the MMIO guard.
fn mmio_guard_unmap_page(
    va_range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(());
    }
    // This function will be called on an address range that corresponds to a device. Only if a
    // page has been accessed (written to or read from), will it contain the VALID flag and be
    // MMIO guard mapped. Therefore, we can skip unmapping invalid pages; they were never MMIO
    // guard mapped anyway.
    if flags.contains(Attributes::VALID) {
        assert!(
            flags.contains(MMIO_LAZY_MAP_FLAG),
            "Attempting MMIO guard unmap for non-device pages"
        );
        assert_eq!(
            va_range.len(),
            PVMFW_PAGE_SIZE,
            "Failed to break down block mapping before MMIO guard mapping"
        );
        let page_base = va_range.start().0;
        assert_eq!(page_base % PVMFW_PAGE_SIZE, 0);
        // Since mmio_guard_map takes IPAs, if pvmfw moves to a non-identity address mapping,
        // page_base should be converted to an IPA. However, since 0x0 is a valid MMIO address,
        // we don't use virt_to_phys here, and just pass page_base instead.
        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
            error!("Error MMIO guard unmapping: {e}");
        })?;
    }
    Ok(())
}

/// Clears the read-only flag on a PTE, making it writable-dirty. Used when dirty state is
/// managed in software to handle permission faults on read-only descriptors.
fn mark_dirty_block(
    va_range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().ok_or(())?;
    if !is_leaf_pte(&flags, level) {
        return Ok(());
    }
    if flags.contains(Attributes::DBM) {
        assert!(flags.contains(Attributes::READ_ONLY), "unexpected PTE writable state");
        desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
        // Updating the read-only bit of a PTE requires TLB invalidation.
        // A TLB maintenance instruction is only guaranteed to be complete after a DSB instruction.
        // An ISB instruction is required to ensure the effects of completed TLB maintenance
        // instructions are visible to instructions fetched afterwards.
        // See ARM ARM E2.3.10, and G5.9.
        tlbi!("vale1", PT_ASID, va_range.start().0);
        dsb!("ish");
        isb!();
        Ok(())
    } else {
        Err(())
    }
}

/// Returns the memory range reserved for the appended payload.
pub fn appended_payload_range() -> Range<usize> {
    let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
    // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
    let end = align_up(start, SIZE_2MB).unwrap();
    start..end
}

/// Region allocated for the stack.
pub fn stack_range() -> Range<usize> {
    const STACK_PAGES: usize = 8;

    layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
}

pub fn init_page_table() -> result::Result<PageTable, MapError> {
    let mut page_table: PageTable = IdMap::new(PT_ASID, PT_ROOT_LEVEL).into();

    // Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
    // so dirty state management can be omitted.
    page_table.map_data(&layout::scratch_range())?;
    page_table.map_data(&stack_range())?;
    page_table.map_code(&layout::text_range())?;
    page_table.map_rodata(&layout::rodata_range())?;
    page_table.map_data_dbm(&appended_payload_range())?;
    if let Err(e) = page_table.map_device(&layout::console_uart_range()) {
        error!("Failed to remap the UART as a dynamic page table entry: {e}");
        return Err(e);
    }
    Ok(page_table)
}