// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level allocation and tracking of main memory.

#![deny(unsafe_op_in_unsafe_fn)]

use crate::helpers::PVMFW_PAGE_SIZE;
use aarch64_paging::MapError;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use buddy_system_allocator::LockedFrameAllocator;
use core::alloc::Layout;
use core::iter::once;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;
use vmbase::{
    dsb, layout,
    memory::{
        flush_dirty_range, mark_dirty_block, mmio_guard_unmap_page, page_4kb_of, set_dbm_enabled,
        verify_lazy_mapped_block, MemorySharer, MemoryTrackerError, PageTable, SIZE_2MB, SIZE_4KB,
    },
    util::{align_up, RangeExt as _},
};

/// First address that can't be translated by a level 1 TTBR0_EL1.
pub const MAX_ADDR: usize = 1 << 40;

type MemoryRange = Range<usize>;

pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
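// Note (added; a hedged assumption, not from the original source): `MemoryTracker` is only
// reachable through the `MEMORY` spinlock above, which is why asserting `Send` below is
// considered acceptable despite the raw pointers owned by its page table.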
unsafe impl Send for MemoryTracker {}

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: MemoryRange,
}

type Result<T> = result::Result<T, MemoryTrackerError>;

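// Note (added; hedged summary): `SHARED_POOL` hands out buffers that are shared with the host.
// When the platform supports a MemShare API, `SHARED_MEMORY` holds the `MemorySharer` used to
// share more pages on demand (see `try_shared_alloc`); dropping it via `unshare_all_memory` is
// assumed to unshare everything it had shared.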
static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance covering the maximum RAM size and activates the given page table.
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: MemoryRange,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults immediately
        // after activating the new page table. This has no effect before the new page table is
        // activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY - page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range,
        }
    }

    /// Resizes the total RAM size.
    ///
    /// This function fails if the new size excludes any already-tracked regions.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data_dbm(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }
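
    // Illustrative usage (added; hedged sketch, `tracker`, `dt_base` and `dt_size` are
    // hypothetical names): a caller pinning a parsed DT region read-only might write
    //
    //     let range = tracker.alloc(dt_base, dt_size)?;
    //
    // whereas a buffer it intends to write through would go via `alloc_mut`/`alloc_range_mut`.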

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device_lazy(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker`, doesn't overlap
    /// with any previously allocated region, and that the `regions` ArrayVec has capacity to
    /// add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&mut self) -> Result<()> {
        for range in &self.mmio_regions {
            self.page_table
                .modify_range(range, &mmio_guard_unmap_page)
                .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
        }
        Ok(())
    }

    /// Initializes the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
        const INIT_CAP: usize = 10;

        let granule = get_hypervisor().memory_protection_granule()?;
        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initializes the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest
    /// to share its memory with the host. Instead, they allow the host to designate part
    /// of the guest's memory as "shared" ahead of the guest starting its execution. The
    /// shared memory region is indicated in the swiotlb node. On such platforms, use a
    /// separate heap to allocate buffers that can be shared with the host.
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }
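
    // Illustrative sketch (added; hedged, `tracker` and `swiotlb_range` are hypothetical names):
    // callers are expected to pick one of the two initializers based on platform support, e.g.
    //
    //     match swiotlb_range {
    //         Some(range) => tracker.init_static_shared_pool(range)?,
    //         None => tracker.init_dynamic_shared_pool()?,
    //     }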

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault on a block flagged for lazy MMIO mapping by enabling the
    /// page table entry and MMIO-guard-mapping the block. Breaks a block entry apart if required.
    pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
        let page_range = page_4kb_of(addr)..page_4kb_of(addr) + PVMFW_PAGE_SIZE;
        self.page_table
            .modify_range(&page_range, &verify_lazy_mapped_block)
            .map_err(|_| MemoryTrackerError::InvalidPte)?;
        get_hypervisor().mmio_guard_map(page_range.start)?;
        // Maps a single device page, breaking up block mappings if necessary.
        self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
    }

    /// Flushes all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(once(&self.payload_range)) {
            self.page_table
                .modify_range(range, &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault on a read-only block by setting the writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    pub fn handle_permission_fault(&mut self, addr: usize) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}
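
// Illustrative sketch (added; hedged, `handle_sync_fault` and its parameters are hypothetical):
// a synchronous exception handler would dispatch faulting addresses to the handlers above,
// roughly as follows:
//
//     fn handle_sync_fault(far: usize, is_translation_fault: bool) {
//         let mut guard = MEMORY.lock();
//         let tracker = guard.as_mut().unwrap();
//         if is_translation_fault {
//             // Lazily MMIO-guard-map and validate the touched device page.
//             tracker.handle_mmio_fault(far).expect("Unhandled MMIO fault");
//         } else {
//             // Record the write by marking the page writable-dirty.
//             tracker.handle_permission_fault(far).expect("Unhandled permission fault");
//         }
//     }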

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        shared_memory.refill(&mut shared_pool, layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}
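
// Illustrative round trip (added; hedged sketch): a driver sharing a page-sized scratch buffer
// with the host might write
//
//     let layout = Layout::from_size_align(SIZE_4KB, SIZE_4KB).unwrap();
//     let buffer = alloc_shared(layout)?;
//     // ... let the host access `buffer` ...
//     // SAFETY: `buffer` was returned by `alloc_shared` with this same layout and is freed once.
//     unsafe { dealloc_shared(buffer, layout)? };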

/// Returns the memory range reserved for the appended payload.
pub fn appended_payload_range() -> MemoryRange {
    let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
    // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
    let end = align_up(start, SIZE_2MB).unwrap();
    start..end
}

/// Region allocated for the stack.
pub fn stack_range() -> MemoryRange {
    const STACK_PAGES: usize = 8;

    layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
}
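
/// Initializes the dynamic page table with the static mappings required by pvmfw itself.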
pub fn init_page_table() -> result::Result<PageTable, MapError> {
    let mut page_table = PageTable::default();

    // Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
    // so dirty state management can be omitted.
    page_table.map_data(&layout::scratch_range())?;
    page_table.map_data(&stack_range())?;
    page_table.map_code(&layout::text_range())?;
    page_table.map_rodata(&layout::rodata_range())?;
    page_table.map_data_dbm(&appended_payload_range())?;
    if let Err(e) = page_table.map_device(&layout::console_uart_range()) {
        error!("Failed to remap the UART as a dynamic page table entry: {e}");
        return Err(e);
    }
    Ok(page_table)
}