Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 1 | // Copyright 2022, The Android Open Source Project |
| 2 | // |
| 3 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | // you may not use this file except in compliance with the License. |
| 5 | // You may obtain a copy of the License at |
| 6 | // |
| 7 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | // |
| 9 | // Unless required by applicable law or agreed to in writing, software |
| 10 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | // See the License for the specific language governing permissions and |
| 13 | // limitations under the License. |
| 14 | |
| 15 | //! Low-level allocation and tracking of main memory. |
| 16 | |
Andrew Walbran | 41ebe93 | 2022-12-14 15:22:30 +0000 | [diff] [blame] | 17 | use crate::helpers::{self, align_down, page_4kb_of, SIZE_4KB}; |
| 18 | use crate::hvc::{hyp_meminfo, mem_share, mem_unshare}; |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 19 | use crate::mmio_guard; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 20 | use crate::mmu; |
Andrew Walbran | 41ebe93 | 2022-12-14 15:22:30 +0000 | [diff] [blame] | 21 | use crate::smccc; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 22 | use core::cmp::max; |
| 23 | use core::cmp::min; |
| 24 | use core::fmt; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 25 | use core::num::NonZeroUsize; |
| 26 | use core::ops::Range; |
| 27 | use core::result; |
| 28 | use log::error; |
Pierre-Clément Tosi | 328dfb6 | 2022-11-25 18:20:42 +0000 | [diff] [blame] | 29 | use tinyvec::ArrayVec; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 30 | |
/// A region of memory, expressed as a half-open range of addresses.
pub type MemoryRange = Range<usize>;
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 32 | |
/// Mapping permission of a tracked main-memory region.
#[derive(Clone, Copy, Debug, Default)]
enum MemoryType {
    /// Mapped read-only; never written through the tracker, so no cache
    /// maintenance is needed when the tracker is dropped.
    #[default]
    ReadOnly,
    /// Mapped read-write; flushed from the cache on drop (see `Drop for MemoryTracker`).
    ReadWrite,
}
| 39 | |
/// A tracked slice of main memory together with its mapping permission.
#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    // Address range covered by the region.
    range: MemoryRange,
    // Permission with which the range was mapped.
    mem_type: MemoryType,
}
| 45 | |
| 46 | impl MemoryRegion { |
| 47 | /// True if the instance overlaps with the passed range. |
| 48 | pub fn overlaps(&self, range: &MemoryRange) -> bool { |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 49 | overlaps(&self.range, range) |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 50 | } |
| 51 | |
| 52 | /// True if the instance is fully contained within the passed range. |
| 53 | pub fn is_within(&self, range: &MemoryRange) -> bool { |
| 54 | let our: &MemoryRange = self.as_ref(); |
| 55 | self.as_ref() == &(max(our.start, range.start)..min(our.end, range.end)) |
| 56 | } |
| 57 | } |
| 58 | |
/// Cheap access to the underlying address range of a region.
impl AsRef<MemoryRange> for MemoryRegion {
    fn as_ref(&self) -> &MemoryRange {
        &self.range
    }
}
| 64 | |
/// Returns true if one range overlaps with the other at all.
///
/// Empty ranges never overlap anything, since the intersection below is empty.
fn overlaps<T: Copy + Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    // The intersection of two half-open ranges is non-empty iff its lower
    // bound is strictly below its upper bound.
    let lower = max(a.start, b.start);
    let upper = min(a.end, b.end);
    lower < upper
}
| 69 | |
/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    // Full address range of the tracked main memory.
    total: MemoryRange,
    // Page table through which tracked regions are mapped.
    page_table: mmu::PageTable,
    // Allocated main-memory regions; kept non-overlapping by `check()`.
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    // MMIO ranges mapped via the MMIO guard; kept non-overlapping.
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
}
| 77 | |
/// Errors for [`MemoryTracker`] operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in memory size.
    SizeTooSmall,
    /// Reached limit number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Error from an MMIO guard call; wraps the underlying error for display.
    MmioGuard(mmio_guard::Error),
}
| 98 | |
| 99 | impl fmt::Display for MemoryTrackerError { |
| 100 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 101 | match self { |
| 102 | Self::DifferentBaseAddress => write!(f, "Received different base address"), |
| 103 | Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"), |
| 104 | Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"), |
| 105 | Self::Full => write!(f, "Reached limit number of tracked regions"), |
| 106 | Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"), |
| 107 | Self::Overlaps => write!(f, "New region overlaps with tracked regions"), |
| 108 | Self::FailedToMap => write!(f, "Failed to map the new region"), |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 109 | Self::MmioGuard(e) => e.fmt(f), |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 110 | } |
| 111 | } |
| 112 | } |
| 113 | |
/// Lets `?` convert MMIO guard failures into [`MemoryTrackerError`].
impl From<mmio_guard::Error> for MemoryTrackerError {
    fn from(e: mmio_guard::Error) -> Self {
        Self::MmioGuard(e)
    }
}
| 119 | |
/// Result type for [`MemoryTracker`] operations.
type Result<T> = result::Result<T, MemoryTrackerError>;
| 121 | |
| 122 | impl MemoryTracker { |
| 123 | const CAPACITY: usize = 5; |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 124 | const MMIO_CAPACITY: usize = 5; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 125 | /// Base of the system's contiguous "main" memory. |
| 126 | const BASE: usize = 0x8000_0000; |
| 127 | /// First address that can't be translated by a level 1 TTBR0_EL1. |
| 128 | const MAX_ADDR: usize = 1 << 39; |
| 129 | |
| 130 | /// Create a new instance from an active page table, covering the maximum RAM size. |
| 131 | pub fn new(page_table: mmu::PageTable) -> Self { |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 132 | Self { |
| 133 | total: Self::BASE..Self::MAX_ADDR, |
| 134 | page_table, |
| 135 | regions: ArrayVec::new(), |
| 136 | mmio_regions: ArrayVec::new(), |
| 137 | } |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 138 | } |
| 139 | |
| 140 | /// Resize the total RAM size. |
| 141 | /// |
| 142 | /// This function fails if it contains regions that are not included within the new size. |
| 143 | pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> { |
| 144 | if range.start != self.total.start { |
| 145 | return Err(MemoryTrackerError::DifferentBaseAddress); |
| 146 | } |
| 147 | if self.total.end < range.end { |
| 148 | return Err(MemoryTrackerError::SizeTooLarge); |
| 149 | } |
Pierre-Clément Tosi | 328dfb6 | 2022-11-25 18:20:42 +0000 | [diff] [blame] | 150 | if !self.regions.iter().all(|r| r.is_within(range)) { |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 151 | return Err(MemoryTrackerError::SizeTooSmall); |
| 152 | } |
| 153 | |
| 154 | self.total = range.clone(); |
| 155 | Ok(()) |
| 156 | } |
| 157 | |
| 158 | /// Allocate the address range for a const slice; returns None if failed. |
| 159 | pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> { |
Andrew Walbran | da65ab1 | 2022-12-07 15:10:13 +0000 | [diff] [blame] | 160 | let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly }; |
| 161 | self.check(®ion)?; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 162 | self.page_table.map_rodata(range).map_err(|e| { |
| 163 | error!("Error during range allocation: {e}"); |
| 164 | MemoryTrackerError::FailedToMap |
| 165 | })?; |
Andrew Walbran | da65ab1 | 2022-12-07 15:10:13 +0000 | [diff] [blame] | 166 | self.add(region) |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 167 | } |
| 168 | |
| 169 | /// Allocate the address range for a mutable slice; returns None if failed. |
| 170 | pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> { |
Andrew Walbran | da65ab1 | 2022-12-07 15:10:13 +0000 | [diff] [blame] | 171 | let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite }; |
| 172 | self.check(®ion)?; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 173 | self.page_table.map_data(range).map_err(|e| { |
| 174 | error!("Error during mutable range allocation: {e}"); |
| 175 | MemoryTrackerError::FailedToMap |
| 176 | })?; |
Andrew Walbran | da65ab1 | 2022-12-07 15:10:13 +0000 | [diff] [blame] | 177 | self.add(region) |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 178 | } |
| 179 | |
| 180 | /// Allocate the address range for a const slice; returns None if failed. |
| 181 | pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> { |
| 182 | self.alloc_range(&(base..(base + size.get()))) |
| 183 | } |
| 184 | |
| 185 | /// Allocate the address range for a mutable slice; returns None if failed. |
| 186 | pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> { |
| 187 | self.alloc_range_mut(&(base..(base + size.get()))) |
| 188 | } |
| 189 | |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 190 | /// Checks that the given range of addresses is within the MMIO region, and then maps it |
| 191 | /// appropriately. |
| 192 | pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> { |
| 193 | // MMIO space is below the main memory region. |
| 194 | if range.end > self.total.start { |
| 195 | return Err(MemoryTrackerError::OutOfRange); |
| 196 | } |
| 197 | if self.mmio_regions.iter().any(|r| overlaps(r, &range)) { |
| 198 | return Err(MemoryTrackerError::Overlaps); |
| 199 | } |
| 200 | if self.mmio_regions.len() == self.mmio_regions.capacity() { |
| 201 | return Err(MemoryTrackerError::Full); |
| 202 | } |
| 203 | |
| 204 | self.page_table.map_device(&range).map_err(|e| { |
| 205 | error!("Error during MMIO device mapping: {e}"); |
| 206 | MemoryTrackerError::FailedToMap |
| 207 | })?; |
| 208 | |
| 209 | for page_base in page_iterator(&range) { |
| 210 | mmio_guard::map(page_base)?; |
| 211 | } |
| 212 | |
| 213 | if self.mmio_regions.try_push(range).is_some() { |
| 214 | return Err(MemoryTrackerError::Full); |
| 215 | } |
| 216 | |
| 217 | Ok(()) |
| 218 | } |
| 219 | |
Andrew Walbran | da65ab1 | 2022-12-07 15:10:13 +0000 | [diff] [blame] | 220 | /// Checks that the given region is within the range of the `MemoryTracker` and doesn't overlap |
| 221 | /// with any other previously allocated regions, and that the regions ArrayVec has capacity to |
| 222 | /// add it. |
| 223 | fn check(&self, region: &MemoryRegion) -> Result<()> { |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 224 | if !region.is_within(&self.total) { |
| 225 | return Err(MemoryTrackerError::OutOfRange); |
| 226 | } |
Andrew Walbran | da65ab1 | 2022-12-07 15:10:13 +0000 | [diff] [blame] | 227 | if self.regions.iter().any(|r| r.overlaps(®ion.range)) { |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 228 | return Err(MemoryTrackerError::Overlaps); |
| 229 | } |
Andrew Walbran | da65ab1 | 2022-12-07 15:10:13 +0000 | [diff] [blame] | 230 | if self.regions.len() == self.regions.capacity() { |
| 231 | return Err(MemoryTrackerError::Full); |
| 232 | } |
| 233 | Ok(()) |
| 234 | } |
| 235 | |
| 236 | fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> { |
Pierre-Clément Tosi | 328dfb6 | 2022-11-25 18:20:42 +0000 | [diff] [blame] | 237 | if self.regions.try_push(region).is_some() { |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 238 | return Err(MemoryTrackerError::Full); |
| 239 | } |
| 240 | |
Pierre-Clément Tosi | 328dfb6 | 2022-11-25 18:20:42 +0000 | [diff] [blame] | 241 | Ok(self.regions.last().unwrap().as_ref().clone()) |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 242 | } |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 243 | |
| 244 | /// Unmaps all tracked MMIO regions from the MMIO guard. |
| 245 | /// |
| 246 | /// Note that they are not unmapped from the page table. |
| 247 | pub fn mmio_unmap_all(&self) -> Result<()> { |
| 248 | for region in &self.mmio_regions { |
| 249 | for page_base in page_iterator(region) { |
| 250 | mmio_guard::unmap(page_base)?; |
| 251 | } |
| 252 | } |
| 253 | |
| 254 | Ok(()) |
| 255 | } |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 256 | } |
| 257 | |
| 258 | impl Drop for MemoryTracker { |
| 259 | fn drop(&mut self) { |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 260 | for region in &self.regions { |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 261 | match region.mem_type { |
| 262 | MemoryType::ReadWrite => { |
| 263 | // TODO: Use page table's dirty bit to only flush pages that were touched. |
| 264 | helpers::flush_region(region.range.start, region.range.len()) |
| 265 | } |
| 266 | MemoryType::ReadOnly => {} |
| 267 | } |
| 268 | } |
| 269 | } |
| 270 | } |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 271 | |
Andrew Walbran | 41ebe93 | 2022-12-14 15:22:30 +0000 | [diff] [blame] | 272 | /// Gives the KVM host read, write and execute permissions on the given memory range. If the range |
| 273 | /// is not aligned with the memory protection granule then it will be extended on either end to |
| 274 | /// align. |
| 275 | #[allow(unused)] |
| 276 | pub fn share_range(range: &MemoryRange) -> smccc::Result<()> { |
| 277 | let granule = hyp_meminfo()? as usize; |
| 278 | for base in (align_down(range.start, granule) |
| 279 | .expect("Memory protection granule was not a power of two")..range.end) |
| 280 | .step_by(granule) |
| 281 | { |
| 282 | mem_share(base as u64)?; |
| 283 | } |
| 284 | Ok(()) |
| 285 | } |
| 286 | |
| 287 | /// Removes permission from the KVM host to access the given memory range which was previously |
| 288 | /// shared. If the range is not aligned with the memory protection granule then it will be extended |
| 289 | /// on either end to align. |
| 290 | #[allow(unused)] |
| 291 | pub fn unshare_range(range: &MemoryRange) -> smccc::Result<()> { |
| 292 | let granule = hyp_meminfo()? as usize; |
| 293 | for base in (align_down(range.start, granule) |
| 294 | .expect("Memory protection granule was not a power of two")..range.end) |
| 295 | .step_by(granule) |
| 296 | { |
| 297 | mem_unshare(base as u64)?; |
| 298 | } |
| 299 | Ok(()) |
| 300 | } |
| 301 | |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 302 | /// Returns an iterator which yields the base address of each 4 KiB page within the given range. |
| 303 | fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> { |
| 304 | (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB) |
| 305 | } |