Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 1 | // Copyright 2022, The Android Open Source Project |
| 2 | // |
| 3 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | // you may not use this file except in compliance with the License. |
| 5 | // You may obtain a copy of the License at |
| 6 | // |
| 7 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | // |
| 9 | // Unless required by applicable law or agreed to in writing, software |
| 10 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | // See the License for the specific language governing permissions and |
| 13 | // limitations under the License. |
| 14 | |
| 15 | //! Low-level allocation and tracking of main memory. |
| 16 | |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 17 | use crate::helpers::{self, page_4kb_of, SIZE_4KB}; |
| 18 | use crate::mmio_guard; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 19 | use crate::mmu; |
| 20 | use core::cmp::max; |
| 21 | use core::cmp::min; |
| 22 | use core::fmt; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 23 | use core::num::NonZeroUsize; |
| 24 | use core::ops::Range; |
| 25 | use core::result; |
| 26 | use log::error; |
Pierre-Clément Tosi | 328dfb6 | 2022-11-25 18:20:42 +0000 | [diff] [blame] | 27 | use tinyvec::ArrayVec; |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 28 | |
/// Half-open range of byte addresses in main memory.
type MemoryRange = Range<usize>;
| 30 | |
/// Kind of mapping given to a tracked region of main memory.
#[derive(Clone, Copy, Debug, Default)]
enum MemoryType {
    /// Mapped read-only. Chosen as the default variant so `MemoryRegion` can derive
    /// `Default` (presumably required for storage in a tinyvec `ArrayVec` — confirm).
    #[default]
    ReadOnly,
    /// Mapped read-write; flushed via `helpers::flush_region` when the tracker is dropped.
    ReadWrite,
}
| 37 | |
/// A tracked slice of main memory together with the kind of mapping it was given.
#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    /// Address range covered by this region.
    range: MemoryRange,
    /// How the range was mapped (read-only or read-write).
    mem_type: MemoryType,
}
| 43 | |
| 44 | impl MemoryRegion { |
| 45 | /// True if the instance overlaps with the passed range. |
| 46 | pub fn overlaps(&self, range: &MemoryRange) -> bool { |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 47 | overlaps(&self.range, range) |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 48 | } |
| 49 | |
| 50 | /// True if the instance is fully contained within the passed range. |
| 51 | pub fn is_within(&self, range: &MemoryRange) -> bool { |
| 52 | let our: &MemoryRange = self.as_ref(); |
| 53 | self.as_ref() == &(max(our.start, range.start)..min(our.end, range.end)) |
| 54 | } |
| 55 | } |
| 56 | |
impl AsRef<MemoryRange> for MemoryRegion {
    /// Borrows the underlying address range.
    fn as_ref(&self) -> &MemoryRange {
        &self.range
    }
}
| 62 | |
/// Returns true if one range overlaps with the other at all.
///
/// The intersection of the two ranges must be non-empty, so an empty range never
/// overlaps anything (including itself).
fn overlaps<T: Copy + Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    let lower = a.start.max(b.start);
    let upper = a.end.min(b.end);
    lower < upper
}
| 67 | |
/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    /// Full address range of the main memory under this tracker's control.
    total: MemoryRange,
    /// Page table through which tracked regions are mapped.
    page_table: mmu::PageTable,
    /// Allocated regions of main memory; kept non-overlapping by `check()`.
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    /// Device (MMIO) ranges registered through `map_mmio_range()`.
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
}
| 75 | |
| 76 | /// Errors for MemoryTracker operations. |
/// Errors for MemoryTracker operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address (rejected by `shrink()`).
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in the new memory size.
    SizeTooSmall,
    /// Reached the limit number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with already-tracked regions.
    Overlaps,
    /// Region couldn't be mapped in the page table.
    FailedToMap,
    /// Error from an MMIO guard call.
    MmioGuard(mmio_guard::Error),
}
| 96 | |
| 97 | impl fmt::Display for MemoryTrackerError { |
| 98 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 99 | match self { |
| 100 | Self::DifferentBaseAddress => write!(f, "Received different base address"), |
| 101 | Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"), |
| 102 | Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"), |
| 103 | Self::Full => write!(f, "Reached limit number of tracked regions"), |
| 104 | Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"), |
| 105 | Self::Overlaps => write!(f, "New region overlaps with tracked regions"), |
| 106 | Self::FailedToMap => write!(f, "Failed to map the new region"), |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 107 | Self::MmioGuard(e) => e.fmt(f), |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 108 | } |
| 109 | } |
| 110 | } |
| 111 | |
impl From<mmio_guard::Error> for MemoryTrackerError {
    /// Wraps an MMIO guard error, enabling `?` on `mmio_guard` results.
    fn from(e: mmio_guard::Error) -> Self {
        Self::MmioGuard(e)
    }
}
| 117 | |
/// Shorthand for the result of `MemoryTracker` operations.
type Result<T> = result::Result<T, MemoryTrackerError>;
| 119 | |
impl MemoryTracker {
    /// Maximum number of main-memory regions that can be tracked.
    const CAPACITY: usize = 5;
    /// Maximum number of MMIO regions that can be tracked.
    const MMIO_CAPACITY: usize = 5;
    /// Base of the system's contiguous "main" memory.
    const BASE: usize = 0x8000_0000;
    /// First address that can't be translated by a level 1 TTBR0_EL1.
    const MAX_ADDR: usize = 1 << 39;

    /// Create a new instance from an active page table, covering the maximum RAM size.
    pub fn new(page_table: mmu::PageTable) -> Self {
        Self {
            total: Self::BASE..Self::MAX_ADDR,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
        }
    }

    /// Resize the total RAM size.
    ///
    /// The new range must keep the current base address, must not grow beyond the
    /// current total, and must still contain every region tracked so far; otherwise
    /// the corresponding error is returned and the tracker is left unchanged.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        // Only shrinking is supported; growing would silently claim untracked memory.
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocate the address range for a const slice; returns the tracked range on
    /// success, or an error if the range is invalid or couldn't be mapped.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        // Validate before mapping so a rejected request leaves the page table untouched.
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a mutable slice; returns the tracked range on
    /// success, or an error if the range is invalid or couldn't be mapped.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate `size` bytes starting at `base` for a const slice.
    ///
    /// NOTE(review): `base + size` may overflow `usize` for hostile inputs — confirm
    /// callers validate the range, or consider `checked_add`.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocate `size` bytes starting at `base` for a mutable slice.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        // MMIO space is below the main memory region.
        if range.end > self.total.start {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| overlaps(r, &range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        // Check capacity *before* mapping: a range mapped but not tracked could never
        // be released again by mmio_unmap_all().
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        // Register every 4 KiB page of the range with the MMIO guard (presumably a
        // per-page access grant from the hypervisor — confirm in mmio_guard).
        for page_base in page_iterator(&range) {
            mmio_guard::map(page_base)?;
        }

        // Can't fail thanks to the capacity check above; kept as defense in depth.
        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't overlap
    /// with any other previously allocated regions, and that the regions ArrayVec has capacity to
    /// add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| r.overlaps(&region.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    /// Records the region and returns a copy of its range.
    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        // tinyvec's try_push returns Some(rejected_element) when the vector is full.
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().as_ref().clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&self) -> Result<()> {
        for region in &self.mmio_regions {
            for page_base in page_iterator(region) {
                mmio_guard::unmap(page_base)?;
            }
        }

        Ok(())
    }
}
| 255 | |
| 256 | impl Drop for MemoryTracker { |
| 257 | fn drop(&mut self) { |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 258 | for region in &self.regions { |
Pierre-Clément Tosi | a0934c1 | 2022-11-25 20:54:11 +0000 | [diff] [blame] | 259 | match region.mem_type { |
| 260 | MemoryType::ReadWrite => { |
| 261 | // TODO: Use page table's dirty bit to only flush pages that were touched. |
| 262 | helpers::flush_region(region.range.start, region.range.len()) |
| 263 | } |
| 264 | MemoryType::ReadOnly => {} |
| 265 | } |
| 266 | } |
| 267 | } |
| 268 | } |
Andrew Walbran | 1969063 | 2022-12-07 16:41:30 +0000 | [diff] [blame] | 269 | |
/// Returns an iterator which yields the base address of each 4 KiB page within the given range.
///
/// The first yielded address is `page_4kb_of(range.start)` (presumably `range.start`
/// rounded down to a 4 KiB boundary — confirm in helpers), so the final, possibly
/// partial, page of the range is covered as well.
fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
    (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)
}