blob: 892089e3704ea0e2c038457513e22424eb9e420c [file] [log] [blame]
Pierre-Clément Tosia0934c12022-11-25 20:54:11 +00001// Copyright 2022, The Android Open Source Project
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15//! Low-level allocation and tracking of main memory.
16
Andrew Walbran19690632022-12-07 16:41:30 +000017use crate::helpers::{self, page_4kb_of, SIZE_4KB};
18use crate::mmio_guard;
Pierre-Clément Tosia0934c12022-11-25 20:54:11 +000019use crate::mmu;
20use core::cmp::max;
21use core::cmp::min;
22use core::fmt;
Pierre-Clément Tosia0934c12022-11-25 20:54:11 +000023use core::num::NonZeroUsize;
24use core::ops::Range;
25use core::result;
26use log::error;
Pierre-Clément Tosi328dfb62022-11-25 18:20:42 +000027use tinyvec::ArrayVec;
Pierre-Clément Tosia0934c12022-11-25 20:54:11 +000028
/// A range of addresses within the tracked main-memory / MMIO address space.
pub type MemoryRange = Range<usize>;
Pierre-Clément Tosia0934c12022-11-25 20:54:11 +000030
/// Access permissions with which a tracked memory region was mapped.
#[derive(Clone, Copy, Debug, Default)]
enum MemoryType {
    /// Mapped read-only; never flushed when the tracker is dropped.
    #[default]
    ReadOnly,
    /// Mapped read-write; flushed back to memory when the tracker is dropped.
    ReadWrite,
}
37
/// A tracked slice of main memory together with the permissions it was mapped with.
#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    // Address range covered by this region.
    range: MemoryRange,
    // Permissions the region was mapped with; drives flush-on-drop behavior.
    mem_type: MemoryType,
}
43
44impl MemoryRegion {
45 /// True if the instance overlaps with the passed range.
46 pub fn overlaps(&self, range: &MemoryRange) -> bool {
Andrew Walbran19690632022-12-07 16:41:30 +000047 overlaps(&self.range, range)
Pierre-Clément Tosia0934c12022-11-25 20:54:11 +000048 }
49
50 /// True if the instance is fully contained within the passed range.
51 pub fn is_within(&self, range: &MemoryRange) -> bool {
52 let our: &MemoryRange = self.as_ref();
53 self.as_ref() == &(max(our.start, range.start)..min(our.end, range.end))
54 }
55}
56
impl AsRef<MemoryRange> for MemoryRegion {
    // Borrow the underlying address range of the region.
    fn as_ref(&self) -> &MemoryRange {
        &self.range
    }
}
62
/// Returns true if one range overlaps with the other at all.
///
/// Empty ranges never overlap anything, including themselves.
fn overlaps<T: Copy + Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    let latest_start = a.start.max(b.start);
    let earliest_end = a.end.min(b.end);
    latest_start < earliest_end
}
67
/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    // Full range of addresses this tracker may allocate main-memory regions from.
    total: MemoryRange,
    // Active page table through which tracked regions are mapped.
    page_table: mmu::PageTable,
    // Allocated main-memory regions; bounded by `CAPACITY`.
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    // MMIO ranges shared via the MMIO guard; bounded by `MMIO_CAPACITY`.
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
}
75
/// Errors that can be returned by `MemoryTracker` operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in memory size.
    SizeTooSmall,
    /// Reached limit number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Error from an MMIO guard call.
    MmioGuard(mmio_guard::Error),
}
96
97impl fmt::Display for MemoryTrackerError {
98 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
99 match self {
100 Self::DifferentBaseAddress => write!(f, "Received different base address"),
101 Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
102 Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
103 Self::Full => write!(f, "Reached limit number of tracked regions"),
104 Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
105 Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
106 Self::FailedToMap => write!(f, "Failed to map the new region"),
Andrew Walbran19690632022-12-07 16:41:30 +0000107 Self::MmioGuard(e) => e.fmt(f),
Pierre-Clément Tosia0934c12022-11-25 20:54:11 +0000108 }
109 }
110}
111
impl From<mmio_guard::Error> for MemoryTrackerError {
    // Wrap an MMIO guard error so `?` can convert it inside tracker methods.
    fn from(e: mmio_guard::Error) -> Self {
        Self::MmioGuard(e)
    }
}
117
/// Module-local result type defaulting the error to `MemoryTrackerError`.
type Result<T> = result::Result<T, MemoryTrackerError>;
119
impl MemoryTracker {
    /// Maximum number of main-memory regions that can be tracked.
    const CAPACITY: usize = 5;
    /// Maximum number of MMIO ranges that can be tracked.
    const MMIO_CAPACITY: usize = 5;
    /// Base of the system's contiguous "main" memory.
    const BASE: usize = 0x8000_0000;
    /// First address that can't be translated by a level 1 TTBR0_EL1.
    const MAX_ADDR: usize = 1 << 39;

    /// Create a new instance from an active page table, covering the maximum RAM size.
    pub fn new(page_table: mmu::PageTable) -> Self {
        Self {
            total: Self::BASE..Self::MAX_ADDR,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
        }
    }

    /// Resize the total RAM size.
    ///
    /// This function fails if it contains regions that are not included within the new size.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        // The base address is fixed; only the end of the range may move (downward).
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        // Every previously allocated region must still fit in the shrunk range.
        if !self.regions.iter().all(|r| r.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocate the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        // Validate before touching the page table so a rejected region leaves no mapping.
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a const slice; returns an error on failure.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        // NOTE(review): `base + size` can overflow for adversarial inputs (panics in debug,
        // wraps in release); presumably callers pass validated values — confirm.
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        // MMIO space is below the main memory region.
        if range.end > self.total.start {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| overlaps(r, &range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        // Check capacity up front so a full tracker doesn't leave a dangling device mapping.
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        // Share every 4 KiB page of the range with the host through the MMIO guard.
        // NOTE(review): if a map call fails part-way, earlier pages stay guard-mapped;
        // confirm whether a rollback is needed here.
        for page_base in page_iterator(&range) {
            mmio_guard::map(page_base)?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            // Defensive: unreachable in practice since capacity was checked above.
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't overlap
    /// with any other previously allocated regions, and that the regions ArrayVec has capacity to
    /// add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| r.overlaps(&region.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    /// Appends a (pre-validated) region to the tracked list and returns its address range.
    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        // `try_push` returns the rejected element when the ArrayVec is full.
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().as_ref().clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&self) -> Result<()> {
        for region in &self.mmio_regions {
            for page_base in page_iterator(region) {
                mmio_guard::unmap(page_base)?;
            }
        }

        Ok(())
    }
}
255
256impl Drop for MemoryTracker {
257 fn drop(&mut self) {
Andrew Walbran19690632022-12-07 16:41:30 +0000258 for region in &self.regions {
Pierre-Clément Tosia0934c12022-11-25 20:54:11 +0000259 match region.mem_type {
260 MemoryType::ReadWrite => {
261 // TODO: Use page table's dirty bit to only flush pages that were touched.
262 helpers::flush_region(region.range.start, region.range.len())
263 }
264 MemoryType::ReadOnly => {}
265 }
266 }
267 }
268}
Andrew Walbran19690632022-12-07 16:41:30 +0000269
270/// Returns an iterator which yields the base address of each 4 KiB page within the given range.
271fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
272 (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)
273}