// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level allocation and tracking of main memory.

#![deny(unsafe_op_in_unsafe_fn)]

use crate::helpers::{self, align_down, align_up, page_4kb_of, SIZE_4KB, SIZE_4MB};
use crate::mmu;
use alloc::alloc::alloc_zeroed;
use alloc::alloc::dealloc;
use alloc::alloc::handle_alloc_error;
use core::alloc::Layout;
use core::cmp::max;
use core::cmp::min;
use core::fmt;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::error;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;

/// Base of the system's contiguous "main" memory.
pub const BASE_ADDR: usize = 0x8000_0000;
/// First address that can't be translated by a level 1 TTBR0_EL1.
pub const MAX_ADDR: usize = 1 << 40;

pub type MemoryRange = Range<usize>;

pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
// SAFETY: assuming the tracker is only ever accessed through the `MEMORY` SpinMutex above (pvmfw
// runs on a single CPU), moving it between threads cannot race on its raw page-table pointers.
unsafe impl Send for MemoryTracker {}

#[derive(Clone, Copy, Debug, Default)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

impl MemoryRegion {
    /// True if the instance overlaps with the passed range.
    pub fn overlaps(&self, range: &MemoryRange) -> bool {
        overlaps(&self.range, range)
    }

    /// True if the instance is fully contained within the passed range.
    pub fn is_within(&self, range: &MemoryRange) -> bool {
        let our: &MemoryRange = self.as_ref();
        self.as_ref() == &(max(our.start, range.start)..min(our.end, range.end))
    }
}

impl AsRef<MemoryRange> for MemoryRegion {
    fn as_ref(&self) -> &MemoryRange {
        &self.range
    }
}

/// Returns true if one range overlaps with the other at all.
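///
/// Ranges are half-open, so a shared endpoint alone is not an overlap: e.g.
/// `overlaps(&(0..10), &(10..20))` is false, while `overlaps(&(0..10), &(9..20))` is true.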
fn overlaps<T: Copy + Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    max(a.start, b.start) < min(a.end, b.end)
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: mmu::PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
}

/// Errors for MemoryTracker operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in memory size.
    SizeTooSmall,
    /// Reached the maximum number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Error from the interaction with the hypervisor.
    Hypervisor(hyp::Error),
}

impl fmt::Display for MemoryTrackerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::DifferentBaseAddress => write!(f, "Received different base address"),
            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
            Self::Full => write!(f, "Reached the maximum number of tracked regions"),
            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
            Self::FailedToMap => write!(f, "Failed to map the new region"),
            Self::Hypervisor(e) => e.fmt(f),
        }
    }
}

impl From<hyp::Error> for MemoryTrackerError {
    fn from(e: hyp::Error) -> Self {
        Self::Hypervisor(e)
    }
}

type Result<T> = result::Result<T, MemoryTrackerError>;

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;
    const PVMFW_RANGE: MemoryRange = (BASE_ADDR - SIZE_4MB)..BASE_ADDR;

    /// Create a new instance from an active page table, covering the maximum RAM size.
    pub fn new(page_table: mmu::PageTable) -> Self {
        Self {
            total: BASE_ADDR..MAX_ADDR,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
        }
    }

    /// Resize the total RAM size.
    ///
    /// This function fails if the tracker contains regions that do not fit within the new size.
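    ///
    /// For example, after learning the actual RAM size from the device tree, a caller could
    /// restrict the initial range with `tracker.shrink(&(BASE_ADDR..BASE_ADDR + 0x1000_0000))`
    /// (a hypothetical 256 MiB of RAM).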
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocate the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a const slice; returns an error on failure.
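    ///
    /// E.g. `tracker.alloc(0x8000_0000, NonZeroUsize::new(SIZE_4KB).unwrap())` (illustrative
    /// address) would map a single read-only page and return `Ok(0x8000_0000..0x8000_1000)`.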
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
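    ///
    /// E.g. `tracker.map_mmio_range(0x90_0000..0x90_1000)` (a hypothetical device address below
    /// the main memory region) maps one 4 KiB page as device memory and registers it with the
    /// hypervisor's MMIO guard.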
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        // MMIO space is below the main memory region.
        if range.end > self.total.start || overlaps(&Self::PVMFW_RANGE, &range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| overlaps(r, &range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        for page_base in page_iterator(&range) {
            get_hypervisor().mmio_guard_map(page_base)?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't overlap
    /// with any other previously allocated regions, and that the regions `ArrayVec` has capacity
    /// to add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| r.overlaps(&region.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().as_ref().clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&self) -> Result<()> {
        for region in &self.mmio_regions {
            for page_base in page_iterator(region) {
                get_hypervisor().mmio_guard_unmap(page_base)?;
            }
        }

        Ok(())
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        for region in &self.regions {
            match region.mem_type {
                MemoryType::ReadWrite => {
                    // TODO(b/269738062): Use PT's dirty bit to only flush pages that were touched.
                    helpers::flush_region(region.range.start, region.range.len())
                }
                MemoryType::ReadOnly => {}
            }
        }
    }
}

/// Gives the KVM host read, write and execute permissions on the given memory range. If the range
/// is not aligned with the memory protection granule then it will be extended on either end to
/// align.
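///
/// E.g. with a 4 KiB granule, sharing `0x1100..0x2100` actually shares the two whole pages
/// starting at `0x1000` and `0x2000`.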
fn share_range(range: &MemoryRange, granule: usize) -> hyp::Result<()> {
    for base in (align_down(range.start, granule)
        .expect("Memory protection granule was not a power of two")..range.end)
        .step_by(granule)
    {
        get_hypervisor().mem_share(base as u64)?;
    }
    Ok(())
}

/// Removes permission from the KVM host to access the given memory range which was previously
/// shared. If the range is not aligned with the memory protection granule then it will be extended
/// on either end to align.
fn unshare_range(range: &MemoryRange, granule: usize) -> hyp::Result<()> {
    for base in (align_down(range.start, granule)
        .expect("Memory protection granule was not a power of two")..range.end)
        .step_by(granule)
    {
        get_hypervisor().mem_unshare(base as u64)?;
    }
    Ok(())
}

/// Allocates a memory range of at least the given size from the global allocator, and shares it
/// with the host. Returns a pointer to the buffer.
///
/// It will be aligned to the memory sharing granule size supported by the hypervisor.
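///
/// A minimal pairing sketch (illustrative, not a doctest; error handling elided):
///
/// ```ignore
/// let buffer = alloc_shared(SIZE_4KB)?;
/// // ... let the host access the memory at `virt_to_phys(buffer)` ...
/// // SAFETY: `buffer` was returned by `alloc_shared` with this same size and not yet freed.
/// unsafe { dealloc_shared(buffer, SIZE_4KB)? };
/// ```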
pub fn alloc_shared(size: usize) -> hyp::Result<NonNull<u8>> {
    let layout = shared_buffer_layout(size)?;
    let granule = layout.align();

    // Safe because `shared_buffer_layout` panics if the size is 0, so the layout must have a
    // non-zero size.
    let buffer = unsafe { alloc_zeroed(layout) };

    let Some(buffer) = NonNull::new(buffer) else {
        handle_alloc_error(layout);
    };

    let paddr = virt_to_phys(buffer);
    // If share_range fails then we will leak the allocation, but that seems better than having it
    // be reused while maybe still partially shared with the host.
    share_range(&(paddr..paddr + layout.size()), granule)?;

    Ok(buffer)
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The size passed in must be the size passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same size, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, size: usize) -> hyp::Result<()> {
    let layout = shared_buffer_layout(size)?;
    let granule = layout.align();

    let paddr = virt_to_phys(vaddr);
    unshare_range(&(paddr..paddr + layout.size()), granule)?;
    // Safe because the memory was allocated by `alloc_shared` above using the same allocator, and
    // the layout is the same as was used then.
    unsafe { dealloc(vaddr.as_ptr(), layout) };

    Ok(())
}

/// Returns the layout to use for allocating a buffer of at least the given size shared with the
/// host.
///
/// It will be aligned to the memory sharing granule size supported by the hypervisor.
///
/// Panics if `size` is 0.
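///
/// E.g. for `size == 100` and a 4 KiB granule, this returns a 4096-byte layout with 4096-byte
/// alignment.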
fn shared_buffer_layout(size: usize) -> hyp::Result<Layout> {
    assert_ne!(size, 0);
    let granule = get_hypervisor().memory_protection_granule()?;
    let allocated_size =
        align_up(size, granule).expect("Memory protection granule was not a power of two");
    Ok(Layout::from_size_align(allocated_size, granule).unwrap())
}

/// Returns an iterator which yields the base address of each 4 KiB page within the given range.
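///
/// E.g. `page_iterator(&(0x1100..0x3000))` yields `0x1000` and `0x2000`.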
fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
    (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)
}

/// Returns the intermediate physical address corresponding to the given virtual address.
///
/// As we use identity mapping for everything, this is just a cast, but it's useful to use it to be
/// explicit about where we are converting from virtual to physical address.
pub fn virt_to_phys(vaddr: NonNull<u8>) -> usize {
    vaddr.as_ptr() as _
}

/// Returns a pointer for the virtual address corresponding to the given non-zero intermediate
/// physical address.
///
/// Panics if `paddr` is 0.
pub fn phys_to_virt(paddr: usize) -> NonNull<u8> {
    NonNull::new(paddr as _).unwrap()
}