// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level allocation and tracking of main memory.

#![deny(unsafe_op_in_unsafe_fn)]

use crate::helpers::{self, align_down, align_up, page_4kb_of, SIZE_4KB};
use crate::mmu;
use alloc::alloc::alloc_zeroed;
use alloc::alloc::dealloc;
use alloc::alloc::handle_alloc_error;
use core::alloc::Layout;
use core::cmp::max;
use core::cmp::min;
use core::fmt;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::error;
use tinyvec::ArrayVec;

/// Base of the system's contiguous "main" memory.
pub const BASE_ADDR: usize = 0x8000_0000;
/// First address that can't be translated by a level 1 TTBR0_EL1.
pub const MAX_ADDR: usize = 1 << 40;

pub type MemoryRange = Range<usize>;

#[derive(Clone, Copy, Debug, Default)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

impl MemoryRegion {
    /// True if the instance overlaps with the passed range.
    pub fn overlaps(&self, range: &MemoryRange) -> bool {
        overlaps(&self.range, range)
    }

    /// True if the instance is fully contained within the passed range.
    pub fn is_within(&self, range: &MemoryRange) -> bool {
        let our: &MemoryRange = self.as_ref();
        self.as_ref() == &(max(our.start, range.start)..min(our.end, range.end))
    }
}

impl AsRef<MemoryRange> for MemoryRegion {
    fn as_ref(&self) -> &MemoryRange {
        &self.range
    }
}

/// Returns true if one range overlaps with the other at all.
fn overlaps<T: Copy + Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    max(a.start, b.start) < min(a.end, b.end)
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: mmu::PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
}

/// Errors for MemoryTracker operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in the new memory size.
    SizeTooSmall,
    /// Reached the maximum number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Error from the interaction with the hypervisor.
    Hypervisor(hyp::Error),
}

impl fmt::Display for MemoryTrackerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::DifferentBaseAddress => write!(f, "Received different base address"),
            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in the new memory size"),
            Self::Full => write!(f, "Reached the maximum number of tracked regions"),
            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
            Self::FailedToMap => write!(f, "Failed to map the new region"),
            Self::Hypervisor(e) => e.fmt(f),
        }
    }
}

impl From<hyp::Error> for MemoryTrackerError {
    fn from(e: hyp::Error) -> Self {
        Self::Hypervisor(e)
    }
}

type Result<T> = result::Result<T, MemoryTrackerError>;

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance from an active page table, covering the maximum RAM size.
    pub fn new(page_table: mmu::PageTable) -> Self {
        Self {
            total: BASE_ADDR..MAX_ADDR,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
        }
    }

    /// Resizes the total tracked RAM range.
    ///
    /// This function fails if the tracker contains regions that are not fully included within
    /// the new size.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error if it fails.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error if it fails.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        // MMIO space is below the main memory region.
        if range.end > self.total.start {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| overlaps(r, &range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        for page_base in page_iterator(&range) {
            get_hypervisor().mmio_guard_map(page_base)?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker` and doesn't
    /// overlap with any other previously allocated regions, and that the regions `ArrayVec`
    /// has capacity to add it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| r.overlaps(&region.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().as_ref().clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&self) -> Result<()> {
        for region in &self.mmio_regions {
            for page_base in page_iterator(region) {
                get_hypervisor().mmio_guard_unmap(page_base)?;
            }
        }

        Ok(())
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        for region in &self.regions {
            match region.mem_type {
                MemoryType::ReadWrite => {
                    // TODO(b/269738062): Use PT's dirty bit to only flush pages that were touched.
                    helpers::flush_region(region.range.start, region.range.len())
                }
                MemoryType::ReadOnly => {}
            }
        }
    }
}

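// A minimal usage sketch of `MemoryTracker` under assumed boot-time conditions: how
// the active `mmu::PageTable` is built, the real RAM size, and the MMIO address are
// all placeholders here, not values mandated by this crate.
//
//     let mut tracker = MemoryTracker::new(page_table);
//     tracker.shrink(&(BASE_ADDR..BASE_ADDR + ram_size))?;
//     let blob = tracker.alloc(BASE_ADDR, NonZeroUsize::new(SIZE_4KB).unwrap())?;
//     tracker.map_mmio_range(0x0900_0000..0x0900_1000)?; // Hypothetical UART page.
//     // ... use the mappings ...
//     tracker.mmio_unmap_all()?; // Undo the MMIO guard mappings before handing over.
//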
/// Gives the KVM host read, write and execute permissions on the given memory range. If the range
/// is not aligned with the memory protection granule then it will be extended on either end to
/// align.
fn share_range(range: &MemoryRange, granule: usize) -> hyp::Result<()> {
    for base in (align_down(range.start, granule)
        .expect("Memory protection granule was not a power of two")..range.end)
        .step_by(granule)
    {
        get_hypervisor().mem_share(base as u64)?;
    }
    Ok(())
}

/// Removes permission from the KVM host to access the given memory range which was previously
/// shared. If the range is not aligned with the memory protection granule then it will be extended
/// on either end to align.
fn unshare_range(range: &MemoryRange, granule: usize) -> hyp::Result<()> {
    for base in (align_down(range.start, granule)
        .expect("Memory protection granule was not a power of two")..range.end)
        .step_by(granule)
    {
        get_hypervisor().mem_unshare(base as u64)?;
    }
    Ok(())
}

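// For example, with a hypothetical 4 KiB granule, sharing 0x8000_0a00..0x8000_1400
// visits base addresses 0x8000_0000 and 0x8000_1000: `align_down` extends the start
// to the granule boundary below it, and because whole granules are shared, the last
// one also covers the unaligned tail beyond `range.end`.
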
/// Allocates a memory range of at least the given size from the global allocator, and shares it
/// with the host. Returns a pointer to the buffer.
///
/// It will be aligned to the memory sharing granule size supported by the hypervisor.
pub fn alloc_shared(size: usize) -> hyp::Result<NonNull<u8>> {
    let layout = shared_buffer_layout(size)?;
    let granule = layout.align();

    // Safe because `shared_buffer_layout` panics if the size is 0, so the layout must have a
    // non-zero size.
    let buffer = unsafe { alloc_zeroed(layout) };

    let Some(buffer) = NonNull::new(buffer) else {
        handle_alloc_error(layout);
    };

    let paddr = virt_to_phys(buffer);
    // If share_range fails then we will leak the allocation, but that seems better than having it
    // be reused while maybe still partially shared with the host.
    share_range(&(paddr..paddr + layout.size()), granule)?;

    Ok(buffer)
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The size passed in must be the size passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same size, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, size: usize) -> hyp::Result<()> {
    let layout = shared_buffer_layout(size)?;
    let granule = layout.align();

    let paddr = virt_to_phys(vaddr);
    unshare_range(&(paddr..paddr + layout.size()), granule)?;
    // Safe because the memory was allocated by `alloc_shared` above using the same allocator, and
    // the layout is the same as was used then.
    unsafe { dealloc(vaddr.as_ptr(), layout) };

    Ok(())
}

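// A sketch of the intended pairing of the two functions above; the buffer size is
// illustrative only.
//
//     let buf = alloc_shared(512)?; // Zeroed, granule-aligned, shared with the host.
//     // ... let the host access the buffer through its physical address ...
//     // SAFETY: `buf` came from `alloc_shared` with the same size and has not been
//     // deallocated yet.
//     unsafe { dealloc_shared(buf, 512)? };
//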
/// Returns the layout to use for allocating a buffer of at least the given size shared with the
/// host.
///
/// It will be aligned to the memory sharing granule size supported by the hypervisor.
///
/// Panics if `size` is 0.
fn shared_buffer_layout(size: usize) -> hyp::Result<Layout> {
    assert_ne!(size, 0);
    let granule = get_hypervisor().memory_protection_granule()?;
    let allocated_size =
        align_up(size, granule).expect("Memory protection granule was not a power of two");
    Ok(Layout::from_size_align(allocated_size, granule).unwrap())
}

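// For example, a 512-byte request under a hypothetical 4 KiB granule yields a layout
// with size 4096 and alignment 4096, so the buffer occupies whole granules and no
// unrelated allocation can end up in a page shared with the host.
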
/// Returns an iterator which yields the base address of each 4 KiB page within the given range.
fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
    (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)
}

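// For example, `page_iterator(&(0x8000_0400..0x8000_2000))` yields 0x8000_0000 and
// 0x8000_1000: the start is aligned down to its 4 KiB page base and iteration stops
// once the next page base would reach `range.end`.
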
/// Returns the intermediate physical address corresponding to the given virtual address.
///
/// As we use identity mapping for everything, this is just a cast, but using this function makes
/// it explicit where we convert from a virtual to a physical address.
pub fn virt_to_phys(vaddr: NonNull<u8>) -> usize {
    vaddr.as_ptr() as _
}

/// Returns a pointer for the virtual address corresponding to the given non-zero intermediate
/// physical address.
///
/// Panics if `paddr` is 0.
pub fn phys_to_virt(paddr: usize) -> NonNull<u8> {
    NonNull::new(paddr as _).unwrap()
}
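
// Host-side unit tests for the pure helpers above: a sketch that assumes this crate
// can be compiled with a test harness; the hypervisor-backed functions are not
// exercised here.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn overlaps_uses_half_open_ranges() {
        assert!(overlaps(&(0..10), &(9..20)));
        // Ranges that merely touch end-to-start do not overlap.
        assert!(!overlaps(&(0..10), &(10..20)));
    }

    #[test]
    fn is_within_requires_full_containment() {
        let region =
            MemoryRegion { range: 0x8000_0000..0x8000_1000, mem_type: MemoryType::ReadOnly };
        assert!(region.is_within(&(0x8000_0000..0x8000_2000)));
        assert!(!region.is_within(&(0x8000_0800..0x8000_2000)));
    }

    #[test]
    fn page_iterator_starts_at_page_base() {
        let mut pages = page_iterator(&(0x8000_0400..0x8000_2000));
        assert_eq!(pages.next(), Some(0x8000_0000));
        assert_eq!(pages.next(), Some(0x8000_1000));
        assert_eq!(pages.next(), None);
    }
}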