// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level allocation and tracking of main memory.

#![deny(unsafe_op_in_unsafe_fn)]

use crate::helpers::{self, page_4kb_of, RangeExt, SIZE_4KB, SIZE_4MB};
use crate::mmu;
use alloc::alloc::alloc_zeroed;
use alloc::alloc::dealloc;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use alloc::vec::Vec;
use buddy_system_allocator::Heap;
use buddy_system_allocator::LockedHeap;
use core::alloc::Layout;
use core::cmp::max;
use core::cmp::min;
use core::fmt;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::error;
use log::trace;
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;

/// Base of the system's contiguous "main" memory.
pub const BASE_ADDR: usize = 0x8000_0000;
/// First address that can't be translated by a level 1 TTBR0_EL1.
pub const MAX_ADDR: usize = 1 << 40;

pub type MemoryRange = Range<usize>;

pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
unsafe impl Send for MemoryTracker {}

#[derive(Clone, Copy, Debug, Default)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

impl MemoryRegion {
    /// True if the instance overlaps with the passed range.
    pub fn overlaps(&self, range: &MemoryRange) -> bool {
        overlaps(&self.range, range)
    }

    /// True if the instance is fully contained within the passed range.
    pub fn is_within(&self, range: &MemoryRange) -> bool {
        self.as_ref().is_within(range)
    }
}

impl AsRef<MemoryRange> for MemoryRegion {
    fn as_ref(&self) -> &MemoryRange {
        &self.range
    }
}

/// Returns true if one range overlaps with the other at all.
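///
/// Illustrative sketch of the half-open semantics: ranges that merely touch at an endpoint do
/// not count as overlapping.
///
/// ```ignore
/// assert!(overlaps(&(0..10), &(5..15))); // Both contain 5..10.
/// assert!(!overlaps(&(0..5), &(5..10))); // Adjacent ranges share no element.
/// ```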
fn overlaps<T: Copy + Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    max(a.start, b.start) < min(a.end, b.end)
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: mmu::PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
}

/// Errors for MemoryTracker operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in memory size.
    SizeTooSmall,
    /// Reached the limit of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Error from the interaction with the hypervisor.
    Hypervisor(hyp::Error),
    /// Failure to set `SHARED_MEMORY`.
    SharedMemorySetFailure,
    /// Failure to set `SHARED_POOL`.
    SharedPoolSetFailure,
}

impl fmt::Display for MemoryTrackerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::DifferentBaseAddress => write!(f, "Received different base address"),
            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
            Self::Full => write!(f, "Reached the limit of tracked regions"),
            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
            Self::FailedToMap => write!(f, "Failed to map the new region"),
            Self::Hypervisor(e) => e.fmt(f),
            Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
            Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
        }
    }
}

impl From<hyp::Error> for MemoryTrackerError {
    fn from(e: hyp::Error) -> Self {
        Self::Hypervisor(e)
    }
}

type Result<T> = result::Result<T, MemoryTrackerError>;

static SHARED_POOL: OnceBox<LockedHeap<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
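///
/// Illustrative sketch (assumes a hypervisor exposing a MemShare API and an initialised global
/// allocator):
///
/// ```ignore
/// let mut sharer = MemorySharer::new(SIZE_4KB);
/// let mut pool = Heap::<32>::new();
/// // Back `pool` with at least one zeroed, host-shared granule.
/// sharer.refill(&mut pool, Layout::from_size_align(SIZE_4KB, SIZE_4KB).unwrap());
/// // Dropping `sharer` unshares and frees every region it handed out.
/// ```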
pub struct MemorySharer {
    granule: usize,
    shared_regions: Vec<(usize, Layout)>,
}

impl MemorySharer {
    const INIT_CAP: usize = 10;

    pub fn new(granule: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, shared_regions: Vec::with_capacity(Self::INIT_CAP) }
    }

    /// Gets a granule-aligned region that suits `hint` from the global allocator and shares it.
    pub fn refill(&mut self, pool: &mut Heap<32>, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
        assert_ne!(layout.size(), 0);
        // SAFETY - `layout` has a non-zero size.
        let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
            handle_alloc_error(layout);
        };

        let base = shared.as_ptr() as usize;
        let end = base.checked_add(layout.size()).unwrap();
        trace!("Sharing memory region {:#x?}", base..end);
        for vaddr in (base..end).step_by(self.granule) {
            let vaddr = NonNull::new(vaddr as *mut _).unwrap();
            get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
        }
        self.shared_regions.push((base, layout));

        // SAFETY - The underlying memory range is owned by self and reserved for this pool.
        unsafe { pool.add_to_heap(base, end) };
    }
}

impl Drop for MemorySharer {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.shared_regions.pop() {
            let end = base.checked_add(layout.size()).unwrap();
            trace!("Unsharing memory region {:#x?}", base..end);
            for vaddr in (base..end).step_by(self.granule) {
                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
            }

            // SAFETY - The region was obtained from alloc_zeroed() with the recorded layout.
            unsafe { dealloc(base as *mut _, layout) };
        }
    }
}

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;
    const PVMFW_RANGE: MemoryRange = (BASE_ADDR - SIZE_4MB)..BASE_ADDR;

    /// Create a new instance from an active page table, covering the maximum RAM size.
    pub fn new(page_table: mmu::PageTable) -> Self {
        Self {
            total: BASE_ADDR..MAX_ADDR,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
        }
    }

    /// Shrinks the total RAM range to the given one.
    ///
    /// This function fails if the tracker contains regions that do not fit within the new range.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocate the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a const slice; returns an error on failure.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
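    ///
    /// Illustrative sketch, using a hypothetical device address below main memory:
    ///
    /// ```ignore
    /// const UART_BASE: usize = 0x0900_0000; // Hypothetical MMIO base address.
    /// MEMORY.lock().as_mut().unwrap().map_mmio_range(UART_BASE..UART_BASE + SIZE_4KB)?;
    /// ```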
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        // MMIO space is below the main memory region.
        if range.end > self.total.start || overlaps(&Self::PVMFW_RANGE, &range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| overlaps(r, &range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        for page_base in page_iterator(&range) {
            get_hypervisor().mmio_guard_map(page_base)?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker`, that it doesn't
    /// overlap with any other previously allocated region, and that the `regions` ArrayVec has
    /// capacity to hold it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| r.overlaps(&region.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().as_ref().clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&self) -> Result<()> {
        for region in &self.mmio_regions {
            for page_base in page_iterator(region) {
                get_hypervisor().mmio_guard_unmap(page_base)?;
            }
        }

        Ok(())
    }

    /// Initialize the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
        let granule = get_hypervisor().memory_protection_granule()?;
        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedHeap::empty()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share its
    /// memory with the host. Instead, they allow the host to designate part of the guest's memory
    /// as "shared" ahead of the guest starting its execution. The shared memory region is
    /// indicated in the swiotlb node. On such platforms, use a separate heap to allocate buffers
    /// that can be shared with the host.
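    ///
    /// Illustrative sketch; in practice the range comes from the swiotlb node of the device tree
    /// rather than the hypothetical values used here:
    ///
    /// ```ignore
    /// let swiotlb = 0x9000_0000..0x9020_0000; // Hypothetical 2 MiB pre-shared region.
    /// MEMORY.lock().as_mut().unwrap().init_static_shared_pool(swiotlb)?;
    /// ```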
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedHeap::<32>::new();

        // SAFETY - `range` should be a valid region of memory as validated by
        // `validate_swiotlb_info` and not used by any other rust code.
        unsafe {
            shared_pool.lock().init(range.start, range.len());
        }

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        for region in &self.regions {
            match region.mem_type {
                MemoryType::ReadWrite => {
                    // TODO(b/269738062): Use PT's dirty bit to only flush pages that were touched.
                    helpers::flush_region(region.range.start, region.range.len())
                }
                MemoryType::ReadOnly => {}
            }
        }
        self.unshare_all_memory()
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
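///
/// Illustrative sketch of pairing this with [`dealloc_shared`]; the layout is arbitrary:
///
/// ```ignore
/// let layout = Layout::from_size_align(512, 8).unwrap();
/// let buffer = alloc_shared(layout)?;
/// // ... exchange data with the host through `buffer` ...
/// // SAFETY - `buffer` came from alloc_shared() with this exact layout and is not yet freed.
/// unsafe { dealloc_shared(buffer, layout)? };
/// ```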
pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Ok(buffer) = shared_pool.alloc(layout) {
        Some(buffer)
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        shared_memory.refill(&mut shared_pool, layout);
        shared_pool.alloc(layout).ok()
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc(vaddr, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}

/// Returns an iterator which yields the base address of each 4 KiB page within the given range.
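///
/// For illustration, a range straddling two pages yields both page base addresses:
///
/// ```ignore
/// let mut pages = page_iterator(&(0x8000_0800..0x8000_2000));
/// assert_eq!(pages.next(), Some(0x8000_0000));
/// assert_eq!(pages.next(), Some(0x8000_1000));
/// assert_eq!(pages.next(), None);
/// ```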
fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
    (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)
}

/// Returns the intermediate physical address corresponding to the given virtual address.
///
/// As we use identity mapping for everything, this is just a cast, but it's useful to use it to be
/// explicit about where we are converting from virtual to physical address.
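///
/// Illustrative sketch of the identity round-trip with [`phys_to_virt`]:
///
/// ```ignore
/// let vaddr = NonNull::new(0x8000_0000 as *mut u8).unwrap();
/// assert_eq!(virt_to_phys(vaddr), 0x8000_0000);
/// assert_eq!(phys_to_virt(0x8000_0000), vaddr);
/// ```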
pub fn virt_to_phys(vaddr: NonNull<u8>) -> usize {
    vaddr.as_ptr() as _
}

/// Returns a pointer for the virtual address corresponding to the given non-zero intermediate
/// physical address.
///
/// Panics if `paddr` is 0.
pub fn phys_to_virt(paddr: usize) -> NonNull<u8> {
    NonNull::new(paddr as _).unwrap()
}