// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level allocation and tracking of main memory.
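//!
//! A minimal usage sketch, assuming an already-built `mmu::PageTable` and illustrative
//! addresses (error handling via `?` is left to the caller):
//!
//! ```ignore
//! use core::num::NonZeroUsize;
//!
//! let mut tracker = MemoryTracker::new(page_table);
//! // Shrink the tracked range to the actual RAM size, then allocate from it.
//! tracker.shrink(&(0x8000_0000..0x8800_0000))?;
//! let range = tracker.alloc(0x8020_0000, NonZeroUsize::new(0x1000).unwrap())?;
//! ```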

use crate::helpers::{self, align_down, page_4kb_of, SIZE_4KB};
use crate::hvc::{hyp_meminfo, mem_share, mem_unshare};
use crate::mmio_guard;
use crate::mmu;
use crate::smccc;
use core::cmp::max;
use core::cmp::min;
use core::fmt;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::result;
use log::error;
use tinyvec::ArrayVec;

pub type MemoryRange = Range<usize>;

#[derive(Clone, Copy, Debug, Default)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

impl MemoryRegion {
    /// True if the instance overlaps with the passed range.
    pub fn overlaps(&self, range: &MemoryRange) -> bool {
        overlaps(&self.range, range)
    }

    /// True if the instance is fully contained within the passed range.
    pub fn is_within(&self, range: &MemoryRange) -> bool {
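        // A range lies within `range` exactly when clamping it to `range` leaves it unchanged.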
        let our: &MemoryRange = self.as_ref();
        self.as_ref() == &(max(our.start, range.start)..min(our.end, range.end))
    }
}

impl AsRef<MemoryRange> for MemoryRegion {
    fn as_ref(&self) -> &MemoryRange {
        &self.range
    }
}

/// Returns true if one range overlaps with the other at all.
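///
/// For example, `overlaps(&(1..5), &(4..7))` is true, while `overlaps(&(1..4), &(4..7))` is
/// false: the ranges are half-open, so merely touching endpoints do not count as overlap.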
fn overlaps<T: Copy + Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    max(a.start, b.start) < min(a.end, b.end)
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: mmu::PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
}

/// Errors for MemoryTracker operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
    /// Tried to modify the memory base address.
    DifferentBaseAddress,
    /// Tried to shrink to a larger memory size.
    SizeTooLarge,
    /// Tracked regions would not fit in memory size.
    SizeTooSmall,
    /// Reached limit number of tracked regions.
    Full,
    /// Region is out of the tracked memory address space.
    OutOfRange,
    /// New region overlaps with tracked regions.
    Overlaps,
    /// Region couldn't be mapped.
    FailedToMap,
    /// Error from an MMIO guard call.
    MmioGuard(mmio_guard::Error),
}

impl fmt::Display for MemoryTrackerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::DifferentBaseAddress => write!(f, "Received different base address"),
            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
            Self::Full => write!(f, "Reached limit number of tracked regions"),
            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
            Self::FailedToMap => write!(f, "Failed to map the new region"),
            Self::MmioGuard(e) => e.fmt(f),
        }
    }
}

impl From<mmio_guard::Error> for MemoryTrackerError {
    fn from(e: mmio_guard::Error) -> Self {
        Self::MmioGuard(e)
    }
}

type Result<T> = result::Result<T, MemoryTrackerError>;

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;
    /// Base of the system's contiguous "main" memory.
    const BASE: usize = 0x8000_0000;
    /// First address that can't be translated by a level 1 TTBR0_EL1.
    const MAX_ADDR: usize = 1 << 39;

    /// Create a new instance from an active page table, covering the maximum RAM size.
    pub fn new(page_table: mmu::PageTable) -> Self {
        Self {
            total: Self::BASE..Self::MAX_ADDR,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
        }
    }

    /// Resize the total RAM size.
    ///
    /// This function fails if the tracker already contains regions that would not fit within the
    /// new size.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocate the address range for a const slice; returns an error on failure.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check(&region)?;
        self.page_table.map_rodata(range).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check(&region)?;
        self.page_table.map_data(range).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocate the address range for a const slice; returns an error on failure.
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocate the address range for a mutable slice; returns an error on failure.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        // MMIO space is below the main memory region.
        if range.end > self.total.start {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| overlaps(r, &range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        self.page_table.map_device(&range).map_err(|e| {
            error!("Error during MMIO device mapping: {e}");
            MemoryTrackerError::FailedToMap
        })?;

        for page_base in page_iterator(&range) {
            mmio_guard::map(page_base)?;
        }

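        // The capacity check above should guarantee that this push succeeds; handle a failure
        // defensively anyway.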
        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the given region is within the range of the `MemoryTracker`, that it doesn't
    /// overlap with any previously allocated region, and that the `regions` ArrayVec has spare
    /// capacity for it.
    fn check(&self, region: &MemoryRegion) -> Result<()> {
        if !region.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.regions.iter().any(|r| r.overlaps(&region.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().as_ref().clone())
    }

    /// Unmaps all tracked MMIO regions from the MMIO guard.
    ///
    /// Note that they are not unmapped from the page table.
    pub fn mmio_unmap_all(&self) -> Result<()> {
        for region in &self.mmio_regions {
            for page_base in page_iterator(region) {
                mmio_guard::unmap(page_base)?;
            }
        }

        Ok(())
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        for region in &self.regions {
            match region.mem_type {
                MemoryType::ReadWrite => {
                    // TODO: Use page table's dirty bit to only flush pages that were touched.
                    helpers::flush_region(region.range.start, region.range.len())
                }
                MemoryType::ReadOnly => {}
            }
        }
    }
}

/// Gives the KVM host read, write and execute permissions on the given memory range. If the range
/// is not aligned with the memory protection granule then it will be extended on either end to
/// align.
#[allow(unused)]
pub fn share_range(range: &MemoryRange) -> smccc::Result<()> {
    let granule = hyp_meminfo()? as usize;
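    // Aligning the base down may extend the shared region at the start; sharing whole granules
    // extends it at the end, up to the granule boundary at or after `range.end`.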
    for base in (align_down(range.start, granule)
        .expect("Memory protection granule was not a power of two")..range.end)
        .step_by(granule)
    {
        mem_share(base as u64)?;
    }
    Ok(())
}

/// Removes permission from the KVM host to access the given memory range which was previously
/// shared. If the range is not aligned with the memory protection granule then it will be extended
/// on either end to align.
#[allow(unused)]
pub fn unshare_range(range: &MemoryRange) -> smccc::Result<()> {
    let granule = hyp_meminfo()? as usize;
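    // As in share_range(), the unshared region is extended to whole granules at both ends.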
    for base in (align_down(range.start, granule)
        .expect("Memory protection granule was not a power of two")..range.end)
        .step_by(granule)
    {
        mem_unshare(base as u64)?;
    }
    Ok(())
}

/// Returns an iterator which yields the base address of each 4 KiB page within the given range.
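///
/// For example, `page_iterator(&(0x200..0x3000))` yields `0x0`, `0x1000` and `0x2000`.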
fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
    (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)
}