// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Shared memory management.

use super::page_table::{is_leaf_pte, MMIO_LAZY_MAP_FLAG};
use super::util::virt_to_phys;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::vec::Vec;
use buddy_system_allocator::FrameAllocator;
use core::alloc::Layout;
use core::ptr::NonNull;
use core::result;
use hyp::{get_hypervisor, MMIO_GUARD_GRANULE_SIZE};
use log::{error, trace};

/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
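///
/// # Example
///
/// A minimal usage sketch (not a doctest; the granule size and layout below are illustrative
/// assumptions, not values mandated by this module):
///
/// ```ignore
/// let mut pool = FrameAllocator::<32>::new();
/// let mut sharer = MemorySharer::new(4096, 16);
/// // Share a fresh, granule-aligned region with the host and hand it to the pool.
/// sharer.refill(&mut pool, Layout::new::<[u8; 4096]>());
/// // All shared regions are unshared and freed again when `sharer` is dropped.
/// drop(sharer);
/// ```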
pub struct MemorySharer {
    granule: usize,
    shared_regions: Vec<(usize, Layout)>,
}

impl MemorySharer {
    /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
    /// `granule` must be a power of 2.
    pub fn new(granule: usize, capacity: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, shared_regions: Vec::with_capacity(capacity) }
    }

    /// Gets a granule-aligned region that suits `hint` from the global allocator and shares it
    /// with the host.
    pub fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
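        // For example, with a 4 KiB granule, a 5000-byte hint becomes a granule-aligned,
        // 8192-byte layout, so only whole granules are ever shared below.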
        assert_ne!(layout.size(), 0);
        // SAFETY - layout has non-zero size.
        let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
            handle_alloc_error(layout);
        };

        let base = shared.as_ptr() as usize;
        let end = base.checked_add(layout.size()).unwrap();
        trace!("Sharing memory region {:#x?}", base..end);
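        // Share the region with the host one granule at a time, passing the physical address
        // of each granule to the hypervisor.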
        for vaddr in (base..end).step_by(self.granule) {
            let vaddr = NonNull::new(vaddr as *mut _).unwrap();
            get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
        }
        self.shared_regions.push((base, layout));

        pool.add_frame(base, end);
    }
}

impl Drop for MemorySharer {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.shared_regions.pop() {
            let end = base.checked_add(layout.size()).unwrap();
            trace!("Unsharing memory region {:#x?}", base..end);
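            // Unshare each granule with the host before handing the region back to the
            // global allocator.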
            for vaddr in (base..end).step_by(self.granule) {
                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
            }

            // SAFETY - The region was obtained from alloc_zeroed() with the recorded layout.
            unsafe { dealloc(base as *mut _, layout) };
        }
    }
}

/// Checks whether a block's PTE flags indicate that it should be MMIO guard mapped.
/// As the return type is required by the crate `aarch64_paging`, we cannot address the lint
/// issue `clippy::result_unit_err`.
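///
/// Intended to be passed as a PTE visitor callback; a hedged usage sketch (assuming a
/// `page_table` handle and an MMIO `range`, which are not defined in this module):
///
/// ```ignore
/// page_table.modify_range(&range, &verify_lazy_mapped_block)?;
/// ```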
#[allow(clippy::result_unit_err)]
pub fn verify_lazy_mapped_block(
    _range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
    }
    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
        Ok(())
    } else {
        Err(())
    }
}

/// MMIO guard unmaps a page.
/// As the return type is required by the crate `aarch64_paging`, we cannot address the lint
/// issue `clippy::result_unit_err`.
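///
/// Like `verify_lazy_mapped_block`, this is meant to be used as a PTE visitor callback
/// (sketch only; `page_table` and `range` are assumed to exist elsewhere):
///
/// ```ignore
/// page_table.modify_range(&range, &mmio_guard_unmap_page)?;
/// ```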
#[allow(clippy::result_unit_err)]
pub fn mmio_guard_unmap_page(
    va_range: &VaRange,
    desc: &mut Descriptor,
    level: usize,
) -> result::Result<(), ()> {
    let flags = desc.flags().expect("Unsupported PTE flags set");
    if !is_leaf_pte(&flags, level) {
        return Ok(());
    }
    // This function will be called on an address range that corresponds to a device. Only if a
    // page has been accessed (written to or read from) will it contain the VALID flag and be
    // MMIO guard mapped. Therefore, we can skip unmapping invalid pages; they were never MMIO
    // guard mapped anyway.
    if flags.contains(Attributes::VALID) {
        assert!(
            flags.contains(MMIO_LAZY_MAP_FLAG),
            "Attempting MMIO guard unmap for non-device pages"
        );
        assert_eq!(
            va_range.len(),
            MMIO_GUARD_GRANULE_SIZE,
            "Failed to break down block mapping before MMIO guard mapping"
        );
        let page_base = va_range.start().0;
        assert_eq!(page_base % MMIO_GUARD_GRANULE_SIZE, 0);
        // Since mmio_guard_map takes IPAs, if pvmfw moves to a non-identity address mapping,
        // page_base should be converted to an IPA. However, since 0x0 is a valid MMIO address,
        // we don't use virt_to_phys here, and just pass page_base instead.
        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
            error!("Error MMIO guard unmapping: {e}");
        })?;
    }
    Ok(())
}