// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Shared memory management.
Alice Wangb73a81b2023-06-07 13:05:09 +000017use super::page_table::{is_leaf_pte, MMIO_LAZY_MAP_FLAG};
18use super::util::{virt_to_phys, PAGE_SIZE};
19use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
Alice Wangf47b2342023-06-02 11:51:57 +000020use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
21use alloc::vec::Vec;
22use buddy_system_allocator::FrameAllocator;
23use core::alloc::Layout;
24use core::ptr::NonNull;
Alice Wangb73a81b2023-06-07 13:05:09 +000025use core::result;
Alice Wangf47b2342023-06-02 11:51:57 +000026use hyp::get_hypervisor;
Alice Wangb73a81b2023-06-07 13:05:09 +000027use log::{error, trace};
Alice Wangf47b2342023-06-02 11:51:57 +000028
/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
pub struct MemorySharer {
    // Sharing granule in bytes; `new()` asserts this is a power of two.
    granule: usize,
    // Base address and `Layout` of every heap region currently shared with the
    // host, recorded so `Drop` can unshare each granule and deallocate.
    shared_regions: Vec<(usize, Layout)>,
}
36
37impl MemorySharer {
38 /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
39 /// `granule` must be a power of 2.
40 pub fn new(granule: usize, capacity: usize) -> Self {
41 assert!(granule.is_power_of_two());
42 Self { granule, shared_regions: Vec::with_capacity(capacity) }
43 }
44
45 /// Get from the global allocator a granule-aligned region that suits `hint` and share it.
46 pub fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
47 let layout = hint.align_to(self.granule).unwrap().pad_to_align();
48 assert_ne!(layout.size(), 0);
49 // SAFETY - layout has non-zero size.
50 let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
51 handle_alloc_error(layout);
52 };
53
54 let base = shared.as_ptr() as usize;
55 let end = base.checked_add(layout.size()).unwrap();
56 trace!("Sharing memory region {:#x?}", base..end);
57 for vaddr in (base..end).step_by(self.granule) {
58 let vaddr = NonNull::new(vaddr as *mut _).unwrap();
59 get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
60 }
61 self.shared_regions.push((base, layout));
62
63 pool.add_frame(base, end);
64 }
65}
66
67impl Drop for MemorySharer {
68 fn drop(&mut self) {
69 while let Some((base, layout)) = self.shared_regions.pop() {
70 let end = base.checked_add(layout.size()).unwrap();
71 trace!("Unsharing memory region {:#x?}", base..end);
72 for vaddr in (base..end).step_by(self.granule) {
73 let vaddr = NonNull::new(vaddr as *mut _).unwrap();
74 get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
75 }
76
77 // SAFETY - The region was obtained from alloc_zeroed() with the recorded layout.
78 unsafe { dealloc(base as *mut _, layout) };
79 }
80 }
81}
Alice Wangb73a81b2023-06-07 13:05:09 +000082
83/// Checks whether block flags indicate it should be MMIO guard mapped.
84/// As the return type is required by the crate `aarch64_paging`, we cannot address the lint
85/// issue `clippy::result_unit_err`.
86#[allow(clippy::result_unit_err)]
87pub fn verify_lazy_mapped_block(
88 _range: &VaRange,
89 desc: &mut Descriptor,
90 level: usize,
91) -> result::Result<(), ()> {
92 let flags = desc.flags().expect("Unsupported PTE flags set");
93 if !is_leaf_pte(&flags, level) {
94 return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
95 }
96 if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
97 Ok(())
98 } else {
99 Err(())
100 }
101}
102
103/// MMIO guard unmaps page
104/// As the return type is required by the crate `aarch64_paging`, we cannot address the lint
105/// issue `clippy::result_unit_err`.
106#[allow(clippy::result_unit_err)]
107pub fn mmio_guard_unmap_page(
108 va_range: &VaRange,
109 desc: &mut Descriptor,
110 level: usize,
111) -> result::Result<(), ()> {
112 let flags = desc.flags().expect("Unsupported PTE flags set");
113 if !is_leaf_pte(&flags, level) {
114 return Ok(());
115 }
116 // This function will be called on an address range that corresponds to a device. Only if a
117 // page has been accessed (written to or read from), will it contain the VALID flag and be MMIO
118 // guard mapped. Therefore, we can skip unmapping invalid pages, they were never MMIO guard
119 // mapped anyway.
120 if flags.contains(Attributes::VALID) {
121 assert!(
122 flags.contains(MMIO_LAZY_MAP_FLAG),
123 "Attempting MMIO guard unmap for non-device pages"
124 );
125 assert_eq!(
126 va_range.len(),
127 PAGE_SIZE,
128 "Failed to break down block mapping before MMIO guard mapping"
129 );
130 let page_base = va_range.start().0;
131 assert_eq!(page_base % PAGE_SIZE, 0);
132 // Since mmio_guard_map takes IPAs, if pvmfw moves non-ID address mapping, page_base
133 // should be converted to IPA. However, since 0x0 is a valid MMIO address, we don't use
134 // virt_to_phys here, and just pass page_base instead.
135 get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
136 error!("Error MMIO guard unmapping: {e}");
137 })?;
138 }
139 Ok(())
140}