// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Heap implementation.

use alloc::alloc::alloc;
use alloc::alloc::Layout;
use alloc::boxed::Box;

use core::alloc::GlobalAlloc as _;
use core::ffi::c_void;
use core::mem;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr;
use core::ptr::NonNull;

use buddy_system_allocator::LockedHeap;
use spin::{
    mutex::{SpinMutex, SpinMutexGuard},
    Once,
};

/// Configures the size of the global allocator.
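///
/// # Example
///
/// A minimal sketch of a client crate (the 64 KiB size below is arbitrary; pick one
/// that suits your image):
///
/// ```ignore
/// configure_heap!(0x1_0000); // Reserves a 64 KiB heap and exports `get_heap`.
/// ```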
#[macro_export]
macro_rules! configure_heap {
    ($len:expr) => {
        static __HEAP: $crate::heap::HeapArray<{ $len }> = $crate::heap::HeapArray::new();
        #[export_name = "get_heap"]
        fn __get_heap() -> &'static mut [u8] {
            __HEAP.get()
        }
    };
}

/// An array to be used as a heap.
///
/// This should be stored in a static variable to have the appropriate lifetime.
pub struct HeapArray<const SIZE: usize> {
    array: SpinMutex<[u8; SIZE]>,
}

impl<const SIZE: usize> HeapArray<SIZE> {
    /// Creates a new zero-initialized heap array.
    #[allow(clippy::new_without_default)]
    pub const fn new() -> Self {
        Self { array: SpinMutex::new([0; SIZE]) }
    }

    /// Gets the heap as a slice.
    ///
    /// Panics if called more than once.
    pub fn get(&self) -> &mut [u8] {
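        // Leaking the guard keeps the spinlock held forever, so a second call to
        // `try_lock()` fails and panics: at most one `&mut` to the array is ever handed
        // out. The returned borrow lives as long as `self`, which the `static` created
        // by `configure_heap!` stretches to `'static`.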
        SpinMutexGuard::leak(self.array.try_lock().expect("Page heap was already taken"))
            .as_mut_slice()
    }
}

extern "Rust" {
    /// Gets the slice used by the global allocator, configured using `configure_heap!()`.
    ///
    /// Panics if called more than once.
    fn get_heap() -> &'static mut [u8];
}

#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::new();

/// The range of addresses used for the heap, recorded by `init()` so that `free()` can
/// sanity-check the pointers it is given.
static HEAP_RANGE: Once<Range<usize>> = Once::new();

/// Initializes the global allocator.
///
/// Must be called before the first heap allocation. Panics if called more than once.
pub(crate) fn init() {
    // SAFETY: This is in fact a safe Rust function.
    let heap = unsafe { get_heap() };

    HEAP_RANGE.call_once(|| {
        let range = heap.as_ptr_range();
        range.start as usize..range.end as usize
    });

    let start = heap.as_mut_ptr() as usize;
    let size = heap.len();

    let mut heap = HEAP_ALLOCATOR.lock();
    // SAFETY: We are supplying a valid memory range, and we only do this once.
    unsafe { heap.init(start, size) };
}

/// Allocates an aligned but uninitialized boxed slice from the heap.
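///
/// # Example
///
/// A sketch (the 2 KiB size and 4 KiB alignment are arbitrary):
///
/// ```ignore
/// let buf = aligned_boxed_slice(0x800, 0x1000).expect("heap exhausted");
/// assert_eq!(buf.len(), 0x800);
/// assert_eq!(buf.as_ptr() as usize % 0x1000, 0);
/// ```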
pub fn aligned_boxed_slice(size: usize, align: usize) -> Option<Box<[u8]>> {
    let size = NonZeroUsize::new(size)?.get();
    let layout = Layout::from_size_align(size, align).ok()?;
    // SAFETY: We verify that `size` is non-zero (so the layout is valid for `alloc`) and
    // we check the returned `ptr` for null.
    let ptr = unsafe { alloc(layout) };
    let ptr = NonNull::new(ptr)?.as_ptr();
    let slice_ptr = ptr::slice_from_raw_parts_mut(ptr, size);

    // SAFETY: The memory was allocated using the proper layout by our global_allocator.
    Some(unsafe { Box::from_raw(slice_ptr) })
}

#[no_mangle]
unsafe extern "C" fn malloc(size: usize) -> *mut c_void {
    allocate(size, false).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
}

#[no_mangle]
unsafe extern "C" fn calloc(nmemb: usize, size: usize) -> *mut c_void {
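    // Guard against the classic `calloc` overflow: `nmemb * size` must not wrap before
    // it is passed on to the allocator.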
    let Some(size) = nmemb.checked_mul(size) else { return ptr::null_mut() };
    allocate(size, true).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
}

#[no_mangle]
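/// Fortified variant of `memset`: with `_FORTIFY_SOURCE`, C compilers emit calls to
/// `__memset_chk` instead of `memset` when they know the destination object's size,
/// passing that size as `destlen` so the callee can detect buffer overflows.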
unsafe extern "C" fn __memset_chk(
    dest: *mut c_void,
    val: u8,
    len: usize,
    destlen: usize,
) -> *mut c_void {
    assert!(len <= destlen, "memset buffer overflow detected");
    // SAFETY: `dest` is valid for writes of `len` bytes.
    unsafe {
        ptr::write_bytes(dest, val, len);
    }
    dest
}

#[no_mangle]
/// SAFETY: `ptr` must be null or point to a currently-allocated block returned by
/// `allocate` (either directly or via `malloc` or `calloc`). Note that this function is
/// called directly from C, so we have to trust that the C code is doing the right thing;
/// the checks below will catch some errors.
unsafe extern "C" fn free(ptr: *mut c_void) {
    let Some(ptr) = NonNull::new(ptr) else { return };
    let heap_range = HEAP_RANGE.get().expect("free called before heap was initialised");
    assert!(
        heap_range.contains(&(ptr.as_ptr() as usize)),
        "free() called on a pointer that is not part of the HEAP: {ptr:?}"
    );
    // SAFETY: ptr is non-null and was allocated by allocate, which prepends a correctly
    // aligned usize.
    let (ptr, size) = unsafe {
        let ptr = ptr.cast::<usize>().as_ptr().offset(-1);
        (ptr, *ptr)
    };
    let size = NonZeroUsize::new(size).unwrap();
    let layout = malloc_layout(size).unwrap();
    // SAFETY: If our precondition is satisfied, then this is a valid currently-allocated
    // block.
    unsafe { HEAP_ALLOCATOR.dealloc(ptr as *mut u8, layout) }
}

/// Allocates a block of memory suitable for returning from `malloc()` etc. Returns a
/// valid pointer to a suitably aligned region of `size` bytes, optionally zeroed (and
/// otherwise uninitialized), or `None` if `size` is 0 or allocation fails. The block can
/// be freed by passing the returned pointer to `free()`.
fn allocate(size: usize, zeroed: bool) -> Option<NonNull<usize>> {
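    // Each allocation is prepended with a `usize` header recording the total block size
    // (header included), which `free()` reads back to reconstruct the `Layout`:
    //
    //   [ size: usize ][ requested bytes ... ]
    //   ^               ^
    //   block start     pointer returned to the caller (and later passed to `free()`)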
    let size = NonZeroUsize::new(size)?.checked_add(mem::size_of::<usize>())?;
    let layout = malloc_layout(size)?;
    // SAFETY: layout is known to have non-zero size.
    let ptr = unsafe {
        if zeroed {
            HEAP_ALLOCATOR.alloc_zeroed(layout)
        } else {
            HEAP_ALLOCATOR.alloc(layout)
        }
    };
    let ptr = NonNull::new(ptr)?.cast::<usize>().as_ptr();
    // SAFETY: ptr points to a newly allocated block of memory which is properly aligned
    // for a usize and is big enough to hold a usize as well as the requested number of
    // bytes.
    unsafe {
        *ptr = size.get();
        NonNull::new(ptr.offset(1))
    }
}

fn malloc_layout(size: NonZeroUsize) -> Option<Layout> {
    // We want at least 8-byte alignment, and we need to be able to store a usize.
    const ALIGN: usize = const_max_size(mem::size_of::<usize>(), mem::size_of::<u64>());
    Layout::from_size_align(size.get(), ALIGN).ok()
}

const fn const_max_size(a: usize, b: usize) -> usize {
    if a > b {
        a
    } else {
        b
    }
}