blob: 41baf8b18ca7465ddb65b411f47bde2c076edc34 [file] [log] [blame]
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -07001/*
2 * Copyright (C) 2015 The Android Open Source Project
Dimitry Ivanovbcc4da92017-02-15 15:31:13 -08003 * All rights reserved.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -07004 *
Dimitry Ivanovbcc4da92017-02-15 15:31:13 -08005 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070014 *
Dimitry Ivanovbcc4da92017-02-15 15:31:13 -080015 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070027 */
28
Ryan Prichard083d8502019-01-24 13:47:13 -080029#include "private/bionic_allocator.h"
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070030
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070031#include <stdlib.h>
Ryan Prichard52165b32019-01-23 17:46:24 -080032#include <string.h>
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070033#include <sys/mman.h>
Ryan Prichard96773a22019-01-24 15:22:50 -080034#include <sys/param.h>
Elliott Hughes99d54652018-08-22 10:36:23 -070035#include <sys/prctl.h>
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070036#include <unistd.h>
37
Ryan Prichard52165b32019-01-23 17:46:24 -080038#include <new>
39
Christopher Ferris7a3681e2017-04-24 17:48:32 -070040#include <async_safe/log.h>
Elliott Hughes3019d782019-02-13 12:39:07 -080041#include <async_safe/CHECK.h>
Christopher Ferris7a3681e2017-04-24 17:48:32 -070042
Elliott Hughescdb52fc2019-12-12 15:26:14 -080043#include "platform/bionic/page.h"
Josh Gao4956c372019-12-19 16:35:51 -080044#include "platform/bionic/macros.h"
Ryan Prichard52165b32019-01-23 17:46:24 -080045
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070046//
Ryan Prichard083d8502019-01-24 13:47:13 -080047// BionicAllocator is a general purpose allocator designed to provide the same
Ryan Pricharddb6edcc2019-04-01 16:16:05 -070048// functionality as the malloc/free/realloc/memalign libc functions.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070049//
50// On alloc:
Ryan Pricharddb6edcc2019-04-01 16:16:05 -070051// If size is > 1k allocator proxies malloc call directly to mmap.
52// If size <= 1k allocator uses BionicSmallObjectAllocator for the size
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070053// rounded up to the nearest power of two.
54//
55// On free:
56//
57// For a pointer allocated using proxy-to-mmap allocator unmaps
58// the memory.
59//
Ryan Pricharddb6edcc2019-04-01 16:16:05 -070060// For a pointer allocated using BionicSmallObjectAllocator it adds
Vic Yang54938512018-12-02 23:46:26 -080061// the block to free_blocks_list in the corresponding page. If the number of
Ryan Pricharddb6edcc2019-04-01 16:16:05 -070062// free pages reaches 2, BionicSmallObjectAllocator munmaps one of the pages
63// keeping the other one in reserve.
Vic Yang54938512018-12-02 23:46:26 -080064
65// Memory management for large objects is fairly straightforward, but for small
66// objects it is more complicated. If you are changing this code, one simple
67// way to evaluate the memory usage change is by running 'dd' and examine the
68// memory usage by 'showmap $(pidof dd)'. 'dd' is nice in that:
69// 1. It links in quite a few libraries, so you get some linker memory use.
70// 2. When run with no arguments, it sits waiting for input, so it is easy to
71// examine its memory usage with showmap.
72// 3. Since it does nothing while waiting for input, the memory usage is
//    deterministic.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070074
// Magic bytes written at the start of every allocator-owned page; checked on
// free/realloc to detect pointers that did not come from this allocator.
// (The trailing 1 is presumably a format version byte — the code only
// compares the four bytes as a unit.)
static const char kSignature[4] = {'L', 'M', 'A', 1};

// Largest request served by the small-object allocators; anything bigger is
// proxied straight to mmap.
static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2;

// This type is used for large allocations (with size >1k)
static const uint32_t kLargeObject = 111;

// Allocated pointers must be at least 16-byte aligned. Round up the size of
// page_info to multiple of 16.
static constexpr size_t kPageInfoSize = __BIONIC_ALIGN(sizeof(page_info), 16);
// Ceiling log2: returns the exponent of the smallest power of two that is
// >= number (0 for number <= 1). Note: for number == 0 the initial decrement
// wraps, yielding 8*sizeof(size_t); callers only pass sizes >= 1.
static inline uint16_t log2(size_t number) {
  size_t remaining = number - 1;
  uint16_t bits = 0;
  for (; remaining != 0; remaining >>= 1) {
    ++bits;
  }
  return bits;
}
97
// Builds an allocator that hands out fixed-size blocks of block_size bytes.
// Blocks are carved out of whole pages; each page starts with a
// small_object_page_info header, so only the remainder of the page holds
// blocks. |type| identifies this size class in every page's header.
BionicSmallObjectAllocator::BionicSmallObjectAllocator(uint32_t type, size_t block_size)
    : type_(type),
      block_size_(block_size),
      blocks_per_page_((page_size() - sizeof(small_object_page_info)) / block_size),
      free_pages_cnt_(0),
      page_list_(nullptr) {}
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700104
// Hands out one zero-filled block of block_size_ bytes, mapping a fresh page
// first if no page with free blocks is currently on the page list.
void* BionicSmallObjectAllocator::alloc() {
  CHECK(block_size_ != 0);

  if (page_list_ == nullptr) {
    alloc_page();
  }

  // Fully allocated pages are de-managed and removed from the page list, so
  // every page from the page list must be usable. Let's just take the first
  // one.
  small_object_page_info* page = page_list_;
  CHECK(page->free_block_list != nullptr);

  // A block record sits at the address of the first free block of a run and
  // describes free_blocks_cnt contiguous free blocks starting there.
  small_object_block_record* const block_record = page->free_block_list;
  if (block_record->free_blocks_cnt > 1) {
    // Carve the first block off the run: the record for the shortened run
    // moves into the next block of the run.
    small_object_block_record* next_free =
        reinterpret_cast<small_object_block_record*>(
            reinterpret_cast<uint8_t*>(block_record) + block_size_);
    next_free->next = block_record->next;
    next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1;
    page->free_block_list = next_free;
  } else {
    // Single-block run: just unlink its record.
    page->free_block_list = block_record->next;
  }

  // This page is about to stop being completely free.
  if (page->free_blocks_cnt == blocks_per_page_) {
    free_pages_cnt_--;
  }

  page->free_blocks_cnt--;

  // Scrub the free-list record (and the rest of the block) before handing
  // the block out.
  memset(block_record, 0, block_size_);

  if (page->free_blocks_cnt == 0) {
    // De-manage fully allocated pages. These pages will be managed again if
    // a block is freed.
    remove_from_page_list(page);
  }

  return block_record;
}
146
Ryan Prichard083d8502019-01-24 13:47:13 -0800147void BionicSmallObjectAllocator::free_page(small_object_page_info* page) {
Vic Yang259429b2018-12-04 23:59:57 -0800148 CHECK(page->free_blocks_cnt == blocks_per_page_);
Vic Yang54938512018-12-02 23:46:26 -0800149 if (page->prev_page) {
150 page->prev_page->next_page = page->next_page;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700151 }
Vic Yang54938512018-12-02 23:46:26 -0800152 if (page->next_page) {
153 page->next_page->prev_page = page->prev_page;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700154 }
Vic Yang54938512018-12-02 23:46:26 -0800155 if (page_list_ == page) {
156 page_list_ = page->next_page;
157 }
Peter Collingbournebb11ee62022-05-02 12:26:16 -0700158 munmap(page, page_size());
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700159 free_pages_cnt_--;
160}
161
// Returns |ptr| to its page's free list. The owning page is found by rounding
// the pointer down to a page boundary (pages are owned wholesale by this
// allocator). Aborts if |ptr| is not at a block_size_-multiple address.
void BionicSmallObjectAllocator::free(void* ptr) {
  small_object_page_info* const page =
      reinterpret_cast<small_object_page_info*>(page_start(reinterpret_cast<uintptr_t>(ptr)));

  // Valid blocks always sit at addresses that are multiples of block_size_;
  // anything else cannot have come from this allocator.
  if (reinterpret_cast<uintptr_t>(ptr) % block_size_ != 0) {
    async_safe_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_);
  }

  // Scrub the block, then reuse its first bytes as a single-block free-run
  // record pushed onto the front of the page's free list.
  memset(ptr, 0, block_size_);
  small_object_block_record* const block_record =
      reinterpret_cast<small_object_block_record*>(ptr);

  block_record->next = page->free_block_list;
  block_record->free_blocks_cnt = 1;

  page->free_block_list = block_record;
  page->free_blocks_cnt++;

  if (page->free_blocks_cnt == blocks_per_page_) {
    if (++free_pages_cnt_ > 1) {
      // if we already have a free page - unmap this one.
      free_page(page);
    }
  } else if (page->free_blocks_cnt == 1) {
    // We just freed from a full page. Add this page back to the list.
    add_to_page_list(page);
  }
}
190
// Maps one new anonymous page, stamps its header (signature/type/owner), and
// seeds its free list with a single run covering all blocks_per_page_ blocks.
void BionicSmallObjectAllocator::alloc_page() {
  void* const map_ptr =
      mmap(nullptr, page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %m");
  }

  // Name the mapping so it can be identified in /proc/<pid>/maps.
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, page_size(), "bionic_alloc_small_objects");

  small_object_page_info* const page =
      reinterpret_cast<small_object_page_info*>(map_ptr);
  memcpy(page->info.signature, kSignature, sizeof(kSignature));
  page->info.type = type_;
  page->info.allocator_addr = this;

  page->free_blocks_cnt = blocks_per_page_;

  // Align the first block to block_size_.
  const uintptr_t first_block_addr =
      __BIONIC_ALIGN(reinterpret_cast<uintptr_t>(page + 1), block_size_);
  small_object_block_record* const first_block =
      reinterpret_cast<small_object_block_record*>(first_block_addr);

  // One record describing a run of every block on the page.
  first_block->next = nullptr;
  first_block->free_blocks_cnt = blocks_per_page_;

  page->free_block_list = first_block;

  add_to_page_list(page);

  // A brand-new page is completely free.
  free_pages_cnt_++;
}
223
Ryan Prichard083d8502019-01-24 13:47:13 -0800224void BionicSmallObjectAllocator::add_to_page_list(small_object_page_info* page) {
Vic Yang54938512018-12-02 23:46:26 -0800225 page->next_page = page_list_;
226 page->prev_page = nullptr;
227 if (page_list_) {
228 page_list_->prev_page = page;
229 }
230 page_list_ = page;
231}
232
Ryan Prichard083d8502019-01-24 13:47:13 -0800233void BionicSmallObjectAllocator::remove_from_page_list(
Vic Yang54938512018-12-02 23:46:26 -0800234 small_object_page_info* page) {
235 if (page->prev_page) {
236 page->prev_page->next_page = page->next_page;
237 }
238 if (page->next_page) {
239 page->next_page->prev_page = page->prev_page;
240 }
241 if (page_list_ == page) {
242 page_list_ = page->next_page;
243 }
244 page->prev_page = nullptr;
245 page->next_page = nullptr;
246}
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700247
Ryan Prichard083d8502019-01-24 13:47:13 -0800248void BionicAllocator::initialize_allocators() {
Dimitry Ivanov65707b62016-07-29 13:25:33 -0700249 if (allocators_ != nullptr) {
250 return;
251 }
252
Ryan Prichard083d8502019-01-24 13:47:13 -0800253 BionicSmallObjectAllocator* allocators =
254 reinterpret_cast<BionicSmallObjectAllocator*>(allocators_buf_);
Dimitry Ivanov65707b62016-07-29 13:25:33 -0700255
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700256 for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) {
257 uint32_t type = i + kSmallObjectMinSizeLog2;
Ryan Prichard083d8502019-01-24 13:47:13 -0800258 new (allocators + i) BionicSmallObjectAllocator(type, 1 << type);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700259 }
Dimitry Ivanov65707b62016-07-29 13:25:33 -0700260
261 allocators_ = allocators;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700262}
263
// Large-object path: allocates a dedicated, page-rounded anonymous mapping.
// A page_info header occupies the start of the mapping; the user pointer is
// placed header_size bytes in, where header_size is kPageInfoSize rounded up
// to |align| (the mapping itself is page-aligned, so the result is aligned).
void* BionicAllocator::alloc_mmap(size_t align, size_t size) {
  size_t header_size = __BIONIC_ALIGN(kPageInfoSize, align);
  size_t allocated_size;
  // Abort if header + size overflows, or if rounding the total up to a whole
  // page would wrap around.
  if (__builtin_add_overflow(header_size, size, &allocated_size) ||
      page_end(allocated_size) < allocated_size) {
    async_safe_fatal("overflow trying to alloc %zu bytes", size);
  }
  allocated_size = page_end(allocated_size);
  void* map_ptr = mmap(nullptr, allocated_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS,
                       -1, 0);

  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %m");
  }

  // Name the mapping so it can be identified in /proc/<pid>/maps.
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "bionic_alloc_lob");

  void* result = static_cast<char*>(map_ptr) + header_size;
  page_info* info = get_page_info_unchecked(result);
  memcpy(info->signature, kSignature, sizeof(kSignature));
  info->type = kLargeObject;
  info->allocated_size = allocated_size;

  return result;
}
289
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700290
Ryan Prichard96773a22019-01-24 15:22:50 -0800291inline void* BionicAllocator::alloc_impl(size_t align, size_t size) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700292 if (size > kSmallObjectMaxSize) {
Ryan Prichard96773a22019-01-24 15:22:50 -0800293 return alloc_mmap(align, size);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700294 }
295
296 uint16_t log2_size = log2(size);
297
298 if (log2_size < kSmallObjectMinSizeLog2) {
299 log2_size = kSmallObjectMinSizeLog2;
300 }
301
Elliott Hughes8f653f82024-05-29 22:25:37 +0000302 return get_small_object_allocator_unchecked(log2_size)->alloc();
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700303}
304
Ryan Prichard96773a22019-01-24 15:22:50 -0800305void* BionicAllocator::alloc(size_t size) {
306 // treat alloc(0) as alloc(1)
307 if (size == 0) {
308 size = 1;
309 }
310 return alloc_impl(16, size);
311}
312
313void* BionicAllocator::memalign(size_t align, size_t size) {
314 // The Bionic allocator only supports alignment up to one page, which is good
315 // enough for ELF TLS.
Peter Collingbournebb11ee62022-05-02 12:26:16 -0700316 align = MIN(align, page_size());
Ryan Prichard96773a22019-01-24 15:22:50 -0800317 align = MAX(align, 16);
318 if (!powerof2(align)) {
319 align = BIONIC_ROUND_UP_POWER_OF_2(align);
320 }
321 size = MAX(size, align);
322 return alloc_impl(align, size);
323}
324
325inline page_info* BionicAllocator::get_page_info_unchecked(void* ptr) {
Peter Collingbournebb11ee62022-05-02 12:26:16 -0700326 uintptr_t header_page = page_start(reinterpret_cast<size_t>(ptr) - kPageInfoSize);
Ryan Prichard96773a22019-01-24 15:22:50 -0800327 return reinterpret_cast<page_info*>(header_page);
328}
329
// Like get_page_info_unchecked, but aborts if the header does not carry the
// allocator's signature — i.e. the pointer was not produced by this
// allocator, or the header was corrupted.
inline page_info* BionicAllocator::get_page_info(void* ptr) {
  page_info* info = get_page_info_unchecked(ptr);
  if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    async_safe_fatal("invalid pointer %p (page signature %04x instead of %04x)", ptr,
                     *reinterpret_cast<const unsigned*>(info->signature),
                     *reinterpret_cast<const unsigned*>(kSignature));
  }
  return info;
}
339
// realloc with the usual libc contract: realloc(nullptr, n) behaves like
// alloc(n); realloc(p, 0) frees and returns nullptr. Requests that fit in the
// current chunk return |ptr| unchanged; growth allocates a fresh chunk,
// copies the old usable size, and frees the original.
void* BionicAllocator::realloc(void* ptr, size_t size) {
  if (ptr == nullptr) {
    return alloc(size);
  }

  if (size == 0) {
    free(ptr);
    return nullptr;
  }

  page_info* info = get_page_info(ptr);

  size_t old_size = 0;

  if (info->type == kLargeObject) {
    // Usable size of a large object: everything from the user pointer to the
    // end of its page-rounded mapping.
    old_size = info->allocated_size - (static_cast<char*>(ptr) - reinterpret_cast<char*>(info));
  } else {
    // Small objects always occupy one full power-of-two block.
    old_size = get_small_object_allocator(info, ptr)->get_block_size();
  }

  if (old_size < size) {
    void *result = alloc(size);
    memcpy(result, ptr, old_size);
    free(ptr);
    return result;
  }

  return ptr;
}
369
Ryan Prichard083d8502019-01-24 13:47:13 -0800370void BionicAllocator::free(void* ptr) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700371 if (ptr == nullptr) {
372 return;
373 }
374
375 page_info* info = get_page_info(ptr);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700376 if (info->type == kLargeObject) {
377 munmap(info, info->allocated_size);
378 } else {
Elliott Hughes8f653f82024-05-29 22:25:37 +0000379 get_small_object_allocator(info, ptr)->free(ptr);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700380 }
381}
382
// Best-effort usable-size query (cf. malloc_usable_size). Returns 0 for
// nullptr and for pointers whose page signature or recorded allocator
// address does not match, instead of aborting.
size_t BionicAllocator::get_chunk_size(void* ptr) {
  if (ptr == nullptr) return 0;

  page_info* info = get_page_info_unchecked(ptr);
  if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    // Invalid pointer (mismatched signature)
    return 0;
  }
  if (info->type == kLargeObject) {
    // Everything from the user pointer to the end of the mapping is usable.
    return info->allocated_size - (static_cast<char*>(ptr) - reinterpret_cast<char*>(info));
  }

  BionicSmallObjectAllocator* allocator = get_small_object_allocator_unchecked(info->type);
  if (allocator != info->allocator_addr) {
    // Invalid pointer.
    return 0;
  }
  return allocator->get_block_size();
}
402
Elliott Hughes8f653f82024-05-29 22:25:37 +0000403BionicSmallObjectAllocator* BionicAllocator::get_small_object_allocator_unchecked(uint32_t type) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700404 if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) {
Christopher Ferris7a3681e2017-04-24 17:48:32 -0700405 async_safe_fatal("invalid type: %u", type);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700406 }
407
Dimitry Ivanov65707b62016-07-29 13:25:33 -0700408 initialize_allocators();
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700409 return &allocators_[type - kSmallObjectMinSizeLog2];
410}
Elliott Hughes8f653f82024-05-29 22:25:37 +0000411
412BionicSmallObjectAllocator* BionicAllocator::get_small_object_allocator(page_info* pi, void* ptr) {
413 BionicSmallObjectAllocator* result = get_small_object_allocator_unchecked(pi->type);
414 if (result != pi->allocator_addr) {
415 async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
416 }
417 return result;
418}