blob: d9302ad1b12cf29798d15926be3663b0cbd706a9 [file] [log] [blame]
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -07001/*
2 * Copyright (C) 2015 The Android Open Source Project
Dimitry Ivanovbcc4da92017-02-15 15:31:13 -08003 * All rights reserved.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -07004 *
Dimitry Ivanovbcc4da92017-02-15 15:31:13 -08005 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070014 *
Dimitry Ivanovbcc4da92017-02-15 15:31:13 -080015 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070027 */
28
Ryan Prichard083d8502019-01-24 13:47:13 -080029#include "private/bionic_allocator.h"
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070030
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070031#include <stdlib.h>
Ryan Prichard52165b32019-01-23 17:46:24 -080032#include <string.h>
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070033#include <sys/mman.h>
Ryan Prichard96773a22019-01-24 15:22:50 -080034#include <sys/param.h>
Elliott Hughes99d54652018-08-22 10:36:23 -070035#include <sys/prctl.h>
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070036#include <unistd.h>
37
Ryan Prichard52165b32019-01-23 17:46:24 -080038#include <new>
39
Christopher Ferris7a3681e2017-04-24 17:48:32 -070040#include <async_safe/log.h>
41
Ryan Prichard96773a22019-01-24 15:22:50 -080042#include "private/bionic_macros.h"
Ryan Prichard52165b32019-01-23 17:46:24 -080043#include "private/bionic_page.h"
44
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070045//
Ryan Prichard083d8502019-01-24 13:47:13 -080046// BionicAllocator is a general purpose allocator designed to provide the same
47// functionality as the malloc/free/realloc libc functions.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070048//
49// On alloc:
// If size is > 1k the allocator proxies the malloc call directly to mmap.
// If size is <= 1k the allocator uses a SmallObjectAllocator for the size
// rounded up to the nearest power of two.
53//
54// On free:
55//
56// For a pointer allocated using proxy-to-mmap allocator unmaps
57// the memory.
58//
59// For a pointer allocated using SmallObjectAllocator it adds
Vic Yang54938512018-12-02 23:46:26 -080060// the block to free_blocks_list in the corresponding page. If the number of
61// free pages reaches 2, SmallObjectAllocator munmaps one of the pages keeping
62// the other one in reserve.
63
64// Memory management for large objects is fairly straightforward, but for small
65// objects it is more complicated. If you are changing this code, one simple
66// way to evaluate the memory usage change is by running 'dd' and examine the
67// memory usage by 'showmap $(pidof dd)'. 'dd' is nice in that:
68// 1. It links in quite a few libraries, so you get some linker memory use.
69// 2. When run with no arguments, it sits waiting for input, so it is easy to
70// examine its memory usage with showmap.
71// 3. Since it does nothing while waiting for input, the memory usage is
//    deterministic.
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070073
74static const char kSignature[4] = {'L', 'M', 'A', 1};
75
76static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2;
77
78// This type is used for large allocations (with size >1k)
79static const uint32_t kLargeObject = 111;
80
Vic Yang259429b2018-12-04 23:59:57 -080081// Allocated pointers must be at least 16-byte aligned. Round up the size of
82// page_info to multiple of 16.
83static constexpr size_t kPageInfoSize = __BIONIC_ALIGN(sizeof(page_info), 16);
84
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -070085static inline uint16_t log2(size_t number) {
86 uint16_t result = 0;
87 number--;
88
89 while (number != 0) {
90 result++;
91 number >>= 1;
92 }
93
94 return result;
95}
96
Ryan Prichard083d8502019-01-24 13:47:13 -080097BionicSmallObjectAllocator::BionicSmallObjectAllocator(uint32_t type,
Vic Yang54938512018-12-02 23:46:26 -080098 size_t block_size)
99 : type_(type),
100 block_size_(block_size),
Vic Yang259429b2018-12-04 23:59:57 -0800101 blocks_per_page_((PAGE_SIZE - sizeof(small_object_page_info)) /
102 block_size),
Vic Yang54938512018-12-02 23:46:26 -0800103 free_pages_cnt_(0),
104 page_list_(nullptr) {}
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700105
Ryan Prichard083d8502019-01-24 13:47:13 -0800106void* BionicSmallObjectAllocator::alloc() {
Dimitry Ivanovf8572112016-07-13 10:24:06 -0700107 CHECK(block_size_ != 0);
108
Vic Yang54938512018-12-02 23:46:26 -0800109 if (page_list_ == nullptr) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700110 alloc_page();
111 }
112
Vic Yang54938512018-12-02 23:46:26 -0800113 // Fully allocated pages are de-managed and removed from the page list, so
114 // every page from the page list must be useable. Let's just take the first
115 // one.
116 small_object_page_info* page = page_list_;
117 CHECK(page->free_block_list != nullptr);
118
119 small_object_block_record* const block_record = page->free_block_list;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700120 if (block_record->free_blocks_cnt > 1) {
Vic Yang54938512018-12-02 23:46:26 -0800121 small_object_block_record* next_free =
122 reinterpret_cast<small_object_block_record*>(
123 reinterpret_cast<uint8_t*>(block_record) + block_size_);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700124 next_free->next = block_record->next;
125 next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1;
Vic Yang54938512018-12-02 23:46:26 -0800126 page->free_block_list = next_free;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700127 } else {
Vic Yang54938512018-12-02 23:46:26 -0800128 page->free_block_list = block_record->next;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700129 }
130
Vic Yang259429b2018-12-04 23:59:57 -0800131 if (page->free_blocks_cnt == blocks_per_page_) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700132 free_pages_cnt_--;
133 }
134
Vic Yang54938512018-12-02 23:46:26 -0800135 page->free_blocks_cnt--;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700136
137 memset(block_record, 0, block_size_);
138
Vic Yang54938512018-12-02 23:46:26 -0800139 if (page->free_blocks_cnt == 0) {
140 // De-manage fully allocated pages. These pages will be managed again if
141 // a block is freed.
142 remove_from_page_list(page);
143 }
144
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700145 return block_record;
146}
147
Ryan Prichard083d8502019-01-24 13:47:13 -0800148void BionicSmallObjectAllocator::free_page(small_object_page_info* page) {
Vic Yang259429b2018-12-04 23:59:57 -0800149 CHECK(page->free_blocks_cnt == blocks_per_page_);
Vic Yang54938512018-12-02 23:46:26 -0800150 if (page->prev_page) {
151 page->prev_page->next_page = page->next_page;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700152 }
Vic Yang54938512018-12-02 23:46:26 -0800153 if (page->next_page) {
154 page->next_page->prev_page = page->prev_page;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700155 }
Vic Yang54938512018-12-02 23:46:26 -0800156 if (page_list_ == page) {
157 page_list_ = page->next_page;
158 }
159 munmap(page, PAGE_SIZE);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700160 free_pages_cnt_--;
161}
162
Ryan Prichard083d8502019-01-24 13:47:13 -0800163void BionicSmallObjectAllocator::free(void* ptr) {
Vic Yang54938512018-12-02 23:46:26 -0800164 small_object_page_info* const page =
165 reinterpret_cast<small_object_page_info*>(
166 PAGE_START(reinterpret_cast<uintptr_t>(ptr)));
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700167
Vic Yang259429b2018-12-04 23:59:57 -0800168 if (reinterpret_cast<uintptr_t>(ptr) % block_size_ != 0) {
Christopher Ferris7a3681e2017-04-24 17:48:32 -0700169 async_safe_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700170 }
171
172 memset(ptr, 0, block_size_);
Vic Yang54938512018-12-02 23:46:26 -0800173 small_object_block_record* const block_record =
174 reinterpret_cast<small_object_block_record*>(ptr);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700175
Vic Yang54938512018-12-02 23:46:26 -0800176 block_record->next = page->free_block_list;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700177 block_record->free_blocks_cnt = 1;
178
Vic Yang54938512018-12-02 23:46:26 -0800179 page->free_block_list = block_record;
180 page->free_blocks_cnt++;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700181
Vic Yang259429b2018-12-04 23:59:57 -0800182 if (page->free_blocks_cnt == blocks_per_page_) {
Vic Yang54938512018-12-02 23:46:26 -0800183 if (++free_pages_cnt_ > 1) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700184 // if we already have a free page - unmap this one.
Vic Yang54938512018-12-02 23:46:26 -0800185 free_page(page);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700186 }
Vic Yang54938512018-12-02 23:46:26 -0800187 } else if (page->free_blocks_cnt == 1) {
188 // We just freed from a full page. Add this page back to the list.
189 add_to_page_list(page);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700190 }
191}
192
Ryan Prichard083d8502019-01-24 13:47:13 -0800193void BionicSmallObjectAllocator::alloc_page() {
Vic Yang54938512018-12-02 23:46:26 -0800194 void* const map_ptr = mmap(nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE,
195 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700196 if (map_ptr == MAP_FAILED) {
Elliott Hughes7b0af7a2017-09-15 16:09:22 -0700197 async_safe_fatal("mmap failed: %s", strerror(errno));
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700198 }
199
Vic Yang54938512018-12-02 23:46:26 -0800200 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, PAGE_SIZE,
Ryan Prichard083d8502019-01-24 13:47:13 -0800201 "bionic_alloc_small_objects");
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700202
Vic Yang54938512018-12-02 23:46:26 -0800203 small_object_page_info* const page =
204 reinterpret_cast<small_object_page_info*>(map_ptr);
205 memcpy(page->info.signature, kSignature, sizeof(kSignature));
206 page->info.type = type_;
207 page->info.allocator_addr = this;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700208
Vic Yang259429b2018-12-04 23:59:57 -0800209 page->free_blocks_cnt = blocks_per_page_;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700210
Vic Yang259429b2018-12-04 23:59:57 -0800211 // Align the first block to block_size_.
212 const uintptr_t first_block_addr =
213 __BIONIC_ALIGN(reinterpret_cast<uintptr_t>(page + 1), block_size_);
Vic Yang54938512018-12-02 23:46:26 -0800214 small_object_block_record* const first_block =
Vic Yang259429b2018-12-04 23:59:57 -0800215 reinterpret_cast<small_object_block_record*>(first_block_addr);
216
Vic Yang54938512018-12-02 23:46:26 -0800217 first_block->next = nullptr;
Vic Yang259429b2018-12-04 23:59:57 -0800218 first_block->free_blocks_cnt = blocks_per_page_;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700219
Vic Yang54938512018-12-02 23:46:26 -0800220 page->free_block_list = first_block;
221
222 add_to_page_list(page);
Vic Yangde696602018-11-27 13:34:44 -0800223
224 free_pages_cnt_++;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700225}
226
Ryan Prichard083d8502019-01-24 13:47:13 -0800227void BionicSmallObjectAllocator::add_to_page_list(small_object_page_info* page) {
Vic Yang54938512018-12-02 23:46:26 -0800228 page->next_page = page_list_;
229 page->prev_page = nullptr;
230 if (page_list_) {
231 page_list_->prev_page = page;
232 }
233 page_list_ = page;
234}
235
Ryan Prichard083d8502019-01-24 13:47:13 -0800236void BionicSmallObjectAllocator::remove_from_page_list(
Vic Yang54938512018-12-02 23:46:26 -0800237 small_object_page_info* page) {
238 if (page->prev_page) {
239 page->prev_page->next_page = page->next_page;
240 }
241 if (page->next_page) {
242 page->next_page->prev_page = page->prev_page;
243 }
244 if (page_list_ == page) {
245 page_list_ = page->next_page;
246 }
247 page->prev_page = nullptr;
248 page->next_page = nullptr;
249}
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700250
Ryan Prichard083d8502019-01-24 13:47:13 -0800251void BionicAllocator::initialize_allocators() {
Dimitry Ivanov65707b62016-07-29 13:25:33 -0700252 if (allocators_ != nullptr) {
253 return;
254 }
255
Ryan Prichard083d8502019-01-24 13:47:13 -0800256 BionicSmallObjectAllocator* allocators =
257 reinterpret_cast<BionicSmallObjectAllocator*>(allocators_buf_);
Dimitry Ivanov65707b62016-07-29 13:25:33 -0700258
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700259 for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) {
260 uint32_t type = i + kSmallObjectMinSizeLog2;
Ryan Prichard083d8502019-01-24 13:47:13 -0800261 new (allocators + i) BionicSmallObjectAllocator(type, 1 << type);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700262 }
Dimitry Ivanov65707b62016-07-29 13:25:33 -0700263
264 allocators_ = allocators;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700265}
266
Ryan Prichard96773a22019-01-24 15:22:50 -0800267void* BionicAllocator::alloc_mmap(size_t align, size_t size) {
268 size_t header_size = __BIONIC_ALIGN(kPageInfoSize, align);
269 size_t allocated_size;
270 if (__builtin_add_overflow(header_size, size, &allocated_size) ||
271 PAGE_END(allocated_size) < allocated_size) {
272 async_safe_fatal("overflow trying to alloc %zu bytes", size);
273 }
274 allocated_size = PAGE_END(allocated_size);
Elliott Hughes7b0af7a2017-09-15 16:09:22 -0700275 void* map_ptr = mmap(nullptr, allocated_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS,
276 -1, 0);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700277
278 if (map_ptr == MAP_FAILED) {
Elliott Hughes7b0af7a2017-09-15 16:09:22 -0700279 async_safe_fatal("mmap failed: %s", strerror(errno));
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700280 }
281
Ryan Prichard083d8502019-01-24 13:47:13 -0800282 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "bionic_alloc_lob");
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700283
Ryan Prichard96773a22019-01-24 15:22:50 -0800284 void* result = static_cast<char*>(map_ptr) + header_size;
285 page_info* info = get_page_info_unchecked(result);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700286 memcpy(info->signature, kSignature, sizeof(kSignature));
287 info->type = kLargeObject;
288 info->allocated_size = allocated_size;
289
Ryan Prichard96773a22019-01-24 15:22:50 -0800290 return result;
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700291}
292
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700293
Ryan Prichard96773a22019-01-24 15:22:50 -0800294inline void* BionicAllocator::alloc_impl(size_t align, size_t size) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700295 if (size > kSmallObjectMaxSize) {
Ryan Prichard96773a22019-01-24 15:22:50 -0800296 return alloc_mmap(align, size);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700297 }
298
299 uint16_t log2_size = log2(size);
300
301 if (log2_size < kSmallObjectMinSizeLog2) {
302 log2_size = kSmallObjectMinSizeLog2;
303 }
304
305 return get_small_object_allocator(log2_size)->alloc();
306}
307
Ryan Prichard96773a22019-01-24 15:22:50 -0800308void* BionicAllocator::alloc(size_t size) {
309 // treat alloc(0) as alloc(1)
310 if (size == 0) {
311 size = 1;
312 }
313 return alloc_impl(16, size);
314}
315
316void* BionicAllocator::memalign(size_t align, size_t size) {
317 // The Bionic allocator only supports alignment up to one page, which is good
318 // enough for ELF TLS.
319 align = MIN(align, PAGE_SIZE);
320 align = MAX(align, 16);
321 if (!powerof2(align)) {
322 align = BIONIC_ROUND_UP_POWER_OF_2(align);
323 }
324 size = MAX(size, align);
325 return alloc_impl(align, size);
326}
327
328inline page_info* BionicAllocator::get_page_info_unchecked(void* ptr) {
329 uintptr_t header_page = PAGE_START(reinterpret_cast<size_t>(ptr) - kPageInfoSize);
330 return reinterpret_cast<page_info*>(header_page);
331}
332
333inline page_info* BionicAllocator::get_page_info(void* ptr) {
334 page_info* info = get_page_info_unchecked(ptr);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700335 if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
Christopher Ferris7a3681e2017-04-24 17:48:32 -0700336 async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700337 }
338
339 return info;
340}
341
Ryan Prichard083d8502019-01-24 13:47:13 -0800342void* BionicAllocator::realloc(void* ptr, size_t size) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700343 if (ptr == nullptr) {
344 return alloc(size);
345 }
346
347 if (size == 0) {
348 free(ptr);
349 return nullptr;
350 }
351
352 page_info* info = get_page_info(ptr);
353
354 size_t old_size = 0;
355
356 if (info->type == kLargeObject) {
Ryan Prichard96773a22019-01-24 15:22:50 -0800357 old_size = info->allocated_size - (static_cast<char*>(ptr) - reinterpret_cast<char*>(info));
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700358 } else {
Ryan Prichard083d8502019-01-24 13:47:13 -0800359 BionicSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700360 if (allocator != info->allocator_addr) {
Christopher Ferris7a3681e2017-04-24 17:48:32 -0700361 async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700362 }
363
364 old_size = allocator->get_block_size();
365 }
366
367 if (old_size < size) {
368 void *result = alloc(size);
369 memcpy(result, ptr, old_size);
370 free(ptr);
371 return result;
372 }
373
374 return ptr;
375}
376
Ryan Prichard083d8502019-01-24 13:47:13 -0800377void BionicAllocator::free(void* ptr) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700378 if (ptr == nullptr) {
379 return;
380 }
381
382 page_info* info = get_page_info(ptr);
383
384 if (info->type == kLargeObject) {
385 munmap(info, info->allocated_size);
386 } else {
Ryan Prichard083d8502019-01-24 13:47:13 -0800387 BionicSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700388 if (allocator != info->allocator_addr) {
Christopher Ferris7a3681e2017-04-24 17:48:32 -0700389 async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700390 }
391
392 allocator->free(ptr);
393 }
394}
395
Ryan Prichard083d8502019-01-24 13:47:13 -0800396BionicSmallObjectAllocator* BionicAllocator::get_small_object_allocator(uint32_t type) {
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700397 if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) {
Christopher Ferris7a3681e2017-04-24 17:48:32 -0700398 async_safe_fatal("invalid type: %u", type);
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700399 }
400
Dimitry Ivanov65707b62016-07-29 13:25:33 -0700401 initialize_allocators();
Dmitriy Ivanov19656ce2015-03-10 17:48:27 -0700402 return &allocators_[type - kSmallObjectMinSizeLog2];
403}