/*
 * Copyright (C) 2015 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_allocator.h"
#include "linker_debug.h"
#include "linker.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#include <async_safe/log.h>

//
// LinkerMemoryAllocator is a general purpose allocator
// designed to provide the same functionality as the malloc/free/realloc
// libc functions.
//
// On alloc:
// If size is > 1k the allocator proxies the malloc call directly to mmap.
// If size is <= 1k the allocator uses a SmallObjectAllocator for the size
// rounded up to the nearest power of two.
//
// On free:
//
// For a pointer allocated using the proxy-to-mmap path the allocator unmaps
// the memory.
//
// For a pointer allocated using a SmallObjectAllocator it adds
// the block to the free_block_list of the corresponding page. If the number of
// free pages reaches 2, SmallObjectAllocator munmaps one of the pages, keeping
// the other one in reserve.
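//
// For example (illustrative only, assuming kSmallObjectMaxSizeLog2 == 10, i.e.
// kSmallObjectMaxSize == 1k):
//   alloc(100)  -> rounded up to 128, served by the 128-byte small object
//                  allocator.
//   alloc(4000) -> larger than 1k, proxied directly to mmap.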

// Memory management for large objects is fairly straightforward, but for small
// objects it is more complicated. If you are changing this code, one simple
// way to evaluate the memory usage change is by running 'dd' and examining the
// memory usage with 'showmap $(pidof dd)'. 'dd' is nice in that:
// 1. It links in quite a few libraries, so you get some linker memory use.
// 2. When run with no arguments, it sits waiting for input, so it is easy to
//    examine its memory usage with showmap.
// 3. Since it does nothing while waiting for input, the memory usage is
//    deterministic.

static const char kSignature[4] = {'L', 'M', 'A', 1};

static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2;

// This type is used for large allocations (with size > 1k).
static const uint32_t kLargeObject = 111;

// Allocated pointers must be at least 16-byte aligned. Round up the size of
// page_info to a multiple of 16.
static constexpr size_t kPageInfoSize = __BIONIC_ALIGN(sizeof(page_info), 16);

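// For number >= 1 this returns ceil(log2(number)): the exponent of the
// smallest power of two that is >= number. For example, log2(100) == 7 and
// log2(129) == 8, so allocation sizes are rounded up to the next power-of-two
// bucket.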
static inline uint16_t log2(size_t number) {
  uint16_t result = 0;
  number--;

  while (number != 0) {
    result++;
    number >>= 1;
  }

  return result;
}

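// Each LinkerSmallObjectAllocator hands out fixed-size blocks carved out of
// whole pages: a page starts with a small_object_page_info header, followed by
// blocks_per_page_ blocks of block_size_ bytes. (As a rough illustration, with
// 4096-byte pages and 16-byte blocks around 250 blocks fit per page; the exact
// count depends on sizeof(small_object_page_info).)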
LinkerSmallObjectAllocator::LinkerSmallObjectAllocator(uint32_t type,
                                                       size_t block_size)
    : type_(type),
      block_size_(block_size),
      blocks_per_page_((PAGE_SIZE - sizeof(small_object_page_info)) /
                       block_size),
      free_pages_cnt_(0),
      page_list_(nullptr) {}

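// Takes a block from the head of the first page on the page list. A
// small_object_block_record with free_blocks_cnt > 1 describes a run of
// contiguous free blocks, so carving one block off just moves the record
// forward by block_size_ bytes.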
void* LinkerSmallObjectAllocator::alloc() {
  CHECK(block_size_ != 0);

  if (page_list_ == nullptr) {
    alloc_page();
  }

  // Fully allocated pages are de-managed and removed from the page list, so
  // every page from the page list must be usable. Let's just take the first
  // one.
  small_object_page_info* page = page_list_;
  CHECK(page->free_block_list != nullptr);

  small_object_block_record* const block_record = page->free_block_list;
  if (block_record->free_blocks_cnt > 1) {
    small_object_block_record* next_free =
        reinterpret_cast<small_object_block_record*>(
            reinterpret_cast<uint8_t*>(block_record) + block_size_);
    next_free->next = block_record->next;
    next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1;
    page->free_block_list = next_free;
  } else {
    page->free_block_list = block_record->next;
  }

  if (page->free_blocks_cnt == blocks_per_page_) {
    free_pages_cnt_--;
  }

  page->free_blocks_cnt--;

  memset(block_record, 0, block_size_);

  if (page->free_blocks_cnt == 0) {
    // De-manage fully allocated pages. These pages will be managed again if
    // a block is freed.
    remove_from_page_list(page);
  }

  return block_record;
}

void LinkerSmallObjectAllocator::free_page(small_object_page_info* page) {
  CHECK(page->free_blocks_cnt == blocks_per_page_);
  if (page->prev_page) {
    page->prev_page->next_page = page->next_page;
  }
  if (page->next_page) {
    page->next_page->prev_page = page->prev_page;
  }
  if (page_list_ == page) {
    page_list_ = page->next_page;
  }
  munmap(page, PAGE_SIZE);
  free_pages_cnt_--;
}

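// Returns a block to its owning page (found by rounding the pointer down to a
// page boundary). Freed blocks are pushed onto the page's free list as
// single-block records; they are not coalesced with neighboring free blocks.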
void LinkerSmallObjectAllocator::free(void* ptr) {
  small_object_page_info* const page =
      reinterpret_cast<small_object_page_info*>(
          PAGE_START(reinterpret_cast<uintptr_t>(ptr)));

  if (reinterpret_cast<uintptr_t>(ptr) % block_size_ != 0) {
    async_safe_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_);
  }

  memset(ptr, 0, block_size_);
  small_object_block_record* const block_record =
      reinterpret_cast<small_object_block_record*>(ptr);

  block_record->next = page->free_block_list;
  block_record->free_blocks_cnt = 1;

  page->free_block_list = block_record;
  page->free_blocks_cnt++;

  if (page->free_blocks_cnt == blocks_per_page_) {
    if (++free_pages_cnt_ > 1) {
      // If we already have a free page, unmap this one.
      free_page(page);
    }
  } else if (page->free_blocks_cnt == 1) {
    // We just freed from a full page. Add this page back to the list.
    add_to_page_list(page);
  }
}

void LinkerSmallObjectAllocator::alloc_page() {
  void* const map_ptr = mmap(nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %s", strerror(errno));
  }

  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, PAGE_SIZE,
        "linker_alloc_small_objects");

  small_object_page_info* const page =
      reinterpret_cast<small_object_page_info*>(map_ptr);
  memcpy(page->info.signature, kSignature, sizeof(kSignature));
  page->info.type = type_;
  page->info.allocator_addr = this;

  page->free_blocks_cnt = blocks_per_page_;

  // Align the first block to block_size_.
  const uintptr_t first_block_addr =
      __BIONIC_ALIGN(reinterpret_cast<uintptr_t>(page + 1), block_size_);
  small_object_block_record* const first_block =
      reinterpret_cast<small_object_block_record*>(first_block_addr);

  first_block->next = nullptr;
  first_block->free_blocks_cnt = blocks_per_page_;

  page->free_block_list = first_block;

  add_to_page_list(page);

  free_pages_cnt_++;
}

void LinkerSmallObjectAllocator::add_to_page_list(small_object_page_info* page) {
  page->next_page = page_list_;
  page->prev_page = nullptr;
  if (page_list_) {
    page_list_->prev_page = page;
  }
  page_list_ = page;
}

void LinkerSmallObjectAllocator::remove_from_page_list(
    small_object_page_info* page) {
  if (page->prev_page) {
    page->prev_page->next_page = page->next_page;
  }
  if (page->next_page) {
    page->next_page->prev_page = page->prev_page;
  }
  if (page_list_ == page) {
    page_list_ = page->next_page;
  }
  page->prev_page = nullptr;
  page->next_page = nullptr;
}

void LinkerMemoryAllocator::initialize_allocators() {
  if (allocators_ != nullptr) {
    return;
  }

  LinkerSmallObjectAllocator* allocators =
      reinterpret_cast<LinkerSmallObjectAllocator*>(allocators_buf_);

  for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) {
    uint32_t type = i + kSmallObjectMinSizeLog2;
    new (allocators + i) LinkerSmallObjectAllocator(type, 1 << type);
  }

  allocators_ = allocators;
}

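// Large allocations get a dedicated anonymous mapping: a page_info header
// padded to kPageInfoSize is followed by the user data, and the total is
// rounded up to a page boundary. For example, with 4096-byte pages,
// alloc_mmap(5000) maps 8192 bytes and returns map_ptr + kPageInfoSize.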
void* LinkerMemoryAllocator::alloc_mmap(size_t size) {
  size_t allocated_size = PAGE_END(size + kPageInfoSize);
  void* map_ptr = mmap(nullptr, allocated_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS,
                       -1, 0);

  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %s", strerror(errno));
  }

  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "linker_alloc_lob");

  page_info* info = reinterpret_cast<page_info*>(map_ptr);
  memcpy(info->signature, kSignature, sizeof(kSignature));
  info->type = kLargeObject;
  info->allocated_size = allocated_size;

  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(info) +
                                 kPageInfoSize);
}

void* LinkerMemoryAllocator::alloc(size_t size) {
  // Treat alloc(0) as alloc(1).
  if (size == 0) {
    size = 1;
  }

  if (size > kSmallObjectMaxSize) {
    return alloc_mmap(size);
  }

  uint16_t log2_size = log2(size);

  if (log2_size < kSmallObjectMinSizeLog2) {
    log2_size = kSmallObjectMinSizeLog2;
  }

  return get_small_object_allocator(log2_size)->alloc();
}

page_info* LinkerMemoryAllocator::get_page_info(void* ptr) {
  page_info* info = reinterpret_cast<page_info*>(PAGE_START(reinterpret_cast<size_t>(ptr)));
  if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr);
  }

  return info;
}

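// realloc never shrinks an allocation in place: if the new size still fits in
// the existing block or mapping the same pointer is returned; otherwise a new
// allocation is made, the old contents are copied over, and the old block is
// freed.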
void* LinkerMemoryAllocator::realloc(void* ptr, size_t size) {
  if (ptr == nullptr) {
    return alloc(size);
  }

  if (size == 0) {
    free(ptr);
    return nullptr;
  }

  page_info* info = get_page_info(ptr);

  size_t old_size = 0;

  if (info->type == kLargeObject) {
    old_size = info->allocated_size - kPageInfoSize;
  } else {
    LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    if (allocator != info->allocator_addr) {
      async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
    }

    old_size = allocator->get_block_size();
  }

  if (old_size < size) {
    void* result = alloc(size);
    memcpy(result, ptr, old_size);
    free(ptr);
    return result;
  }

  return ptr;
}

void LinkerMemoryAllocator::free(void* ptr) {
  if (ptr == nullptr) {
    return;
  }

  page_info* info = get_page_info(ptr);

  if (info->type == kLargeObject) {
    munmap(info, info->allocated_size);
  } else {
    LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    if (allocator != info->allocator_addr) {
      async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
    }

    allocator->free(ptr);
  }
}

LinkerSmallObjectAllocator* LinkerMemoryAllocator::get_small_object_allocator(uint32_t type) {
  if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) {
    async_safe_fatal("invalid type: %u", type);
  }

  initialize_allocators();
  return &allocators_[type - kSmallObjectMinSizeLog2];
}