/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Header page:
//
// For the minimum allocation size (8 bytes), the 4032-byte free bitmap in the
// header can track 4032*8 = 32256 allocations, covering 4032*8*8 = 258048
// bytes, which is 256KiB minus the header page
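//
// Chunk layout: each chunk is a 256KiB mmap region aligned to its own size.
// The first 4KiB page holds the Chunk header (metadata plus the dirty-page and
// free-allocation bitmaps); the remaining 63 pages hold the allocations.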

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <sys/cdefs.h>
#include <sys/mman.h>

#include <atomic>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>

#include "android-base/macros.h"

#include "Allocator.h"
#include "LinkedList.h"
#include "anon_vma_naming.h"

// runtime interfaces used:
// abort
// assert - fprintf + mmap
// mmap
// munmap
// prctl

constexpr size_t const_log2(size_t n, size_t p = 0) {
  return (n <= 1) ? p : const_log2(n / 2, p + 1);
}

constexpr unsigned int div_round_up(unsigned int x, unsigned int y) {
  return (x + y - 1) / y;
}

static constexpr size_t kPageSize = 4096;
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets =
    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;
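// With the constants above: kMaxBucketAllocationSize is 64KiB, kNumBuckets is
// 16 - 3 + 1 = 14 (power-of-two buckets from 8 bytes to 64KiB), and
// kUsablePagesPerChunk is 63.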

std::atomic<int> heap_count;

class Chunk;

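// HeapImpl hands out small allocations from per-bucket Chunks (power-of-two
// sizes from 8 bytes up to 64KiB) and falls back to a direct mmap for anything
// larger; Alloc and Free serialize on the single mutex m_.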
class HeapImpl {
 public:
  HeapImpl();
  ~HeapImpl();
  void* operator new(std::size_t count) noexcept;
  void operator delete(void* ptr);

  void* Alloc(size_t size);
  void Free(void* ptr);
  bool Empty();

  void MoveToFullList(Chunk* chunk, int bucket_);
  void MoveToFreeList(Chunk* chunk, int bucket_);

 private:
  DISALLOW_COPY_AND_ASSIGN(HeapImpl);

  LinkedList<Chunk*> free_chunks_[kNumBuckets];
  LinkedList<Chunk*> full_chunks_[kNumBuckets];

  void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
  void* MapAlloc(size_t size);
  void MapFree(void* ptr);
  void* AllocLocked(size_t size);
  void FreeLocked(void* ptr);

  struct MapAllocation {
    void* ptr;
    size_t size;
    MapAllocation* next;
  };
  MapAllocation* map_allocation_list_;
  std::mutex m_;
};

// Integer log 2, rounds down
static inline unsigned int log2(size_t n) {
  return 8 * sizeof(unsigned long long) - __builtin_clzll(n) - 1;
}
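// e.g. log2(8) == 3, log2(9) == 3, log2(16) == 4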

static inline unsigned int size_to_bucket(size_t size) {
  // Anything smaller than the minimum allocation size goes in the smallest
  // bucket (index 0), not in a bucket indexed by the size itself.
  if (size < kMinBucketAllocationSize) return 0;
  return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}
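// e.g. size_to_bucket(8) == 0, size_to_bucket(9..16) == 1,
// size_to_bucket(kMaxBucketAllocationSize) == kNumBuckets - 1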

static inline size_t bucket_to_size(unsigned int bucket) {
  return kMinBucketAllocationSize << bucket;
}

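// Maps at least |size| bytes of anonymous memory aligned to |align| by
// over-allocating and trimming the excess from both ends with munmap, then
// names the mapping so it is identifiable in /proc/<pid>/maps.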
static void* MapAligned(size_t size, size_t align) {
  const int prot = PROT_READ | PROT_WRITE;
  const int flags = MAP_ANONYMOUS | MAP_PRIVATE;

  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  // Over-allocate enough to align
  size_t map_size = size + align - kPageSize;
  if (map_size < size) {
    return nullptr;
  }

  void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
  if (ptr == MAP_FAILED) {
    return nullptr;
  }

  size_t aligned_size = map_size;
  void* aligned_ptr = ptr;

  std::align(align, size, aligned_ptr, aligned_size);

  // Trim beginning
  if (aligned_ptr != ptr) {
    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
    munmap(ptr, extra);
    map_size -= extra;
    ptr = aligned_ptr;
  }

  // Trim end
  if (map_size != size) {
    assert(map_size > size);
    assert(ptr != NULL);
    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
  }

#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
        "leak_detector_malloc");

  return ptr;
}

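// A Chunk is one kChunkSize mmap region aligned to kChunkSize.  This header
// (bucket metadata plus the dirty-page and free-allocation bitmaps) occupies
// the first page; data_ marks the start of the allocation space that follows.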
class Chunk {
 public:
  static void* operator new(std::size_t count) noexcept;
  static void operator delete(void* ptr);
  Chunk(HeapImpl* heap, int bucket);
  ~Chunk() {}

  void* Alloc();
  void Free(void* ptr);
  void Purge();
  bool Empty();

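  // Bucket allocations never begin at a kChunkSize boundary (the header page
  // comes first), while MapAlloc mappings are aligned to kChunkSize, so the
  // low bits of a pointer distinguish the two cases.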
  static Chunk* ptr_to_chunk(void* ptr) {
    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
  }
  static bool is_chunk(void* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
  }

  unsigned int free_count() { return free_count_; }
  HeapImpl* heap() { return heap_; }
  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count

 private:
  DISALLOW_COPY_AND_ASSIGN(Chunk);
  HeapImpl* heap_;
  unsigned int bucket_;
  unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
  unsigned int max_allocations_;    // maximum number of allocations in the chunk
  unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
  unsigned int free_count_;         // number of available allocations
  unsigned int frees_since_purge_;  // number of calls to Free since last Purge

  // bitmap of pages that have been dirtied
  uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

  // bitmap of free allocations.
  uint32_t free_bitmap_[kUsableChunkSize / kMinBucketAllocationSize / 32];

  char data_[0];

  unsigned int ptr_to_n(void* ptr) {
    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
    return offset / allocation_size_;
  }
  void* n_to_ptr(unsigned int n) { return data_ + n * allocation_size_; }
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");

// Override new operator on chunk to use mmap to allocate kChunkSize
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(Chunk));
  void* mem = MapAligned(kChunkSize, kChunkSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  return mem;
}

// Override delete operator on chunk to munmap the kChunkSize mapping
void Chunk::operator delete(void* ptr) {
  assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
  munmap(ptr, kChunkSize);
}

Chunk::Chunk(HeapImpl* heap, int bucket)
    : node_(this),
      heap_(heap),
      bucket_(bucket),
      allocation_size_(bucket_to_size(bucket)),
      max_allocations_(kUsableChunkSize / allocation_size_),
      first_free_bitmap_(0),
      free_count_(max_allocations_),
      frees_since_purge_(0) {
  memset(dirty_pages_, 0, sizeof(dirty_pages_));
  memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}

bool Chunk::Empty() {
  return free_count_ == max_allocations_;
}

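// Finds the first free slot by scanning free_bitmap_ from first_free_bitmap_,
// marks the page containing the slot as dirty, and moves the chunk to the
// heap's full list once the last slot is handed out.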
void* Chunk::Alloc() {
  assert(free_count_ > 0);

  unsigned int i = first_free_bitmap_;
  while (free_bitmap_[i] == 0) i++;
  assert(i < arraysize(free_bitmap_));
  unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
  assert(free_bitmap_[i] & (1U << bit));
  free_bitmap_[i] &= ~(1U << bit);
  unsigned int n = i * 32 + bit;
  assert(n < max_allocations_);

  unsigned int page = n * allocation_size_ / kPageSize;
  assert(page / 32 < arraysize(dirty_pages_));
  dirty_pages_[page / 32] |= 1U << (page % 32);

  free_count_--;
  if (free_count_ == 0) {
    heap_->MoveToFullList(this, bucket_);
  }

  return n_to_ptr(n);
}

void Chunk::Free(void* ptr) {
  assert(is_chunk(ptr));
  assert(ptr_to_chunk(ptr) == this);

  unsigned int n = ptr_to_n(ptr);
  unsigned int i = n / 32;
  unsigned int bit = n % 32;

  assert(i < arraysize(free_bitmap_));
  assert(!(free_bitmap_[i] & (1U << bit)));
  free_bitmap_[i] |= 1U << bit;
  free_count_++;

  if (i < first_free_bitmap_) {
    first_free_bitmap_ = i;
  }

  if (free_count_ == 1) {
    heap_->MoveToFreeList(this, bucket_);
  } else {
    // TODO(ccross): move down free list if necessary
  }

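  // Purge once roughly 16 pages' worth of allocations have been freed since
  // the last purge.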
  if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
    Purge();
  }
}

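// Purge currently only resets the free counter; releasing dirty-but-free pages
// back to the kernel is left unimplemented.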
void Chunk::Purge() {
  frees_since_purge_ = 0;

  // unsigned int allocsPerPage = kPageSize / allocation_size_;
}

// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(HeapImpl));
  void* mem = MapAligned(kPageSize, kPageSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  heap_count++;
  return mem;
}

void HeapImpl::operator delete(void* ptr) {
  munmap(ptr, kPageSize);
}

HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}

bool HeapImpl::Empty() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
  }

  return true;
}

HeapImpl::~HeapImpl() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    while (!free_chunks_[i].empty()) {
      Chunk* chunk = free_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
    while (!full_chunks_[i].empty()) {
      Chunk* chunk = full_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
  }
}

void* HeapImpl::Alloc(size_t size) {
  std::lock_guard<std::mutex> lk(m_);
  return AllocLocked(size);
}

void* HeapImpl::AllocLocked(size_t size) {
  if (size > kMaxBucketAllocationSize) {
    return MapAlloc(size);
  }
  int bucket = size_to_bucket(size);
  if (free_chunks_[bucket].empty()) {
    Chunk* chunk = new Chunk(this, bucket);
    free_chunks_[bucket].insert(chunk->node_);
  }
  return free_chunks_[bucket].next()->data()->Alloc();
}

void HeapImpl::Free(void* ptr) {
  std::lock_guard<std::mutex> lk(m_);
  FreeLocked(ptr);
}

void HeapImpl::FreeLocked(void* ptr) {
  if (!Chunk::is_chunk(ptr)) {
    HeapImpl::MapFree(ptr);
  } else {
    Chunk* chunk = Chunk::ptr_to_chunk(ptr);
    assert(chunk->heap() == this);
    chunk->Free(ptr);
  }
}

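// Allocations larger than kMaxBucketAllocationSize bypass the buckets: the
// region is mapped directly (aligned to kChunkSize) and tracked in a
// singly-linked list of MapAllocation records, which are themselves
// bucket-allocated.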
void* HeapImpl::MapAlloc(size_t size) {
  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
  void* ptr = MapAligned(size, kChunkSize);
  if (!ptr) {
    FreeLocked(allocation);
    abort();  // throw std::bad_alloc;
  }
  allocation->ptr = ptr;
  allocation->size = size;
  allocation->next = map_allocation_list_;
  map_allocation_list_ = allocation;

  return ptr;
}

void HeapImpl::MapFree(void* ptr) {
  MapAllocation** allocation = &map_allocation_list_;
  while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;

  assert(*allocation != nullptr);

  // Unlink the record before freeing it so its next pointer is not read after
  // the record has been returned to its chunk.
  MapAllocation* to_free = *allocation;
  *allocation = to_free->next;

  munmap(to_free->ptr, to_free->size);
  FreeLocked(to_free);
}

void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &free_chunks_[bucket]);
}

void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &full_chunks_[bucket]);
}

void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
  // Remove from old list
  chunk->node_.remove();

  LinkedList<Chunk*>* node = head;
  // Insert into new list, sorted by lowest free count
  while (node->next() != head && node->data() != nullptr &&
         node->data()->free_count() < chunk->free_count())
    node = node->next();

  node->insert(chunk->node_);
}

Heap::Heap() {
  // HeapImpl overloads the operator new in order to mmap itself instead of
  // allocating with new.
  // Can't use a shared_ptr to store the result because shared_ptr needs to
  // allocate, and Allocator<T> is still being constructed.
  impl_ = new HeapImpl();
  owns_impl_ = true;
}

Heap::~Heap() {
  if (owns_impl_) {
    delete impl_;
  }
}

void* Heap::allocate(size_t size) {
  return impl_->Alloc(size);
}

void Heap::deallocate(void* ptr) {
  impl_->Free(ptr);
}

void Heap::deallocate(HeapImpl* impl, void* ptr) {
  impl->Free(ptr);
}

bool Heap::empty() {
  return impl_->Empty();
}
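
// Minimal usage sketch (illustrative only; uses the Heap methods defined
// above):
//
//   Heap heap;
//   void* small = heap.allocate(16);       // served from a 16-byte bucket chunk
//   void* large = heap.allocate(1 << 20);  // > kMaxBucketAllocationSize, mapped via MapAlloc
//   heap.deallocate(small);
//   heap.deallocate(large);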