/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#if __has_feature(hwaddress_sanitizer)
#include <sanitizer/allocator_interface.h>
#endif

#include <private/bionic_malloc_dispatch.h>

#include "malloc_common.h"
#include "malloc_common_dynamic.h"
#include "malloc_heapprofd.h"
#include "malloc_limit.h"

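// Implements a MallocDispatch table that enforces a process-wide allocation
// limit. Every allocation path checks the requested size against the limit
// before forwarding to the underlying allocator: the default dispatch table
// when other hooks (such as heapprofd) are installed, or the native
// allocator otherwise.
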
__BEGIN_DECLS
static void* LimitCalloc(size_t n_elements, size_t elem_size);
static void LimitFree(void* mem);
static void* LimitMalloc(size_t bytes);
static void* LimitMemalign(size_t alignment, size_t bytes);
static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size);
static void* LimitRealloc(void* old_mem, size_t bytes);
static void* LimitAlignedAlloc(size_t alignment, size_t size);
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes);
static void* LimitValloc(size_t bytes);
#endif

// Pass through functions.
static size_t LimitUsableSize(const void* mem);
static struct mallinfo LimitMallinfo();
static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg);
static void LimitMallocDisable();
static void LimitMallocEnable();
static int LimitMallocInfo(int options, FILE* fp);
static int LimitMallopt(int param, int value);
__END_DECLS

static constexpr MallocDispatch __limit_dispatch
  __attribute__((unused)) = {
    LimitCalloc,
    LimitFree,
    LimitMallinfo,
    LimitMalloc,
    LimitUsableSize,
    LimitMemalign,
    LimitPosixMemalign,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitPvalloc,
#endif
    LimitRealloc,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitValloc,
#endif
    LimitIterate,
    LimitMallocDisable,
    LimitMallocEnable,
    LimitMallopt,
    LimitAlignedAlloc,
    LimitMallocInfo,
  };

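// Running total of allocated bytes, measured via malloc_usable_size(), and
// the process-wide cap. gAllocLimit is written once in LimitEnable() before
// the limit dispatch table is installed, so later reads need no
// synchronization.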
static _Atomic uint64_t gAllocated;
static uint64_t gAllocLimit;

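// Returns true if allocating |bytes| more would keep the running total at or
// below gAllocLimit. The relaxed load is not atomic with the later
// IncrementLimit() update, so two racing allocations can both pass the check;
// the limit is best-effort rather than exact.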
static inline bool CheckLimit(size_t bytes) {
  uint64_t total;
  if (__predict_false(__builtin_add_overflow(
                          atomic_load_explicit(&gAllocated, memory_order_relaxed), bytes, &total) ||
                      total > gAllocLimit)) {
    return false;
  }
  return true;
}

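// On success, charges the allocation's usable size, which may exceed the
// requested size, to the running total.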
static inline void* IncrementLimit(void* mem) {
  if (__predict_false(mem == nullptr)) {
    return nullptr;
  }
  atomic_fetch_add(&gAllocated, LimitUsableSize(mem));
  return mem;
}

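// Allocation entry points: each checks the limit, forwards to the underlying
// allocator, then charges the usable size of the result via IncrementLimit().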
void* LimitCalloc(size_t n_elements, size_t elem_size) {
  size_t total;
  // calloc allocates n_elements * elem_size bytes, so check the product
  // (failing on multiplication overflow) against the limit.
  if (__builtin_mul_overflow(n_elements, elem_size, &total) || !CheckLimit(total)) {
    warning_log("malloc_limit: calloc(%zu, %zu) exceeds limit %" PRId64, n_elements, elem_size,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->calloc(n_elements, elem_size));
  }
  return IncrementLimit(Malloc(calloc)(n_elements, elem_size));
}

void LimitFree(void* mem) {
  atomic_fetch_sub(&gAllocated, LimitUsableSize(mem));
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->free(mem);
  }
  return Malloc(free)(mem);
}

void* LimitMalloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: malloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->malloc(bytes));
  }
  return IncrementLimit(Malloc(malloc)(bytes));
}

static void* LimitMemalign(size_t alignment, size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: memalign(%zu, %zu) exceeds limit %" PRId64, alignment, bytes,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->memalign(alignment, bytes));
  }
  return IncrementLimit(Malloc(memalign)(alignment, bytes));
}

static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size) {
  if (!CheckLimit(size)) {
    warning_log("malloc_limit: posix_memalign(%zu, %zu) exceeds limit %" PRId64, alignment, size,
                gAllocLimit);
    return ENOMEM;
  }
  int retval;
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    retval = dispatch_table->posix_memalign(memptr, alignment, size);
  } else {
    retval = Malloc(posix_memalign)(memptr, alignment, size);
  }
  if (__predict_false(retval != 0)) {
    return retval;
  }
  IncrementLimit(*memptr);
  return 0;
}

static void* LimitAlignedAlloc(size_t alignment, size_t size) {
  if (!CheckLimit(size)) {
    warning_log("malloc_limit: aligned_alloc(%zu, %zu) exceeds limit %" PRId64, alignment, size,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->aligned_alloc(alignment, size));
  }
  return IncrementLimit(Malloc(aligned_alloc)(alignment, size));
}

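// Only growth beyond the current usable size counts against the limit. Note
// that, unlike standard realloc, exceeding the limit frees the old pointer
// instead of leaving it intact.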
static void* LimitRealloc(void* old_mem, size_t bytes) {
  size_t old_usable_size = LimitUsableSize(old_mem);
  void* new_ptr;
  // Need to check the size only if the allocation will increase in size.
  if (bytes > old_usable_size && !CheckLimit(bytes - old_usable_size)) {
    warning_log("malloc_limit: realloc(%p, %zu) exceeds limit %" PRId64, old_mem, bytes,
                gAllocLimit);
    // Free the old pointer.
    LimitFree(old_mem);
    return nullptr;
  }

  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    new_ptr = dispatch_table->realloc(old_mem, bytes);
  } else {
    new_ptr = Malloc(realloc)(old_mem, bytes);
  }

  if (__predict_false(new_ptr == nullptr)) {
    // This acts as if the pointer was freed.
    atomic_fetch_sub(&gAllocated, old_usable_size);
    return nullptr;
  }

  size_t new_usable_size = LimitUsableSize(new_ptr);
  // Assumes that most allocations increase in size, rather than shrink.
  if (__predict_false(old_usable_size > new_usable_size)) {
    atomic_fetch_sub(&gAllocated, old_usable_size - new_usable_size);
  } else {
    atomic_fetch_add(&gAllocated, new_usable_size - old_usable_size);
  }
  return new_ptr;
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: pvalloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->pvalloc(bytes));
  }
  return IncrementLimit(Malloc(pvalloc)(bytes));
}

static void* LimitValloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: valloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->valloc(bytes));
  }
  return IncrementLimit(Malloc(valloc)(bytes));
}
#endif

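// Installs __limit_dispatch as the current dispatch table. A static
// executable has no other hooks to coordinate with; the dynamic case must
// avoid racing the signal handler that enables heapprofd.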
#if defined(LIBC_STATIC)
static bool EnableLimitDispatchTable() {
  // This is the only valid way to modify the dispatch tables for a
  // static executable, so no locks are necessary.
  __libc_globals.mutate([](libc_globals* globals) {
    atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
  });
  return true;
}
#else
static bool EnableLimitDispatchTable() {
  HeapprofdMaskSignal();
  pthread_mutex_lock(&gGlobalsMutateLock);
  // All other code that calls mutate will grab the gGlobalsMutateLock.
  // However, there is one case where the lock cannot be acquired: the
  // signal handler that enables heapprofd. In order to avoid having two
  // threads calling mutate at the same time, use an atomic variable to
  // verify that only this function or the signal handler is calling mutate.
  // If this function is called while the signal handler is running, allow
  // up to five ms for the signal handler to complete before failing.
  bool enabled = false;
  size_t max_tries = 5;
  while (true) {
    if (!atomic_exchange(&gGlobalsMutating, true)) {
      __libc_globals.mutate([](libc_globals* globals) {
        atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
      });
      atomic_store(&gGlobalsMutating, false);
      enabled = true;
      break;
    }
    if (--max_tries == 0) {
      break;
    }
    usleep(1000);
  }
  pthread_mutex_unlock(&gGlobalsMutateLock);
  HeapprofdUnmaskSignal();
  if (enabled) {
    info_log("malloc_limit: Allocation limit enabled, max size %" PRId64 " bytes\n", gAllocLimit);
  } else {
    error_log("malloc_limit: Failed to enable allocation limit.");
  }
  return enabled;
}
#endif

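// Entry point for enabling the limit. |arg| must point to a size_t holding
// the limit in bytes, and the limit can only be enabled once per process.
// The allocator's current footprint seeds gAllocated so that existing
// allocations count against the limit.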
bool LimitEnable(void* arg, size_t arg_size) {
  if (arg == nullptr || arg_size != sizeof(size_t)) {
    errno = EINVAL;
    return false;
  }

  static _Atomic bool limit_enabled;
  if (atomic_exchange(&limit_enabled, true)) {
    // The limit can only be enabled once.
    error_log("malloc_limit: The allocation limit has already been set, it can only be set once.");
    return false;
  }

  gAllocLimit = *reinterpret_cast<size_t*>(arg);
#if __has_feature(hwaddress_sanitizer)
  size_t current_allocated = __sanitizer_get_current_allocated_bytes();
#else
  size_t current_allocated;
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    current_allocated = dispatch_table->mallinfo().uordblks;
  } else {
    current_allocated = Malloc(mallinfo)().uordblks;
  }
#endif
  atomic_store(&gAllocated, current_allocated);

  return EnableLimitDispatchTable();
}
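
// A minimal usage sketch, as an assumption rather than part of this file: the
// limit is normally set through android_mallopt() with the
// M_SET_ALLOCATION_LIMIT_BYTES opcode declared in <malloc.h>, which routes to
// LimitEnable():
//
//   size_t limit = 100 * 1024 * 1024;  // Cap heap allocations at 100 MiB.
//   if (!android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
//     // The limit was already set once, or the argument was invalid.
//   }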
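// Pass-through functions: these forward directly to the underlying allocator.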
static size_t LimitUsableSize(const void* mem) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_usable_size(mem);
  }
  return Malloc(malloc_usable_size)(mem);
}

static struct mallinfo LimitMallinfo() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->mallinfo();
  }
  return Malloc(mallinfo)();
}

static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->iterate(base, size, callback, arg);
  }
  return Malloc(iterate)(base, size, callback, arg);
}

static void LimitMallocDisable() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    dispatch_table->malloc_disable();
  } else {
    Malloc(malloc_disable)();
  }
}

static void LimitMallocEnable() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    dispatch_table->malloc_enable();
  } else {
    Malloc(malloc_enable)();
  }
}

static int LimitMallocInfo(int options, FILE* fp) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_info(options, fp);
  }
  return Malloc(malloc_info)(options, fp);
}

static int LimitMallopt(int param, int value) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->mallopt(param, value);
  }
  return Malloc(mallopt)(param, value);
}