/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#include <private/bionic_malloc_dispatch.h>

#if __has_feature(hwaddress_sanitizer)
#include <sanitizer/allocator_interface.h>
#endif

#include "malloc_common.h"
#include "malloc_common_dynamic.h"
#include "malloc_heapprofd.h"
#include "malloc_limit.h"

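// malloc_limit: a MallocDispatch layer that enforces a cap on the total
// number of bytes allocated by the process. It tracks the running total of
// allocated usable bytes in gAllocated and fails any request that would push
// the total past gAllocLimit.
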
__BEGIN_DECLS
static void* LimitCalloc(size_t n_elements, size_t elem_size);
static void LimitFree(void* mem);
static void* LimitMalloc(size_t bytes);
static void* LimitMemalign(size_t alignment, size_t bytes);
static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size);
static void* LimitRealloc(void* old_mem, size_t bytes);
static void* LimitAlignedAlloc(size_t alignment, size_t size);
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes);
static void* LimitValloc(size_t bytes);
#endif

// Pass-through functions.
static size_t LimitUsableSize(const void* mem);
static struct mallinfo LimitMallinfo();
static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg);
static void LimitMallocDisable();
static void LimitMallocEnable();
static int LimitMallocInfo(int options, FILE* fp);
static int LimitMallopt(int param, int value);
__END_DECLS

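// Dispatch table installed while the limit is active. The initializers are
// positional, so their order must match the MallocDispatch struct in
// <private/bionic_malloc_dispatch.h>.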
static constexpr MallocDispatch __limit_dispatch
  __attribute__((unused)) = {
    LimitCalloc,
    LimitFree,
    LimitMallinfo,
    LimitMalloc,
    LimitUsableSize,
    LimitMemalign,
    LimitPosixMemalign,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitPvalloc,
#endif
    LimitRealloc,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitValloc,
#endif
    LimitIterate,
    LimitMallocDisable,
    LimitMallocEnable,
    LimitMallopt,
    LimitAlignedAlloc,
    LimitMallocInfo,
  };

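// Running total of allocated bytes (as reported by malloc_usable_size) and
// the cap installed by LimitEnable.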
static _Atomic uint64_t gAllocated;
static uint64_t gAllocLimit;

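// Returns true if the current total plus the new request neither overflows
// uint64_t nor exceeds gAllocLimit. Note that this check and the accounting
// in IncrementLimit are separate atomic operations, so concurrent
// allocations can transiently overshoot the limit by a small amount.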
static inline bool CheckLimit(size_t bytes) {
  uint64_t total;
  if (__predict_false(__builtin_add_overflow(
          atomic_load_explicit(&gAllocated, memory_order_relaxed), bytes, &total) ||
      total > gAllocLimit)) {
    return false;
  }
  return true;
}

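// On success, add the allocation's usable size (which may be larger than the
// requested size) to the running total.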
static inline void* IncrementLimit(void* mem) {
  if (__predict_false(mem == nullptr)) {
    return nullptr;
  }
  atomic_fetch_add(&gAllocated, LimitUsableSize(mem));
  return mem;
}

void* LimitCalloc(size_t n_elements, size_t elem_size) {
  size_t total;
  if (__builtin_mul_overflow(n_elements, elem_size, &total) || !CheckLimit(total)) {
    warning_log("malloc_limit: calloc(%zu, %zu) exceeds limit %" PRId64, n_elements, elem_size,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->calloc(n_elements, elem_size));
  }
  return IncrementLimit(Malloc(calloc)(n_elements, elem_size));
}

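// Decrement the running total before forwarding the free. Freeing nullptr is
// handled naturally, since malloc_usable_size(nullptr) returns 0.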
void LimitFree(void* mem) {
  atomic_fetch_sub(&gAllocated, LimitUsableSize(mem));
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->free(mem);
  }
  return Malloc(free)(mem);
}

void* LimitMalloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: malloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->malloc(bytes));
  }
  return IncrementLimit(Malloc(malloc)(bytes));
}

static void* LimitMemalign(size_t alignment, size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: memalign(%zu, %zu) exceeds limit %" PRId64, alignment, bytes,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->memalign(alignment, bytes));
  }
  return IncrementLimit(Malloc(memalign)(alignment, bytes));
}

static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size) {
  if (!CheckLimit(size)) {
    warning_log("malloc_limit: posix_memalign(%zu, %zu) exceeds limit %" PRId64, alignment, size,
                gAllocLimit);
    return ENOMEM;
  }
  int retval;
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    retval = dispatch_table->posix_memalign(memptr, alignment, size);
  } else {
    retval = Malloc(posix_memalign)(memptr, alignment, size);
  }
  if (__predict_false(retval != 0)) {
    return retval;
  }
  IncrementLimit(*memptr);
  return 0;
}

static void* LimitAlignedAlloc(size_t alignment, size_t size) {
  if (!CheckLimit(size)) {
    warning_log("malloc_limit: aligned_alloc(%zu, %zu) exceeds limit %" PRId64, alignment, size,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->aligned_alloc(alignment, size));
  }
  return IncrementLimit(Malloc(aligned_alloc)(alignment, size));
}

static void* LimitRealloc(void* old_mem, size_t bytes) {
  size_t old_usable_size = LimitUsableSize(old_mem);
  void* new_ptr;
  // Need to check the size only if the allocation will increase in size.
  if (bytes > old_usable_size && !CheckLimit(bytes - old_usable_size)) {
    warning_log("malloc_limit: realloc(%p, %zu) exceeds limit %" PRId64, old_mem, bytes,
                gAllocLimit);
    // Free the old pointer.
    LimitFree(old_mem);
    return nullptr;
  }

  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    new_ptr = dispatch_table->realloc(old_mem, bytes);
  } else {
    new_ptr = Malloc(realloc)(old_mem, bytes);
  }

  if (__predict_false(new_ptr == nullptr)) {
    // This acts as if the pointer was freed.
    atomic_fetch_sub(&gAllocated, old_usable_size);
    return nullptr;
  }

  size_t new_usable_size = LimitUsableSize(new_ptr);
  // Assumes that most allocations increase in size, rather than shrink.
  if (__predict_false(old_usable_size > new_usable_size)) {
    atomic_fetch_sub(&gAllocated, old_usable_size - new_usable_size);
  } else {
    atomic_fetch_add(&gAllocated, new_usable_size - old_usable_size);
  }
  return new_ptr;
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: pvalloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->pvalloc(bytes));
  }
  return IncrementLimit(Malloc(pvalloc)(bytes));
}

static void* LimitValloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: valloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->valloc(bytes));
  }
  return IncrementLimit(Malloc(valloc)(bytes));
}
#endif

bool MallocLimitInstalled() {
  return GetDispatchTable() == &__limit_dispatch;
}

#if defined(LIBC_STATIC)
static bool EnableLimitDispatchTable() {
  // This is the only valid way to modify the dispatch tables for a
  // static executable, so no locks are necessary.
  __libc_globals.mutate([](libc_globals* globals) {
    atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
  });
  return true;
}
#else
static bool EnableLimitDispatchTable() {
  pthread_mutex_lock(&gGlobalsMutateLock);
  // All other code that calls mutate will grab the gGlobalsMutateLock.
  // However, there is one case where the lock cannot be acquired, in the
  // signal handler that enables heapprofd. In order to avoid having two
  // threads calling mutate at the same time, use an atomic variable to
  // verify that only this function or the signal handler is calling mutate.
  // If this function is called at the same time as the signal handler is
  // being called, allow a short period for the signal handler to complete
  // before failing.
  bool enabled = false;
  size_t num_tries = 200;
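  // With the 1ms sleep below, this bounds the total wait at roughly 200ms.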
  while (true) {
    if (!atomic_exchange(&gGlobalsMutating, true)) {
      __libc_globals.mutate([](libc_globals* globals) {
        atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
      });
      atomic_store(&gGlobalsMutating, false);
      enabled = true;
      break;
    }
    if (--num_tries == 0) {
      break;
    }
    usleep(1000);
  }
  pthread_mutex_unlock(&gGlobalsMutateLock);
  if (enabled) {
    info_log("malloc_limit: Allocation limit enabled, max size %" PRId64 " bytes\n", gAllocLimit);
  } else {
    error_log("malloc_limit: Failed to enable allocation limit.");
  }
  return enabled;
}
#endif

bool LimitEnable(void* arg, size_t arg_size) {
  if (arg == nullptr || arg_size != sizeof(size_t)) {
    errno = EINVAL;
    return false;
  }

  static _Atomic bool limit_enabled;
  if (atomic_exchange(&limit_enabled, true)) {
    // The limit can only be enabled once.
    error_log("malloc_limit: The allocation limit has already been set; it can only be set once.");
    return false;
  }

  gAllocLimit = *reinterpret_cast<size_t*>(arg);
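  // Seed gAllocated with the bytes already allocated so the limit applies to
  // the whole heap footprint, not just allocations made from here on.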
#if __has_feature(hwaddress_sanitizer)
  size_t current_allocated = __sanitizer_get_current_allocated_bytes();
#else
  size_t current_allocated;
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    current_allocated = dispatch_table->mallinfo().uordblks;
  } else {
    current_allocated = Malloc(mallinfo)().uordblks;
  }
#endif
  // This has to be set before the enable occurs since "gAllocated" is used
  // to compute the limit. If the enable fails, "gAllocated" is never used.
  atomic_store(&gAllocated, current_allocated);

  if (!EnableLimitDispatchTable()) {
    // Failed to enable; reset so a future enable will pass.
    atomic_store(&limit_enabled, false);
    return false;
  }
  return true;
}
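
// Illustrative sketch (not part of this file): LimitEnable is normally
// reached through android_mallopt() from <malloc.h>. A caller wanting to cap
// its own heap might do something like:
//
//   size_t limit_bytes = 64 * 1024 * 1024;  // cap the process at 64 MiB
//   if (!android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit_bytes,
//                        sizeof(limit_bytes))) {
//     // Rejected: bad arguments, limit already set, or the enable failed.
//   }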
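// The remaining entry points do no limit accounting; they simply forward to
// the default dispatch table if one is installed (e.g. by heapprofd), and
// otherwise to the native allocator via Malloc().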
static size_t LimitUsableSize(const void* mem) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_usable_size(mem);
  }
  return Malloc(malloc_usable_size)(mem);
}

static struct mallinfo LimitMallinfo() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->mallinfo();
  }
  return Malloc(mallinfo)();
}

static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_iterate(base, size, callback, arg);
  }
  return Malloc(malloc_iterate)(base, size, callback, arg);
}

static void LimitMallocDisable() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    dispatch_table->malloc_disable();
  } else {
    Malloc(malloc_disable)();
  }
}

static void LimitMallocEnable() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    dispatch_table->malloc_enable();
  } else {
    Malloc(malloc_enable)();
  }
}

static int LimitMallocInfo(int options, FILE* fp) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_info(options, fp);
  }
  return Malloc(malloc_info)(options, fp);
}

static int LimitMallopt(int param, int value) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->mallopt(param, value);
  }
  return Malloc(mallopt)(param, value);
}
399}