/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#include <private/bionic_malloc_dispatch.h>

#if __has_feature(hwaddress_sanitizer)
#include <sanitizer/allocator_interface.h>
#endif

#include "malloc_common.h"
#include "malloc_common_dynamic.h"
#include "malloc_heapprofd.h"
#include "malloc_limit.h"

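// malloc_limit implements a process-wide cap on heap usage: each allocation
// entry point is wrapped so that a running total of allocated bytes
// (gAllocated) is checked against a fixed limit (gAllocLimit) before the
// request is forwarded to the underlying allocator.
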
__BEGIN_DECLS
static void* LimitCalloc(size_t n_elements, size_t elem_size);
static void LimitFree(void* mem);
static void* LimitMalloc(size_t bytes);
static void* LimitMemalign(size_t alignment, size_t bytes);
static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size);
static void* LimitRealloc(void* old_mem, size_t bytes);
static void* LimitAlignedAlloc(size_t alignment, size_t size);
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes);
static void* LimitValloc(size_t bytes);
#endif

// Pass through functions.
static size_t LimitUsableSize(const void* mem);
static struct mallinfo LimitMallinfo();
static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg);
static void LimitMallocDisable();
static void LimitMallocEnable();
static int LimitMallocInfo(int options, FILE* fp);
static int LimitMallopt(int param, int value);
__END_DECLS

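// Note: the initializers below are positional, so they must stay in the
// same order as the fields of the MallocDispatch struct.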
static constexpr MallocDispatch __limit_dispatch
  __attribute__((unused)) = {
    LimitCalloc,
    LimitFree,
    LimitMallinfo,
    LimitMalloc,
    LimitUsableSize,
    LimitMemalign,
    LimitPosixMemalign,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitPvalloc,
#endif
    LimitRealloc,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitValloc,
#endif
    LimitIterate,
    LimitMallocDisable,
    LimitMallocEnable,
    LimitMallopt,
    LimitAlignedAlloc,
    LimitMallocInfo,
  };

static _Atomic uint64_t gAllocated;
static uint64_t gAllocLimit;

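// Returns true if allocating "bytes" more would keep the running total at
// or below gAllocLimit. The load of gAllocated is relaxed and the check is
// not atomic with the later increment, so concurrent allocations can
// slightly overshoot the limit; __builtin_add_overflow guards against the
// total wrapping around.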
static inline bool CheckLimit(size_t bytes) {
  uint64_t total;
  if (__predict_false(__builtin_add_overflow(
                          atomic_load_explicit(&gAllocated, memory_order_relaxed), bytes, &total) ||
                      total > gAllocLimit)) {
    return false;
  }
  return true;
}

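// On success, adds the allocation's actual usable size (which may be larger
// than the requested size) to the running total, so gAllocated tracks what
// the underlying allocator really handed out.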
static inline void* IncrementLimit(void* mem) {
  if (__predict_false(mem == nullptr)) {
    return nullptr;
  }
  atomic_fetch_add(&gAllocated, LimitUsableSize(mem));
  return mem;
}

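// Each wrapper below follows the same pattern: verify the limit, forward
// the call to the default dispatch table if one is installed (for example,
// when heapprofd is layered underneath), otherwise call the native
// allocator directly, then account the result via IncrementLimit().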
void* LimitCalloc(size_t n_elements, size_t elem_size) {
  size_t total;
  if (__builtin_mul_overflow(n_elements, elem_size, &total) || !CheckLimit(total)) {
    warning_log("malloc_limit: calloc(%zu, %zu) exceeds limit %" PRId64, n_elements, elem_size,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->calloc(n_elements, elem_size));
  }
  return IncrementLimit(Malloc(calloc)(n_elements, elem_size));
}

void LimitFree(void* mem) {
  atomic_fetch_sub(&gAllocated, LimitUsableSize(mem));
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->free(mem);
  }
  return Malloc(free)(mem);
}

void* LimitMalloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: malloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->malloc(bytes));
  }
  return IncrementLimit(Malloc(malloc)(bytes));
}

static void* LimitMemalign(size_t alignment, size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: memalign(%zu, %zu) exceeds limit %" PRId64, alignment, bytes,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->memalign(alignment, bytes));
  }
  return IncrementLimit(Malloc(memalign)(alignment, bytes));
}

static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size) {
  if (!CheckLimit(size)) {
    warning_log("malloc_limit: posix_memalign(%zu, %zu) exceeds limit %" PRId64, alignment, size,
                gAllocLimit);
    return ENOMEM;
  }
  int retval;
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    retval = dispatch_table->posix_memalign(memptr, alignment, size);
  } else {
    retval = Malloc(posix_memalign)(memptr, alignment, size);
  }
  if (__predict_false(retval != 0)) {
    return retval;
  }
  IncrementLimit(*memptr);
  return 0;
}

static void* LimitAlignedAlloc(size_t alignment, size_t size) {
  if (!CheckLimit(size)) {
    warning_log("malloc_limit: aligned_alloc(%zu, %zu) exceeds limit %" PRId64, alignment, size,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->aligned_alloc(alignment, size));
  }
  return IncrementLimit(Malloc(aligned_alloc)(alignment, size));
}

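// realloc accounting is the subtle case: only growth beyond the old usable
// size is checked against the limit. Note that, unlike a standard realloc,
// an over-limit request frees the old pointer before returning nullptr.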
static void* LimitRealloc(void* old_mem, size_t bytes) {
  size_t old_usable_size = LimitUsableSize(old_mem);
  void* new_ptr;
  // Need to check the size only if the allocation will increase in size.
  if (bytes > old_usable_size && !CheckLimit(bytes - old_usable_size)) {
    warning_log("malloc_limit: realloc(%p, %zu) exceeds limit %" PRId64, old_mem, bytes,
                gAllocLimit);
    // Free the old pointer.
    LimitFree(old_mem);
    return nullptr;
  }

  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    new_ptr = dispatch_table->realloc(old_mem, bytes);
  } else {
    new_ptr = Malloc(realloc)(old_mem, bytes);
  }

  if (__predict_false(new_ptr == nullptr)) {
    // This acts as if the pointer was freed.
    atomic_fetch_sub(&gAllocated, old_usable_size);
    return nullptr;
  }

  size_t new_usable_size = LimitUsableSize(new_ptr);
  // Assumes that most allocations increase in size, rather than shrink.
  if (__predict_false(old_usable_size > new_usable_size)) {
    atomic_fetch_sub(&gAllocated, old_usable_size - new_usable_size);
  } else {
    atomic_fetch_add(&gAllocated, new_usable_size - old_usable_size);
  }
  return new_ptr;
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: pvalloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->pvalloc(bytes));
  }
  return IncrementLimit(Malloc(pvalloc)(bytes));
}

static void* LimitValloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: valloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->valloc(bytes));
  }
  return IncrementLimit(Malloc(valloc)(bytes));
}
#endif

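// Returns true if the limit hooks are the currently installed dispatch table.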
bool MallocLimitInstalled() {
  return GetDispatchTable() == &__limit_dispatch;
}

#if defined(LIBC_STATIC)
static bool EnableLimitDispatchTable() {
  // This is the only valid way to modify the dispatch tables for a
  // static executable so no locks are necessary.
  __libc_globals.mutate([](libc_globals* globals) {
    atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
  });
  return true;
}
#else
static bool EnableLimitDispatchTable() {
  pthread_mutex_lock(&gGlobalsMutateLock);
  // All other code that calls mutate will grab the gGlobalsMutateLock.
  // However, there is one case where the lock cannot be acquired: in the
  // signal handler that enables heapprofd. In order to avoid having two
  // threads calling mutate at the same time, use an atomic variable to
  // verify that only this function or the signal handler are calling mutate.
  // If this function is called at the same time as the signal handler is
  // being called, allow a short period for the signal handler to complete
  // before failing.
  bool enabled = false;
  size_t num_tries = 200;
  while (true) {
    if (!atomic_exchange(&gGlobalsMutating, true)) {
      __libc_globals.mutate([](libc_globals* globals) {
        atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
      });
      atomic_store(&gGlobalsMutating, false);
      enabled = true;
      break;
    }
    if (--num_tries == 0) {
      break;
    }
    usleep(1000);
  }
  pthread_mutex_unlock(&gGlobalsMutateLock);
  if (enabled) {
    info_log("malloc_limit: Allocation limit enabled, max size %" PRId64 " bytes\n", gAllocLimit);
  } else {
    error_log("malloc_limit: Failed to enable allocation limit.");
  }
  return enabled;
}
#endif

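// LimitEnable is the entry point that arms the limit. A minimal sketch of
// how a process would reach it (assuming bionic's platform header
// <bionic/malloc.h>, which declares android_mallopt() and the
// M_SET_ALLOCATION_LIMIT_BYTES opcode that routes here):
//
//   size_t limit = 100 * 1024 * 1024;  // Cap the heap at 100 MiB.
//   if (!android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
//     // Invalid arguments, or a limit was already set once before.
//   }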
bool LimitEnable(void* arg, size_t arg_size) {
  if (arg == nullptr || arg_size != sizeof(size_t)) {
    errno = EINVAL;
    return false;
  }

  static _Atomic bool limit_enabled;
  if (atomic_exchange(&limit_enabled, true)) {
    // The limit can only be enabled once.
    error_log("malloc_limit: The allocation limit has already been set, it can only be set once.");
    return false;
  }

  gAllocLimit = *reinterpret_cast<size_t*>(arg);
#if __has_feature(hwaddress_sanitizer)
  size_t current_allocated = __sanitizer_get_current_allocated_bytes();
#else
  size_t current_allocated;
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    current_allocated = dispatch_table->mallinfo().uordblks;
  } else {
    current_allocated = Malloc(mallinfo)().uordblks;
  }
#endif
  // This has to be set before the enable occurs since "gAllocated" is used
  // to compute the limit. If the enable fails, "gAllocated" is never used.
  atomic_store(&gAllocated, current_allocated);

  if (!EnableLimitDispatchTable()) {
    // Failed to enable; reset so a future enable will pass.
    atomic_store(&limit_enabled, false);
    return false;
  }
  return true;
}

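// The remaining functions add no limit logic of their own; they simply
// forward to whichever allocator is active.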
static size_t LimitUsableSize(const void* mem) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_usable_size(mem);
  }
  return Malloc(malloc_usable_size)(mem);
}

static struct mallinfo LimitMallinfo() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->mallinfo();
  }
  return Malloc(mallinfo)();
}

static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_iterate(base, size, callback, arg);
  }
  return Malloc(malloc_iterate)(base, size, callback, arg);
}

static void LimitMallocDisable() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    dispatch_table->malloc_disable();
  } else {
    Malloc(malloc_disable)();
  }
}

static void LimitMallocEnable() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    dispatch_table->malloc_enable();
  } else {
    Malloc(malloc_enable)();
  }
}

static int LimitMallocInfo(int options, FILE* fp) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_info(options, fp);
  }
  return Malloc(malloc_info)(options, fp);
}

static int LimitMallopt(int param, int value) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->mallopt(param, value);
  }
  return Malloc(mallopt)(param, value);
}