blob: 3cc5bbd3d53a5dc23fd044399517cc22bbcb4685 [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
Colin Cross4c5595c2021-08-16 15:51:59 -070030#include <sys/cdefs.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080031#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080032#include <sys/types.h>
33#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070034#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070035
Mitch Phillips9cad8422021-01-20 16:03:27 -080036#include <algorithm>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080037#include <atomic>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080038#include <thread>
Mitch Phillips9cad8422021-01-20 16:03:27 -080039#include <vector>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080040
Dan Albert4caa1f02014-08-20 09:16:57 -070041#include <tinyxml2.h>
42
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080043#include <android-base/file.h>
Florian Mayer750dcd32022-04-15 15:54:47 -070044#include <android-base/test_utils.h>
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080045
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080046#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000047
Elliott Hughesb1770852018-09-18 12:52:42 -070048#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080049
Peter Collingbourne45819dd2020-01-09 11:00:43 -080050#include "SignalUtils.h"
Peter Collingbourne2659d7b2021-03-05 13:31:41 -080051#include "dlext_private.h"
Peter Collingbourne45819dd2020-01-09 11:00:43 -080052
Christopher Ferrisb874c332020-01-21 16:39:05 -080053#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070054#include "platform/bionic/mte.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080055#include "platform/bionic/reserved_signals.h"
56#include "private/bionic_config.h"
57
Elliott Hughesb1770852018-09-18 12:52:42 -070058#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080059
Colin Cross7da20342021-07-28 11:18:11 -070060#elif defined(__GLIBC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080061
Elliott Hughesb1770852018-09-18 12:52:42 -070062#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080063
Colin Cross4c5595c2021-08-16 15:51:59 -070064#elif defined(ANDROID_HOST_MUSL)
Colin Cross7da20342021-07-28 11:18:11 -070065
66#define HAVE_REALLOCARRAY 1
67
Elliott Hughesb1770852018-09-18 12:52:42 -070068#endif
69
Christopher Ferris885f3b92013-05-21 17:48:01 -070070TEST(malloc, malloc_std) {
71 // Simple malloc test.
72 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070073 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070074 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070075 free(ptr);
76}
77
Christopher Ferrisa4037802014-06-09 19:14:11 -070078TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080079 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070080 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070081 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -070082 ASSERT_EQ(ENOMEM, errno);
83}
84
Christopher Ferris885f3b92013-05-21 17:48:01 -070085TEST(malloc, calloc_std) {
86 // Simple calloc test.
87 size_t alloc_len = 100;
88 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070089 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070090 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
91 for (size_t i = 0; i < alloc_len; i++) {
92 ASSERT_EQ(0, ptr[i]);
93 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070094 free(ptr);
95}
96
Peter Collingbourne978eb162020-09-21 15:26:02 -070097TEST(malloc, calloc_mem_init_disabled) {
98#if defined(__BIONIC__)
99 // calloc should still zero memory if mem-init is disabled.
100 // With jemalloc the mallopts will fail but that shouldn't affect the
101 // execution of the test.
102 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
103 size_t alloc_len = 100;
104 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
105 for (size_t i = 0; i < alloc_len; i++) {
106 ASSERT_EQ(0, ptr[i]);
107 }
108 free(ptr);
109 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
110#else
111 GTEST_SKIP() << "bionic-only test";
112#endif
113}
114
Christopher Ferrisa4037802014-06-09 19:14:11 -0700115TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800116 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700117 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700118 ASSERT_EQ(nullptr, calloc(-1, 100));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700119 ASSERT_EQ(ENOMEM, errno);
120}
121
122TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800123 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700124 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700125 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700126 ASSERT_EQ(ENOMEM, errno);
127 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700128 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700129 ASSERT_EQ(ENOMEM, errno);
130 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700131 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700132 ASSERT_EQ(ENOMEM, errno);
133 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700134 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700135 ASSERT_EQ(ENOMEM, errno);
136}
137
Christopher Ferris885f3b92013-05-21 17:48:01 -0700138TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800139 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700140 // Memalign test where the alignment is any value.
141 for (size_t i = 0; i <= 12; i++) {
142 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700143 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700144 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700145 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
146 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
147 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700148 free(ptr);
149 }
150 }
151}
152
Christopher Ferrisa4037802014-06-09 19:14:11 -0700153TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800154 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700155 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700156}
157
158TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800159 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700160 void* ptr;
161 for (size_t align = 0; align <= 256; align++) {
162 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700163 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700164 free(ptr);
165 }
166}
167
Christopher Ferris885f3b92013-05-21 17:48:01 -0700168TEST(malloc, memalign_realloc) {
169 // Memalign and then realloc the pointer a couple of times.
170 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
171 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700172 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700173 ASSERT_LE(100U, malloc_usable_size(ptr));
174 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
175 memset(ptr, 0x23, 100);
176
177 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700178 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700179 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700180 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700181 for (size_t i = 0; i < 100; i++) {
182 ASSERT_EQ(0x23, ptr[i]);
183 }
184 memset(ptr, 0x45, 200);
185
186 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700187 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700188 ASSERT_LE(300U, malloc_usable_size(ptr));
189 for (size_t i = 0; i < 200; i++) {
190 ASSERT_EQ(0x45, ptr[i]);
191 }
192 memset(ptr, 0x67, 300);
193
194 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700195 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700196 ASSERT_LE(250U, malloc_usable_size(ptr));
197 for (size_t i = 0; i < 250; i++) {
198 ASSERT_EQ(0x67, ptr[i]);
199 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700200 free(ptr);
201 }
202}
203
204TEST(malloc, malloc_realloc_larger) {
205 // Realloc to a larger size, malloc is used for the original allocation.
206 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700207 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700208 ASSERT_LE(100U, malloc_usable_size(ptr));
209 memset(ptr, 67, 100);
210
211 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700212 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700213 ASSERT_LE(200U, malloc_usable_size(ptr));
214 for (size_t i = 0; i < 100; i++) {
215 ASSERT_EQ(67, ptr[i]);
216 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700217 free(ptr);
218}
219
220TEST(malloc, malloc_realloc_smaller) {
221 // Realloc to a smaller size, malloc is used for the original allocation.
222 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700223 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700224 ASSERT_LE(200U, malloc_usable_size(ptr));
225 memset(ptr, 67, 200);
226
227 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700228 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700229 ASSERT_LE(100U, malloc_usable_size(ptr));
230 for (size_t i = 0; i < 100; i++) {
231 ASSERT_EQ(67, ptr[i]);
232 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700233 free(ptr);
234}
235
236TEST(malloc, malloc_multiple_realloc) {
237 // Multiple reallocs, malloc is used for the original allocation.
238 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700239 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700240 ASSERT_LE(200U, malloc_usable_size(ptr));
241 memset(ptr, 0x23, 200);
242
243 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700244 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700245 ASSERT_LE(100U, malloc_usable_size(ptr));
246 for (size_t i = 0; i < 100; i++) {
247 ASSERT_EQ(0x23, ptr[i]);
248 }
249
250 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700251 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700252 ASSERT_LE(50U, malloc_usable_size(ptr));
253 for (size_t i = 0; i < 50; i++) {
254 ASSERT_EQ(0x23, ptr[i]);
255 }
256
257 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700258 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700259 ASSERT_LE(150U, malloc_usable_size(ptr));
260 for (size_t i = 0; i < 50; i++) {
261 ASSERT_EQ(0x23, ptr[i]);
262 }
263 memset(ptr, 0x23, 150);
264
265 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700266 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700267 ASSERT_LE(425U, malloc_usable_size(ptr));
268 for (size_t i = 0; i < 150; i++) {
269 ASSERT_EQ(0x23, ptr[i]);
270 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700271 free(ptr);
272}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700273
Christopher Ferris885f3b92013-05-21 17:48:01 -0700274TEST(malloc, calloc_realloc_larger) {
275 // Realloc to a larger size, calloc is used for the original allocation.
276 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700277 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700278 ASSERT_LE(100U, malloc_usable_size(ptr));
279
280 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700281 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700282 ASSERT_LE(200U, malloc_usable_size(ptr));
283 for (size_t i = 0; i < 100; i++) {
284 ASSERT_EQ(0, ptr[i]);
285 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700286 free(ptr);
287}
288
289TEST(malloc, calloc_realloc_smaller) {
290 // Realloc to a smaller size, calloc is used for the original allocation.
291 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700292 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700293 ASSERT_LE(200U, malloc_usable_size(ptr));
294
295 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700296 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700297 ASSERT_LE(100U, malloc_usable_size(ptr));
298 for (size_t i = 0; i < 100; i++) {
299 ASSERT_EQ(0, ptr[i]);
300 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700301 free(ptr);
302}
303
304TEST(malloc, calloc_multiple_realloc) {
305 // Multiple reallocs, calloc is used for the original allocation.
306 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700307 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700308 ASSERT_LE(200U, malloc_usable_size(ptr));
309
310 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700311 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700312 ASSERT_LE(100U, malloc_usable_size(ptr));
313 for (size_t i = 0; i < 100; i++) {
314 ASSERT_EQ(0, ptr[i]);
315 }
316
317 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700318 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700319 ASSERT_LE(50U, malloc_usable_size(ptr));
320 for (size_t i = 0; i < 50; i++) {
321 ASSERT_EQ(0, ptr[i]);
322 }
323
324 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700325 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700326 ASSERT_LE(150U, malloc_usable_size(ptr));
327 for (size_t i = 0; i < 50; i++) {
328 ASSERT_EQ(0, ptr[i]);
329 }
330 memset(ptr, 0, 150);
331
332 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700333 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700334 ASSERT_LE(425U, malloc_usable_size(ptr));
335 for (size_t i = 0; i < 150; i++) {
336 ASSERT_EQ(0, ptr[i]);
337 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700338 free(ptr);
339}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700340
Christopher Ferrisa4037802014-06-09 19:14:11 -0700341TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800342 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700343 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700344 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700345 ASSERT_EQ(ENOMEM, errno);
346 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700347 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700348 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700349 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700350 ASSERT_EQ(ENOMEM, errno);
351 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700352}
353
Dan Alberte5fdaa42014-06-14 01:04:31 +0000354#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
355extern "C" void* pvalloc(size_t);
356extern "C" void* valloc(size_t);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700357#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000358
Christopher Ferrisa4037802014-06-09 19:14:11 -0700359TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700360#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700361 size_t pagesize = sysconf(_SC_PAGESIZE);
362 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700363 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700364 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
365 ASSERT_LE(pagesize, malloc_usable_size(ptr));
366 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700367#else
368 GTEST_SKIP() << "pvalloc not supported.";
369#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700370}
371
372TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700373#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700374 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700375#else
376 GTEST_SKIP() << "pvalloc not supported.";
377#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700378}
379
380TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700381#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700382 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700383 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700384 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700385 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
386 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700387#else
388 GTEST_SKIP() << "valloc not supported.";
389#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700390}
391
392TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700393#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700394 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700395#else
396 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000397#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700398}
Dan Albert4caa1f02014-08-20 09:16:57 -0700399
400TEST(malloc, malloc_info) {
401#ifdef __BIONIC__
Evgenii Stepanov8de6b462019-03-22 13:22:28 -0700402 SKIP_WITH_HWASAN; // hwasan does not implement malloc_info
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800403
404 TemporaryFile tf;
405 ASSERT_TRUE(tf.fd != -1);
406 FILE* fp = fdopen(tf.fd, "w+");
407 tf.release();
408 ASSERT_TRUE(fp != nullptr);
409 ASSERT_EQ(0, malloc_info(0, fp));
410 ASSERT_EQ(0, fclose(fp));
411
412 std::string contents;
413 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
Dan Albert4caa1f02014-08-20 09:16:57 -0700414
415 tinyxml2::XMLDocument doc;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800416 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
Dan Albert4caa1f02014-08-20 09:16:57 -0700417
418 auto root = doc.FirstChildElement();
419 ASSERT_NE(nullptr, root);
420 ASSERT_STREQ("malloc", root->Name());
Christopher Ferris85169652019-10-09 18:41:55 -0700421 std::string version(root->Attribute("version"));
422 if (version == "jemalloc-1") {
Christopher Ferris6c619a02019-03-01 17:59:51 -0800423 auto arena = root->FirstChildElement();
424 for (; arena != nullptr; arena = arena->NextSiblingElement()) {
425 int val;
Dan Albert4caa1f02014-08-20 09:16:57 -0700426
Christopher Ferris6c619a02019-03-01 17:59:51 -0800427 ASSERT_STREQ("heap", arena->Name());
428 ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
429 ASSERT_EQ(tinyxml2::XML_SUCCESS,
430 arena->FirstChildElement("allocated-large")->QueryIntText(&val));
431 ASSERT_EQ(tinyxml2::XML_SUCCESS,
432 arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
433 ASSERT_EQ(tinyxml2::XML_SUCCESS,
434 arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
435 ASSERT_EQ(tinyxml2::XML_SUCCESS,
436 arena->FirstChildElement("bins-total")->QueryIntText(&val));
Dan Albert4caa1f02014-08-20 09:16:57 -0700437
Christopher Ferris6c619a02019-03-01 17:59:51 -0800438 auto bin = arena->FirstChildElement("bin");
439 for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
440 if (strcmp(bin->Name(), "bin") == 0) {
441 ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
442 ASSERT_EQ(tinyxml2::XML_SUCCESS,
443 bin->FirstChildElement("allocated")->QueryIntText(&val));
444 ASSERT_EQ(tinyxml2::XML_SUCCESS,
445 bin->FirstChildElement("nmalloc")->QueryIntText(&val));
446 ASSERT_EQ(tinyxml2::XML_SUCCESS,
447 bin->FirstChildElement("ndalloc")->QueryIntText(&val));
448 }
Dan Albert4caa1f02014-08-20 09:16:57 -0700449 }
450 }
Christopher Ferriscce88c02020-02-12 17:41:01 -0800451 } else if (version == "scudo-1") {
452 auto element = root->FirstChildElement();
453 for (; element != nullptr; element = element->NextSiblingElement()) {
454 int val;
455
456 ASSERT_STREQ("alloc", element->Name());
457 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
458 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
459 }
Christopher Ferris6c619a02019-03-01 17:59:51 -0800460 } else {
Christopher Ferriscce88c02020-02-12 17:41:01 -0800461 // Do not verify output for debug malloc.
462 ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
Dan Albert4caa1f02014-08-20 09:16:57 -0700463 }
464#endif
465}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800466
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700467TEST(malloc, malloc_info_matches_mallinfo) {
468#ifdef __BIONIC__
469 SKIP_WITH_HWASAN; // hwasan does not implement malloc_info
470
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800471 TemporaryFile tf;
472 ASSERT_TRUE(tf.fd != -1);
473 FILE* fp = fdopen(tf.fd, "w+");
474 tf.release();
475 ASSERT_TRUE(fp != nullptr);
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700476 size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800477 ASSERT_EQ(0, malloc_info(0, fp));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700478 size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800479 ASSERT_EQ(0, fclose(fp));
480
481 std::string contents;
482 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700483
484 tinyxml2::XMLDocument doc;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800485 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700486
487 size_t total_allocated_bytes = 0;
488 auto root = doc.FirstChildElement();
489 ASSERT_NE(nullptr, root);
490 ASSERT_STREQ("malloc", root->Name());
Christopher Ferris85169652019-10-09 18:41:55 -0700491 std::string version(root->Attribute("version"));
492 if (version == "jemalloc-1") {
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700493 auto arena = root->FirstChildElement();
494 for (; arena != nullptr; arena = arena->NextSiblingElement()) {
495 int val;
496
497 ASSERT_STREQ("heap", arena->Name());
498 ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
499 ASSERT_EQ(tinyxml2::XML_SUCCESS,
500 arena->FirstChildElement("allocated-large")->QueryIntText(&val));
501 total_allocated_bytes += val;
502 ASSERT_EQ(tinyxml2::XML_SUCCESS,
503 arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
504 total_allocated_bytes += val;
505 ASSERT_EQ(tinyxml2::XML_SUCCESS,
506 arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
507 total_allocated_bytes += val;
508 ASSERT_EQ(tinyxml2::XML_SUCCESS,
509 arena->FirstChildElement("bins-total")->QueryIntText(&val));
510 }
511 // The total needs to be between the mallinfo call before and after
512 // since malloc_info allocates some memory.
513 EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
514 EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
Christopher Ferriscce88c02020-02-12 17:41:01 -0800515 } else if (version == "scudo-1") {
516 auto element = root->FirstChildElement();
517 for (; element != nullptr; element = element->NextSiblingElement()) {
518 ASSERT_STREQ("alloc", element->Name());
519 int size;
520 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
521 int count;
522 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
523 total_allocated_bytes += size * count;
524 }
525 // Scudo only gives the information on the primary, so simply make
526 // sure that the value is non-zero.
527 EXPECT_NE(0U, total_allocated_bytes);
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700528 } else {
Christopher Ferriscce88c02020-02-12 17:41:01 -0800529 // Do not verify output for debug malloc.
530 ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700531 }
532#endif
533}
534
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800535TEST(malloc, calloc_usable_size) {
536 for (size_t size = 1; size <= 2048; size++) {
537 void* pointer = malloc(size);
538 ASSERT_TRUE(pointer != nullptr);
539 memset(pointer, 0xeb, malloc_usable_size(pointer));
540 free(pointer);
541
542 // We should get a previous pointer that has been set to non-zero.
543 // If calloc does not zero out all of the data, this will fail.
544 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
545 ASSERT_TRUE(pointer != nullptr);
546 size_t usable_size = malloc_usable_size(zero_mem);
547 for (size_t i = 0; i < usable_size; i++) {
548 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
549 }
550 free(zero_mem);
551 }
552}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800553
554TEST(malloc, malloc_0) {
555 void* p = malloc(0);
556 ASSERT_TRUE(p != nullptr);
557 free(p);
558}
559
560TEST(malloc, calloc_0_0) {
561 void* p = calloc(0, 0);
562 ASSERT_TRUE(p != nullptr);
563 free(p);
564}
565
566TEST(malloc, calloc_0_1) {
567 void* p = calloc(0, 1);
568 ASSERT_TRUE(p != nullptr);
569 free(p);
570}
571
572TEST(malloc, calloc_1_0) {
573 void* p = calloc(1, 0);
574 ASSERT_TRUE(p != nullptr);
575 free(p);
576}
577
578TEST(malloc, realloc_nullptr_0) {
579 // realloc(nullptr, size) is actually malloc(size).
580 void* p = realloc(nullptr, 0);
581 ASSERT_TRUE(p != nullptr);
582 free(p);
583}
584
585TEST(malloc, realloc_0) {
586 void* p = malloc(1024);
587 ASSERT_TRUE(p != nullptr);
588 // realloc(p, 0) is actually free(p).
589 void* p2 = realloc(p, 0);
590 ASSERT_TRUE(p2 == nullptr);
591}
Christopher Ferris72df6702016-02-11 15:51:31 -0800592
593constexpr size_t MAX_LOOPS = 200;
594
595// Make sure that memory returned by malloc is aligned to allow these data types.
596TEST(malloc, verify_alignment) {
597 uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
598 uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
599 long double** values_ldouble = new long double*[MAX_LOOPS];
600 // Use filler to attempt to force the allocator to get potentially bad alignments.
601 void** filler = new void*[MAX_LOOPS];
602
603 for (size_t i = 0; i < MAX_LOOPS; i++) {
604 // Check uint32_t pointers.
605 filler[i] = malloc(1);
606 ASSERT_TRUE(filler[i] != nullptr);
607
608 values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
609 ASSERT_TRUE(values_32[i] != nullptr);
610 *values_32[i] = i;
611 ASSERT_EQ(*values_32[i], i);
612 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));
613
614 free(filler[i]);
615 }
616
617 for (size_t i = 0; i < MAX_LOOPS; i++) {
618 // Check uint64_t pointers.
619 filler[i] = malloc(1);
620 ASSERT_TRUE(filler[i] != nullptr);
621
622 values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
623 ASSERT_TRUE(values_64[i] != nullptr);
624 *values_64[i] = 0x1000 + i;
625 ASSERT_EQ(*values_64[i], 0x1000 + i);
626 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));
627
628 free(filler[i]);
629 }
630
631 for (size_t i = 0; i < MAX_LOOPS; i++) {
632 // Check long double pointers.
633 filler[i] = malloc(1);
634 ASSERT_TRUE(filler[i] != nullptr);
635
636 values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
637 ASSERT_TRUE(values_ldouble[i] != nullptr);
638 *values_ldouble[i] = 5.5 + i;
639 ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
640 // 32 bit glibc has a long double size of 12 bytes, so hardcode the
641 // required alignment to 0x7.
642#if !defined(__BIONIC__) && !defined(__LP64__)
643 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
644#else
645 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
646#endif
647
648 free(filler[i]);
649 }
650
651 for (size_t i = 0; i < MAX_LOOPS; i++) {
652 free(values_32[i]);
653 free(values_64[i]);
654 free(values_ldouble[i]);
655 }
656
657 delete[] filler;
658 delete[] values_32;
659 delete[] values_64;
660 delete[] values_ldouble;
661}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700662
663TEST(malloc, mallopt_smoke) {
Colin Cross4c5595c2021-08-16 15:51:59 -0700664#if !defined(ANDROID_HOST_MUSL)
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700665 errno = 0;
666 ASSERT_EQ(0, mallopt(-1000, 1));
667 // mallopt doesn't set errno.
668 ASSERT_EQ(0, errno);
Colin Cross7da20342021-07-28 11:18:11 -0700669#else
670 GTEST_SKIP() << "musl doesn't have mallopt";
671#endif
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700672}
Elliott Hughesb1770852018-09-18 12:52:42 -0700673
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800674TEST(malloc, mallopt_decay) {
675#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800676 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800677 errno = 0;
678 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
679 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
680 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
681 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
682#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800683 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800684#endif
685}
686
// M_PURGE (release free heap pages back to the OS) must be accepted and
// report success.
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
696
Christopher Ferris88448792020-07-28 14:15:31 -0700697#if defined(__BIONIC__)
698static void GetAllocatorVersion(bool* allocator_scudo) {
699 TemporaryFile tf;
700 ASSERT_TRUE(tf.fd != -1);
701 FILE* fp = fdopen(tf.fd, "w+");
702 tf.release();
703 ASSERT_TRUE(fp != nullptr);
Evgenii Stepanov4edbcee2021-09-17 14:59:15 -0700704 if (malloc_info(0, fp) != 0) {
705 *allocator_scudo = false;
706 return;
707 }
Christopher Ferris88448792020-07-28 14:15:31 -0700708 ASSERT_EQ(0, fclose(fp));
709
710 std::string contents;
711 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
712
713 tinyxml2::XMLDocument doc;
714 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
715
716 auto root = doc.FirstChildElement();
717 ASSERT_NE(nullptr, root);
718 ASSERT_STREQ("malloc", root->Name());
719 std::string version(root->Attribute("version"));
720 *allocator_scudo = (version == "scudo-1");
721}
722#endif
723
724TEST(malloc, mallopt_scudo_only_options) {
725#if defined(__BIONIC__)
726 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
727 bool allocator_scudo;
728 GetAllocatorVersion(&allocator_scudo);
729 if (!allocator_scudo) {
730 GTEST_SKIP() << "scudo allocator only test";
731 }
732 ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
733 ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
734 ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
735#else
736 GTEST_SKIP() << "bionic-only test";
737#endif
738}
739
Elliott Hughesb1770852018-09-18 12:52:42 -0700740TEST(malloc, reallocarray_overflow) {
741#if HAVE_REALLOCARRAY
742 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
743 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
744 size_t b = 2;
745
746 errno = 0;
747 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
748 ASSERT_EQ(ENOMEM, errno);
749
750 errno = 0;
751 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
752 ASSERT_EQ(ENOMEM, errno);
753#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800754 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700755#endif
756}
757
// Basic reallocarray success path: a 2x32-byte request behaves like
// malloc(64), so the usable size must be at least 64 bytes.
TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  void* p = reallocarray(nullptr, 2, 32);
  ASSERT_TRUE(p != nullptr);
  ASSERT_GE(malloc_usable_size(p), 64U);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800767
// mallinfo().uordblks (bytes in in-use blocks) must grow when allocations of
// various sizes are made. Allocations may be absorbed by thread caches, so
// each size is retried up to kMaxAllocs times until the counter visibly
// increases by at least the allocation's usable size.
TEST(malloc, mallinfo) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      // Sample the counter immediately before and after each allocation.
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // ptrs was zero-initialized, so unused slots hold nullptr and free(nullptr)
    // is a no-op.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000812
Christopher Ferris8248e622021-12-03 13:55:57 -0800813TEST(malloc, mallinfo2) {
Colin Crossfdced952022-01-24 18:15:07 -0800814#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
Christopher Ferris8248e622021-12-03 13:55:57 -0800815 SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
816 static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};
817
818 constexpr static size_t kMaxAllocs = 50;
819
820 for (size_t size : sizes) {
821 // If some of these allocations are stuck in a thread cache, then keep
822 // looping until we make an allocation that changes the total size of the
823 // memory allocated.
824 // jemalloc implementations counts the thread cache allocations against
825 // total memory allocated.
826 void* ptrs[kMaxAllocs] = {};
827 bool pass = false;
828 for (size_t i = 0; i < kMaxAllocs; i++) {
829 struct mallinfo info = mallinfo();
830 struct mallinfo2 info2 = mallinfo2();
831 // Verify that mallinfo and mallinfo2 are exactly the same.
Colin Crossfdced952022-01-24 18:15:07 -0800832 ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
833 ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
834 ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
835 ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
836 ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
837 ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
838 ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
839 ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
840 ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
841 ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);
Christopher Ferris8248e622021-12-03 13:55:57 -0800842
843 size_t allocated = info2.uordblks;
844 ptrs[i] = malloc(size);
845 ASSERT_TRUE(ptrs[i] != nullptr);
846
847 info = mallinfo();
848 info2 = mallinfo2();
849 // Verify that mallinfo and mallinfo2 are exactly the same.
Colin Crossfdced952022-01-24 18:15:07 -0800850 ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
851 ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
852 ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
853 ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
854 ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
855 ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
856 ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
857 ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
858 ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
859 ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);
Christopher Ferris8248e622021-12-03 13:55:57 -0800860
861 size_t new_allocated = info2.uordblks;
862 if (allocated != new_allocated) {
863 size_t usable_size = malloc_usable_size(ptrs[i]);
864 // Only check if the total got bigger by at least allocation size.
865 // Sometimes the mallinfo2 numbers can go backwards due to compaction
866 // and/or freeing of cached data.
867 if (new_allocated >= allocated + usable_size) {
868 pass = true;
869 break;
870 }
871 }
872 }
873 for (void* ptr : ptrs) {
874 free(ptr);
875 }
876 ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after "
877 << kMaxAllocs << " allocations.";
878 }
879#else
880 GTEST_SKIP() << "glibc is broken";
881#endif
882}
883
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800884template <typename Type>
885void __attribute__((optnone)) VerifyAlignment(Type* floating) {
886 size_t expected_alignment = alignof(Type);
887 if (expected_alignment != 0) {
888 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
889 << "Expected alignment " << expected_alignment << " ptr value " << floating;
890 }
891}
892
// Verifies alignment of Type across three allocation paths: operator new,
// malloc, and std::vector storage. Runs many consecutive allocations to
// expose the worst-case placement of bin-based allocators.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    // VerifyAlignment's ASSERT only returns from that helper, so propagate
    // fatal failures manually.
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
935
#if defined(__ANDROID__)
// Allocates 100 consecutive blocks of |alloc_size| and asserts each is
// aligned to at least |aligned_bytes| (a power of two). The blocks are
// intentionally not freed between iterations — presumably so the allocator
// must hand out fresh (not reused) blocks, probing every bin slot; note this
// leaks the allocations. TODO(review): confirm the leak is deliberate.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
}
#endif
949
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700950void AlignCheck() {
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800951 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
952 // for a discussion of type alignment.
953 ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
954 ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
955 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());
956
957 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
958 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
959 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
960 ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
961 ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
962 ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
963 ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
964 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
965 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
966 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
967 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
968 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
969 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
970 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());
971
972#if defined(__ANDROID__)
973 // On Android, there is a lot of code that expects certain alignments:
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700974 // 1. Allocations of a size that rounds up to a multiple of 16 bytes
975 // must have at least 16 byte alignment.
976 // 2. Allocations of a size that rounds up to a multiple of 8 bytes and
977 // not 16 bytes, are only required to have at least 8 byte alignment.
978 // In addition, on Android clang has been configured for 64 bit such that:
979 // 3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
980 // 4. Allocations > 8 bytes must be aligned to at least 16 bytes.
981 // For 32 bit environments, only the first two requirements must be met.
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800982
983 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
984 // a discussion of this alignment mess. The code below is enforcing
985 // strong-alignment, since who knows what code depends on this behavior now.
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700986 // As mentioned before, for 64 bit this will enforce the higher
987 // requirement since clang expects this behavior on Android now.
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800988 for (size_t i = 1; i <= 128; i++) {
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700989#if defined(__LP64__)
990 if (i <= 8) {
991 AndroidVerifyAlignment(i, 8);
992 } else {
993 AndroidVerifyAlignment(i, 16);
994 }
995#else
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800996 size_t rounded = (i + 7) & ~7;
997 if ((rounded % 16) == 0) {
998 AndroidVerifyAlignment(i, 16);
999 } else {
1000 AndroidVerifyAlignment(i, 8);
1001 }
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001002#endif
Christopher Ferrisf32494c2020-01-08 14:19:10 -08001003 if (::testing::Test::HasFatalFailure()) {
1004 return;
1005 }
1006 }
1007#endif
1008}
1009
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001010TEST(malloc, align_check) {
1011 AlignCheck();
1012}
1013
Christopher Ferris201dcf42020-01-29 13:09:31 -08001014// Jemalloc doesn't pass this test right now, so leave it as disabled.
1015TEST(malloc, DISABLED_alloc_after_fork) {
1016 // Both of these need to be a power of 2.
1017 static constexpr size_t kMinAllocationSize = 8;
1018 static constexpr size_t kMaxAllocationSize = 2097152;
1019
1020 static constexpr size_t kNumAllocatingThreads = 5;
1021 static constexpr size_t kNumForkLoops = 100;
1022
1023 std::atomic_bool stop;
1024
1025 // Create threads that simply allocate and free different sizes.
1026 std::vector<std::thread*> threads;
1027 for (size_t i = 0; i < kNumAllocatingThreads; i++) {
1028 std::thread* t = new std::thread([&stop] {
1029 while (!stop) {
1030 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001031 void* ptr;
1032 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001033 free(ptr);
1034 }
1035 }
1036 });
1037 threads.push_back(t);
1038 }
1039
1040 // Create a thread to fork and allocate.
1041 for (size_t i = 0; i < kNumForkLoops; i++) {
1042 pid_t pid;
1043 if ((pid = fork()) == 0) {
1044 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001045 void* ptr;
1046 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001047 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris201dcf42020-01-29 13:09:31 -08001048 // Make sure we can touch all of the allocation.
1049 memset(ptr, 0x1, size);
1050 ASSERT_LE(size, malloc_usable_size(ptr));
1051 free(ptr);
1052 }
1053 _exit(10);
1054 }
1055 ASSERT_NE(-1, pid);
1056 AssertChildExited(pid, 10);
1057 }
1058
1059 stop = true;
1060 for (auto thread : threads) {
1061 thread->join();
1062 delete thread;
1063 }
1064}
1065
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001066TEST(android_mallopt, error_on_unexpected_option) {
1067#if defined(__BIONIC__)
1068 const int unrecognized_option = -1;
1069 errno = 0;
1070 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
1071 EXPECT_EQ(ENOTSUP, errno);
1072#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001073 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001074#endif
1075}
1076
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001077bool IsDynamic() {
1078#if defined(__LP64__)
1079 Elf64_Ehdr ehdr;
1080#else
1081 Elf32_Ehdr ehdr;
1082#endif
1083 std::string path(android::base::GetExecutablePath());
1084
1085 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
1086 if (fd == -1) {
1087 // Assume dynamic on error.
1088 return true;
1089 }
1090 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
1091 close(fd);
1092 // Assume dynamic in error cases.
1093 return !read_completed || ehdr.e_type == ET_DYN;
1094}
1095
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001096TEST(android_mallopt, init_zygote_child_profiling) {
1097#if defined(__BIONIC__)
1098 // Successful call.
1099 errno = 0;
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001100 if (IsDynamic()) {
1101 EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1102 EXPECT_EQ(0, errno);
1103 } else {
1104 // Not supported in static executables.
1105 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1106 EXPECT_EQ(ENOTSUP, errno);
1107 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001108
1109 // Unexpected arguments rejected.
1110 errno = 0;
1111 char unexpected = 0;
1112 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001113 if (IsDynamic()) {
1114 EXPECT_EQ(EINVAL, errno);
1115 } else {
1116 EXPECT_EQ(ENOTSUP, errno);
1117 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001118#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001119 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001120#endif
1121}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001122
#if defined(__BIONIC__)
// Runs inside a forked child (via EXPECT_EXIT): installs a 128MB allocation
// limit, then checks that |func| (an allocation predicate returning success)
// succeeds below the limit and fails above it. Exits 0 on expected behavior,
// 1 otherwise.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  // A 20MB request must still fit under the limit.
  if (!func(20 * 1024 * 1024))
    exit(1);
  // A request the size of the whole limit must be rejected.
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif
1136
// Every allocation entry point (calloc, malloc, memalign, posix_memalign,
// aligned_alloc, realloc, and the 32-bit-only pvalloc/valloc) must honor the
// M_SET_ALLOCATION_LIMIT_BYTES limit. Each check runs in its own child
// process (EXPECT_EXIT) because the limit can only be set once per process.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
#if !defined(__LP64__)
  // pvalloc/valloc are only kept for 32-bit backwards compatibility.
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1171
// The allocation limit is one-shot per process: the first
// M_SET_ALLOCATION_LIMIT_BYTES succeeds, any later attempt must fail.
TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1183
1184#if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

// Allocates kAllocationSize chunks until malloc fails and returns how many
// succeeded before the failure. Returns 0 when the limit is never reached
// (all attempts succeed). All successful allocations are freed before
// returning — including on the never-reached path, where the original code
// leaked every chunk because its free loop was bounded by the 0 result.
static size_t GetMaxAllocations() {
  size_t max_pointers = 0;
  void* ptrs[20];
  size_t allocated = 0;
  while (allocated < sizeof(ptrs) / sizeof(void*)) {
    ptrs[allocated] = malloc(kAllocationSize);
    if (ptrs[allocated] == nullptr) {
      max_pointers = allocated;
      break;
    }
    allocated++;
  }
  // Free everything that was actually allocated, regardless of whether the
  // limit was hit.
  for (size_t i = 0; i < allocated; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}
1202
// Verifies the allocation limit is still in force: exactly |max_pointers|
// kAllocationSize chunks can be allocated again, and the next one fails.
static void VerifyMaxPointers(size_t max_pointers) {
  // Now verify that we can allocate the same number as before.
  void* ptrs[20];
  for (size_t i = 0; i < max_pointers; i++) {
    ptrs[i] = malloc(kAllocationSize);
    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
  }

  // Make sure the next allocation still fails.
  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
}
1217#endif
1218
// Growing an allocation with realloc must count against the limit: growth
// below 128MB succeeds, and the step past the limit returns nullptr. The
// limit accounting must remain intact afterwards (VerifyMaxPointers).
TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Increase size.
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  // Now push past limit.
  memory = realloc(memory, 130 * 1024 * 1024);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1248
// Shrinking an allocation with realloc must release accounting back to the
// limit; afterwards the original allocation capacity is still available.
TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Decrease size.
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1276
// realloc(ptr, 0) acts as free and must return the bytes to the limit
// accounting; afterwards the original allocation capacity is unchanged.
TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // realloc to size 0 frees the block and yields nullptr.
  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1296
1297#if defined(__BIONIC__)
1298static void* SetAllocationLimit(void* data) {
1299 std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
1300 while (!go->load()) {
1301 }
1302 size_t limit = 500 * 1024 * 1024;
1303 if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
1304 return reinterpret_cast<void*>(-1);
1305 }
1306 return nullptr;
1307}
1308
// Races four threads calling M_SET_ALLOCATION_LIMIT_BYTES simultaneously
// (while the heapprofd signal handler is being triggered) and asserts that
// exactly one of them succeeds. Runs in a forked child; exits 0 on success.
static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Count the threads that reported winning the race (non-null return).
  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  ASSERT_EQ(1U, num_successful);
  exit(0);
}
1338#endif
1339
// Stress-runs the multi-threaded limit race 100 times, each in a forked
// child so every iteration starts with no limit installed.
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    // Install the heapprofd hooks so the race also runs against them.
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001362
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001363#if defined(__BIONIC__)
Mitch Phillipse6997d52020-11-30 15:04:14 -08001364using Action = android_mallopt_gwp_asan_options_t::Action;
1365TEST(android_mallopt, DISABLED_multiple_enable_gwp_asan) {
1366 android_mallopt_gwp_asan_options_t options;
1367 options.program_name = ""; // Don't infer GWP-ASan options from sysprops.
1368 options.desire = Action::DONT_TURN_ON_UNLESS_OVERRIDDEN;
1369 // GWP-ASan should already be enabled. Trying to enable or disable it should
1370 // always pass.
1371 ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
1372 options.desire = Action::TURN_ON_WITH_SAMPLING;
1373 ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
1374}
1375#endif // defined(__BIONIC__)
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001376
Mitch Phillipse6997d52020-11-30 15:04:14 -08001377TEST(android_mallopt, multiple_enable_gwp_asan) {
1378#if defined(__BIONIC__)
1379 // Always enable GWP-Asan, with default options.
1380 RunGwpAsanTest("*.DISABLED_multiple_enable_gwp_asan");
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001381#else
1382 GTEST_SKIP() << "bionic extension";
1383#endif
1384}
1385
Florian Mayercc61ad82022-08-31 11:43:30 -07001386TEST(android_mallopt, memtag_stack_is_on) {
1387#if defined(__BIONIC__)
1388 bool memtag_stack;
1389 EXPECT_TRUE(android_mallopt(M_MEMTAG_STACK_IS_ON, &memtag_stack, sizeof(memtag_stack)));
1390#else
1391 GTEST_SKIP() << "bionic extension";
1392#endif
1393}
1394
Mitch Phillips9cad8422021-01-20 16:03:27 -08001395void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1396 std::vector<void*> allocs;
1397 constexpr int kMaxBytesToCheckZero = 64;
1398 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1399
1400 for (int i = 0; i < num_iterations; ++i) {
1401 int size = get_alloc_size(i);
1402 allocs.push_back(malloc(size));
1403 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1404 }
1405
1406 for (void* alloc : allocs) {
1407 free(alloc);
1408 }
1409 allocs.clear();
1410
1411 for (int i = 0; i < num_iterations; ++i) {
1412 int size = get_alloc_size(i);
1413 allocs.push_back(malloc(size));
1414 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1415 }
1416
1417 for (void* alloc : allocs) {
1418 free(alloc);
1419 }
1420}
1421
1422TEST(malloc, zero_init) {
1423#if defined(__BIONIC__)
1424 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
1425 bool allocator_scudo;
1426 GetAllocatorVersion(&allocator_scudo);
1427 if (!allocator_scudo) {
1428 GTEST_SKIP() << "scudo allocator only test";
1429 }
1430
1431 mallopt(M_BIONIC_ZERO_INIT, 1);
1432
1433 // Test using a block of 4K small (1-32 byte) allocations.
1434 TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
1435 return 1 + iteration % 32;
1436 });
1437
1438 // Also test large allocations that land in the scudo secondary, as this is
1439 // the only part of Scudo that's changed by enabling zero initialization with
1440 // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
1441 // release secondary allocations back to the OS) was modified to 0ms/1ms by
1442 // mallopt_decay. Ensure that we delay for at least a second before releasing
1443 // pages to the OS in order to avoid implicit zeroing by the kernel.
1444 mallopt(M_DECAY_TIME, 1000);
1445 TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
1446 return 1 << (19 + iteration % 4);
1447 });
1448
1449#else
1450 GTEST_SKIP() << "bionic-only test";
1451#endif
1452}
1453
// Note that MTE is enabled on cc_tests on devices that support MTE.
// Disabling heap tagging via mallopt must clear the MTE tag-check-fault mode
// (PR_MTE_TCF_*) for the calling thread AND for other already-running
// threads. A second thread samples its prctl state only after the main
// thread has disabled tagging (sequenced via the semaphore).
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     // Block until the main thread has disabled tagging, then
                     // report this thread's tagged-address control bits.
                     sem_wait(sem);
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  // Tag-check faults must be off for this thread...
  ASSERT_EQ(static_cast<unsigned long>(PR_MTE_TCF_NONE), my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  // ...and the other thread must observe the identical control word.
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne2659d7b2021-03-05 13:31:41 -08001488
1489TEST(malloc, allocation_slack) {
1490#if defined(__BIONIC__)
Christopher Ferris7c0ce862021-06-08 15:33:22 -07001491 SKIP_WITH_NATIVE_BRIDGE; // http://b/189606147
1492
Peter Collingbourne2659d7b2021-03-05 13:31:41 -08001493 bool allocator_scudo;
1494 GetAllocatorVersion(&allocator_scudo);
1495 if (!allocator_scudo) {
1496 GTEST_SKIP() << "scudo allocator only test";
1497 }
1498
1499 // Test that older target SDK levels let you access a few bytes off the end of
1500 // a large allocation.
1501 android_set_application_target_sdk_version(29);
1502 auto p = std::make_unique<char[]>(131072);
1503 volatile char *vp = p.get();
1504 volatile char oob ATTRIBUTE_UNUSED = vp[131072];
1505#else
1506 GTEST_SKIP() << "bionic extension";
1507#endif
1508}
Evgenii Stepanovf0d7a342021-11-16 17:34:39 -08001509
1510// Regression test for b/206701345 -- scudo bug, MTE only.
1511// Fix: https://reviews.llvm.org/D105261
1512// Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
1513TEST(malloc, realloc_mte_crash_b206701345) {
1514 // We want to hit in-place realloc at the very end of an mmap-ed region. Not
1515 // all size classes allow such placement - mmap size has to be divisible by
1516 // the block size. At the time of writing this could only be reproduced with
1517 // 64 byte size class (i.e. 48 byte allocations), but that may change in the
1518 // future. Try several different classes at the lower end.
1519 std::vector<void*> ptrs(10000);
1520 for (int i = 1; i < 32; ++i) {
1521 size_t sz = 16 * i - 1;
1522 for (void*& p : ptrs) {
1523 p = realloc(malloc(sz), sz + 1);
1524 }
1525
1526 for (void* p : ptrs) {
1527 free(p);
1528 }
1529 }
1530}