blob: 1b875cdf19745eaae2decc1515b850032694c9f7 [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
Colin Cross4c5595c2021-08-16 15:51:59 -070030#include <sys/cdefs.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080031#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080032#include <sys/types.h>
33#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070034#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070035
Mitch Phillips9cad8422021-01-20 16:03:27 -080036#include <algorithm>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080037#include <atomic>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080038#include <thread>
Mitch Phillips9cad8422021-01-20 16:03:27 -080039#include <vector>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080040
Dan Albert4caa1f02014-08-20 09:16:57 -070041#include <tinyxml2.h>
42
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080043#include <android-base/file.h>
Florian Mayer750dcd32022-04-15 15:54:47 -070044#include <android-base/test_utils.h>
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080045
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080046#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000047
Elliott Hughesb1770852018-09-18 12:52:42 -070048#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080049
Peter Collingbourne45819dd2020-01-09 11:00:43 -080050#include "SignalUtils.h"
Peter Collingbourne2659d7b2021-03-05 13:31:41 -080051#include "dlext_private.h"
Peter Collingbourne45819dd2020-01-09 11:00:43 -080052
Christopher Ferrisb874c332020-01-21 16:39:05 -080053#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070054#include "platform/bionic/mte.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080055#include "platform/bionic/reserved_signals.h"
56#include "private/bionic_config.h"
57
Elliott Hughesb1770852018-09-18 12:52:42 -070058#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080059
Colin Cross7da20342021-07-28 11:18:11 -070060#elif defined(__GLIBC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080061
Elliott Hughesb1770852018-09-18 12:52:42 -070062#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080063
Colin Cross4c5595c2021-08-16 15:51:59 -070064#elif defined(ANDROID_HOST_MUSL)
Colin Cross7da20342021-07-28 11:18:11 -070065
66#define HAVE_REALLOCARRAY 1
67
Elliott Hughesb1770852018-09-18 12:52:42 -070068#endif
69
Christopher Ferris885f3b92013-05-21 17:48:01 -070070TEST(malloc, malloc_std) {
71 // Simple malloc test.
72 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070073 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070074 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070075 free(ptr);
76}
77
Christopher Ferrisa4037802014-06-09 19:14:11 -070078TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080079 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070080 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070081 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -070082 ASSERT_EQ(ENOMEM, errno);
83}
84
Christopher Ferris885f3b92013-05-21 17:48:01 -070085TEST(malloc, calloc_std) {
86 // Simple calloc test.
87 size_t alloc_len = 100;
88 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070089 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070090 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
91 for (size_t i = 0; i < alloc_len; i++) {
92 ASSERT_EQ(0, ptr[i]);
93 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070094 free(ptr);
95}
96
Peter Collingbourne978eb162020-09-21 15:26:02 -070097TEST(malloc, calloc_mem_init_disabled) {
98#if defined(__BIONIC__)
99 // calloc should still zero memory if mem-init is disabled.
100 // With jemalloc the mallopts will fail but that shouldn't affect the
101 // execution of the test.
102 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
103 size_t alloc_len = 100;
104 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
105 for (size_t i = 0; i < alloc_len; i++) {
106 ASSERT_EQ(0, ptr[i]);
107 }
108 free(ptr);
109 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
110#else
111 GTEST_SKIP() << "bionic-only test";
112#endif
113}
114
Christopher Ferrisa4037802014-06-09 19:14:11 -0700115TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800116 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700117 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700118 ASSERT_EQ(nullptr, calloc(-1, 100));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700119 ASSERT_EQ(ENOMEM, errno);
120}
121
122TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800123 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700124 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700125 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700126 ASSERT_EQ(ENOMEM, errno);
127 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700128 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700129 ASSERT_EQ(ENOMEM, errno);
130 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700131 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700132 ASSERT_EQ(ENOMEM, errno);
133 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700134 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700135 ASSERT_EQ(ENOMEM, errno);
136}
137
Christopher Ferris885f3b92013-05-21 17:48:01 -0700138TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800139 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700140 // Memalign test where the alignment is any value.
141 for (size_t i = 0; i <= 12; i++) {
142 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700143 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700144 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700145 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
146 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
147 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700148 free(ptr);
149 }
150 }
151}
152
Christopher Ferrisa4037802014-06-09 19:14:11 -0700153TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800154 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700155 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700156}
157
158TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800159 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700160 void* ptr;
161 for (size_t align = 0; align <= 256; align++) {
162 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700163 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700164 free(ptr);
165 }
166}
167
Christopher Ferris885f3b92013-05-21 17:48:01 -0700168TEST(malloc, memalign_realloc) {
169 // Memalign and then realloc the pointer a couple of times.
170 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
171 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700172 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700173 ASSERT_LE(100U, malloc_usable_size(ptr));
174 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
175 memset(ptr, 0x23, 100);
176
177 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700178 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700179 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700180 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700181 for (size_t i = 0; i < 100; i++) {
182 ASSERT_EQ(0x23, ptr[i]);
183 }
184 memset(ptr, 0x45, 200);
185
186 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700187 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700188 ASSERT_LE(300U, malloc_usable_size(ptr));
189 for (size_t i = 0; i < 200; i++) {
190 ASSERT_EQ(0x45, ptr[i]);
191 }
192 memset(ptr, 0x67, 300);
193
194 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700195 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700196 ASSERT_LE(250U, malloc_usable_size(ptr));
197 for (size_t i = 0; i < 250; i++) {
198 ASSERT_EQ(0x67, ptr[i]);
199 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700200 free(ptr);
201 }
202}
203
204TEST(malloc, malloc_realloc_larger) {
205 // Realloc to a larger size, malloc is used for the original allocation.
206 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700207 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700208 ASSERT_LE(100U, malloc_usable_size(ptr));
209 memset(ptr, 67, 100);
210
211 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700212 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700213 ASSERT_LE(200U, malloc_usable_size(ptr));
214 for (size_t i = 0; i < 100; i++) {
215 ASSERT_EQ(67, ptr[i]);
216 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700217 free(ptr);
218}
219
220TEST(malloc, malloc_realloc_smaller) {
221 // Realloc to a smaller size, malloc is used for the original allocation.
222 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700223 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700224 ASSERT_LE(200U, malloc_usable_size(ptr));
225 memset(ptr, 67, 200);
226
227 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700228 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700229 ASSERT_LE(100U, malloc_usable_size(ptr));
230 for (size_t i = 0; i < 100; i++) {
231 ASSERT_EQ(67, ptr[i]);
232 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700233 free(ptr);
234}
235
236TEST(malloc, malloc_multiple_realloc) {
237 // Multiple reallocs, malloc is used for the original allocation.
238 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700239 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700240 ASSERT_LE(200U, malloc_usable_size(ptr));
241 memset(ptr, 0x23, 200);
242
243 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700244 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700245 ASSERT_LE(100U, malloc_usable_size(ptr));
246 for (size_t i = 0; i < 100; i++) {
247 ASSERT_EQ(0x23, ptr[i]);
248 }
249
250 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700251 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700252 ASSERT_LE(50U, malloc_usable_size(ptr));
253 for (size_t i = 0; i < 50; i++) {
254 ASSERT_EQ(0x23, ptr[i]);
255 }
256
257 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700258 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700259 ASSERT_LE(150U, malloc_usable_size(ptr));
260 for (size_t i = 0; i < 50; i++) {
261 ASSERT_EQ(0x23, ptr[i]);
262 }
263 memset(ptr, 0x23, 150);
264
265 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700266 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700267 ASSERT_LE(425U, malloc_usable_size(ptr));
268 for (size_t i = 0; i < 150; i++) {
269 ASSERT_EQ(0x23, ptr[i]);
270 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700271 free(ptr);
272}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700273
Christopher Ferris885f3b92013-05-21 17:48:01 -0700274TEST(malloc, calloc_realloc_larger) {
275 // Realloc to a larger size, calloc is used for the original allocation.
276 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700277 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700278 ASSERT_LE(100U, malloc_usable_size(ptr));
279
280 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700281 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700282 ASSERT_LE(200U, malloc_usable_size(ptr));
283 for (size_t i = 0; i < 100; i++) {
284 ASSERT_EQ(0, ptr[i]);
285 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700286 free(ptr);
287}
288
289TEST(malloc, calloc_realloc_smaller) {
290 // Realloc to a smaller size, calloc is used for the original allocation.
291 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700292 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700293 ASSERT_LE(200U, malloc_usable_size(ptr));
294
295 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700296 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700297 ASSERT_LE(100U, malloc_usable_size(ptr));
298 for (size_t i = 0; i < 100; i++) {
299 ASSERT_EQ(0, ptr[i]);
300 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700301 free(ptr);
302}
303
304TEST(malloc, calloc_multiple_realloc) {
305 // Multiple reallocs, calloc is used for the original allocation.
306 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700307 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700308 ASSERT_LE(200U, malloc_usable_size(ptr));
309
310 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700311 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700312 ASSERT_LE(100U, malloc_usable_size(ptr));
313 for (size_t i = 0; i < 100; i++) {
314 ASSERT_EQ(0, ptr[i]);
315 }
316
317 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700318 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700319 ASSERT_LE(50U, malloc_usable_size(ptr));
320 for (size_t i = 0; i < 50; i++) {
321 ASSERT_EQ(0, ptr[i]);
322 }
323
324 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700325 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700326 ASSERT_LE(150U, malloc_usable_size(ptr));
327 for (size_t i = 0; i < 50; i++) {
328 ASSERT_EQ(0, ptr[i]);
329 }
330 memset(ptr, 0, 150);
331
332 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700333 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700334 ASSERT_LE(425U, malloc_usable_size(ptr));
335 for (size_t i = 0; i < 150; i++) {
336 ASSERT_EQ(0, ptr[i]);
337 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700338 free(ptr);
339}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700340
Christopher Ferrisa4037802014-06-09 19:14:11 -0700341TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800342 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700343 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700344 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700345 ASSERT_EQ(ENOMEM, errno);
346 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700347 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700348 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700349 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700350 ASSERT_EQ(ENOMEM, errno);
351 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700352}
353
Dan Alberte5fdaa42014-06-14 01:04:31 +0000354#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
355extern "C" void* pvalloc(size_t);
356extern "C" void* valloc(size_t);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700357#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000358
Christopher Ferrisa4037802014-06-09 19:14:11 -0700359TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700360#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700361 size_t pagesize = sysconf(_SC_PAGESIZE);
362 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700363 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700364 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
365 ASSERT_LE(pagesize, malloc_usable_size(ptr));
366 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700367#else
368 GTEST_SKIP() << "pvalloc not supported.";
369#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700370}
371
372TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700373#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700374 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700375#else
376 GTEST_SKIP() << "pvalloc not supported.";
377#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700378}
379
380TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700381#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700382 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700383 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700384 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700385 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
386 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700387#else
388 GTEST_SKIP() << "valloc not supported.";
389#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700390}
391
392TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700393#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700394 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700395#else
396 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000397#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700398}
Dan Albert4caa1f02014-08-20 09:16:57 -0700399
400TEST(malloc, malloc_info) {
401#ifdef __BIONIC__
Evgenii Stepanov8de6b462019-03-22 13:22:28 -0700402 SKIP_WITH_HWASAN; // hwasan does not implement malloc_info
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800403
404 TemporaryFile tf;
405 ASSERT_TRUE(tf.fd != -1);
406 FILE* fp = fdopen(tf.fd, "w+");
407 tf.release();
408 ASSERT_TRUE(fp != nullptr);
409 ASSERT_EQ(0, malloc_info(0, fp));
410 ASSERT_EQ(0, fclose(fp));
411
412 std::string contents;
413 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
Dan Albert4caa1f02014-08-20 09:16:57 -0700414
415 tinyxml2::XMLDocument doc;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800416 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
Dan Albert4caa1f02014-08-20 09:16:57 -0700417
418 auto root = doc.FirstChildElement();
419 ASSERT_NE(nullptr, root);
420 ASSERT_STREQ("malloc", root->Name());
Christopher Ferris85169652019-10-09 18:41:55 -0700421 std::string version(root->Attribute("version"));
422 if (version == "jemalloc-1") {
Christopher Ferris6c619a02019-03-01 17:59:51 -0800423 auto arena = root->FirstChildElement();
424 for (; arena != nullptr; arena = arena->NextSiblingElement()) {
425 int val;
Dan Albert4caa1f02014-08-20 09:16:57 -0700426
Christopher Ferris6c619a02019-03-01 17:59:51 -0800427 ASSERT_STREQ("heap", arena->Name());
428 ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
429 ASSERT_EQ(tinyxml2::XML_SUCCESS,
430 arena->FirstChildElement("allocated-large")->QueryIntText(&val));
431 ASSERT_EQ(tinyxml2::XML_SUCCESS,
432 arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
433 ASSERT_EQ(tinyxml2::XML_SUCCESS,
434 arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
435 ASSERT_EQ(tinyxml2::XML_SUCCESS,
436 arena->FirstChildElement("bins-total")->QueryIntText(&val));
Dan Albert4caa1f02014-08-20 09:16:57 -0700437
Christopher Ferris6c619a02019-03-01 17:59:51 -0800438 auto bin = arena->FirstChildElement("bin");
439 for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
440 if (strcmp(bin->Name(), "bin") == 0) {
441 ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
442 ASSERT_EQ(tinyxml2::XML_SUCCESS,
443 bin->FirstChildElement("allocated")->QueryIntText(&val));
444 ASSERT_EQ(tinyxml2::XML_SUCCESS,
445 bin->FirstChildElement("nmalloc")->QueryIntText(&val));
446 ASSERT_EQ(tinyxml2::XML_SUCCESS,
447 bin->FirstChildElement("ndalloc")->QueryIntText(&val));
448 }
Dan Albert4caa1f02014-08-20 09:16:57 -0700449 }
450 }
Christopher Ferriscce88c02020-02-12 17:41:01 -0800451 } else if (version == "scudo-1") {
452 auto element = root->FirstChildElement();
453 for (; element != nullptr; element = element->NextSiblingElement()) {
454 int val;
455
456 ASSERT_STREQ("alloc", element->Name());
457 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
458 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
459 }
Christopher Ferris6c619a02019-03-01 17:59:51 -0800460 } else {
Christopher Ferriscce88c02020-02-12 17:41:01 -0800461 // Do not verify output for debug malloc.
462 ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
Dan Albert4caa1f02014-08-20 09:16:57 -0700463 }
464#endif
465}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800466
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700467TEST(malloc, malloc_info_matches_mallinfo) {
468#ifdef __BIONIC__
469 SKIP_WITH_HWASAN; // hwasan does not implement malloc_info
470
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800471 TemporaryFile tf;
472 ASSERT_TRUE(tf.fd != -1);
473 FILE* fp = fdopen(tf.fd, "w+");
474 tf.release();
475 ASSERT_TRUE(fp != nullptr);
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700476 size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800477 ASSERT_EQ(0, malloc_info(0, fp));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700478 size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800479 ASSERT_EQ(0, fclose(fp));
480
481 std::string contents;
482 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700483
484 tinyxml2::XMLDocument doc;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800485 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700486
487 size_t total_allocated_bytes = 0;
488 auto root = doc.FirstChildElement();
489 ASSERT_NE(nullptr, root);
490 ASSERT_STREQ("malloc", root->Name());
Christopher Ferris85169652019-10-09 18:41:55 -0700491 std::string version(root->Attribute("version"));
492 if (version == "jemalloc-1") {
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700493 auto arena = root->FirstChildElement();
494 for (; arena != nullptr; arena = arena->NextSiblingElement()) {
495 int val;
496
497 ASSERT_STREQ("heap", arena->Name());
498 ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
499 ASSERT_EQ(tinyxml2::XML_SUCCESS,
500 arena->FirstChildElement("allocated-large")->QueryIntText(&val));
501 total_allocated_bytes += val;
502 ASSERT_EQ(tinyxml2::XML_SUCCESS,
503 arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
504 total_allocated_bytes += val;
505 ASSERT_EQ(tinyxml2::XML_SUCCESS,
506 arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
507 total_allocated_bytes += val;
508 ASSERT_EQ(tinyxml2::XML_SUCCESS,
509 arena->FirstChildElement("bins-total")->QueryIntText(&val));
510 }
511 // The total needs to be between the mallinfo call before and after
512 // since malloc_info allocates some memory.
513 EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
514 EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
Christopher Ferriscce88c02020-02-12 17:41:01 -0800515 } else if (version == "scudo-1") {
516 auto element = root->FirstChildElement();
517 for (; element != nullptr; element = element->NextSiblingElement()) {
518 ASSERT_STREQ("alloc", element->Name());
519 int size;
520 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
521 int count;
522 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
523 total_allocated_bytes += size * count;
524 }
525 // Scudo only gives the information on the primary, so simply make
526 // sure that the value is non-zero.
527 EXPECT_NE(0U, total_allocated_bytes);
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700528 } else {
Christopher Ferriscce88c02020-02-12 17:41:01 -0800529 // Do not verify output for debug malloc.
530 ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700531 }
532#endif
533}
534
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800535TEST(malloc, calloc_usable_size) {
536 for (size_t size = 1; size <= 2048; size++) {
537 void* pointer = malloc(size);
538 ASSERT_TRUE(pointer != nullptr);
539 memset(pointer, 0xeb, malloc_usable_size(pointer));
540 free(pointer);
541
542 // We should get a previous pointer that has been set to non-zero.
543 // If calloc does not zero out all of the data, this will fail.
544 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
545 ASSERT_TRUE(pointer != nullptr);
546 size_t usable_size = malloc_usable_size(zero_mem);
547 for (size_t i = 0; i < usable_size; i++) {
548 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
549 }
550 free(zero_mem);
551 }
552}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800553
554TEST(malloc, malloc_0) {
555 void* p = malloc(0);
556 ASSERT_TRUE(p != nullptr);
557 free(p);
558}
559
560TEST(malloc, calloc_0_0) {
561 void* p = calloc(0, 0);
562 ASSERT_TRUE(p != nullptr);
563 free(p);
564}
565
566TEST(malloc, calloc_0_1) {
567 void* p = calloc(0, 1);
568 ASSERT_TRUE(p != nullptr);
569 free(p);
570}
571
572TEST(malloc, calloc_1_0) {
573 void* p = calloc(1, 0);
574 ASSERT_TRUE(p != nullptr);
575 free(p);
576}
577
578TEST(malloc, realloc_nullptr_0) {
579 // realloc(nullptr, size) is actually malloc(size).
580 void* p = realloc(nullptr, 0);
581 ASSERT_TRUE(p != nullptr);
582 free(p);
583}
584
585TEST(malloc, realloc_0) {
586 void* p = malloc(1024);
587 ASSERT_TRUE(p != nullptr);
588 // realloc(p, 0) is actually free(p).
589 void* p2 = realloc(p, 0);
590 ASSERT_TRUE(p2 == nullptr);
591}
Christopher Ferris72df6702016-02-11 15:51:31 -0800592
593constexpr size_t MAX_LOOPS = 200;
594
595// Make sure that memory returned by malloc is aligned to allow these data types.
596TEST(malloc, verify_alignment) {
597 uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
598 uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
599 long double** values_ldouble = new long double*[MAX_LOOPS];
600 // Use filler to attempt to force the allocator to get potentially bad alignments.
601 void** filler = new void*[MAX_LOOPS];
602
603 for (size_t i = 0; i < MAX_LOOPS; i++) {
604 // Check uint32_t pointers.
605 filler[i] = malloc(1);
606 ASSERT_TRUE(filler[i] != nullptr);
607
608 values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
609 ASSERT_TRUE(values_32[i] != nullptr);
610 *values_32[i] = i;
611 ASSERT_EQ(*values_32[i], i);
612 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));
613
614 free(filler[i]);
615 }
616
617 for (size_t i = 0; i < MAX_LOOPS; i++) {
618 // Check uint64_t pointers.
619 filler[i] = malloc(1);
620 ASSERT_TRUE(filler[i] != nullptr);
621
622 values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
623 ASSERT_TRUE(values_64[i] != nullptr);
624 *values_64[i] = 0x1000 + i;
625 ASSERT_EQ(*values_64[i], 0x1000 + i);
626 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));
627
628 free(filler[i]);
629 }
630
631 for (size_t i = 0; i < MAX_LOOPS; i++) {
632 // Check long double pointers.
633 filler[i] = malloc(1);
634 ASSERT_TRUE(filler[i] != nullptr);
635
636 values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
637 ASSERT_TRUE(values_ldouble[i] != nullptr);
638 *values_ldouble[i] = 5.5 + i;
639 ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
640 // 32 bit glibc has a long double size of 12 bytes, so hardcode the
641 // required alignment to 0x7.
642#if !defined(__BIONIC__) && !defined(__LP64__)
643 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
644#else
645 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
646#endif
647
648 free(filler[i]);
649 }
650
651 for (size_t i = 0; i < MAX_LOOPS; i++) {
652 free(values_32[i]);
653 free(values_64[i]);
654 free(values_ldouble[i]);
655 }
656
657 delete[] filler;
658 delete[] values_32;
659 delete[] values_64;
660 delete[] values_ldouble;
661}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700662
663TEST(malloc, mallopt_smoke) {
Colin Cross4c5595c2021-08-16 15:51:59 -0700664#if !defined(ANDROID_HOST_MUSL)
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700665 errno = 0;
666 ASSERT_EQ(0, mallopt(-1000, 1));
667 // mallopt doesn't set errno.
668 ASSERT_EQ(0, errno);
Colin Cross7da20342021-07-28 11:18:11 -0700669#else
670 GTEST_SKIP() << "musl doesn't have mallopt";
671#endif
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700672}
Elliott Hughesb1770852018-09-18 12:52:42 -0700673
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800674TEST(malloc, mallopt_decay) {
675#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800676 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800677 errno = 0;
678 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
679 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
680 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
681 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
682#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800683 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800684#endif
685}
686
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  // M_PURGE (value argument ignored) must be accepted and return 1.
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
696
Christopher Ferris88448792020-07-28 14:15:31 -0700697#if defined(__BIONIC__)
698static void GetAllocatorVersion(bool* allocator_scudo) {
699 TemporaryFile tf;
700 ASSERT_TRUE(tf.fd != -1);
701 FILE* fp = fdopen(tf.fd, "w+");
702 tf.release();
703 ASSERT_TRUE(fp != nullptr);
Evgenii Stepanov4edbcee2021-09-17 14:59:15 -0700704 if (malloc_info(0, fp) != 0) {
705 *allocator_scudo = false;
706 return;
707 }
Christopher Ferris88448792020-07-28 14:15:31 -0700708 ASSERT_EQ(0, fclose(fp));
709
710 std::string contents;
711 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
712
713 tinyxml2::XMLDocument doc;
714 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
715
716 auto root = doc.FirstChildElement();
717 ASSERT_NE(nullptr, root);
718 ASSERT_STREQ("malloc", root->Name());
719 std::string version(root->Attribute("version"));
720 *allocator_scudo = (version == "scudo-1");
721}
722#endif
723
TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  // These three options are only understood by scudo; each accepted option
  // returns 1.
  ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
  ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
  ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
739
Elliott Hughesb1770852018-09-18 12:52:42 -0700740TEST(malloc, reallocarray_overflow) {
741#if HAVE_REALLOCARRAY
742 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
743 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
744 size_t b = 2;
745
746 errno = 0;
747 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
748 ASSERT_EQ(ENOMEM, errno);
749
750 errno = 0;
751 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
752 ASSERT_EQ(ENOMEM, errno);
753#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800754 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700755#endif
756}
757
TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  // reallocarray(nullptr, nmemb, size) acts like malloc(nmemb * size):
  // 2 * 32 bytes requested, so at least 64 usable bytes must come back.
  void* p = reallocarray(nullptr, 2, 32);
  ASSERT_TRUE(p != nullptr);
  ASSERT_GE(malloc_usable_size(p), 64U);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800767
// Verifies that mallinfo().uordblks grows by at least the usable size of a
// new allocation, for a spread of allocation sizes. Because a thread cache
// can absorb an allocation without changing the totals, several attempts
// are made per size before declaring failure.
TEST(malloc, mallinfo) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // ptrs was zero-initialized, so freeing unused slots is a no-op.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000812
Christopher Ferris8248e622021-12-03 13:55:57 -0800813TEST(malloc, mallinfo2) {
Colin Crossfdced952022-01-24 18:15:07 -0800814#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
Christopher Ferris8248e622021-12-03 13:55:57 -0800815 SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
816 static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};
817
818 constexpr static size_t kMaxAllocs = 50;
819
820 for (size_t size : sizes) {
821 // If some of these allocations are stuck in a thread cache, then keep
822 // looping until we make an allocation that changes the total size of the
823 // memory allocated.
824 // jemalloc implementations counts the thread cache allocations against
825 // total memory allocated.
826 void* ptrs[kMaxAllocs] = {};
827 bool pass = false;
828 for (size_t i = 0; i < kMaxAllocs; i++) {
829 struct mallinfo info = mallinfo();
830 struct mallinfo2 info2 = mallinfo2();
831 // Verify that mallinfo and mallinfo2 are exactly the same.
Colin Crossfdced952022-01-24 18:15:07 -0800832 ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
833 ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
834 ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
835 ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
836 ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
837 ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
838 ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
839 ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
840 ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
841 ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);
Christopher Ferris8248e622021-12-03 13:55:57 -0800842
843 size_t allocated = info2.uordblks;
844 ptrs[i] = malloc(size);
845 ASSERT_TRUE(ptrs[i] != nullptr);
846
847 info = mallinfo();
848 info2 = mallinfo2();
849 // Verify that mallinfo and mallinfo2 are exactly the same.
Colin Crossfdced952022-01-24 18:15:07 -0800850 ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
851 ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
852 ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
853 ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
854 ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
855 ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
856 ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
857 ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
858 ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
859 ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);
Christopher Ferris8248e622021-12-03 13:55:57 -0800860
861 size_t new_allocated = info2.uordblks;
862 if (allocated != new_allocated) {
863 size_t usable_size = malloc_usable_size(ptrs[i]);
864 // Only check if the total got bigger by at least allocation size.
865 // Sometimes the mallinfo2 numbers can go backwards due to compaction
866 // and/or freeing of cached data.
867 if (new_allocated >= allocated + usable_size) {
868 pass = true;
869 break;
870 }
871 }
872 }
873 for (void* ptr : ptrs) {
874 free(ptr);
875 }
876 ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after "
877 << kMaxAllocs << " allocations.";
878 }
879#else
880 GTEST_SKIP() << "glibc is broken";
881#endif
882}
883
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800884template <typename Type>
885void __attribute__((optnone)) VerifyAlignment(Type* floating) {
886 size_t expected_alignment = alignof(Type);
887 if (expected_alignment != 0) {
888 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
889 << "Expected alignment " << expected_alignment << " ptr value " << floating;
890 }
891}
892
893template <typename Type>
894void __attribute__((optnone)) TestAllocateType() {
895 // The number of allocations to do in a row. This is to attempt to
896 // expose the worst case alignment for native allocators that use
897 // bins.
898 static constexpr size_t kMaxConsecutiveAllocs = 100;
899
900 // Verify using new directly.
901 Type* types[kMaxConsecutiveAllocs];
902 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
903 types[i] = new Type;
904 VerifyAlignment(types[i]);
905 if (::testing::Test::HasFatalFailure()) {
906 return;
907 }
908 }
909 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
910 delete types[i];
911 }
912
913 // Verify using malloc.
914 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
915 types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
916 ASSERT_TRUE(types[i] != nullptr);
917 VerifyAlignment(types[i]);
918 if (::testing::Test::HasFatalFailure()) {
919 return;
920 }
921 }
922 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
923 free(types[i]);
924 }
925
926 // Verify using a vector.
927 std::vector<Type> type_vector(kMaxConsecutiveAllocs);
928 for (size_t i = 0; i < type_vector.size(); i++) {
929 VerifyAlignment(&type_vector[i]);
930 if (::testing::Test::HasFatalFailure()) {
931 return;
932 }
933 }
934}
935
#if defined(__ANDROID__)
// Performs 100 allocations of |alloc_size| bytes and asserts that every
// returned pointer is aligned to at least |aligned_bytes| (a power of two).
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  const uintptr_t mask = aligned_bytes - 1;
  for (void*& ptr : ptrs) {
    ptr = malloc(alloc_size);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptr;
  }
}
#endif
949
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700950void AlignCheck() {
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800951 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
952 // for a discussion of type alignment.
953 ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
954 ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
955 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());
956
957 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
958 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
959 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
960 ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
961 ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
962 ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
963 ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
964 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
965 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
966 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
967 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
968 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
969 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
970 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());
971
972#if defined(__ANDROID__)
973 // On Android, there is a lot of code that expects certain alignments:
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700974 // 1. Allocations of a size that rounds up to a multiple of 16 bytes
975 // must have at least 16 byte alignment.
976 // 2. Allocations of a size that rounds up to a multiple of 8 bytes and
977 // not 16 bytes, are only required to have at least 8 byte alignment.
978 // In addition, on Android clang has been configured for 64 bit such that:
979 // 3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
980 // 4. Allocations > 8 bytes must be aligned to at least 16 bytes.
981 // For 32 bit environments, only the first two requirements must be met.
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800982
983 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
984 // a discussion of this alignment mess. The code below is enforcing
985 // strong-alignment, since who knows what code depends on this behavior now.
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700986 // As mentioned before, for 64 bit this will enforce the higher
987 // requirement since clang expects this behavior on Android now.
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800988 for (size_t i = 1; i <= 128; i++) {
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700989#if defined(__LP64__)
990 if (i <= 8) {
991 AndroidVerifyAlignment(i, 8);
992 } else {
993 AndroidVerifyAlignment(i, 16);
994 }
995#else
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800996 size_t rounded = (i + 7) & ~7;
997 if ((rounded % 16) == 0) {
998 AndroidVerifyAlignment(i, 16);
999 } else {
1000 AndroidVerifyAlignment(i, 8);
1001 }
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001002#endif
Christopher Ferrisf32494c2020-01-08 14:19:10 -08001003 if (::testing::Test::HasFatalFailure()) {
1004 return;
1005 }
1006 }
1007#endif
1008}
1009
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001010TEST(malloc, align_check) {
1011 AlignCheck();
1012}
1013
// Force GWP-ASan on and verify all alignment checks still pass.
TEST(malloc, align_check_gwp_asan) {
#if defined(__BIONIC__)
  // GWP-ASan intercepts some allocations once initialized; it must provide
  // the same alignment guarantees as the native allocator.
  bool force_init = true;
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &force_init, sizeof(force_init)));

  AlignCheck();
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1025
Christopher Ferris201dcf42020-01-29 13:09:31 -08001026// Jemalloc doesn't pass this test right now, so leave it as disabled.
1027TEST(malloc, DISABLED_alloc_after_fork) {
1028 // Both of these need to be a power of 2.
1029 static constexpr size_t kMinAllocationSize = 8;
1030 static constexpr size_t kMaxAllocationSize = 2097152;
1031
1032 static constexpr size_t kNumAllocatingThreads = 5;
1033 static constexpr size_t kNumForkLoops = 100;
1034
1035 std::atomic_bool stop;
1036
1037 // Create threads that simply allocate and free different sizes.
1038 std::vector<std::thread*> threads;
1039 for (size_t i = 0; i < kNumAllocatingThreads; i++) {
1040 std::thread* t = new std::thread([&stop] {
1041 while (!stop) {
1042 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001043 void* ptr;
1044 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001045 free(ptr);
1046 }
1047 }
1048 });
1049 threads.push_back(t);
1050 }
1051
1052 // Create a thread to fork and allocate.
1053 for (size_t i = 0; i < kNumForkLoops; i++) {
1054 pid_t pid;
1055 if ((pid = fork()) == 0) {
1056 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001057 void* ptr;
1058 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001059 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris201dcf42020-01-29 13:09:31 -08001060 // Make sure we can touch all of the allocation.
1061 memset(ptr, 0x1, size);
1062 ASSERT_LE(size, malloc_usable_size(ptr));
1063 free(ptr);
1064 }
1065 _exit(10);
1066 }
1067 ASSERT_NE(-1, pid);
1068 AssertChildExited(pid, 10);
1069 }
1070
1071 stop = true;
1072 for (auto thread : threads) {
1073 thread->join();
1074 delete thread;
1075 }
1076}
1077
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001078TEST(android_mallopt, error_on_unexpected_option) {
1079#if defined(__BIONIC__)
1080 const int unrecognized_option = -1;
1081 errno = 0;
1082 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
1083 EXPECT_EQ(ENOTSUP, errno);
1084#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001085 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001086#endif
1087}
1088
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001089bool IsDynamic() {
1090#if defined(__LP64__)
1091 Elf64_Ehdr ehdr;
1092#else
1093 Elf32_Ehdr ehdr;
1094#endif
1095 std::string path(android::base::GetExecutablePath());
1096
1097 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
1098 if (fd == -1) {
1099 // Assume dynamic on error.
1100 return true;
1101 }
1102 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
1103 close(fd);
1104 // Assume dynamic in error cases.
1105 return !read_completed || ehdr.e_type == ET_DYN;
1106}
1107
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001108TEST(android_mallopt, init_zygote_child_profiling) {
1109#if defined(__BIONIC__)
1110 // Successful call.
1111 errno = 0;
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001112 if (IsDynamic()) {
1113 EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1114 EXPECT_EQ(0, errno);
1115 } else {
1116 // Not supported in static executables.
1117 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1118 EXPECT_EQ(ENOTSUP, errno);
1119 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001120
1121 // Unexpected arguments rejected.
1122 errno = 0;
1123 char unexpected = 0;
1124 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001125 if (IsDynamic()) {
1126 EXPECT_EQ(EINVAL, errno);
1127 } else {
1128 EXPECT_EQ(ENOTSUP, errno);
1129 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001130#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001131 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001132#endif
1133}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001134
#if defined(__BIONIC__)
// Child-process helper for the allocation-limit tests: installs a 128MB
// allocation limit, then requires |func| (which attempts an allocation of
// the given byte count and returns success) to succeed for 20MB and fail
// for 128MB. Exits 0 when both behave as expected, 1 otherwise; the parent
// checks the exit code via EXPECT_EXIT.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif
1148
// Verifies that every allocation entry point (calloc, malloc, memalign,
// posix_memalign, aligned_alloc, realloc, and pvalloc/valloc on 32 bit)
// honors M_SET_ALLOCATION_LIMIT_BYTES. Each lambda runs in a forked child
// via CheckAllocationFunction and must exit 0.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc and valloc are obsolete interfaces that only exist on 32 bit.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1183
TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  // Any later attempt to change the limit must be rejected.
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1195
1196#if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

// Allocates kAllocationSize chunks until malloc fails (at most 20 chunks),
// frees everything that was allocated, and returns the number of chunks
// that succeeded before the failure. Returns 0 if malloc never failed,
// which callers treat as "limit never reached".
static size_t GetMaxAllocations() {
  size_t max_pointers = 0;
  // Zero-initialize so that unfilled slots can be freed unconditionally
  // (free(nullptr) is a no-op).
  void* ptrs[20] = {};
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(kAllocationSize);
    if (ptrs[i] == nullptr) {
      max_pointers = i;
      break;
    }
  }
  // Fix: free every successful allocation. Previously, when the limit was
  // never hit (max_pointers == 0), all 20 allocations were leaked.
  for (void* ptr : ptrs) {
    free(ptr);
  }
  return max_pointers;
}
1214
1215static void VerifyMaxPointers(size_t max_pointers) {
1216 // Now verify that we can allocate the same number as before.
1217 void* ptrs[20];
1218 for (size_t i = 0; i < max_pointers; i++) {
1219 ptrs[i] = malloc(kAllocationSize);
1220 ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
1221 }
1222
1223 // Make sure the next allocation still fails.
1224 ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
1225 for (size_t i = 0; i < max_pointers; i++) {
1226 free(ptrs[i]);
1227 }
1228}
1229#endif
1230
// Verifies that growing an allocation with realloc is charged against the
// allocation limit: repeated grows under the limit succeed and a grow past
// the limit fails (returning nullptr).
TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Increase size.
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  // Now push past limit.
  memory = realloc(memory, 130 * 1024 * 1024);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1260
// Verifies that shrinking an allocation with realloc keeps the allocation
// limit accounting consistent: after the shrinks and a free, the original
// number of maximum allocations is still achievable.
TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Decrease size.
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1288
// Verifies that realloc(ptr, 0) releases the allocation's bytes back to the
// limit accounting (bionic's realloc to size 0 frees and returns nullptr).
TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1308
1309#if defined(__BIONIC__)
1310static void* SetAllocationLimit(void* data) {
1311 std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
1312 while (!go->load()) {
1313 }
1314 size_t limit = 500 * 1024 * 1024;
1315 if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
1316 return reinterpret_cast<void*>(-1);
1317 }
1318 return nullptr;
1319}
1320
// Races four threads to set the allocation limit and checks exactly one
// wins (the limit is settable only once). Also fires the heapprofd signal
// so the race runs concurrently with the profiler's malloc hook
// installation. Calls exit(0) on success, so this must run in a forked
// child.
static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // A thread reports success by returning a non-null value.
  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  ASSERT_EQ(1U, num_successful);
  exit(0);
}
1350#endif
1351
// Stress test: repeatedly forks a child that races several threads to set
// the allocation limit (see SetAllocationLimitMultipleThreads). For dynamic
// executables, heapprofd hooks are enabled first so the mallopt race also
// contends with the profiling signal handler.
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001374
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001375TEST(android_mallopt, force_init_gwp_asan) {
1376#if defined(__BIONIC__)
1377 bool force_init = true;
1378 ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &force_init, sizeof(force_init)));
1379
1380 // Verify that trying to do the call again also passes no matter the
1381 // value of force_init.
1382 force_init = false;
1383 ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &force_init, sizeof(force_init)));
1384 force_init = true;
1385 ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &force_init, sizeof(force_init)));
1386#else
1387 GTEST_SKIP() << "bionic extension";
1388#endif
1389}
1390
Mitch Phillips9cad8422021-01-20 16:03:27 -08001391void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1392 std::vector<void*> allocs;
1393 constexpr int kMaxBytesToCheckZero = 64;
1394 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1395
1396 for (int i = 0; i < num_iterations; ++i) {
1397 int size = get_alloc_size(i);
1398 allocs.push_back(malloc(size));
1399 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1400 }
1401
1402 for (void* alloc : allocs) {
1403 free(alloc);
1404 }
1405 allocs.clear();
1406
1407 for (int i = 0; i < num_iterations; ++i) {
1408 int size = get_alloc_size(i);
1409 allocs.push_back(malloc(size));
1410 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1411 }
1412
1413 for (void* alloc : allocs) {
1414 free(alloc);
1415 }
1416}
1417
// With M_BIONIC_ZERO_INIT enabled, memory that is freed and then handed out
// again must come back zeroed (scudo-only behavior).
TEST(malloc, zero_init) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  mallopt(M_BIONIC_ZERO_INIT, 1);

  // Test using a block of 4K small (1-32 byte) allocations.
  TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
    return 1 + iteration % 32;
  });

  // Also test large allocations that land in the scudo secondary, as this is
  // the only part of Scudo that's changed by enabling zero initialization with
  // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
  // release secondary allocations back to the OS) was modified to 0ms/1ms by
  // mallopt_decay. Ensure that we delay for at least a second before releasing
  // pages to the OS in order to avoid implicit zeroing by the kernel.
  mallopt(M_DECAY_TIME, 1000);
  TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
    return 1 << (19 + iteration % 4);
  });

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1449
// Note that MTE is enabled on cc_tests on devices that support MTE.
// Disabling heap tagging via mallopt must clear the MTE tag-check-fault
// mode (PR_MTE_TCF_*) on the calling thread and on other existing threads.
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  // The helper thread blocks on this semaphore until after the main thread
  // has changed the heap tagging level, so its prctl result reflects the
  // post-change state.
  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  // Turn off heap tagging, then release the helper thread.
  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  // The calling thread must now have tag checks disabled...
  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(static_cast<unsigned long>(PR_MTE_TCF_NONE), my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // ...and the pre-existing helper thread must observe the same control word.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne2659d7b2021-03-05 13:31:41 -08001484
// For apps targeting SDK <= 29, scudo must tolerate reads a few bytes past
// the end of a large allocation (legacy "slack" behavior) instead of
// crashing.
TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end of
  // a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  // volatile prevents the out-of-bounds read from being optimized away.
  volatile char *vp = p.get();
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Evgenii Stepanovf0d7a342021-11-16 17:34:39 -08001505
1506// Regression test for b/206701345 -- scudo bug, MTE only.
1507// Fix: https://reviews.llvm.org/D105261
1508// Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
1509TEST(malloc, realloc_mte_crash_b206701345) {
1510 // We want to hit in-place realloc at the very end of an mmap-ed region. Not
1511 // all size classes allow such placement - mmap size has to be divisible by
1512 // the block size. At the time of writing this could only be reproduced with
1513 // 64 byte size class (i.e. 48 byte allocations), but that may change in the
1514 // future. Try several different classes at the lower end.
1515 std::vector<void*> ptrs(10000);
1516 for (int i = 1; i < 32; ++i) {
1517 size_t sz = 16 * i - 1;
1518 for (void*& p : ptrs) {
1519 p = realloc(malloc(sz), sz + 1);
1520 }
1521
1522 for (void* p : ptrs) {
1523 free(p);
1524 }
1525 }
1526}