blob: 63ad99d3d5c5c34c014d648452ef58dadb63e11f [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
Colin Cross4c5595c2021-08-16 15:51:59 -070030#include <sys/cdefs.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080031#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080032#include <sys/types.h>
33#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070034#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070035
Mitch Phillips9cad8422021-01-20 16:03:27 -080036#include <algorithm>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080037#include <atomic>
Christopher Ferris02b6bbc2022-06-02 15:20:23 -070038#include <functional>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080039#include <thread>
Mitch Phillips9cad8422021-01-20 16:03:27 -080040#include <vector>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080041
Dan Albert4caa1f02014-08-20 09:16:57 -070042#include <tinyxml2.h>
43
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080044#include <android-base/file.h>
Florian Mayer750dcd32022-04-15 15:54:47 -070045#include <android-base/test_utils.h>
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080046
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080047#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000048
Elliott Hughesb1770852018-09-18 12:52:42 -070049#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080050
Peter Collingbourne45819dd2020-01-09 11:00:43 -080051#include "SignalUtils.h"
Peter Collingbourne2659d7b2021-03-05 13:31:41 -080052#include "dlext_private.h"
Peter Collingbourne45819dd2020-01-09 11:00:43 -080053
Christopher Ferrisb874c332020-01-21 16:39:05 -080054#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070055#include "platform/bionic/mte.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080056#include "platform/bionic/reserved_signals.h"
57#include "private/bionic_config.h"
58
Elliott Hughesb1770852018-09-18 12:52:42 -070059#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080060
Colin Cross7da20342021-07-28 11:18:11 -070061#elif defined(__GLIBC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080062
Elliott Hughesb1770852018-09-18 12:52:42 -070063#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080064
Colin Cross4c5595c2021-08-16 15:51:59 -070065#elif defined(ANDROID_HOST_MUSL)
Colin Cross7da20342021-07-28 11:18:11 -070066
67#define HAVE_REALLOCARRAY 1
68
Elliott Hughesb1770852018-09-18 12:52:42 -070069#endif
70
Christopher Ferris885f3b92013-05-21 17:48:01 -070071TEST(malloc, malloc_std) {
72 // Simple malloc test.
73 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070074 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070075 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070076 free(ptr);
77}
78
Christopher Ferrisa4037802014-06-09 19:14:11 -070079TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080080 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070081 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070082 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -070083 ASSERT_EQ(ENOMEM, errno);
84}
85
Christopher Ferris885f3b92013-05-21 17:48:01 -070086TEST(malloc, calloc_std) {
87 // Simple calloc test.
88 size_t alloc_len = 100;
89 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070090 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070091 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
92 for (size_t i = 0; i < alloc_len; i++) {
93 ASSERT_EQ(0, ptr[i]);
94 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070095 free(ptr);
96}
97
Peter Collingbourne978eb162020-09-21 15:26:02 -070098TEST(malloc, calloc_mem_init_disabled) {
99#if defined(__BIONIC__)
100 // calloc should still zero memory if mem-init is disabled.
101 // With jemalloc the mallopts will fail but that shouldn't affect the
102 // execution of the test.
103 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
104 size_t alloc_len = 100;
105 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
106 for (size_t i = 0; i < alloc_len; i++) {
107 ASSERT_EQ(0, ptr[i]);
108 }
109 free(ptr);
110 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
111#else
112 GTEST_SKIP() << "bionic-only test";
113#endif
114}
115
Christopher Ferrisa4037802014-06-09 19:14:11 -0700116TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800117 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700118 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700119 ASSERT_EQ(nullptr, calloc(-1, 100));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700120 ASSERT_EQ(ENOMEM, errno);
121}
122
123TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800124 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700125 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700126 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700127 ASSERT_EQ(ENOMEM, errno);
128 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700129 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700130 ASSERT_EQ(ENOMEM, errno);
131 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700132 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700133 ASSERT_EQ(ENOMEM, errno);
134 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700135 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700136 ASSERT_EQ(ENOMEM, errno);
137}
138
Christopher Ferris885f3b92013-05-21 17:48:01 -0700139TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800140 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700141 // Memalign test where the alignment is any value.
142 for (size_t i = 0; i <= 12; i++) {
143 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700144 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700145 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700146 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
147 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
148 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700149 free(ptr);
150 }
151 }
152}
153
Christopher Ferrisa4037802014-06-09 19:14:11 -0700154TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800155 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700156 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700157}
158
159TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800160 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700161 void* ptr;
162 for (size_t align = 0; align <= 256; align++) {
163 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700164 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700165 free(ptr);
166 }
167}
168
Christopher Ferris885f3b92013-05-21 17:48:01 -0700169TEST(malloc, memalign_realloc) {
170 // Memalign and then realloc the pointer a couple of times.
171 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
172 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700173 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700174 ASSERT_LE(100U, malloc_usable_size(ptr));
175 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
176 memset(ptr, 0x23, 100);
177
178 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700179 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700180 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700181 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700182 for (size_t i = 0; i < 100; i++) {
183 ASSERT_EQ(0x23, ptr[i]);
184 }
185 memset(ptr, 0x45, 200);
186
187 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700188 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700189 ASSERT_LE(300U, malloc_usable_size(ptr));
190 for (size_t i = 0; i < 200; i++) {
191 ASSERT_EQ(0x45, ptr[i]);
192 }
193 memset(ptr, 0x67, 300);
194
195 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700196 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700197 ASSERT_LE(250U, malloc_usable_size(ptr));
198 for (size_t i = 0; i < 250; i++) {
199 ASSERT_EQ(0x67, ptr[i]);
200 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700201 free(ptr);
202 }
203}
204
205TEST(malloc, malloc_realloc_larger) {
206 // Realloc to a larger size, malloc is used for the original allocation.
207 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700208 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700209 ASSERT_LE(100U, malloc_usable_size(ptr));
210 memset(ptr, 67, 100);
211
212 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700213 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700214 ASSERT_LE(200U, malloc_usable_size(ptr));
215 for (size_t i = 0; i < 100; i++) {
216 ASSERT_EQ(67, ptr[i]);
217 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700218 free(ptr);
219}
220
221TEST(malloc, malloc_realloc_smaller) {
222 // Realloc to a smaller size, malloc is used for the original allocation.
223 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700224 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700225 ASSERT_LE(200U, malloc_usable_size(ptr));
226 memset(ptr, 67, 200);
227
228 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700229 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700230 ASSERT_LE(100U, malloc_usable_size(ptr));
231 for (size_t i = 0; i < 100; i++) {
232 ASSERT_EQ(67, ptr[i]);
233 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700234 free(ptr);
235}
236
237TEST(malloc, malloc_multiple_realloc) {
238 // Multiple reallocs, malloc is used for the original allocation.
239 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700240 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700241 ASSERT_LE(200U, malloc_usable_size(ptr));
242 memset(ptr, 0x23, 200);
243
244 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700245 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700246 ASSERT_LE(100U, malloc_usable_size(ptr));
247 for (size_t i = 0; i < 100; i++) {
248 ASSERT_EQ(0x23, ptr[i]);
249 }
250
251 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700252 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700253 ASSERT_LE(50U, malloc_usable_size(ptr));
254 for (size_t i = 0; i < 50; i++) {
255 ASSERT_EQ(0x23, ptr[i]);
256 }
257
258 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700259 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700260 ASSERT_LE(150U, malloc_usable_size(ptr));
261 for (size_t i = 0; i < 50; i++) {
262 ASSERT_EQ(0x23, ptr[i]);
263 }
264 memset(ptr, 0x23, 150);
265
266 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700267 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700268 ASSERT_LE(425U, malloc_usable_size(ptr));
269 for (size_t i = 0; i < 150; i++) {
270 ASSERT_EQ(0x23, ptr[i]);
271 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700272 free(ptr);
273}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700274
Christopher Ferris885f3b92013-05-21 17:48:01 -0700275TEST(malloc, calloc_realloc_larger) {
276 // Realloc to a larger size, calloc is used for the original allocation.
277 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700278 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700279 ASSERT_LE(100U, malloc_usable_size(ptr));
280
281 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700282 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700283 ASSERT_LE(200U, malloc_usable_size(ptr));
284 for (size_t i = 0; i < 100; i++) {
285 ASSERT_EQ(0, ptr[i]);
286 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700287 free(ptr);
288}
289
290TEST(malloc, calloc_realloc_smaller) {
291 // Realloc to a smaller size, calloc is used for the original allocation.
292 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700293 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700294 ASSERT_LE(200U, malloc_usable_size(ptr));
295
296 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700297 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700298 ASSERT_LE(100U, malloc_usable_size(ptr));
299 for (size_t i = 0; i < 100; i++) {
300 ASSERT_EQ(0, ptr[i]);
301 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700302 free(ptr);
303}
304
305TEST(malloc, calloc_multiple_realloc) {
306 // Multiple reallocs, calloc is used for the original allocation.
307 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700308 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700309 ASSERT_LE(200U, malloc_usable_size(ptr));
310
311 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700312 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700313 ASSERT_LE(100U, malloc_usable_size(ptr));
314 for (size_t i = 0; i < 100; i++) {
315 ASSERT_EQ(0, ptr[i]);
316 }
317
318 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700319 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700320 ASSERT_LE(50U, malloc_usable_size(ptr));
321 for (size_t i = 0; i < 50; i++) {
322 ASSERT_EQ(0, ptr[i]);
323 }
324
325 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700326 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700327 ASSERT_LE(150U, malloc_usable_size(ptr));
328 for (size_t i = 0; i < 50; i++) {
329 ASSERT_EQ(0, ptr[i]);
330 }
331 memset(ptr, 0, 150);
332
333 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700334 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700335 ASSERT_LE(425U, malloc_usable_size(ptr));
336 for (size_t i = 0; i < 150; i++) {
337 ASSERT_EQ(0, ptr[i]);
338 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700339 free(ptr);
340}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700341
Christopher Ferrisa4037802014-06-09 19:14:11 -0700342TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800343 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700344 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700345 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700346 ASSERT_EQ(ENOMEM, errno);
347 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700348 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700349 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700350 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700351 ASSERT_EQ(ENOMEM, errno);
352 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700353}
354
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
// pvalloc/valloc are no longer declared in the headers; declare the
// entry points directly so the deprecated functions can still be tested.
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000359
Christopher Ferrisa4037802014-06-09 19:14:11 -0700360TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700361#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700362 size_t pagesize = sysconf(_SC_PAGESIZE);
363 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700364 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700365 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
366 ASSERT_LE(pagesize, malloc_usable_size(ptr));
367 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700368#else
369 GTEST_SKIP() << "pvalloc not supported.";
370#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700371}
372
373TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700374#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700375 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700376#else
377 GTEST_SKIP() << "pvalloc not supported.";
378#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700379}
380
381TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700382#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700383 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700384 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700385 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700386 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
387 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700388#else
389 GTEST_SKIP() << "valloc not supported.";
390#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700391}
392
393TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700394#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700395 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700396#else
397 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000398#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700399}
Dan Albert4caa1f02014-08-20 09:16:57 -0700400
// Dump malloc_info(3) output to a temp file and verify that it is
// well-formed XML in the format of whichever allocator is active
// (jemalloc, scudo, or debug malloc, distinguished by the root
// element's "version" attribute).
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN;  // hwasan does not implement malloc_info

  // Write the malloc_info output to a file so it can be read back.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: one <heap> per arena, each with per-size-class <bin>s.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size=... count=...> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800467
// Cross-check the totals reported by malloc_info(3) against
// mallinfo(3).  mallinfo is sampled immediately before and after the
// malloc_info call (malloc_info itself allocates), so for jemalloc the
// parsed total must fall between the two samples.
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN;  // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  // Sample allocated bytes on both sides of the malloc_info call.
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // Sum large + huge + bin allocations across all arenas.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
535
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800536TEST(malloc, calloc_usable_size) {
537 for (size_t size = 1; size <= 2048; size++) {
538 void* pointer = malloc(size);
539 ASSERT_TRUE(pointer != nullptr);
540 memset(pointer, 0xeb, malloc_usable_size(pointer));
541 free(pointer);
542
543 // We should get a previous pointer that has been set to non-zero.
544 // If calloc does not zero out all of the data, this will fail.
545 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
546 ASSERT_TRUE(pointer != nullptr);
547 size_t usable_size = malloc_usable_size(zero_mem);
548 for (size_t i = 0; i < usable_size; i++) {
549 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
550 }
551 free(zero_mem);
552 }
553}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800554
555TEST(malloc, malloc_0) {
556 void* p = malloc(0);
557 ASSERT_TRUE(p != nullptr);
558 free(p);
559}
560
561TEST(malloc, calloc_0_0) {
562 void* p = calloc(0, 0);
563 ASSERT_TRUE(p != nullptr);
564 free(p);
565}
566
567TEST(malloc, calloc_0_1) {
568 void* p = calloc(0, 1);
569 ASSERT_TRUE(p != nullptr);
570 free(p);
571}
572
573TEST(malloc, calloc_1_0) {
574 void* p = calloc(1, 0);
575 ASSERT_TRUE(p != nullptr);
576 free(p);
577}
578
579TEST(malloc, realloc_nullptr_0) {
580 // realloc(nullptr, size) is actually malloc(size).
581 void* p = realloc(nullptr, 0);
582 ASSERT_TRUE(p != nullptr);
583 free(p);
584}
585
586TEST(malloc, realloc_0) {
587 void* p = malloc(1024);
588 ASSERT_TRUE(p != nullptr);
589 // realloc(p, 0) is actually free(p).
590 void* p2 = realloc(p, 0);
591 ASSERT_TRUE(p2 == nullptr);
592}
Christopher Ferris72df6702016-02-11 15:51:31 -0800593
// Number of allocation rounds per data type below.
constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    // Store and read back to make sure the memory is actually usable.
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  // Release the typed allocations, which were kept live across rounds.
  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700663
// An unrecognized mallopt option must fail (return 0) without
// modifying errno.
TEST(malloc, mallopt_smoke) {
#if defined(__BIONIC__)
  errno = 0;
  ASSERT_EQ(0, mallopt(-1000, 1));
  // mallopt doesn't set errno.
  ASSERT_EQ(0, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
Elliott Hughesb1770852018-09-18 12:52:42 -0700674
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800675TEST(malloc, mallopt_decay) {
676#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800677 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800678 errno = 0;
679 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
680 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
681 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
682 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
683#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800684 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800685#endif
686}
687
// M_PURGE asks the allocator to release freed memory back to the
// kernel; it should report success.
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
697
Christopher Ferris88448792020-07-28 14:15:31 -0700698#if defined(__BIONIC__)
699static void GetAllocatorVersion(bool* allocator_scudo) {
700 TemporaryFile tf;
701 ASSERT_TRUE(tf.fd != -1);
702 FILE* fp = fdopen(tf.fd, "w+");
703 tf.release();
704 ASSERT_TRUE(fp != nullptr);
Evgenii Stepanov4edbcee2021-09-17 14:59:15 -0700705 if (malloc_info(0, fp) != 0) {
706 *allocator_scudo = false;
707 return;
708 }
Christopher Ferris88448792020-07-28 14:15:31 -0700709 ASSERT_EQ(0, fclose(fp));
710
711 std::string contents;
712 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
713
714 tinyxml2::XMLDocument doc;
715 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
716
717 auto root = doc.FirstChildElement();
718 ASSERT_NE(nullptr, root);
719 ASSERT_STREQ("malloc", root->Name());
720 std::string version(root->Attribute("version"));
721 *allocator_scudo = (version == "scudo-1");
722}
723#endif
724
725TEST(malloc, mallopt_scudo_only_options) {
726#if defined(__BIONIC__)
727 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
728 bool allocator_scudo;
729 GetAllocatorVersion(&allocator_scudo);
730 if (!allocator_scudo) {
731 GTEST_SKIP() << "scudo allocator only test";
732 }
733 ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
734 ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
735 ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
736#else
737 GTEST_SKIP() << "bionic-only test";
738#endif
739}
740
Elliott Hughesb1770852018-09-18 12:52:42 -0700741TEST(malloc, reallocarray_overflow) {
742#if HAVE_REALLOCARRAY
743 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
744 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
745 size_t b = 2;
746
747 errno = 0;
748 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
749 ASSERT_EQ(ENOMEM, errno);
750
751 errno = 0;
752 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
753 ASSERT_EQ(ENOMEM, errno);
754#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800755 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700756#endif
757}
758
759TEST(malloc, reallocarray) {
760#if HAVE_REALLOCARRAY
761 void* p = reallocarray(nullptr, 2, 32);
762 ASSERT_TRUE(p != nullptr);
763 ASSERT_GE(malloc_usable_size(p), 64U);
764#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800765 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700766#endif
767}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800768
// Verify that mallinfo().uordblks (total allocated bytes) grows by at
// least the usable size of a new allocation, across a range of sizes.
// Thread caches can absorb small allocations without changing the
// totals, so each size is retried up to kMaxAllocs times.
TEST(malloc, mallinfo) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Free everything allocated in this round before judging the result.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000813
Christopher Ferris8248e622021-12-03 13:55:57 -0800814TEST(malloc, mallinfo2) {
Colin Crossfdced952022-01-24 18:15:07 -0800815#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
Christopher Ferris8248e622021-12-03 13:55:57 -0800816 SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
817 static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};
818
819 constexpr static size_t kMaxAllocs = 50;
820
821 for (size_t size : sizes) {
822 // If some of these allocations are stuck in a thread cache, then keep
823 // looping until we make an allocation that changes the total size of the
824 // memory allocated.
825 // jemalloc implementations counts the thread cache allocations against
826 // total memory allocated.
827 void* ptrs[kMaxAllocs] = {};
828 bool pass = false;
829 for (size_t i = 0; i < kMaxAllocs; i++) {
830 struct mallinfo info = mallinfo();
831 struct mallinfo2 info2 = mallinfo2();
832 // Verify that mallinfo and mallinfo2 are exactly the same.
Colin Crossfdced952022-01-24 18:15:07 -0800833 ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
834 ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
835 ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
836 ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
837 ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
838 ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
839 ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
840 ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
841 ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
842 ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);
Christopher Ferris8248e622021-12-03 13:55:57 -0800843
844 size_t allocated = info2.uordblks;
845 ptrs[i] = malloc(size);
846 ASSERT_TRUE(ptrs[i] != nullptr);
847
848 info = mallinfo();
849 info2 = mallinfo2();
850 // Verify that mallinfo and mallinfo2 are exactly the same.
Colin Crossfdced952022-01-24 18:15:07 -0800851 ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
852 ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
853 ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
854 ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
855 ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
856 ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
857 ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
858 ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
859 ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
860 ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);
Christopher Ferris8248e622021-12-03 13:55:57 -0800861
862 size_t new_allocated = info2.uordblks;
863 if (allocated != new_allocated) {
864 size_t usable_size = malloc_usable_size(ptrs[i]);
865 // Only check if the total got bigger by at least allocation size.
866 // Sometimes the mallinfo2 numbers can go backwards due to compaction
867 // and/or freeing of cached data.
868 if (new_allocated >= allocated + usable_size) {
869 pass = true;
870 break;
871 }
872 }
873 }
874 for (void* ptr : ptrs) {
875 free(ptr);
876 }
877 ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after "
878 << kMaxAllocs << " allocations.";
879 }
880#else
881 GTEST_SKIP() << "glibc is broken";
882#endif
883}
884
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800885template <typename Type>
886void __attribute__((optnone)) VerifyAlignment(Type* floating) {
887 size_t expected_alignment = alignof(Type);
888 if (expected_alignment != 0) {
889 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
890 << "Expected alignment " << expected_alignment << " ptr value " << floating;
891 }
892}
893
// Allocates Type repeatedly via new, malloc, and std::vector and checks
// every resulting pointer is suitably aligned for Type. optnone keeps
// the allocations and checks from being optimized away or reordered.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
936
#if defined(__ANDROID__)
// Makes 100 consecutive allocations of alloc_size bytes and asserts each
// returned pointer is aligned to at least aligned_bytes (a power of two).
// The allocations are kept live for the whole loop so the allocator hands
// out distinct, consecutive blocks. optnone prevents the compiler from
// optimizing the allocations or checks away.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
  // Fix: free the allocations; the original leaked all 100 blocks on every
  // call (and this helper is called once per size in a 1..128 loop).
  // On a fatal assertion above we return early and skip this, but the test
  // has already failed in that case.
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    free(ptrs[i]);
  }
}
#endif
950
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700951void AlignCheck() {
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800952 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
953 // for a discussion of type alignment.
954 ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
955 ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
956 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());
957
958 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
959 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
960 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
961 ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
962 ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
963 ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
964 ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
965 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
966 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
967 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
968 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
969 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
970 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
971 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());
972
973#if defined(__ANDROID__)
974 // On Android, there is a lot of code that expects certain alignments:
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700975 // 1. Allocations of a size that rounds up to a multiple of 16 bytes
976 // must have at least 16 byte alignment.
977 // 2. Allocations of a size that rounds up to a multiple of 8 bytes and
978 // not 16 bytes, are only required to have at least 8 byte alignment.
979 // In addition, on Android clang has been configured for 64 bit such that:
980 // 3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
981 // 4. Allocations > 8 bytes must be aligned to at least 16 bytes.
982 // For 32 bit environments, only the first two requirements must be met.
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800983
984 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
985 // a discussion of this alignment mess. The code below is enforcing
986 // strong-alignment, since who knows what code depends on this behavior now.
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700987 // As mentioned before, for 64 bit this will enforce the higher
988 // requirement since clang expects this behavior on Android now.
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800989 for (size_t i = 1; i <= 128; i++) {
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -0700990#if defined(__LP64__)
991 if (i <= 8) {
992 AndroidVerifyAlignment(i, 8);
993 } else {
994 AndroidVerifyAlignment(i, 16);
995 }
996#else
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800997 size_t rounded = (i + 7) & ~7;
998 if ((rounded % 16) == 0) {
999 AndroidVerifyAlignment(i, 16);
1000 } else {
1001 AndroidVerifyAlignment(i, 8);
1002 }
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001003#endif
Christopher Ferrisf32494c2020-01-08 14:19:10 -08001004 if (::testing::Test::HasFatalFailure()) {
1005 return;
1006 }
1007 }
1008#endif
1009}
1010
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001011TEST(malloc, align_check) {
1012 AlignCheck();
1013}
1014
Christopher Ferris201dcf42020-01-29 13:09:31 -08001015// Jemalloc doesn't pass this test right now, so leave it as disabled.
1016TEST(malloc, DISABLED_alloc_after_fork) {
1017 // Both of these need to be a power of 2.
1018 static constexpr size_t kMinAllocationSize = 8;
1019 static constexpr size_t kMaxAllocationSize = 2097152;
1020
1021 static constexpr size_t kNumAllocatingThreads = 5;
1022 static constexpr size_t kNumForkLoops = 100;
1023
1024 std::atomic_bool stop;
1025
1026 // Create threads that simply allocate and free different sizes.
1027 std::vector<std::thread*> threads;
1028 for (size_t i = 0; i < kNumAllocatingThreads; i++) {
1029 std::thread* t = new std::thread([&stop] {
1030 while (!stop) {
1031 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001032 void* ptr;
1033 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001034 free(ptr);
1035 }
1036 }
1037 });
1038 threads.push_back(t);
1039 }
1040
1041 // Create a thread to fork and allocate.
1042 for (size_t i = 0; i < kNumForkLoops; i++) {
1043 pid_t pid;
1044 if ((pid = fork()) == 0) {
1045 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001046 void* ptr;
1047 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001048 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris201dcf42020-01-29 13:09:31 -08001049 // Make sure we can touch all of the allocation.
1050 memset(ptr, 0x1, size);
1051 ASSERT_LE(size, malloc_usable_size(ptr));
1052 free(ptr);
1053 }
1054 _exit(10);
1055 }
1056 ASSERT_NE(-1, pid);
1057 AssertChildExited(pid, 10);
1058 }
1059
1060 stop = true;
1061 for (auto thread : threads) {
1062 thread->join();
1063 delete thread;
1064 }
1065}
1066
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001067TEST(android_mallopt, error_on_unexpected_option) {
1068#if defined(__BIONIC__)
1069 const int unrecognized_option = -1;
1070 errno = 0;
1071 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
1072 EXPECT_EQ(ENOTSUP, errno);
1073#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001074 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001075#endif
1076}
1077
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001078bool IsDynamic() {
1079#if defined(__LP64__)
1080 Elf64_Ehdr ehdr;
1081#else
1082 Elf32_Ehdr ehdr;
1083#endif
1084 std::string path(android::base::GetExecutablePath());
1085
1086 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
1087 if (fd == -1) {
1088 // Assume dynamic on error.
1089 return true;
1090 }
1091 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
1092 close(fd);
1093 // Assume dynamic in error cases.
1094 return !read_completed || ehdr.e_type == ET_DYN;
1095}
1096
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001097TEST(android_mallopt, init_zygote_child_profiling) {
1098#if defined(__BIONIC__)
1099 // Successful call.
1100 errno = 0;
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001101 if (IsDynamic()) {
1102 EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1103 EXPECT_EQ(0, errno);
1104 } else {
1105 // Not supported in static executables.
1106 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1107 EXPECT_EQ(ENOTSUP, errno);
1108 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001109
1110 // Unexpected arguments rejected.
1111 errno = 0;
1112 char unexpected = 0;
1113 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001114 if (IsDynamic()) {
1115 EXPECT_EQ(EINVAL, errno);
1116 } else {
1117 EXPECT_EQ(ENOTSUP, errno);
1118 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001119#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001120 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001121#endif
1122}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001123
#if defined(__BIONIC__)
// Helper run inside an EXPECT_EXIT child: installs a 128MB allocation
// limit, then calls `func` once with a size below the limit (must
// succeed) and once with a size at the limit (must fail). Exits 0 if
// both checks pass, 1 otherwise, so the parent asserts on the exit code.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif
1137
// Verify that every allocation entry point (calloc, malloc, memalign,
// posix_memalign, aligned_alloc, realloc, and on 32 bit pvalloc/valloc)
// honors the M_SET_ALLOCATION_LIMIT_BYTES limit. Each check runs in a
// forked child via EXPECT_EXIT because the limit can only be set once
// per process.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc/valloc are only available on 32 bit bionic.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1172
1173TEST(android_mallopt, set_allocation_limit_multiple) {
1174#if defined(__BIONIC__)
1175 // Only the first set should work.
1176 size_t limit = 256 * 1024 * 1024;
1177 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1178 limit = 32 * 1024 * 1024;
1179 ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1180#else
Elliott Hughes10907202019-03-27 08:51:02 -07001181 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001182#endif
1183}
1184
#if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

// Allocates kAllocationSize blocks until malloc fails and returns how many
// succeeded before hitting the allocation limit. Returns 0 if the limit was
// never reached within 20 allocations. All allocations are freed before
// returning.
static size_t GetMaxAllocations() {
  size_t max_pointers = 0;
  void* ptrs[20] = {};
  bool hit_limit = false;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(kAllocationSize);
    if (ptrs[i] == nullptr) {
      max_pointers = i;
      hit_limit = true;
      break;
    }
  }
  // Fix: also free the allocations when the limit was never reached; the
  // original freed only `max_pointers` entries, leaking all 20 blocks in
  // that case.
  size_t allocated = hit_limit ? max_pointers : sizeof(ptrs) / sizeof(void*);
  for (size_t i = 0; i < allocated; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}

// Verifies the limit is still in force: exactly max_pointers blocks of
// kAllocationSize must succeed, and the next one must fail.
static void VerifyMaxPointers(size_t max_pointers) {
  // Now verify that we can allocate the same number as before.
  void* ptrs[20];
  for (size_t i = 0; i < max_pointers; i++) {
    ptrs[i] = malloc(kAllocationSize);
    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
  }

  // Make sure the next allocation still fails.
  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
}
#endif
1219
1220TEST(android_mallopt, set_allocation_limit_realloc_increase) {
1221#if defined(__BIONIC__)
1222 size_t limit = 128 * 1024 * 1024;
1223 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1224
1225 size_t max_pointers = GetMaxAllocations();
1226 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1227
1228 void* memory = malloc(10 * 1024 * 1024);
1229 ASSERT_TRUE(memory != nullptr);
1230
1231 // Increase size.
1232 memory = realloc(memory, 20 * 1024 * 1024);
1233 ASSERT_TRUE(memory != nullptr);
1234 memory = realloc(memory, 40 * 1024 * 1024);
1235 ASSERT_TRUE(memory != nullptr);
1236 memory = realloc(memory, 60 * 1024 * 1024);
1237 ASSERT_TRUE(memory != nullptr);
1238 memory = realloc(memory, 80 * 1024 * 1024);
1239 ASSERT_TRUE(memory != nullptr);
1240 // Now push past limit.
1241 memory = realloc(memory, 130 * 1024 * 1024);
1242 ASSERT_TRUE(memory == nullptr);
1243
1244 VerifyMaxPointers(max_pointers);
1245#else
Elliott Hughes10907202019-03-27 08:51:02 -07001246 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001247#endif
1248}
1249
1250TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
1251#if defined(__BIONIC__)
1252 size_t limit = 100 * 1024 * 1024;
1253 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1254
1255 size_t max_pointers = GetMaxAllocations();
1256 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1257
1258 void* memory = malloc(80 * 1024 * 1024);
1259 ASSERT_TRUE(memory != nullptr);
1260
1261 // Decrease size.
1262 memory = realloc(memory, 60 * 1024 * 1024);
1263 ASSERT_TRUE(memory != nullptr);
1264 memory = realloc(memory, 40 * 1024 * 1024);
1265 ASSERT_TRUE(memory != nullptr);
1266 memory = realloc(memory, 20 * 1024 * 1024);
1267 ASSERT_TRUE(memory != nullptr);
1268 memory = realloc(memory, 10 * 1024 * 1024);
1269 ASSERT_TRUE(memory != nullptr);
1270 free(memory);
1271
1272 VerifyMaxPointers(max_pointers);
1273#else
Elliott Hughes10907202019-03-27 08:51:02 -07001274 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001275#endif
1276}
1277
1278TEST(android_mallopt, set_allocation_limit_realloc_free) {
1279#if defined(__BIONIC__)
1280 size_t limit = 100 * 1024 * 1024;
1281 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1282
1283 size_t max_pointers = GetMaxAllocations();
1284 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1285
1286 void* memory = malloc(60 * 1024 * 1024);
1287 ASSERT_TRUE(memory != nullptr);
1288
1289 memory = realloc(memory, 0);
1290 ASSERT_TRUE(memory == nullptr);
1291
1292 VerifyMaxPointers(max_pointers);
1293#else
Elliott Hughes10907202019-03-27 08:51:02 -07001294 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001295#endif
1296}
1297
#if defined(__BIONIC__)
// Thread body: spins until *data (a start flag) is set, then races to
// install an allocation limit. Returns non-null if this thread's
// android_mallopt call succeeded.
static void* SetAllocationLimit(void* data) {
  std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
  while (!go->load()) {
  }
  size_t limit = 500 * 1024 * 1024;
  if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
    return reinterpret_cast<void*>(-1);
  }
  return nullptr;
}

// Runs in a forked child (see the test below): starts several threads
// that all try to set the allocation limit simultaneously and verifies
// that exactly one of them succeeds. Exits 0 on success.
static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Exactly one thread must have won the race to set the limit.
  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  ASSERT_EQ(1U, num_successful);
  exit(0);
}
#endif
1340
// Stress test: fork repeatedly and, in each child, race several threads
// to set the allocation limit, asserting exactly one wins (see
// SetAllocationLimitMultipleThreads above).
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001363
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001364#if defined(__BIONIC__)
Mitch Phillipse6997d52020-11-30 15:04:14 -08001365using Action = android_mallopt_gwp_asan_options_t::Action;
1366TEST(android_mallopt, DISABLED_multiple_enable_gwp_asan) {
1367 android_mallopt_gwp_asan_options_t options;
1368 options.program_name = ""; // Don't infer GWP-ASan options from sysprops.
1369 options.desire = Action::DONT_TURN_ON_UNLESS_OVERRIDDEN;
1370 // GWP-ASan should already be enabled. Trying to enable or disable it should
1371 // always pass.
1372 ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
1373 options.desire = Action::TURN_ON_WITH_SAMPLING;
1374 ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
1375}
1376#endif // defined(__BIONIC__)
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001377
Mitch Phillipse6997d52020-11-30 15:04:14 -08001378TEST(android_mallopt, multiple_enable_gwp_asan) {
1379#if defined(__BIONIC__)
1380 // Always enable GWP-Asan, with default options.
1381 RunGwpAsanTest("*.DISABLED_multiple_enable_gwp_asan");
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001382#else
1383 GTEST_SKIP() << "bionic extension";
1384#endif
1385}
1386
Florian Mayercc61ad82022-08-31 11:43:30 -07001387TEST(android_mallopt, memtag_stack_is_on) {
1388#if defined(__BIONIC__)
1389 bool memtag_stack;
1390 EXPECT_TRUE(android_mallopt(M_MEMTAG_STACK_IS_ON, &memtag_stack, sizeof(memtag_stack)));
1391#else
1392 GTEST_SKIP() << "bionic extension";
1393#endif
1394}
1395
Mitch Phillips9cad8422021-01-20 16:03:27 -08001396void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1397 std::vector<void*> allocs;
1398 constexpr int kMaxBytesToCheckZero = 64;
1399 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1400
1401 for (int i = 0; i < num_iterations; ++i) {
1402 int size = get_alloc_size(i);
1403 allocs.push_back(malloc(size));
1404 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1405 }
1406
1407 for (void* alloc : allocs) {
1408 free(alloc);
1409 }
1410 allocs.clear();
1411
1412 for (int i = 0; i < num_iterations; ++i) {
1413 int size = get_alloc_size(i);
1414 allocs.push_back(malloc(size));
1415 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1416 }
1417
1418 for (void* alloc : allocs) {
1419 free(alloc);
1420 }
1421}
1422
// Verify that M_BIONIC_ZERO_INIT makes scudo return zeroed memory for
// both primary (small) and secondary (large) allocations.
TEST(malloc, zero_init) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  mallopt(M_BIONIC_ZERO_INIT, 1);

  // Test using a block of 4K small (1-32 byte) allocations.
  TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
    return 1 + iteration % 32;
  });

  // Also test large allocations that land in the scudo secondary, as this is
  // the only part of Scudo that's changed by enabling zero initialization with
  // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
  // release secondary allocations back to the OS) was modified to 0ms/1ms by
  // mallopt_decay. Ensure that we delay for at least a second before releasing
  // pages to the OS in order to avoid implicit zeroing by the kernel.
  mallopt(M_DECAY_TIME, 1000);
  TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
    return 1 << (19 + iteration % 4);
  });

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1454
// Note that MTE is enabled on cc_tests on devices that support MTE.
// Disabling heap tagging via mallopt must apply process-wide, including
// to threads created before the mallopt call.
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  // The thread blocks on the semaphore, then reports its tagged-address
  // control state after the main thread has disabled heap tagging.
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  // Tag checking must now be off on the current thread...
  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(static_cast<unsigned long>(PR_MTE_TCF_NONE), my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // ...and on the pre-existing thread as well.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne2659d7b2021-03-05 13:31:41 -08001489
// For apps targeting SDK <= 29, scudo must tolerate a small read past the
// end of a large allocation (legacy "allocation slack" behavior). The
// out-of-bounds read below is intentional and must not crash.
TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end of
  // a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  volatile char *vp = p.get();
  // Deliberate one-past-the-end read; volatile keeps it from being elided.
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Evgenii Stepanovf0d7a342021-11-16 17:34:39 -08001510
1511// Regression test for b/206701345 -- scudo bug, MTE only.
1512// Fix: https://reviews.llvm.org/D105261
1513// Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
1514TEST(malloc, realloc_mte_crash_b206701345) {
1515 // We want to hit in-place realloc at the very end of an mmap-ed region. Not
1516 // all size classes allow such placement - mmap size has to be divisible by
1517 // the block size. At the time of writing this could only be reproduced with
1518 // 64 byte size class (i.e. 48 byte allocations), but that may change in the
1519 // future. Try several different classes at the lower end.
1520 std::vector<void*> ptrs(10000);
1521 for (int i = 1; i < 32; ++i) {
1522 size_t sz = 16 * i - 1;
1523 for (void*& p : ptrs) {
1524 p = realloc(malloc(sz), sz + 1);
1525 }
1526
1527 for (void* p : ptrs) {
1528 free(p);
1529 }
1530 }
1531}
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001532
1533void VerifyAllocationsAreZero(std::function<void*(size_t)> alloc_func, std::string function_name,
1534 std::vector<size_t>& test_sizes, size_t max_allocations) {
1535 // Vector of zero'd data used for comparisons. Make it twice the largest size.
1536 std::vector<char> zero(test_sizes.back() * 2, 0);
1537
1538 SCOPED_TRACE(testing::Message() << function_name << " failed to zero memory");
1539
1540 for (size_t test_size : test_sizes) {
1541 std::vector<void*> ptrs(max_allocations);
1542 for (size_t i = 0; i < ptrs.size(); i++) {
1543 SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
1544 ptrs[i] = alloc_func(test_size);
1545 ASSERT_TRUE(ptrs[i] != nullptr);
1546 size_t alloc_size = malloc_usable_size(ptrs[i]);
1547 ASSERT_LE(alloc_size, zero.size());
1548 ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
1549
1550 // Set the memory to non-zero to make sure if the pointer
1551 // is reused it's still zero.
1552 memset(ptrs[i], 0xab, alloc_size);
1553 }
1554 // Free the pointers.
1555 for (size_t i = 0; i < ptrs.size(); i++) {
1556 free(ptrs[i]);
1557 }
1558 for (size_t i = 0; i < ptrs.size(); i++) {
1559 SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
1560 ptrs[i] = malloc(test_size);
1561 ASSERT_TRUE(ptrs[i] != nullptr);
1562 size_t alloc_size = malloc_usable_size(ptrs[i]);
1563 ASSERT_LE(alloc_size, zero.size());
1564 ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
1565 }
1566 // Free all of the pointers later to maximize the chance of reusing from
1567 // the first loop.
1568 for (size_t i = 0; i < ptrs.size(); i++) {
1569 free(ptrs[i]);
1570 }
1571 }
1572}
1573
1574// Verify that small and medium allocations are always zero.
1575TEST(malloc, zeroed_allocations_small_medium_sizes) {
1576#if !defined(__BIONIC__)
1577 GTEST_SKIP() << "Only valid on bionic";
1578#endif
1579
1580 if (IsLowRamDevice()) {
1581 GTEST_SKIP() << "Skipped on low memory devices.";
1582 }
1583
1584 constexpr size_t kMaxAllocations = 1024;
1585 std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
1586 VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
1587 kMaxAllocations);
1588
1589 VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
1590 test_sizes, kMaxAllocations);
1591
1592 VerifyAllocationsAreZero(
1593 [](size_t size) -> void* {
1594 void* ptr;
1595 if (posix_memalign(&ptr, 64, size) == 0) {
1596 return ptr;
1597 }
1598 return nullptr;
1599 },
1600 "posix_memalign", test_sizes, kMaxAllocations);
1601}
1602
1603// Verify that large allocations are always zero.
1604TEST(malloc, zeroed_allocations_large_sizes) {
1605#if !defined(__BIONIC__)
1606 GTEST_SKIP() << "Only valid on bionic";
1607#endif
1608
1609 if (IsLowRamDevice()) {
1610 GTEST_SKIP() << "Skipped on low memory devices.";
1611 }
1612
1613 constexpr size_t kMaxAllocations = 20;
1614 std::vector<size_t> test_sizes = {1000000, 2000000, 3000000, 4000000};
1615 VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
1616 kMaxAllocations);
1617
1618 VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
1619 test_sizes, kMaxAllocations);
1620
1621 VerifyAllocationsAreZero(
1622 [](size_t size) -> void* {
1623 void* ptr;
1624 if (posix_memalign(&ptr, 64, size) == 0) {
1625 return ptr;
1626 }
1627 return nullptr;
1628 },
1629 "posix_memalign", test_sizes, kMaxAllocations);
1630}
1631
// Verify that when realloc grows an allocation, the newly-exposed bytes past
// the original usable size are zero, even when the grown block reuses memory
// that was previously dirtied.
TEST(malloc, zeroed_allocations_realloc) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  // Vector of zero'd data used for comparisons.
  constexpr size_t kMaxMemorySize = 131072;
  std::vector<char> zero(kMaxMemorySize, 0);

  constexpr size_t kMaxAllocations = 1024;
  std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
  // Do a number of allocations and set them to non-zero.
  // This primes the allocator with freed blocks full of 0xab, so a later
  // realloc that grows into recycled memory will expose any failure to zero
  // the newly-added region.
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      // Set the memory to non-zero to make sure if the pointer
      // is reused it's still zero.
      memset(ptrs[i], 0xab, malloc_usable_size(ptrs[i]));
    }
    // Free the pointers.
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }

  // Do the reallocs to a larger size and verify the rest of the allocation
  // is zero.
  constexpr size_t kInitialSize = 8;
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(kInitialSize);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t orig_alloc_size = malloc_usable_size(ptrs[i]);

      // Grow the allocation; only the bytes beyond the original usable size
      // need to be (and are) checked for zero.
      ptrs[i] = realloc(ptrs[i], test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_alloc_size = malloc_usable_size(ptrs[i]);
      char* ptr = reinterpret_cast<char*>(ptrs[i]);
      ASSERT_EQ(0, memcmp(&ptr[orig_alloc_size], zero.data(), new_alloc_size - orig_alloc_size))
          << "realloc from " << kInitialSize << " to size " << test_size << " at iteration " << i;
    }
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }
}