/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <elf.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <thread>
#include <vector>

#include <tinyxml2.h>

#include <android-base/file.h>

#include "utils.h"

#if defined(__BIONIC__)

#include "SignalUtils.h"
#include "dlext_private.h"

#include "platform/bionic/malloc.h"
#include "platform/bionic/mte.h"
#include "platform/bionic/reserved_signals.h"
#include "private/bionic_config.h"

#define HAVE_REALLOCARRAY 1

#elif defined(__GLIBC__)

#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)

#elif defined(MUSL)

#define HAVE_REALLOCARRAY 1

#endif
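
// HAVE_REALLOCARRAY records whether the C library under test provides
// reallocarray(): always on bionic and musl, and on glibc only from 2.26 on.
// The reallocarray tests further down are skipped when it is 0.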

TEST(malloc, malloc_std) {
  // Simple malloc test.
  void *ptr = malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  free(ptr);
}

TEST(malloc, malloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, malloc(SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
}

TEST(malloc, calloc_std) {
  // Simple calloc test.
  size_t alloc_len = 100;
  char *ptr = (char *)calloc(1, alloc_len);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(alloc_len, malloc_usable_size(ptr));
  for (size_t i = 0; i < alloc_len; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_mem_init_disabled) {
#if defined(__BIONIC__)
  // calloc should still zero memory if mem-init is disabled.
  // With jemalloc the mallopts will fail but that shouldn't affect the
  // execution of the test.
  mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
  size_t alloc_len = 100;
  char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
  for (size_t i = 0; i < alloc_len; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
  mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(malloc, calloc_illegal) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, calloc(-1, 100));
  ASSERT_EQ(ENOMEM, errno);
}

TEST(malloc, calloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  errno = 0;
  ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  errno = 0;
  ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  errno = 0;
  ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
  ASSERT_EQ(ENOMEM, errno);
}

TEST(malloc, memalign_multiple) {
  SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
  // Memalign test where the alignment is any value.
  for (size_t i = 0; i <= 12; i++) {
    for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
      char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
      ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
      ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
      ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
          << "Failed at alignment " << alignment;
      free(ptr);
    }
  }
}

TEST(malloc, memalign_overflow) {
  SKIP_WITH_HWASAN;
  ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
}

TEST(malloc, memalign_non_power2) {
  SKIP_WITH_HWASAN;
  void* ptr;
  for (size_t align = 0; align <= 256; align++) {
    ptr = memalign(align, 1024);
    ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
    free(ptr);
  }
}

TEST(malloc, memalign_realloc) {
  // Memalign and then realloc the pointer a couple of times.
  for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
    char *ptr = (char*)memalign(alignment, 100);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(100U, malloc_usable_size(ptr));
    ASSERT_EQ(0U, (intptr_t)ptr % alignment);
    memset(ptr, 0x23, 100);

    ptr = (char*)realloc(ptr, 200);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(200U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 100; i++) {
      ASSERT_EQ(0x23, ptr[i]);
    }
    memset(ptr, 0x45, 200);

    ptr = (char*)realloc(ptr, 300);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(300U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 200; i++) {
      ASSERT_EQ(0x45, ptr[i]);
    }
    memset(ptr, 0x67, 300);

    ptr = (char*)realloc(ptr, 250);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(250U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 250; i++) {
      ASSERT_EQ(0x67, ptr[i]);
    }
    free(ptr);
  }
}

TEST(malloc, malloc_realloc_larger) {
  // Realloc to a larger size, malloc is used for the original allocation.
  char *ptr = (char *)malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  memset(ptr, 67, 100);

  ptr = (char *)realloc(ptr, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, malloc_realloc_smaller) {
  // Realloc to a smaller size, malloc is used for the original allocation.
  char *ptr = (char *)malloc(200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 67, 200);

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, malloc_multiple_realloc) {
  // Multiple reallocs, malloc is used for the original allocation.
  char *ptr = (char *)malloc(200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 0x23, 200);

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  memset(ptr, 0x23, 150);

  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_realloc_larger) {
  // Realloc to a larger size, calloc is used for the original allocation.
  char *ptr = (char *)calloc(1, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_realloc_smaller) {
  // Realloc to a smaller size, calloc is used for the original allocation.
  char *ptr = (char *)calloc(1, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_multiple_realloc) {
  // Multiple reallocs, calloc is used for the original allocation.
  char *ptr = (char *)calloc(1, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  memset(ptr, 0, 150);

  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, realloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  void* ptr = malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  errno = 0;
  ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  free(ptr);
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif

TEST(malloc, pvalloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* ptr = pvalloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
  ASSERT_LE(pagesize, malloc_usable_size(ptr));
  free(ptr);
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}

TEST(malloc, pvalloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}

TEST(malloc, valloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* ptr = valloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
  free(ptr);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}

TEST(malloc, valloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  ASSERT_EQ(nullptr, valloc(SIZE_MAX));
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}

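// malloc_info(0, fp) writes the allocator's statistics to fp as XML. The two
// tests below parse that output with tinyxml2 and accept the three schemas
// bionic can emit: "jemalloc-1" (per-arena <heap> elements with bin details),
// "scudo-1" (flat <alloc size=... count=...> elements), and "debug-malloc-1"
// (whose contents are not checked).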
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}

TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}

TEST(malloc, calloc_usable_size) {
  for (size_t size = 1; size <= 2048; size++) {
    void* pointer = malloc(size);
    ASSERT_TRUE(pointer != nullptr);
    memset(pointer, 0xeb, malloc_usable_size(pointer));
    free(pointer);

    // We should get back a previous allocation that has been set to non-zero.
    // If calloc does not zero out all of the data, this will fail.
    uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
    ASSERT_TRUE(zero_mem != nullptr);
    size_t usable_size = malloc_usable_size(zero_mem);
    for (size_t i = 0; i < usable_size; i++) {
      ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
    }
    free(zero_mem);
  }
}

TEST(malloc, malloc_0) {
  void* p = malloc(0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, calloc_0_0) {
  void* p = calloc(0, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, calloc_0_1) {
  void* p = calloc(0, 1);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, calloc_1_0) {
  void* p = calloc(1, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, realloc_nullptr_0) {
  // realloc(nullptr, size) is actually malloc(size).
  void* p = realloc(nullptr, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, realloc_0) {
  void* p = malloc(1024);
  ASSERT_TRUE(p != nullptr);
  // realloc(p, 0) is actually free(p).
  void* p2 = realloc(p, 0);
  ASSERT_TRUE(p2 == nullptr);
}

constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}

TEST(malloc, mallopt_smoke) {
#if !defined(MUSL)
  errno = 0;
  ASSERT_EQ(0, mallopt(-1000, 1));
  // mallopt doesn't set errno.
  ASSERT_EQ(0, errno);
#else
  GTEST_SKIP() << "musl doesn't have mallopt";
#endif
}

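// M_DECAY_TIME controls how long the allocator waits before releasing unused
// pages back to the OS (the zero_init test below describes the same knob).
// This test only checks that toggling the value between 0 and 1 is accepted.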
TEST(malloc, mallopt_decay) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

#if defined(__BIONIC__)
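// Parses the malloc_info() XML (as in the malloc_info test above) and reports
// whether the running allocator identifies itself as scudo. Used to skip the
// scudo-specific tests when another allocator is in use.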
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif

TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
  ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
  ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

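// reallocarray(p, nmemb, size) is realloc(p, nmemb * size) with an overflow
// check on the multiplication. The first test below picks nmemb/size values
// whose product wraps to a small number and expects ENOMEM rather than a
// "successful" tiny allocation.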
TEST(malloc, reallocarray_overflow) {
#if HAVE_REALLOCARRAY
  // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
  size_t a = static_cast<size_t>(INTPTR_MIN + 4);
  size_t b = 2;

  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
  ASSERT_EQ(ENOMEM, errno);

  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
  ASSERT_EQ(ENOMEM, errno);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}

TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  void* p = reallocarray(nullptr, 2, 32);
  ASSERT_TRUE(p != nullptr);
  ASSERT_GE(malloc_usable_size(p), 64U);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}

TEST(malloc, mallinfo) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations count the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least the allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}

template <typename Type>
void __attribute__((optnone)) VerifyAlignment(Type* floating) {
  size_t expected_alignment = alignof(Type);
  if (expected_alignment != 0) {
    ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
        << "Expected alignment " << expected_alignment << " ptr value " << floating;
  }
}

template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}

#if defined(__ANDROID__)
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
}
#endif

TEST(malloc, align_check) {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  //  - Allocations of a size that rounds up to a multiple of 16 bytes
  //    must have at least 16 byte alignment.
  //  - Allocations of a size that rounds up to a multiple of 8 bytes and
  //    not 16 bytes, are only required to have at least 8 byte alignment.
  // This is regardless of whether it is in a 32 bit or 64 bit environment.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  for (size_t i = 1; i <= 128; i++) {
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}

// Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc, DISABLED_alloc_after_fork) {
  // Both of these need to be a power of 2.
  static constexpr size_t kMinAllocationSize = 8;
  static constexpr size_t kMaxAllocationSize = 2097152;

  static constexpr size_t kNumAllocatingThreads = 5;
  static constexpr size_t kNumForkLoops = 100;

  std::atomic_bool stop;

  // Create threads that simply allocate and free different sizes.
  std::vector<std::thread*> threads;
  for (size_t i = 0; i < kNumAllocatingThreads; i++) {
    std::thread* t = new std::thread([&stop] {
      while (!stop) {
        for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
          void* ptr;
          DoNotOptimize(ptr = malloc(size));
          free(ptr);
        }
      }
    });
    threads.push_back(t);
  }

  // Fork repeatedly and allocate in each child while the threads above keep allocating.
  for (size_t i = 0; i < kNumForkLoops; i++) {
    pid_t pid;
    if ((pid = fork()) == 0) {
      for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
        void* ptr;
        DoNotOptimize(ptr = malloc(size));
        ASSERT_TRUE(ptr != nullptr);
        // Make sure we can touch all of the allocation.
        memset(ptr, 0x1, size);
        ASSERT_LE(size, malloc_usable_size(ptr));
        free(ptr);
      }
      _exit(10);
    }
    ASSERT_NE(-1, pid);
    AssertChildExited(pid, 10);
  }

  stop = true;
  for (auto thread : threads) {
    thread->join();
    delete thread;
  }
}

TEST(android_mallopt, error_on_unexpected_option) {
#if defined(__BIONIC__)
  const int unrecognized_option = -1;
  errno = 0;
  EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
  EXPECT_EQ(ENOTSUP, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

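// Reads this executable's ELF header and reports whether it was built as a
// dynamic executable (ET_DYN). Some android_mallopt() options are only
// supported in dynamic executables, so the tests below branch on this.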
bool IsDynamic() {
#if defined(__LP64__)
  Elf64_Ehdr ehdr;
#else
  Elf32_Ehdr ehdr;
#endif
  std::string path(android::base::GetExecutablePath());

  int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
  if (fd == -1) {
    // Assume dynamic on error.
    return true;
  }
  bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
  close(fd);
  // Assume dynamic in error cases.
  return !read_completed || ehdr.e_type == ET_DYN;
}

TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(0, errno);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(ENOTSUP, errno);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    EXPECT_EQ(EINVAL, errno);
  } else {
    EXPECT_EQ(ENOTSUP, errno);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

#if defined(__BIONIC__)
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif

TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

#if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

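// Allocates kAllocationSize chunks until the limit set via
// M_SET_ALLOCATION_LIMIT_BYTES is hit, frees them all, and returns how many
// allocations succeeded.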
static size_t GetMaxAllocations() {
  size_t max_pointers = 0;
  void* ptrs[20];
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(kAllocationSize);
    if (ptrs[i] == nullptr) {
      max_pointers = i;
      break;
    }
  }
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}

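// Checks that the limit is still in force: the same number of allocations
// must succeed as before, and the next one must still fail.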
static void VerifyMaxPointers(size_t max_pointers) {
  // Now verify that we can allocate the same number as before.
  void* ptrs[20];
  for (size_t i = 0; i < max_pointers; i++) {
    ptrs[i] = malloc(kAllocationSize);
    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
  }

  // Make sure the next allocation still fails.
  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
}
#endif

TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Increase size.
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  // Now push past limit.
  memory = realloc(memory, 130 * 1024 * 1024);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Decrease size.
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

#if defined(__BIONIC__)
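// Thread body for the race test below: each thread waits on the go flag and
// then tries to set the allocation limit. Exactly one of the concurrent
// M_SET_ALLOCATION_LIMIT_BYTES calls is expected to succeed.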
static void* SetAllocationLimit(void* data) {
  std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
  while (!go->load()) {
  }
  size_t limit = 500 * 1024 * 1024;
  if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
    return reinterpret_cast<void*>(-1);
  }
  return nullptr;
}

static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  ASSERT_EQ(1U, num_successful);
  exit(0);
}
#endif

TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because error messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

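// Allocates num_iterations blocks, dirties the first bytes of each, frees
// them all, then allocates the same sizes again and expects the new blocks to
// come back zeroed. Used by the zero_init test after enabling
// M_BIONIC_ZERO_INIT.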
void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
  std::vector<void*> allocs;
  constexpr int kMaxBytesToCheckZero = 64;
  const char kBlankMemory[kMaxBytesToCheckZero] = {};

  for (int i = 0; i < num_iterations; ++i) {
    int size = get_alloc_size(i);
    allocs.push_back(malloc(size));
    memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
  }

  for (void* alloc : allocs) {
    free(alloc);
  }
  allocs.clear();

  for (int i = 0; i < num_iterations; ++i) {
    int size = get_alloc_size(i);
    allocs.push_back(malloc(size));
    ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
  }

  for (void* alloc : allocs) {
    free(alloc);
  }
}

TEST(malloc, zero_init) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  mallopt(M_BIONIC_ZERO_INIT, 1);

  // Test using a block of 4K small (1-32 byte) allocations.
  TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
    return 1 + iteration % 32;
  });

  // Also test large allocations that land in the scudo secondary, as this is
  // the only part of Scudo that's changed by enabling zero initialization with
  // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
  // release secondary allocations back to the OS) was modified to 0ms/1ms by
  // mallopt_decay. Ensure that we delay for at least a second before releasing
  // pages to the OS in order to avoid implicit zeroing by the kernel.
  mallopt(M_DECAY_TIME, 1000);
  TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
    return 1 << (19 + iteration % 4);
  });

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

// Note that MTE is enabled on cc_tests on devices that support MTE.
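// disable_mte checks that switching the heap tagging level to
// M_HEAP_TAGGING_LEVEL_NONE via mallopt() also clears the MTE tag check mode
// (as reported by PR_GET_TAGGED_ADDR_CTRL) for other, already-running
// threads, not just the calling thread.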
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(PR_MTE_TCF_NONE, my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end of
  // a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  volatile char *vp = p.get();
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}