blob: 30da5c32f67f8611c1090394c22f138ba8d6ff46 [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
30#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080031#include <sys/types.h>
32#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070033#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070034
Mitch Phillips9cad8422021-01-20 16:03:27 -080035#include <algorithm>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080036#include <atomic>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080037#include <thread>
Mitch Phillips9cad8422021-01-20 16:03:27 -080038#include <vector>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080039
Dan Albert4caa1f02014-08-20 09:16:57 -070040#include <tinyxml2.h>
41
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080042#include <android-base/file.h>
43
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080044#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000045
Elliott Hughesb1770852018-09-18 12:52:42 -070046#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080047
Peter Collingbourne45819dd2020-01-09 11:00:43 -080048#include "SignalUtils.h"
Peter Collingbourne2659d7b2021-03-05 13:31:41 -080049#include "dlext_private.h"
Peter Collingbourne45819dd2020-01-09 11:00:43 -080050
Christopher Ferrisb874c332020-01-21 16:39:05 -080051#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070052#include "platform/bionic/mte.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080053#include "platform/bionic/reserved_signals.h"
54#include "private/bionic_config.h"
55
Elliott Hughesb1770852018-09-18 12:52:42 -070056#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080057
Elliott Hughesb1770852018-09-18 12:52:42 -070058#else
Christopher Ferrisb874c332020-01-21 16:39:05 -080059
Elliott Hughesb1770852018-09-18 12:52:42 -070060#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080061
Elliott Hughesb1770852018-09-18 12:52:42 -070062#endif
63
Christopher Ferris885f3b92013-05-21 17:48:01 -070064TEST(malloc, malloc_std) {
65 // Simple malloc test.
66 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070067 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070068 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070069 free(ptr);
70}
71
Christopher Ferrisa4037802014-06-09 19:14:11 -070072TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080073 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070074 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070075 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -070076 ASSERT_EQ(ENOMEM, errno);
77}
78
Christopher Ferris885f3b92013-05-21 17:48:01 -070079TEST(malloc, calloc_std) {
80 // Simple calloc test.
81 size_t alloc_len = 100;
82 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070083 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070084 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
85 for (size_t i = 0; i < alloc_len; i++) {
86 ASSERT_EQ(0, ptr[i]);
87 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070088 free(ptr);
89}
90
Peter Collingbourne978eb162020-09-21 15:26:02 -070091TEST(malloc, calloc_mem_init_disabled) {
92#if defined(__BIONIC__)
93 // calloc should still zero memory if mem-init is disabled.
94 // With jemalloc the mallopts will fail but that shouldn't affect the
95 // execution of the test.
96 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
97 size_t alloc_len = 100;
98 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
99 for (size_t i = 0; i < alloc_len; i++) {
100 ASSERT_EQ(0, ptr[i]);
101 }
102 free(ptr);
103 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
104#else
105 GTEST_SKIP() << "bionic-only test";
106#endif
107}
108
Christopher Ferrisa4037802014-06-09 19:14:11 -0700109TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800110 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700111 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700112 ASSERT_EQ(nullptr, calloc(-1, 100));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700113 ASSERT_EQ(ENOMEM, errno);
114}
115
116TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800117 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700118 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700119 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700120 ASSERT_EQ(ENOMEM, errno);
121 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700122 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700123 ASSERT_EQ(ENOMEM, errno);
124 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700125 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700126 ASSERT_EQ(ENOMEM, errno);
127 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700128 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700129 ASSERT_EQ(ENOMEM, errno);
130}
131
Christopher Ferris885f3b92013-05-21 17:48:01 -0700132TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800133 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700134 // Memalign test where the alignment is any value.
135 for (size_t i = 0; i <= 12; i++) {
136 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700137 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700138 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700139 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
140 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
141 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700142 free(ptr);
143 }
144 }
145}
146
Christopher Ferrisa4037802014-06-09 19:14:11 -0700147TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800148 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700149 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700150}
151
152TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800153 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700154 void* ptr;
155 for (size_t align = 0; align <= 256; align++) {
156 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700157 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700158 free(ptr);
159 }
160}
161
Christopher Ferris885f3b92013-05-21 17:48:01 -0700162TEST(malloc, memalign_realloc) {
163 // Memalign and then realloc the pointer a couple of times.
164 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
165 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700166 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700167 ASSERT_LE(100U, malloc_usable_size(ptr));
168 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
169 memset(ptr, 0x23, 100);
170
171 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700172 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700173 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700174 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700175 for (size_t i = 0; i < 100; i++) {
176 ASSERT_EQ(0x23, ptr[i]);
177 }
178 memset(ptr, 0x45, 200);
179
180 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700181 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700182 ASSERT_LE(300U, malloc_usable_size(ptr));
183 for (size_t i = 0; i < 200; i++) {
184 ASSERT_EQ(0x45, ptr[i]);
185 }
186 memset(ptr, 0x67, 300);
187
188 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700189 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700190 ASSERT_LE(250U, malloc_usable_size(ptr));
191 for (size_t i = 0; i < 250; i++) {
192 ASSERT_EQ(0x67, ptr[i]);
193 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700194 free(ptr);
195 }
196}
197
198TEST(malloc, malloc_realloc_larger) {
199 // Realloc to a larger size, malloc is used for the original allocation.
200 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700201 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700202 ASSERT_LE(100U, malloc_usable_size(ptr));
203 memset(ptr, 67, 100);
204
205 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700206 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700207 ASSERT_LE(200U, malloc_usable_size(ptr));
208 for (size_t i = 0; i < 100; i++) {
209 ASSERT_EQ(67, ptr[i]);
210 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700211 free(ptr);
212}
213
214TEST(malloc, malloc_realloc_smaller) {
215 // Realloc to a smaller size, malloc is used for the original allocation.
216 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700217 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700218 ASSERT_LE(200U, malloc_usable_size(ptr));
219 memset(ptr, 67, 200);
220
221 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700222 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700223 ASSERT_LE(100U, malloc_usable_size(ptr));
224 for (size_t i = 0; i < 100; i++) {
225 ASSERT_EQ(67, ptr[i]);
226 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700227 free(ptr);
228}
229
TEST(malloc, malloc_multiple_realloc) {
  // Multiple reallocs, malloc is used for the original allocation.
  // Shrinks then grows the block, verifying after every step that the
  // surviving prefix still holds the 0x23 fill pattern.
  char *ptr = (char *)malloc(200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 0x23, 200);

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  // Only the first 50 bytes are guaranteed to survive the earlier shrink.
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  memset(ptr, 0x23, 150);

  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  free(ptr);
}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700267
Christopher Ferris885f3b92013-05-21 17:48:01 -0700268TEST(malloc, calloc_realloc_larger) {
269 // Realloc to a larger size, calloc is used for the original allocation.
270 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700271 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700272 ASSERT_LE(100U, malloc_usable_size(ptr));
273
274 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700275 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700276 ASSERT_LE(200U, malloc_usable_size(ptr));
277 for (size_t i = 0; i < 100; i++) {
278 ASSERT_EQ(0, ptr[i]);
279 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700280 free(ptr);
281}
282
283TEST(malloc, calloc_realloc_smaller) {
284 // Realloc to a smaller size, calloc is used for the original allocation.
285 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700286 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700287 ASSERT_LE(200U, malloc_usable_size(ptr));
288
289 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700290 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700291 ASSERT_LE(100U, malloc_usable_size(ptr));
292 for (size_t i = 0; i < 100; i++) {
293 ASSERT_EQ(0, ptr[i]);
294 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700295 free(ptr);
296}
297
TEST(malloc, calloc_multiple_realloc) {
  // Multiple reallocs, calloc is used for the original allocation.
  // Shrinks then grows the block, verifying after every step that the
  // surviving prefix is still zero.
  char *ptr = (char *)calloc(1, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  // Only the first 50 bytes are guaranteed to survive the earlier shrink.
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  memset(ptr, 0, 150);

  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700334
Christopher Ferrisa4037802014-06-09 19:14:11 -0700335TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800336 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700337 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700338 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700339 ASSERT_EQ(ENOMEM, errno);
340 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700341 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700342 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700343 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700344 ASSERT_EQ(ENOMEM, errno);
345 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700346}
347
Dan Alberte5fdaa42014-06-14 01:04:31 +0000348#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
349extern "C" void* pvalloc(size_t);
350extern "C" void* valloc(size_t);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700351#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000352
Christopher Ferrisa4037802014-06-09 19:14:11 -0700353TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700354#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700355 size_t pagesize = sysconf(_SC_PAGESIZE);
356 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700357 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700358 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
359 ASSERT_LE(pagesize, malloc_usable_size(ptr));
360 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700361#else
362 GTEST_SKIP() << "pvalloc not supported.";
363#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700364}
365
366TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700367#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700368 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700369#else
370 GTEST_SKIP() << "pvalloc not supported.";
371#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700372}
373
374TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700375#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700376 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700377 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700378 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700379 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
380 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700381#else
382 GTEST_SKIP() << "valloc not supported.";
383#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700384}
385
386TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700387#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700388 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700389#else
390 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000391#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700392}
Dan Albert4caa1f02014-08-20 09:16:57 -0700393
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info
  // Dump malloc_info() into a temp file, then parse the XML and verify its
  // structure matches the schema of the allocator that produced it.

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fdopen took ownership of the fd; avoid a double close.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // Root element is <malloc version="...">; the version attribute identifies
  // the allocator and thus which child schema to expect.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: one <heap> element per arena, each with aggregate totals
    // plus nested per-bin statistics.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin->NextSiblingElement()) {
        // Siblings may include non-<bin> elements; only validate real bins.
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size=... count=...> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800460
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  // Cross-check the totals reported by malloc_info() against mallinfo().
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fdopen took ownership of the fd; avoid a double close.
  ASSERT_TRUE(fp != nullptr);
  // Sample mallinfo immediately before and after the malloc_info call so the
  // XML totals can be bracketed between the two (malloc_info itself allocates).
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: sum the per-arena allocation totals from each <heap>.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    // scudo: sum size*count over every <alloc> element.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
528
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800529TEST(malloc, calloc_usable_size) {
530 for (size_t size = 1; size <= 2048; size++) {
531 void* pointer = malloc(size);
532 ASSERT_TRUE(pointer != nullptr);
533 memset(pointer, 0xeb, malloc_usable_size(pointer));
534 free(pointer);
535
536 // We should get a previous pointer that has been set to non-zero.
537 // If calloc does not zero out all of the data, this will fail.
538 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
539 ASSERT_TRUE(pointer != nullptr);
540 size_t usable_size = malloc_usable_size(zero_mem);
541 for (size_t i = 0; i < usable_size; i++) {
542 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
543 }
544 free(zero_mem);
545 }
546}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800547
548TEST(malloc, malloc_0) {
549 void* p = malloc(0);
550 ASSERT_TRUE(p != nullptr);
551 free(p);
552}
553
554TEST(malloc, calloc_0_0) {
555 void* p = calloc(0, 0);
556 ASSERT_TRUE(p != nullptr);
557 free(p);
558}
559
560TEST(malloc, calloc_0_1) {
561 void* p = calloc(0, 1);
562 ASSERT_TRUE(p != nullptr);
563 free(p);
564}
565
566TEST(malloc, calloc_1_0) {
567 void* p = calloc(1, 0);
568 ASSERT_TRUE(p != nullptr);
569 free(p);
570}
571
572TEST(malloc, realloc_nullptr_0) {
573 // realloc(nullptr, size) is actually malloc(size).
574 void* p = realloc(nullptr, 0);
575 ASSERT_TRUE(p != nullptr);
576 free(p);
577}
578
579TEST(malloc, realloc_0) {
580 void* p = malloc(1024);
581 ASSERT_TRUE(p != nullptr);
582 // realloc(p, 0) is actually free(p).
583 void* p2 = realloc(p, 0);
584 ASSERT_TRUE(p2 == nullptr);
585}
Christopher Ferris72df6702016-02-11 15:51:31 -0800586
// Number of interleaved allocations per data type in verify_alignment.
constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    // Store and read back to prove the memory is writable at this alignment.
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700656
657TEST(malloc, mallopt_smoke) {
658 errno = 0;
659 ASSERT_EQ(0, mallopt(-1000, 1));
660 // mallopt doesn't set errno.
661 ASSERT_EQ(0, errno);
662}
Elliott Hughesb1770852018-09-18 12:52:42 -0700663
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800664TEST(malloc, mallopt_decay) {
665#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800666 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800667 errno = 0;
668 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
669 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
670 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
671 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
672#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800673 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800674#endif
675}
676
677TEST(malloc, mallopt_purge) {
678#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800679 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800680 errno = 0;
681 ASSERT_EQ(1, mallopt(M_PURGE, 0));
682#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800683 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800684#endif
685}
686
#if defined(__BIONIC__)
// Dumps malloc_info() to a temp file, parses the XML, and reports via
// *allocator_scudo whether the native allocator identifies itself as scudo
// (root element <malloc version="scudo-1">). Uses ASSERT_* internally, so it
// must only be called from a test body.
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fdopen took ownership of the fd; avoid a double close.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif
710
711TEST(malloc, mallopt_scudo_only_options) {
712#if defined(__BIONIC__)
713 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
714 bool allocator_scudo;
715 GetAllocatorVersion(&allocator_scudo);
716 if (!allocator_scudo) {
717 GTEST_SKIP() << "scudo allocator only test";
718 }
719 ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
720 ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
721 ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
722#else
723 GTEST_SKIP() << "bionic-only test";
724#endif
725}
726
Elliott Hughesb1770852018-09-18 12:52:42 -0700727TEST(malloc, reallocarray_overflow) {
728#if HAVE_REALLOCARRAY
729 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
730 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
731 size_t b = 2;
732
733 errno = 0;
734 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
735 ASSERT_EQ(ENOMEM, errno);
736
737 errno = 0;
738 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
739 ASSERT_EQ(ENOMEM, errno);
740#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800741 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700742#endif
743}
744
745TEST(malloc, reallocarray) {
746#if HAVE_REALLOCARRAY
747 void* p = reallocarray(nullptr, 2, 32);
748 ASSERT_TRUE(p != nullptr);
749 ASSERT_GE(malloc_usable_size(p), 64U);
750#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800751 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700752#endif
753}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800754
755TEST(malloc, mallinfo) {
756#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800757 SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800758 static size_t sizes[] = {
759 8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
760 };
761
762 constexpr static size_t kMaxAllocs = 50;
763
764 for (size_t size : sizes) {
765 // If some of these allocations are stuck in a thread cache, then keep
766 // looping until we make an allocation that changes the total size of the
767 // memory allocated.
768 // jemalloc implementations counts the thread cache allocations against
769 // total memory allocated.
770 void* ptrs[kMaxAllocs] = {};
771 bool pass = false;
772 for (size_t i = 0; i < kMaxAllocs; i++) {
773 size_t allocated = mallinfo().uordblks;
774 ptrs[i] = malloc(size);
775 ASSERT_TRUE(ptrs[i] != nullptr);
776 size_t new_allocated = mallinfo().uordblks;
777 if (allocated != new_allocated) {
778 size_t usable_size = malloc_usable_size(ptrs[i]);
Christopher Ferris4e562282019-02-07 14:20:03 -0800779 // Only check if the total got bigger by at least allocation size.
780 // Sometimes the mallinfo numbers can go backwards due to compaction
781 // and/or freeing of cached data.
782 if (new_allocated >= allocated + usable_size) {
783 pass = true;
784 break;
785 }
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800786 }
787 }
788 for (void* ptr : ptrs) {
789 free(ptr);
790 }
791 ASSERT_TRUE(pass)
792 << "For size " << size << " allocated bytes did not increase after "
793 << kMaxAllocs << " allocations.";
794 }
795#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800796 GTEST_SKIP() << "glibc is broken";
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800797#endif
798}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000799
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800800template <typename Type>
801void __attribute__((optnone)) VerifyAlignment(Type* floating) {
802 size_t expected_alignment = alignof(Type);
803 if (expected_alignment != 0) {
804 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
805 << "Expected alignment " << expected_alignment << " ptr value " << floating;
806 }
807}
808
809template <typename Type>
810void __attribute__((optnone)) TestAllocateType() {
811 // The number of allocations to do in a row. This is to attempt to
812 // expose the worst case alignment for native allocators that use
813 // bins.
814 static constexpr size_t kMaxConsecutiveAllocs = 100;
815
816 // Verify using new directly.
817 Type* types[kMaxConsecutiveAllocs];
818 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
819 types[i] = new Type;
820 VerifyAlignment(types[i]);
821 if (::testing::Test::HasFatalFailure()) {
822 return;
823 }
824 }
825 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
826 delete types[i];
827 }
828
829 // Verify using malloc.
830 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
831 types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
832 ASSERT_TRUE(types[i] != nullptr);
833 VerifyAlignment(types[i]);
834 if (::testing::Test::HasFatalFailure()) {
835 return;
836 }
837 }
838 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
839 free(types[i]);
840 }
841
842 // Verify using a vector.
843 std::vector<Type> type_vector(kMaxConsecutiveAllocs);
844 for (size_t i = 0; i < type_vector.size(); i++) {
845 VerifyAlignment(&type_vector[i]);
846 if (::testing::Test::HasFatalFailure()) {
847 return;
848 }
849 }
850}
851
#if defined(__ANDROID__)
// Makes 100 consecutive allocations of |alloc_size| bytes and asserts each
// pointer is aligned to at least |aligned_bytes| (must be a power of two).
// The pointers are intentionally not freed so every allocation is fresh.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* allocs[100];
  const uintptr_t alignment_mask = aligned_bytes - 1;
  for (size_t idx = 0; idx < sizeof(allocs) / sizeof(void*); idx++) {
    allocs[idx] = malloc(alloc_size);
    ASSERT_TRUE(allocs[idx] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(allocs[idx]) & alignment_mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << allocs[idx];
  }
}
#endif
865
866TEST(malloc, align_check) {
867 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
868 // for a discussion of type alignment.
869 ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
870 ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
871 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());
872
873 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
874 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
875 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
876 ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
877 ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
878 ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
879 ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
880 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
881 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
882 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
883 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
884 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
885 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
886 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());
887
888#if defined(__ANDROID__)
889 // On Android, there is a lot of code that expects certain alignments:
890 // - Allocations of a size that rounds up to a multiple of 16 bytes
891 // must have at least 16 byte alignment.
892 // - Allocations of a size that rounds up to a multiple of 8 bytes and
893 // not 16 bytes, are only required to have at least 8 byte alignment.
894 // This is regardless of whether it is in a 32 bit or 64 bit environment.
895
896 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
897 // a discussion of this alignment mess. The code below is enforcing
898 // strong-alignment, since who knows what code depends on this behavior now.
899 for (size_t i = 1; i <= 128; i++) {
900 size_t rounded = (i + 7) & ~7;
901 if ((rounded % 16) == 0) {
902 AndroidVerifyAlignment(i, 16);
903 } else {
904 AndroidVerifyAlignment(i, 8);
905 }
906 if (::testing::Test::HasFatalFailure()) {
907 return;
908 }
909 }
910#endif
911}
912
// Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc, DISABLED_alloc_after_fork) {
  // Both of these need to be a power of 2.
  static constexpr size_t kMinAllocationSize = 8;
  static constexpr size_t kMaxAllocationSize = 2097152;

  static constexpr size_t kNumAllocatingThreads = 5;
  static constexpr size_t kNumForkLoops = 100;

  std::atomic_bool stop;

  // Create threads that simply allocate and free different sizes.
  // These keep the allocator busy so that fork() can land while another
  // thread holds allocator state, which is what this test stresses.
  std::vector<std::thread*> threads;
  for (size_t i = 0; i < kNumAllocatingThreads; i++) {
    std::thread* t = new std::thread([&stop] {
      while (!stop) {
        // Walk through all power-of-two sizes in the configured range.
        for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
          void* ptr;
          // DoNotOptimize keeps the compiler from eliding the malloc/free pair.
          DoNotOptimize(ptr = malloc(size));
          free(ptr);
        }
      }
    });
    threads.push_back(t);
  }

  // Create a thread to fork and allocate.
  // The forked child must still be able to allocate every size even though
  // it inherited the heap of a multi-threaded parent.
  for (size_t i = 0; i < kNumForkLoops; i++) {
    pid_t pid;
    if ((pid = fork()) == 0) {
      for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
        void* ptr;
        DoNotOptimize(ptr = malloc(size));
        ASSERT_TRUE(ptr != nullptr);
        // Make sure we can touch all of the allocation.
        memset(ptr, 0x1, size);
        ASSERT_LE(size, malloc_usable_size(ptr));
        free(ptr);
      }
      // _exit avoids running atexit handlers / flushing stdio in the child.
      _exit(10);
    }
    ASSERT_NE(-1, pid);
    AssertChildExited(pid, 10);
  }

  // Shut down the allocating threads and clean up.
  stop = true;
  for (auto thread : threads) {
    thread->join();
    delete thread;
  }
}
964
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000965TEST(android_mallopt, error_on_unexpected_option) {
966#if defined(__BIONIC__)
967 const int unrecognized_option = -1;
968 errno = 0;
969 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
970 EXPECT_EQ(ENOTSUP, errno);
971#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800972 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000973#endif
974}
975
Christopher Ferrise4cdbc42019-02-08 17:30:58 -0800976bool IsDynamic() {
977#if defined(__LP64__)
978 Elf64_Ehdr ehdr;
979#else
980 Elf32_Ehdr ehdr;
981#endif
982 std::string path(android::base::GetExecutablePath());
983
984 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
985 if (fd == -1) {
986 // Assume dynamic on error.
987 return true;
988 }
989 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
990 close(fd);
991 // Assume dynamic in error cases.
992 return !read_completed || ehdr.e_type == ET_DYN;
993}
994
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000995TEST(android_mallopt, init_zygote_child_profiling) {
996#if defined(__BIONIC__)
997 // Successful call.
998 errno = 0;
Christopher Ferrise4cdbc42019-02-08 17:30:58 -0800999 if (IsDynamic()) {
1000 EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1001 EXPECT_EQ(0, errno);
1002 } else {
1003 // Not supported in static executables.
1004 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1005 EXPECT_EQ(ENOTSUP, errno);
1006 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001007
1008 // Unexpected arguments rejected.
1009 errno = 0;
1010 char unexpected = 0;
1011 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001012 if (IsDynamic()) {
1013 EXPECT_EQ(EINVAL, errno);
1014 } else {
1015 EXPECT_EQ(ENOTSUP, errno);
1016 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001017#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001018 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001019#endif
1020}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001021
#if defined(__BIONIC__)
// Runs inside an EXPECT_EXIT child: installs a 128MB allocation limit, then
// exits 0 iff |func| succeeds for a size below the limit and fails for a
// size at the limit. |func| attempts one allocation and reports success.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024)) {
    exit(1);
  }
  if (func(128 * 1024 * 1024)) {
    exit(1);
  }
  exit(0);
}
#endif
1035
1036TEST(android_mallopt, set_allocation_limit) {
1037#if defined(__BIONIC__)
1038 EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
1039 testing::ExitedWithCode(0), "");
1040 EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
1041 testing::ExitedWithCode(0), "");
1042 EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
1043 testing::ExitedWithCode(0), "");
1044 EXPECT_EXIT(CheckAllocationFunction(
1045 [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
1046 testing::ExitedWithCode(0), "");
1047 EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
1048 void* ptr;
1049 return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
1050 }),
1051 testing::ExitedWithCode(0), "");
1052 EXPECT_EXIT(CheckAllocationFunction(
1053 [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
1054 testing::ExitedWithCode(0), "");
1055 EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
1056 void* p = malloc(1024 * 1024);
1057 return realloc(p, bytes) != nullptr;
1058 }),
1059 testing::ExitedWithCode(0), "");
1060#if !defined(__LP64__)
1061 EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
1062 testing::ExitedWithCode(0), "");
1063 EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
1064 testing::ExitedWithCode(0), "");
1065#endif
1066#else
Elliott Hughes10907202019-03-27 08:51:02 -07001067 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001068#endif
1069}
1070
1071TEST(android_mallopt, set_allocation_limit_multiple) {
1072#if defined(__BIONIC__)
1073 // Only the first set should work.
1074 size_t limit = 256 * 1024 * 1024;
1075 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1076 limit = 32 * 1024 * 1024;
1077 ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1078#else
Elliott Hughes10907202019-03-27 08:51:02 -07001079 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001080#endif
1081}
1082
1083#if defined(__BIONIC__)
1084static constexpr size_t kAllocationSize = 8 * 1024 * 1024;
1085
1086static size_t GetMaxAllocations() {
1087 size_t max_pointers = 0;
1088 void* ptrs[20];
1089 for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
1090 ptrs[i] = malloc(kAllocationSize);
1091 if (ptrs[i] == nullptr) {
1092 max_pointers = i;
1093 break;
1094 }
1095 }
1096 for (size_t i = 0; i < max_pointers; i++) {
1097 free(ptrs[i]);
1098 }
1099 return max_pointers;
1100}
1101
1102static void VerifyMaxPointers(size_t max_pointers) {
1103 // Now verify that we can allocate the same number as before.
1104 void* ptrs[20];
1105 for (size_t i = 0; i < max_pointers; i++) {
1106 ptrs[i] = malloc(kAllocationSize);
1107 ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
1108 }
1109
1110 // Make sure the next allocation still fails.
1111 ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
1112 for (size_t i = 0; i < max_pointers; i++) {
1113 free(ptrs[i]);
1114 }
1115}
1116#endif
1117
1118TEST(android_mallopt, set_allocation_limit_realloc_increase) {
1119#if defined(__BIONIC__)
1120 size_t limit = 128 * 1024 * 1024;
1121 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1122
1123 size_t max_pointers = GetMaxAllocations();
1124 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1125
1126 void* memory = malloc(10 * 1024 * 1024);
1127 ASSERT_TRUE(memory != nullptr);
1128
1129 // Increase size.
1130 memory = realloc(memory, 20 * 1024 * 1024);
1131 ASSERT_TRUE(memory != nullptr);
1132 memory = realloc(memory, 40 * 1024 * 1024);
1133 ASSERT_TRUE(memory != nullptr);
1134 memory = realloc(memory, 60 * 1024 * 1024);
1135 ASSERT_TRUE(memory != nullptr);
1136 memory = realloc(memory, 80 * 1024 * 1024);
1137 ASSERT_TRUE(memory != nullptr);
1138 // Now push past limit.
1139 memory = realloc(memory, 130 * 1024 * 1024);
1140 ASSERT_TRUE(memory == nullptr);
1141
1142 VerifyMaxPointers(max_pointers);
1143#else
Elliott Hughes10907202019-03-27 08:51:02 -07001144 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001145#endif
1146}
1147
1148TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
1149#if defined(__BIONIC__)
1150 size_t limit = 100 * 1024 * 1024;
1151 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1152
1153 size_t max_pointers = GetMaxAllocations();
1154 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1155
1156 void* memory = malloc(80 * 1024 * 1024);
1157 ASSERT_TRUE(memory != nullptr);
1158
1159 // Decrease size.
1160 memory = realloc(memory, 60 * 1024 * 1024);
1161 ASSERT_TRUE(memory != nullptr);
1162 memory = realloc(memory, 40 * 1024 * 1024);
1163 ASSERT_TRUE(memory != nullptr);
1164 memory = realloc(memory, 20 * 1024 * 1024);
1165 ASSERT_TRUE(memory != nullptr);
1166 memory = realloc(memory, 10 * 1024 * 1024);
1167 ASSERT_TRUE(memory != nullptr);
1168 free(memory);
1169
1170 VerifyMaxPointers(max_pointers);
1171#else
Elliott Hughes10907202019-03-27 08:51:02 -07001172 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001173#endif
1174}
1175
1176TEST(android_mallopt, set_allocation_limit_realloc_free) {
1177#if defined(__BIONIC__)
1178 size_t limit = 100 * 1024 * 1024;
1179 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1180
1181 size_t max_pointers = GetMaxAllocations();
1182 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1183
1184 void* memory = malloc(60 * 1024 * 1024);
1185 ASSERT_TRUE(memory != nullptr);
1186
1187 memory = realloc(memory, 0);
1188 ASSERT_TRUE(memory == nullptr);
1189
1190 VerifyMaxPointers(max_pointers);
1191#else
Elliott Hughes10907202019-03-27 08:51:02 -07001192 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001193#endif
1194}
1195
1196#if defined(__BIONIC__)
1197static void* SetAllocationLimit(void* data) {
1198 std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
1199 while (!go->load()) {
1200 }
1201 size_t limit = 500 * 1024 * 1024;
1202 if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
1203 return reinterpret_cast<void*>(-1);
1204 }
1205 return nullptr;
1206}
1207
// Runs in a forked child: starts four threads that race to set the
// allocation limit, verifies exactly one wins, then exits 0 on success.
static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Each thread returns non-null iff its android_mallopt call succeeded;
  // exactly one of the racing threads may win.
  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  ASSERT_EQ(1U, num_successful);
  // Report success to the parent via the exit status.
  exit(0);
}
#endif
1238
1239TEST(android_mallopt, set_allocation_limit_multiple_threads) {
1240#if defined(__BIONIC__)
1241 if (IsDynamic()) {
1242 ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1243 }
1244
1245 // Run this a number of times as a stress test.
1246 for (size_t i = 0; i < 100; i++) {
1247 // Not using ASSERT_EXIT because errors messages are not displayed.
1248 pid_t pid;
1249 if ((pid = fork()) == 0) {
1250 ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
1251 }
1252 ASSERT_NE(-1, pid);
1253 int status;
1254 ASSERT_EQ(pid, wait(&status));
1255 ASSERT_EQ(0, WEXITSTATUS(status));
1256 }
1257#else
Elliott Hughes10907202019-03-27 08:51:02 -07001258 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001259#endif
1260}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001261
Mitch Phillips9cad8422021-01-20 16:03:27 -08001262void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1263 std::vector<void*> allocs;
1264 constexpr int kMaxBytesToCheckZero = 64;
1265 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1266
1267 for (int i = 0; i < num_iterations; ++i) {
1268 int size = get_alloc_size(i);
1269 allocs.push_back(malloc(size));
1270 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1271 }
1272
1273 for (void* alloc : allocs) {
1274 free(alloc);
1275 }
1276 allocs.clear();
1277
1278 for (int i = 0; i < num_iterations; ++i) {
1279 int size = get_alloc_size(i);
1280 allocs.push_back(malloc(size));
1281 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1282 }
1283
1284 for (void* alloc : allocs) {
1285 free(alloc);
1286 }
1287}
1288
1289TEST(malloc, zero_init) {
1290#if defined(__BIONIC__)
1291 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
1292 bool allocator_scudo;
1293 GetAllocatorVersion(&allocator_scudo);
1294 if (!allocator_scudo) {
1295 GTEST_SKIP() << "scudo allocator only test";
1296 }
1297
1298 mallopt(M_BIONIC_ZERO_INIT, 1);
1299
1300 // Test using a block of 4K small (1-32 byte) allocations.
1301 TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
1302 return 1 + iteration % 32;
1303 });
1304
1305 // Also test large allocations that land in the scudo secondary, as this is
1306 // the only part of Scudo that's changed by enabling zero initialization with
1307 // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
1308 // release secondary allocations back to the OS) was modified to 0ms/1ms by
1309 // mallopt_decay. Ensure that we delay for at least a second before releasing
1310 // pages to the OS in order to avoid implicit zeroing by the kernel.
1311 mallopt(M_DECAY_TIME, 1000);
1312 TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
1313 return 1 << (19 + iteration % 4);
1314 });
1315
1316#else
1317 GTEST_SKIP() << "bionic-only test";
1318#endif
1319}
1320
// Note that MTE is enabled on cc_tests on devices that support MTE.
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  // Start a thread that blocks until tagging has been disabled below, then
  // reports its own tagged-address control state via its return value.
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  // Disable heap tagging, then release the other thread.
  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  // This thread must no longer have tag checking enabled.
  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(PR_MTE_TCF_NONE, my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // The other thread must observe the same state, i.e. the mallopt call
  // affected every thread, not just the caller.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne2659d7b2021-03-05 13:31:41 -08001355
TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end of
  // a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  volatile char *vp = p.get();
  // Deliberate one-byte out-of-bounds read; the volatile qualifiers keep the
  // compiler from optimizing the access away. With slack enabled this must
  // not crash.
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}