blob: 3a09258f7d8ef1e36f12e9cebbba6068e732fa68 [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
30#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080031#include <sys/types.h>
32#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070033#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070034
Mitch Phillips9cad8422021-01-20 16:03:27 -080035#include <algorithm>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080036#include <atomic>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080037#include <thread>
Mitch Phillips9cad8422021-01-20 16:03:27 -080038#include <vector>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080039
Dan Albert4caa1f02014-08-20 09:16:57 -070040#include <tinyxml2.h>
41
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080042#include <android-base/file.h>
43
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080044#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000045
Elliott Hughesb1770852018-09-18 12:52:42 -070046#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080047
Peter Collingbourne45819dd2020-01-09 11:00:43 -080048#include "SignalUtils.h"
49
Christopher Ferrisb874c332020-01-21 16:39:05 -080050#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070051#include "platform/bionic/mte.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080052#include "platform/bionic/reserved_signals.h"
53#include "private/bionic_config.h"
54
Elliott Hughesb1770852018-09-18 12:52:42 -070055#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080056
Elliott Hughesb1770852018-09-18 12:52:42 -070057#else
Christopher Ferrisb874c332020-01-21 16:39:05 -080058
Elliott Hughesb1770852018-09-18 12:52:42 -070059#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080060
Elliott Hughesb1770852018-09-18 12:52:42 -070061#endif
62
Christopher Ferris885f3b92013-05-21 17:48:01 -070063TEST(malloc, malloc_std) {
64 // Simple malloc test.
65 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070066 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070067 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070068 free(ptr);
69}
70
Christopher Ferrisa4037802014-06-09 19:14:11 -070071TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080072 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070073 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070074 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -070075 ASSERT_EQ(ENOMEM, errno);
76}
77
Christopher Ferris885f3b92013-05-21 17:48:01 -070078TEST(malloc, calloc_std) {
79 // Simple calloc test.
80 size_t alloc_len = 100;
81 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070082 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070083 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
84 for (size_t i = 0; i < alloc_len; i++) {
85 ASSERT_EQ(0, ptr[i]);
86 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070087 free(ptr);
88}
89
Peter Collingbourne978eb162020-09-21 15:26:02 -070090TEST(malloc, calloc_mem_init_disabled) {
91#if defined(__BIONIC__)
92 // calloc should still zero memory if mem-init is disabled.
93 // With jemalloc the mallopts will fail but that shouldn't affect the
94 // execution of the test.
95 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
96 size_t alloc_len = 100;
97 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
98 for (size_t i = 0; i < alloc_len; i++) {
99 ASSERT_EQ(0, ptr[i]);
100 }
101 free(ptr);
102 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
103#else
104 GTEST_SKIP() << "bionic-only test";
105#endif
106}
107
Christopher Ferrisa4037802014-06-09 19:14:11 -0700108TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800109 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700110 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700111 ASSERT_EQ(nullptr, calloc(-1, 100));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700112 ASSERT_EQ(ENOMEM, errno);
113}
114
115TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800116 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700117 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700118 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700119 ASSERT_EQ(ENOMEM, errno);
120 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700121 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700122 ASSERT_EQ(ENOMEM, errno);
123 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700124 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700125 ASSERT_EQ(ENOMEM, errno);
126 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700127 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700128 ASSERT_EQ(ENOMEM, errno);
129}
130
Christopher Ferris885f3b92013-05-21 17:48:01 -0700131TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800132 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700133 // Memalign test where the alignment is any value.
134 for (size_t i = 0; i <= 12; i++) {
135 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700136 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700137 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700138 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
139 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
140 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700141 free(ptr);
142 }
143 }
144}
145
Christopher Ferrisa4037802014-06-09 19:14:11 -0700146TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800147 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700148 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700149}
150
151TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800152 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700153 void* ptr;
154 for (size_t align = 0; align <= 256; align++) {
155 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700156 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700157 free(ptr);
158 }
159}
160
Christopher Ferris885f3b92013-05-21 17:48:01 -0700161TEST(malloc, memalign_realloc) {
162 // Memalign and then realloc the pointer a couple of times.
163 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
164 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700165 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700166 ASSERT_LE(100U, malloc_usable_size(ptr));
167 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
168 memset(ptr, 0x23, 100);
169
170 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700171 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700172 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700173 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700174 for (size_t i = 0; i < 100; i++) {
175 ASSERT_EQ(0x23, ptr[i]);
176 }
177 memset(ptr, 0x45, 200);
178
179 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700180 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700181 ASSERT_LE(300U, malloc_usable_size(ptr));
182 for (size_t i = 0; i < 200; i++) {
183 ASSERT_EQ(0x45, ptr[i]);
184 }
185 memset(ptr, 0x67, 300);
186
187 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700188 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700189 ASSERT_LE(250U, malloc_usable_size(ptr));
190 for (size_t i = 0; i < 250; i++) {
191 ASSERT_EQ(0x67, ptr[i]);
192 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700193 free(ptr);
194 }
195}
196
197TEST(malloc, malloc_realloc_larger) {
198 // Realloc to a larger size, malloc is used for the original allocation.
199 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700200 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700201 ASSERT_LE(100U, malloc_usable_size(ptr));
202 memset(ptr, 67, 100);
203
204 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700205 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700206 ASSERT_LE(200U, malloc_usable_size(ptr));
207 for (size_t i = 0; i < 100; i++) {
208 ASSERT_EQ(67, ptr[i]);
209 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700210 free(ptr);
211}
212
213TEST(malloc, malloc_realloc_smaller) {
214 // Realloc to a smaller size, malloc is used for the original allocation.
215 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700216 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700217 ASSERT_LE(200U, malloc_usable_size(ptr));
218 memset(ptr, 67, 200);
219
220 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700221 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700222 ASSERT_LE(100U, malloc_usable_size(ptr));
223 for (size_t i = 0; i < 100; i++) {
224 ASSERT_EQ(67, ptr[i]);
225 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700226 free(ptr);
227}
228
229TEST(malloc, malloc_multiple_realloc) {
230 // Multiple reallocs, malloc is used for the original allocation.
231 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700232 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700233 ASSERT_LE(200U, malloc_usable_size(ptr));
234 memset(ptr, 0x23, 200);
235
236 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700237 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700238 ASSERT_LE(100U, malloc_usable_size(ptr));
239 for (size_t i = 0; i < 100; i++) {
240 ASSERT_EQ(0x23, ptr[i]);
241 }
242
243 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700244 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700245 ASSERT_LE(50U, malloc_usable_size(ptr));
246 for (size_t i = 0; i < 50; i++) {
247 ASSERT_EQ(0x23, ptr[i]);
248 }
249
250 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700251 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700252 ASSERT_LE(150U, malloc_usable_size(ptr));
253 for (size_t i = 0; i < 50; i++) {
254 ASSERT_EQ(0x23, ptr[i]);
255 }
256 memset(ptr, 0x23, 150);
257
258 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700259 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700260 ASSERT_LE(425U, malloc_usable_size(ptr));
261 for (size_t i = 0; i < 150; i++) {
262 ASSERT_EQ(0x23, ptr[i]);
263 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700264 free(ptr);
265}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700266
Christopher Ferris885f3b92013-05-21 17:48:01 -0700267TEST(malloc, calloc_realloc_larger) {
268 // Realloc to a larger size, calloc is used for the original allocation.
269 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700270 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700271 ASSERT_LE(100U, malloc_usable_size(ptr));
272
273 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700274 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700275 ASSERT_LE(200U, malloc_usable_size(ptr));
276 for (size_t i = 0; i < 100; i++) {
277 ASSERT_EQ(0, ptr[i]);
278 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700279 free(ptr);
280}
281
282TEST(malloc, calloc_realloc_smaller) {
283 // Realloc to a smaller size, calloc is used for the original allocation.
284 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700285 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700286 ASSERT_LE(200U, malloc_usable_size(ptr));
287
288 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700289 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700290 ASSERT_LE(100U, malloc_usable_size(ptr));
291 for (size_t i = 0; i < 100; i++) {
292 ASSERT_EQ(0, ptr[i]);
293 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700294 free(ptr);
295}
296
297TEST(malloc, calloc_multiple_realloc) {
298 // Multiple reallocs, calloc is used for the original allocation.
299 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700300 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700301 ASSERT_LE(200U, malloc_usable_size(ptr));
302
303 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700304 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700305 ASSERT_LE(100U, malloc_usable_size(ptr));
306 for (size_t i = 0; i < 100; i++) {
307 ASSERT_EQ(0, ptr[i]);
308 }
309
310 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700311 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700312 ASSERT_LE(50U, malloc_usable_size(ptr));
313 for (size_t i = 0; i < 50; i++) {
314 ASSERT_EQ(0, ptr[i]);
315 }
316
317 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700318 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700319 ASSERT_LE(150U, malloc_usable_size(ptr));
320 for (size_t i = 0; i < 50; i++) {
321 ASSERT_EQ(0, ptr[i]);
322 }
323 memset(ptr, 0, 150);
324
325 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700326 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700327 ASSERT_LE(425U, malloc_usable_size(ptr));
328 for (size_t i = 0; i < 150; i++) {
329 ASSERT_EQ(0, ptr[i]);
330 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700331 free(ptr);
332}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700333
Christopher Ferrisa4037802014-06-09 19:14:11 -0700334TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800335 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700336 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700337 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700338 ASSERT_EQ(ENOMEM, errno);
339 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700340 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700341 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700342 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700343 ASSERT_EQ(ENOMEM, errno);
344 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700345}
346
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
// Manual declarations for the deprecated allocation functions exercised
// below — presumably because the standard headers no longer declare
// them when this macro is set (HAVE_DEPRECATED_MALLOC_FUNCS comes from
// private/bionic_config.h on bionic builds).
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000351
Christopher Ferrisa4037802014-06-09 19:14:11 -0700352TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700353#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700354 size_t pagesize = sysconf(_SC_PAGESIZE);
355 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700356 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700357 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
358 ASSERT_LE(pagesize, malloc_usable_size(ptr));
359 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700360#else
361 GTEST_SKIP() << "pvalloc not supported.";
362#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700363}
364
365TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700366#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700367 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700368#else
369 GTEST_SKIP() << "pvalloc not supported.";
370#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700371}
372
373TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700374#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700375 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700376 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700377 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700378 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
379 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700380#else
381 GTEST_SKIP() << "valloc not supported.";
382#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700383}
384
385TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700386#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700387 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700388#else
389 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000390#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700391}
Dan Albert4caa1f02014-08-20 09:16:57 -0700392
// Verifies that malloc_info(3) emits well-formed XML whose structure
// matches the active allocator. The output is written to a temporary
// file, read back, parsed with tinyxml2, and checked element by
// element. The root must be <malloc version="...">; the version string
// selects the per-allocator schema (jemalloc-1, scudo-1, or
// debug-malloc-1, which is not further validated).
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN;  // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  // release() so the fd is owned (and closed) by fp, not by tf.
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: one <heap nr=...> per arena, each with per-size-class
    // totals and optional <bin> children.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
        // Siblings may include non-<bin> elements; only validate bins.
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size=... count=...> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800459
// Cross-checks malloc_info(3) against mallinfo(3): the per-element
// totals summed from malloc_info's XML should agree with mallinfo's
// uordblks (allocated bytes). mallinfo is sampled immediately before
// and after the malloc_info call because malloc_info itself allocates,
// so the XML total must land between the two samples (jemalloc only;
// scudo reports just the primary, so only a non-zero check is possible).
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN;  // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  // release() so the fd is owned (and closed) by fp, not by tf.
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  // Ordering matters: sample mallinfo on both sides of malloc_info.
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // Sum allocated-large + allocated-huge + allocated-bins per arena.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    // Sum size * count over all <alloc> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
527
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800528TEST(malloc, calloc_usable_size) {
529 for (size_t size = 1; size <= 2048; size++) {
530 void* pointer = malloc(size);
531 ASSERT_TRUE(pointer != nullptr);
532 memset(pointer, 0xeb, malloc_usable_size(pointer));
533 free(pointer);
534
535 // We should get a previous pointer that has been set to non-zero.
536 // If calloc does not zero out all of the data, this will fail.
537 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
538 ASSERT_TRUE(pointer != nullptr);
539 size_t usable_size = malloc_usable_size(zero_mem);
540 for (size_t i = 0; i < usable_size; i++) {
541 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
542 }
543 free(zero_mem);
544 }
545}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800546
547TEST(malloc, malloc_0) {
548 void* p = malloc(0);
549 ASSERT_TRUE(p != nullptr);
550 free(p);
551}
552
553TEST(malloc, calloc_0_0) {
554 void* p = calloc(0, 0);
555 ASSERT_TRUE(p != nullptr);
556 free(p);
557}
558
559TEST(malloc, calloc_0_1) {
560 void* p = calloc(0, 1);
561 ASSERT_TRUE(p != nullptr);
562 free(p);
563}
564
565TEST(malloc, calloc_1_0) {
566 void* p = calloc(1, 0);
567 ASSERT_TRUE(p != nullptr);
568 free(p);
569}
570
571TEST(malloc, realloc_nullptr_0) {
572 // realloc(nullptr, size) is actually malloc(size).
573 void* p = realloc(nullptr, 0);
574 ASSERT_TRUE(p != nullptr);
575 free(p);
576}
577
578TEST(malloc, realloc_0) {
579 void* p = malloc(1024);
580 ASSERT_TRUE(p != nullptr);
581 // realloc(p, 0) is actually free(p).
582 void* p2 = realloc(p, 0);
583 ASSERT_TRUE(p2 == nullptr);
584}
Christopher Ferris72df6702016-02-11 15:51:31 -0800585
// Number of iterations for each alignment-probing loop below.
constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
// Each pass interleaves a 1-byte "filler" allocation with the typed
// allocation to try to push the allocator into handing out oddly placed
// blocks; the typed pointer is then checked for natural alignment.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    // Naturally aligned: low bits of the address must be zero.
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  // Release the typed allocations, which were intentionally kept live
  // across all iterations of their loop.
  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700655
656TEST(malloc, mallopt_smoke) {
657 errno = 0;
658 ASSERT_EQ(0, mallopt(-1000, 1));
659 // mallopt doesn't set errno.
660 ASSERT_EQ(0, errno);
661}
Elliott Hughesb1770852018-09-18 12:52:42 -0700662
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800663TEST(malloc, mallopt_decay) {
664#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800665 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800666 errno = 0;
667 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
668 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
669 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
670 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
671#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800672 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800673#endif
674}
675
// M_PURGE must be accepted by bionic's mallopt (returns 1 on success).
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
685
#if defined(__BIONIC__)
// Determines which native allocator is active by running malloc_info
// into a temporary file, parsing the XML, and reading the root's
// "version" attribute. Sets *allocator_scudo to true iff it is
// "scudo-1". Uses ASSERT_* internally, so callers inherit fatal
// failures on any I/O or parse error.
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  // release() so the fd is owned (and closed) by fp, not by tf.
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif
709
710TEST(malloc, mallopt_scudo_only_options) {
711#if defined(__BIONIC__)
712 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
713 bool allocator_scudo;
714 GetAllocatorVersion(&allocator_scudo);
715 if (!allocator_scudo) {
716 GTEST_SKIP() << "scudo allocator only test";
717 }
718 ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
719 ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
720 ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
721#else
722 GTEST_SKIP() << "bionic-only test";
723#endif
724}
725
Elliott Hughesb1770852018-09-18 12:52:42 -0700726TEST(malloc, reallocarray_overflow) {
727#if HAVE_REALLOCARRAY
728 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
729 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
730 size_t b = 2;
731
732 errno = 0;
733 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
734 ASSERT_EQ(ENOMEM, errno);
735
736 errno = 0;
737 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
738 ASSERT_EQ(ENOMEM, errno);
739#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800740 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700741#endif
742}
743
744TEST(malloc, reallocarray) {
745#if HAVE_REALLOCARRAY
746 void* p = reallocarray(nullptr, 2, 32);
747 ASSERT_TRUE(p != nullptr);
748 ASSERT_GE(malloc_usable_size(p), 64U);
749#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800750 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700751#endif
752}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800753
// Verifies that mallinfo().uordblks (total allocated bytes) grows when an
// allocation is made, across a spread of size classes.
TEST(malloc, mallinfo) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  // Sizes chosen to land in a variety of allocator size classes, from tiny
  // bins up to allocations large enough to be mmap-backed.
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  // Upper bound on attempts per size before declaring failure.
  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      // Sample the total before and after a single allocation.
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Free every pointer allocated above (unused slots are nullptr, which
    // free() accepts).
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000798
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800799template <typename Type>
800void __attribute__((optnone)) VerifyAlignment(Type* floating) {
801 size_t expected_alignment = alignof(Type);
802 if (expected_alignment != 0) {
803 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
804 << "Expected alignment " << expected_alignment << " ptr value " << floating;
805 }
806}
807
808template <typename Type>
809void __attribute__((optnone)) TestAllocateType() {
810 // The number of allocations to do in a row. This is to attempt to
811 // expose the worst case alignment for native allocators that use
812 // bins.
813 static constexpr size_t kMaxConsecutiveAllocs = 100;
814
815 // Verify using new directly.
816 Type* types[kMaxConsecutiveAllocs];
817 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
818 types[i] = new Type;
819 VerifyAlignment(types[i]);
820 if (::testing::Test::HasFatalFailure()) {
821 return;
822 }
823 }
824 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
825 delete types[i];
826 }
827
828 // Verify using malloc.
829 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
830 types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
831 ASSERT_TRUE(types[i] != nullptr);
832 VerifyAlignment(types[i]);
833 if (::testing::Test::HasFatalFailure()) {
834 return;
835 }
836 }
837 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
838 free(types[i]);
839 }
840
841 // Verify using a vector.
842 std::vector<Type> type_vector(kMaxConsecutiveAllocs);
843 for (size_t i = 0; i < type_vector.size(); i++) {
844 VerifyAlignment(&type_vector[i]);
845 if (::testing::Test::HasFatalFailure()) {
846 return;
847 }
848 }
849}
850
#if defined(__ANDROID__)
// Makes a run of consecutive allocations of |alloc_size| bytes and asserts
// each is aligned to at least |aligned_bytes| (must be a power of two). The
// blocks are deliberately kept live while allocating so consecutive requests
// cannot all reuse one well-aligned slot.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
  // Release the allocations; previously they were all leaked. (If an ASSERT
  // above fired we return early and still leak, but the test has already
  // failed at that point.)
  for (void* ptr : ptrs) {
    free(ptr);
  }
}
#endif
864
865TEST(malloc, align_check) {
866 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
867 // for a discussion of type alignment.
868 ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
869 ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
870 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());
871
872 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
873 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
874 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
875 ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
876 ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
877 ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
878 ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
879 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
880 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
881 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
882 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
883 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
884 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
885 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());
886
887#if defined(__ANDROID__)
888 // On Android, there is a lot of code that expects certain alignments:
889 // - Allocations of a size that rounds up to a multiple of 16 bytes
890 // must have at least 16 byte alignment.
891 // - Allocations of a size that rounds up to a multiple of 8 bytes and
892 // not 16 bytes, are only required to have at least 8 byte alignment.
893 // This is regardless of whether it is in a 32 bit or 64 bit environment.
894
895 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
896 // a discussion of this alignment mess. The code below is enforcing
897 // strong-alignment, since who knows what code depends on this behavior now.
898 for (size_t i = 1; i <= 128; i++) {
899 size_t rounded = (i + 7) & ~7;
900 if ((rounded % 16) == 0) {
901 AndroidVerifyAlignment(i, 16);
902 } else {
903 AndroidVerifyAlignment(i, 8);
904 }
905 if (::testing::Test::HasFatalFailure()) {
906 return;
907 }
908 }
909#endif
910}
911
Christopher Ferris201dcf42020-01-29 13:09:31 -0800912// Jemalloc doesn't pass this test right now, so leave it as disabled.
913TEST(malloc, DISABLED_alloc_after_fork) {
914 // Both of these need to be a power of 2.
915 static constexpr size_t kMinAllocationSize = 8;
916 static constexpr size_t kMaxAllocationSize = 2097152;
917
918 static constexpr size_t kNumAllocatingThreads = 5;
919 static constexpr size_t kNumForkLoops = 100;
920
921 std::atomic_bool stop;
922
923 // Create threads that simply allocate and free different sizes.
924 std::vector<std::thread*> threads;
925 for (size_t i = 0; i < kNumAllocatingThreads; i++) {
926 std::thread* t = new std::thread([&stop] {
927 while (!stop) {
928 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -0700929 void* ptr;
930 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -0800931 free(ptr);
932 }
933 }
934 });
935 threads.push_back(t);
936 }
937
938 // Create a thread to fork and allocate.
939 for (size_t i = 0; i < kNumForkLoops; i++) {
940 pid_t pid;
941 if ((pid = fork()) == 0) {
942 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -0700943 void* ptr;
944 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -0800945 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris201dcf42020-01-29 13:09:31 -0800946 // Make sure we can touch all of the allocation.
947 memset(ptr, 0x1, size);
948 ASSERT_LE(size, malloc_usable_size(ptr));
949 free(ptr);
950 }
951 _exit(10);
952 }
953 ASSERT_NE(-1, pid);
954 AssertChildExited(pid, 10);
955 }
956
957 stop = true;
958 for (auto thread : threads) {
959 thread->join();
960 delete thread;
961 }
962}
963
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000964TEST(android_mallopt, error_on_unexpected_option) {
965#if defined(__BIONIC__)
966 const int unrecognized_option = -1;
967 errno = 0;
968 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
969 EXPECT_EQ(ENOTSUP, errno);
970#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800971 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000972#endif
973}
974
Christopher Ferrise4cdbc42019-02-08 17:30:58 -0800975bool IsDynamic() {
976#if defined(__LP64__)
977 Elf64_Ehdr ehdr;
978#else
979 Elf32_Ehdr ehdr;
980#endif
981 std::string path(android::base::GetExecutablePath());
982
983 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
984 if (fd == -1) {
985 // Assume dynamic on error.
986 return true;
987 }
988 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
989 close(fd);
990 // Assume dynamic in error cases.
991 return !read_completed || ehdr.e_type == ET_DYN;
992}
993
// M_INIT_ZYGOTE_CHILD_PROFILING should succeed with no arguments in a
// dynamic executable, fail with ENOTSUP in a static one, and reject any
// non-empty argument with EINVAL (dynamic) / ENOTSUP (static).
TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(0, errno);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(ENOTSUP, errno);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    EXPECT_EQ(EINVAL, errno);
  } else {
    // Static executables fail before argument validation.
    EXPECT_EQ(ENOTSUP, errno);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001020
#if defined(__BIONIC__)
// Runs inside a death-test child. Sets a 128MB allocation limit, then checks
// that |func| (which attempts an allocation of the given byte count and
// returns whether it succeeded) succeeds below the limit and fails at the
// limit. Communicates the verdict via the process exit code: 0 = expected
// behavior, 1 = unexpected.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif
1034
// Verifies that every allocation entry point (calloc, malloc, memalign,
// posix_memalign, aligned_alloc, realloc, and the legacy pvalloc/valloc on
// 32-bit) honors M_SET_ALLOCATION_LIMIT_BYTES. Each lambda runs in a
// death-test child via CheckAllocationFunction, which exits 0 iff the
// allocation succeeds below the limit and fails at the limit.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc/valloc are only provided for 32-bit compatibility.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1069
1070TEST(android_mallopt, set_allocation_limit_multiple) {
1071#if defined(__BIONIC__)
1072 // Only the first set should work.
1073 size_t limit = 256 * 1024 * 1024;
1074 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1075 limit = 32 * 1024 * 1024;
1076 ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1077#else
Elliott Hughes10907202019-03-27 08:51:02 -07001078 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001079#endif
1080}
1081
1082#if defined(__BIONIC__)
1083static constexpr size_t kAllocationSize = 8 * 1024 * 1024;
1084
1085static size_t GetMaxAllocations() {
1086 size_t max_pointers = 0;
1087 void* ptrs[20];
1088 for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
1089 ptrs[i] = malloc(kAllocationSize);
1090 if (ptrs[i] == nullptr) {
1091 max_pointers = i;
1092 break;
1093 }
1094 }
1095 for (size_t i = 0; i < max_pointers; i++) {
1096 free(ptrs[i]);
1097 }
1098 return max_pointers;
1099}
1100
1101static void VerifyMaxPointers(size_t max_pointers) {
1102 // Now verify that we can allocate the same number as before.
1103 void* ptrs[20];
1104 for (size_t i = 0; i < max_pointers; i++) {
1105 ptrs[i] = malloc(kAllocationSize);
1106 ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
1107 }
1108
1109 // Make sure the next allocation still fails.
1110 ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
1111 for (size_t i = 0; i < max_pointers; i++) {
1112 free(ptrs[i]);
1113 }
1114}
1115#endif
1116
1117TEST(android_mallopt, set_allocation_limit_realloc_increase) {
1118#if defined(__BIONIC__)
1119 size_t limit = 128 * 1024 * 1024;
1120 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1121
1122 size_t max_pointers = GetMaxAllocations();
1123 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1124
1125 void* memory = malloc(10 * 1024 * 1024);
1126 ASSERT_TRUE(memory != nullptr);
1127
1128 // Increase size.
1129 memory = realloc(memory, 20 * 1024 * 1024);
1130 ASSERT_TRUE(memory != nullptr);
1131 memory = realloc(memory, 40 * 1024 * 1024);
1132 ASSERT_TRUE(memory != nullptr);
1133 memory = realloc(memory, 60 * 1024 * 1024);
1134 ASSERT_TRUE(memory != nullptr);
1135 memory = realloc(memory, 80 * 1024 * 1024);
1136 ASSERT_TRUE(memory != nullptr);
1137 // Now push past limit.
1138 memory = realloc(memory, 130 * 1024 * 1024);
1139 ASSERT_TRUE(memory == nullptr);
1140
1141 VerifyMaxPointers(max_pointers);
1142#else
Elliott Hughes10907202019-03-27 08:51:02 -07001143 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001144#endif
1145}
1146
1147TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
1148#if defined(__BIONIC__)
1149 size_t limit = 100 * 1024 * 1024;
1150 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1151
1152 size_t max_pointers = GetMaxAllocations();
1153 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1154
1155 void* memory = malloc(80 * 1024 * 1024);
1156 ASSERT_TRUE(memory != nullptr);
1157
1158 // Decrease size.
1159 memory = realloc(memory, 60 * 1024 * 1024);
1160 ASSERT_TRUE(memory != nullptr);
1161 memory = realloc(memory, 40 * 1024 * 1024);
1162 ASSERT_TRUE(memory != nullptr);
1163 memory = realloc(memory, 20 * 1024 * 1024);
1164 ASSERT_TRUE(memory != nullptr);
1165 memory = realloc(memory, 10 * 1024 * 1024);
1166 ASSERT_TRUE(memory != nullptr);
1167 free(memory);
1168
1169 VerifyMaxPointers(max_pointers);
1170#else
Elliott Hughes10907202019-03-27 08:51:02 -07001171 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001172#endif
1173}
1174
1175TEST(android_mallopt, set_allocation_limit_realloc_free) {
1176#if defined(__BIONIC__)
1177 size_t limit = 100 * 1024 * 1024;
1178 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1179
1180 size_t max_pointers = GetMaxAllocations();
1181 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1182
1183 void* memory = malloc(60 * 1024 * 1024);
1184 ASSERT_TRUE(memory != nullptr);
1185
1186 memory = realloc(memory, 0);
1187 ASSERT_TRUE(memory == nullptr);
1188
1189 VerifyMaxPointers(max_pointers);
1190#else
Elliott Hughes10907202019-03-27 08:51:02 -07001191 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001192#endif
1193}
1194
#if defined(__BIONIC__)
// pthread entry point: spins until the shared atomic flag in |data| becomes
// true, then races with sibling threads to set the allocation limit.
// Returns (void*)-1 if this thread's android_mallopt succeeded, nullptr if
// it failed (because another thread won the race).
static void* SetAllocationLimit(void* data) {
  std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
  // Busy-wait so all threads hit android_mallopt as close together as possible.
  while (!go->load()) {
  }
  size_t limit = 500 * 1024 * 1024;
  if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
    return reinterpret_cast<void*>(-1);
  }
  return nullptr;
}

// Runs in a forked child (exits 0 on success; uses gtest fatal asserts
// otherwise). Starts several threads that simultaneously try to set the
// allocation limit, also fires the heapprofd profiler signal at the same
// time, and verifies that exactly one thread succeeds.
static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Exactly one of the racing threads must have set the limit.
  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  ASSERT_EQ(1U, num_successful);
  exit(0);
}
#endif
1237
// Stress test: repeatedly fork a child that races several threads setting
// the allocation limit (see SetAllocationLimitMultipleThreads, which exits
// the child with 0 on success).
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    // Install the heapprofd hooks so the profiler signal sent by the child
    // has a handler to trigger (not supported in static executables).
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      // On success the helper calls exit(0) and never returns here.
      // NOTE(review): on a fatal failure inside the helper the child falls
      // through and keeps executing this loop -- confirm this is acceptable
      // for the failure path.
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001260
Mitch Phillips9cad8422021-01-20 16:03:27 -08001261void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1262 std::vector<void*> allocs;
1263 constexpr int kMaxBytesToCheckZero = 64;
1264 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1265
1266 for (int i = 0; i < num_iterations; ++i) {
1267 int size = get_alloc_size(i);
1268 allocs.push_back(malloc(size));
1269 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1270 }
1271
1272 for (void* alloc : allocs) {
1273 free(alloc);
1274 }
1275 allocs.clear();
1276
1277 for (int i = 0; i < num_iterations; ++i) {
1278 int size = get_alloc_size(i);
1279 allocs.push_back(malloc(size));
1280 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1281 }
1282
1283 for (void* alloc : allocs) {
1284 free(alloc);
1285 }
1286}
1287
// Verifies that M_BIONIC_ZERO_INIT causes freed memory to be returned
// zeroed, for both scudo's primary (small) and secondary (large) allocators.
TEST(malloc, zero_init) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Enable zero-initialization of reused heap memory.
  mallopt(M_BIONIC_ZERO_INIT, 1);

  // Test using a block of 4K small (1-32 byte) allocations.
  TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
    return 1 + iteration % 32;
  });

  // Also test large allocations that land in the scudo secondary, as this is
  // the only part of Scudo that's changed by enabling zero initialization with
  // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
  // release secondary allocations back to the OS) was modified to 0ms/1ms by
  // mallopt_decay. Ensure that we delay for at least a second before releasing
  // pages to the OS in order to avoid implicit zeroing by the kernel.
  mallopt(M_DECAY_TIME, 1000);
  TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
    return 1 << (19 + iteration % 4);
  });

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1319
// Note that MTE is enabled on cc_tests on devices that support MTE.
// Verifies that disabling heap tagging via mallopt also clears the MTE
// tag-check mode (PR_MTE_TCF_*) on every thread, including one that was
// blocked (on a semaphore) when the mallopt call was made.
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  // The thread blocks on the semaphore, then reports its own tagged-address
  // control state (read via prctl) through its return value.
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  // Disable heap tagging while the other thread is still blocked.
  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  // This thread must now have tag checks disabled...
  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(PR_MTE_TCF_NONE, my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // ...and the woken thread must observe the same state.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}