blob: ddd3416bebeebf76ac1a8fb7f953746eac2c4d24 [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
30#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080031#include <sys/types.h>
32#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070033#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070034
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080035#include <atomic>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080036#include <thread>
37
Dan Albert4caa1f02014-08-20 09:16:57 -070038#include <tinyxml2.h>
39
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080040#include <android-base/file.h>
41
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080042#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000043
Elliott Hughesb1770852018-09-18 12:52:42 -070044#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080045
Peter Collingbourne45819dd2020-01-09 11:00:43 -080046#include "SignalUtils.h"
47
Christopher Ferrisb874c332020-01-21 16:39:05 -080048#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070049#include "platform/bionic/mte.h"
Peter Collingbourne45819dd2020-01-09 11:00:43 -080050#include "platform/bionic/mte_kernel.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080051#include "platform/bionic/reserved_signals.h"
52#include "private/bionic_config.h"
53
Elliott Hughesb1770852018-09-18 12:52:42 -070054#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080055
Elliott Hughesb1770852018-09-18 12:52:42 -070056#else
Christopher Ferrisb874c332020-01-21 16:39:05 -080057
Elliott Hughesb1770852018-09-18 12:52:42 -070058#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080059
Elliott Hughesb1770852018-09-18 12:52:42 -070060#endif
61
Christopher Ferris885f3b92013-05-21 17:48:01 -070062TEST(malloc, malloc_std) {
63 // Simple malloc test.
64 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070065 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070066 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070067 free(ptr);
68}
69
TEST(malloc, malloc_overflow) {
  SKIP_WITH_HWASAN;
  // A SIZE_MAX request can never be satisfied; it must fail and set ENOMEM.
  errno = 0;
  ASSERT_EQ(nullptr, malloc(SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
}
76
Christopher Ferris885f3b92013-05-21 17:48:01 -070077TEST(malloc, calloc_std) {
78 // Simple calloc test.
79 size_t alloc_len = 100;
80 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070081 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070082 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
83 for (size_t i = 0; i < alloc_len; i++) {
84 ASSERT_EQ(0, ptr[i]);
85 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070086 free(ptr);
87}
88
Peter Collingbourne978eb162020-09-21 15:26:02 -070089TEST(malloc, calloc_mem_init_disabled) {
90#if defined(__BIONIC__)
91 // calloc should still zero memory if mem-init is disabled.
92 // With jemalloc the mallopts will fail but that shouldn't affect the
93 // execution of the test.
94 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
95 size_t alloc_len = 100;
96 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
97 for (size_t i = 0; i < alloc_len; i++) {
98 ASSERT_EQ(0, ptr[i]);
99 }
100 free(ptr);
101 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
102#else
103 GTEST_SKIP() << "bionic-only test";
104#endif
105}
106
TEST(malloc, calloc_illegal) {
  SKIP_WITH_HWASAN;
  // A negative count converts to a huge size_t; calloc must reject it.
  errno = 0;
  ASSERT_EQ(nullptr, calloc(-1, 100));
  ASSERT_EQ(ENOMEM, errno);
}
113
114TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800115 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700116 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700117 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700118 ASSERT_EQ(ENOMEM, errno);
119 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700120 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700121 ASSERT_EQ(ENOMEM, errno);
122 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700123 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700124 ASSERT_EQ(ENOMEM, errno);
125 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700126 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700127 ASSERT_EQ(ENOMEM, errno);
128}
129
Christopher Ferris885f3b92013-05-21 17:48:01 -0700130TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800131 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700132 // Memalign test where the alignment is any value.
133 for (size_t i = 0; i <= 12; i++) {
134 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700135 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700136 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700137 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
138 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
139 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700140 free(ptr);
141 }
142 }
143}
144
TEST(malloc, memalign_overflow) {
  SKIP_WITH_HWASAN;
  // A SIZE_MAX request must fail rather than wrap around.
  ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
}
149
150TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800151 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700152 void* ptr;
153 for (size_t align = 0; align <= 256; align++) {
154 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700155 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700156 free(ptr);
157 }
158}
159
Christopher Ferris885f3b92013-05-21 17:48:01 -0700160TEST(malloc, memalign_realloc) {
161 // Memalign and then realloc the pointer a couple of times.
162 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
163 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700164 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700165 ASSERT_LE(100U, malloc_usable_size(ptr));
166 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
167 memset(ptr, 0x23, 100);
168
169 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700170 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700171 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700172 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700173 for (size_t i = 0; i < 100; i++) {
174 ASSERT_EQ(0x23, ptr[i]);
175 }
176 memset(ptr, 0x45, 200);
177
178 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700179 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700180 ASSERT_LE(300U, malloc_usable_size(ptr));
181 for (size_t i = 0; i < 200; i++) {
182 ASSERT_EQ(0x45, ptr[i]);
183 }
184 memset(ptr, 0x67, 300);
185
186 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700187 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700188 ASSERT_LE(250U, malloc_usable_size(ptr));
189 for (size_t i = 0; i < 250; i++) {
190 ASSERT_EQ(0x67, ptr[i]);
191 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700192 free(ptr);
193 }
194}
195
196TEST(malloc, malloc_realloc_larger) {
197 // Realloc to a larger size, malloc is used for the original allocation.
198 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700199 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700200 ASSERT_LE(100U, malloc_usable_size(ptr));
201 memset(ptr, 67, 100);
202
203 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700204 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700205 ASSERT_LE(200U, malloc_usable_size(ptr));
206 for (size_t i = 0; i < 100; i++) {
207 ASSERT_EQ(67, ptr[i]);
208 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700209 free(ptr);
210}
211
212TEST(malloc, malloc_realloc_smaller) {
213 // Realloc to a smaller size, malloc is used for the original allocation.
214 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700215 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700216 ASSERT_LE(200U, malloc_usable_size(ptr));
217 memset(ptr, 67, 200);
218
219 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700220 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700221 ASSERT_LE(100U, malloc_usable_size(ptr));
222 for (size_t i = 0; i < 100; i++) {
223 ASSERT_EQ(67, ptr[i]);
224 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700225 free(ptr);
226}
227
228TEST(malloc, malloc_multiple_realloc) {
229 // Multiple reallocs, malloc is used for the original allocation.
230 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700231 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700232 ASSERT_LE(200U, malloc_usable_size(ptr));
233 memset(ptr, 0x23, 200);
234
235 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700236 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700237 ASSERT_LE(100U, malloc_usable_size(ptr));
238 for (size_t i = 0; i < 100; i++) {
239 ASSERT_EQ(0x23, ptr[i]);
240 }
241
242 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700243 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700244 ASSERT_LE(50U, malloc_usable_size(ptr));
245 for (size_t i = 0; i < 50; i++) {
246 ASSERT_EQ(0x23, ptr[i]);
247 }
248
249 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700250 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700251 ASSERT_LE(150U, malloc_usable_size(ptr));
252 for (size_t i = 0; i < 50; i++) {
253 ASSERT_EQ(0x23, ptr[i]);
254 }
255 memset(ptr, 0x23, 150);
256
257 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700258 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700259 ASSERT_LE(425U, malloc_usable_size(ptr));
260 for (size_t i = 0; i < 150; i++) {
261 ASSERT_EQ(0x23, ptr[i]);
262 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700263 free(ptr);
264}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700265
Christopher Ferris885f3b92013-05-21 17:48:01 -0700266TEST(malloc, calloc_realloc_larger) {
267 // Realloc to a larger size, calloc is used for the original allocation.
268 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700269 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700270 ASSERT_LE(100U, malloc_usable_size(ptr));
271
272 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700273 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700274 ASSERT_LE(200U, malloc_usable_size(ptr));
275 for (size_t i = 0; i < 100; i++) {
276 ASSERT_EQ(0, ptr[i]);
277 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700278 free(ptr);
279}
280
281TEST(malloc, calloc_realloc_smaller) {
282 // Realloc to a smaller size, calloc is used for the original allocation.
283 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700284 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700285 ASSERT_LE(200U, malloc_usable_size(ptr));
286
287 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700288 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700289 ASSERT_LE(100U, malloc_usable_size(ptr));
290 for (size_t i = 0; i < 100; i++) {
291 ASSERT_EQ(0, ptr[i]);
292 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700293 free(ptr);
294}
295
296TEST(malloc, calloc_multiple_realloc) {
297 // Multiple reallocs, calloc is used for the original allocation.
298 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700299 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700300 ASSERT_LE(200U, malloc_usable_size(ptr));
301
302 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700303 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700304 ASSERT_LE(100U, malloc_usable_size(ptr));
305 for (size_t i = 0; i < 100; i++) {
306 ASSERT_EQ(0, ptr[i]);
307 }
308
309 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700310 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700311 ASSERT_LE(50U, malloc_usable_size(ptr));
312 for (size_t i = 0; i < 50; i++) {
313 ASSERT_EQ(0, ptr[i]);
314 }
315
316 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700317 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700318 ASSERT_LE(150U, malloc_usable_size(ptr));
319 for (size_t i = 0; i < 50; i++) {
320 ASSERT_EQ(0, ptr[i]);
321 }
322 memset(ptr, 0, 150);
323
324 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700325 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700326 ASSERT_LE(425U, malloc_usable_size(ptr));
327 for (size_t i = 0; i < 150; i++) {
328 ASSERT_EQ(0, ptr[i]);
329 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700330 free(ptr);
331}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700332
TEST(malloc, realloc_overflow) {
  SKIP_WITH_HWASAN;
  // realloc to SIZE_MAX must fail with ENOMEM, both for a null source
  // pointer (malloc path) and an existing allocation.
  errno = 0;
  ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  void* ptr = malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  errno = 0;
  ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  free(ptr);
}
345
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
// pvalloc/valloc are no longer declared in the headers; declare them here so
// the deprecated-function tests below can still call them.
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000350
Christopher Ferrisa4037802014-06-09 19:14:11 -0700351TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700352#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700353 size_t pagesize = sysconf(_SC_PAGESIZE);
354 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700355 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700356 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
357 ASSERT_LE(pagesize, malloc_usable_size(ptr));
358 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700359#else
360 GTEST_SKIP() << "pvalloc not supported.";
361#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700362}
363
TEST(malloc, pvalloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // Rounding SIZE_MAX up to a page would overflow; pvalloc must fail.
  ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}
371
372TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700373#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700374 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700375 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700376 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700377 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
378 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700379#else
380 GTEST_SKIP() << "valloc not supported.";
381#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700382}
383
TEST(malloc, valloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // A SIZE_MAX request must fail rather than wrap.
  ASSERT_EQ(nullptr, valloc(SIZE_MAX));
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}
Dan Albert4caa1f02014-08-20 09:16:57 -0700391
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN;  // hwasan does not implement malloc_info

  // Capture malloc_info() XML output into a temporary file, then parse it
  // and validate the schema produced by whichever allocator is active.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fdopen took ownership of the fd.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // Root element is <malloc version="...">; the version attribute tells us
  // which allocator generated the report.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: one <heap nr="..."> per arena, each with per-size-class
    // totals and optional <bin> children.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size="..." count="..."> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800458
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN;  // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fdopen took ownership of the fd.
  ASSERT_TRUE(fp != nullptr);
  // Sample mallinfo() immediately before and after malloc_info(), because
  // malloc_info() itself allocates; the XML totals should land in between.
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // Sum the allocated bytes reported in the XML, per allocator schema.
  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
526
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800527TEST(malloc, calloc_usable_size) {
528 for (size_t size = 1; size <= 2048; size++) {
529 void* pointer = malloc(size);
530 ASSERT_TRUE(pointer != nullptr);
531 memset(pointer, 0xeb, malloc_usable_size(pointer));
532 free(pointer);
533
534 // We should get a previous pointer that has been set to non-zero.
535 // If calloc does not zero out all of the data, this will fail.
536 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
537 ASSERT_TRUE(pointer != nullptr);
538 size_t usable_size = malloc_usable_size(zero_mem);
539 for (size_t i = 0; i < usable_size; i++) {
540 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
541 }
542 free(zero_mem);
543 }
544}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800545
TEST(malloc, malloc_0) {
  // malloc(0) must return a unique, freeable non-null pointer.
  void* p = malloc(0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}
551
TEST(malloc, calloc_0_0) {
  // calloc(0, 0) must return a freeable non-null pointer.
  void* p = calloc(0, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}
557
TEST(malloc, calloc_0_1) {
  // A zero element count must still yield a freeable non-null pointer.
  void* p = calloc(0, 1);
  ASSERT_TRUE(p != nullptr);
  free(p);
}
563
TEST(malloc, calloc_1_0) {
  // A zero element size must still yield a freeable non-null pointer.
  void* p = calloc(1, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}
569
TEST(malloc, realloc_nullptr_0) {
  // realloc(nullptr, size) is actually malloc(size).
  void* p = realloc(nullptr, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}
576
TEST(malloc, realloc_0) {
  void* p = malloc(1024);
  ASSERT_TRUE(p != nullptr);
  // realloc(p, 0) is actually free(p).
  void* p2 = realloc(p, 0);
  ASSERT_TRUE(p2 == nullptr);
}
Christopher Ferris72df6702016-02-11 15:51:31 -0800584
585constexpr size_t MAX_LOOPS = 200;
586
587// Make sure that memory returned by malloc is aligned to allow these data types.
588TEST(malloc, verify_alignment) {
589 uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
590 uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
591 long double** values_ldouble = new long double*[MAX_LOOPS];
592 // Use filler to attempt to force the allocator to get potentially bad alignments.
593 void** filler = new void*[MAX_LOOPS];
594
595 for (size_t i = 0; i < MAX_LOOPS; i++) {
596 // Check uint32_t pointers.
597 filler[i] = malloc(1);
598 ASSERT_TRUE(filler[i] != nullptr);
599
600 values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
601 ASSERT_TRUE(values_32[i] != nullptr);
602 *values_32[i] = i;
603 ASSERT_EQ(*values_32[i], i);
604 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));
605
606 free(filler[i]);
607 }
608
609 for (size_t i = 0; i < MAX_LOOPS; i++) {
610 // Check uint64_t pointers.
611 filler[i] = malloc(1);
612 ASSERT_TRUE(filler[i] != nullptr);
613
614 values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
615 ASSERT_TRUE(values_64[i] != nullptr);
616 *values_64[i] = 0x1000 + i;
617 ASSERT_EQ(*values_64[i], 0x1000 + i);
618 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));
619
620 free(filler[i]);
621 }
622
623 for (size_t i = 0; i < MAX_LOOPS; i++) {
624 // Check long double pointers.
625 filler[i] = malloc(1);
626 ASSERT_TRUE(filler[i] != nullptr);
627
628 values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
629 ASSERT_TRUE(values_ldouble[i] != nullptr);
630 *values_ldouble[i] = 5.5 + i;
631 ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
632 // 32 bit glibc has a long double size of 12 bytes, so hardcode the
633 // required alignment to 0x7.
634#if !defined(__BIONIC__) && !defined(__LP64__)
635 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
636#else
637 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
638#endif
639
640 free(filler[i]);
641 }
642
643 for (size_t i = 0; i < MAX_LOOPS; i++) {
644 free(values_32[i]);
645 free(values_64[i]);
646 free(values_ldouble[i]);
647 }
648
649 delete[] filler;
650 delete[] values_32;
651 delete[] values_64;
652 delete[] values_ldouble;
653}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700654
TEST(malloc, mallopt_smoke) {
  // An unknown option must be rejected with a 0 return.
  errno = 0;
  ASSERT_EQ(0, mallopt(-1000, 1));
  // mallopt doesn't set errno.
  ASSERT_EQ(0, errno);
}
Elliott Hughesb1770852018-09-18 12:52:42 -0700661
TEST(malloc, mallopt_decay) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  // Toggling M_DECAY_TIME back and forth should succeed (return 1) each time.
  errno = 0;
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
674
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  // M_PURGE asks the allocator to release free pages; it should succeed.
  errno = 0;
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
684
#if defined(__BIONIC__)
// Runs malloc_info() and parses its XML to determine whether the active
// allocator is scudo (root version attribute "scudo-1"). Uses ASSERT_*, so
// it must be called from a test body (void helper convention).
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fdopen took ownership of the fd.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif
708
// Verify the scudo-specific mallopt options (cache and TSD limits) are
// accepted when scudo is the active allocator; skipped otherwise.
TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
  ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
  ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
724
// reallocarray must detect multiplication overflow of nmemb * size and fail
// with ENOMEM instead of allocating the (small) wrapped-around product.
TEST(malloc, reallocarray_overflow) {
#if HAVE_REALLOCARRAY
  // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
  size_t a = static_cast<size_t>(INTPTR_MIN + 4);
  size_t b = 2;

  // Check both operand orders: overflow detection must be symmetric.
  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
  ASSERT_EQ(ENOMEM, errno);

  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
  ASSERT_EQ(ENOMEM, errno);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
742
// Basic reallocarray success case: 2 elements of 32 bytes must yield a
// usable allocation of at least 64 bytes.
TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  void* p = reallocarray(nullptr, 2, 32);
  ASSERT_TRUE(p != nullptr);
  ASSERT_GE(malloc_usable_size(p), 64U);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800752
// Verify mallinfo().uordblks (total allocated bytes) grows when allocations
// of various sizes are made. Because thread caches and compaction can make
// the counter move non-monotonically, each size retries up to kMaxAllocs
// times looking for one allocation that visibly increases the total.
TEST(malloc, mallinfo) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};  // zero-init so the cleanup loop can free all slots
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // free(nullptr) is a no-op, so unused slots are safe to pass through.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
      << "For size " << size << " allocated bytes did not increase after "
      << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000797
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800798template <typename Type>
799void __attribute__((optnone)) VerifyAlignment(Type* floating) {
800 size_t expected_alignment = alignof(Type);
801 if (expected_alignment != 0) {
802 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
803 << "Expected alignment " << expected_alignment << " ptr value " << floating;
804 }
805}
806
// Allocate Type repeatedly via operator new, malloc, and std::vector, and
// verify every resulting pointer meets alignof(Type). VerifyAlignment uses
// ASSERT_* (which only returns from that function), so HasFatalFailure is
// checked after each call to propagate the failure out of this helper.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      // Bail out on the first failure; outstanding allocations are
      // deliberately not cleaned up since the test is failing anyway.
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
849
#if defined(__ANDROID__)
// Allocate |alloc_size| bytes 100 times in a row and assert that each
// returned pointer has at least |aligned_bytes| alignment (must be a power
// of two). The allocations are kept live for the whole loop so consecutive
// allocations from the same size bin are all checked, then freed.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
  // Free everything on success. (On an ASSERT failure above we return early
  // and skip cleanup; the previous version leaked all 100 allocations on
  // every call, which adds up since align_check invokes this ~128 times.)
  for (void* ptr : ptrs) {
    free(ptr);
  }
}
#endif
863
// Check that new/malloc/vector allocations of all fundamental types meet
// their natural alignment, and (on Android) that raw malloc sizes get the
// stronger 8/16-byte alignment guarantees Android code relies on.
TEST(malloc, align_check) {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  // - Allocations of a size that rounds up to a multiple of 16 bytes
  //   must have at least 16 byte alignment.
  // - Allocations of a size that rounds up to a multiple of 8 bytes and
  //   not 16 bytes, are only required to have at least 8 byte alignment.
  // This is regardless of whether it is in a 32 bit or 64 bit environment.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  for (size_t i = 1; i <= 128; i++) {
    // Round the request up to its 8-byte bucket to decide which guarantee applies.
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}
910
// Stress test that the allocator remains usable in a child after fork()
// while other threads are concurrently allocating in the parent (i.e. the
// allocator's internal locks are fork-safe).
// Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc, DISABLED_alloc_after_fork) {
  // Both of these need to be a power of 2.
  static constexpr size_t kMinAllocationSize = 8;
  static constexpr size_t kMaxAllocationSize = 2097152;

  static constexpr size_t kNumAllocatingThreads = 5;
  static constexpr size_t kNumForkLoops = 100;

  std::atomic_bool stop;

  // Create threads that simply allocate and free different sizes.
  std::vector<std::thread*> threads;
  for (size_t i = 0; i < kNumAllocatingThreads; i++) {
    std::thread* t = new std::thread([&stop] {
      while (!stop) {
        for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
          void* ptr;
          // DoNotOptimize keeps the compiler from eliding the malloc/free pair.
          DoNotOptimize(ptr = malloc(size));
          free(ptr);
        }
      }
    });
    threads.push_back(t);
  }

  // Create a thread to fork and allocate.
  for (size_t i = 0; i < kNumForkLoops; i++) {
    pid_t pid;
    if ((pid = fork()) == 0) {
      // In the child: every power-of-two size must still be allocatable.
      for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
        void* ptr;
        DoNotOptimize(ptr = malloc(size));
        ASSERT_TRUE(ptr != nullptr);
        // Make sure we can touch all of the allocation.
        memset(ptr, 0x1, size);
        ASSERT_LE(size, malloc_usable_size(ptr));
        free(ptr);
      }
      // _exit (not exit) to avoid running parent-registered atexit handlers.
      _exit(10);
    }
    ASSERT_NE(-1, pid);
    AssertChildExited(pid, 10);
  }

  stop = true;
  for (auto thread : threads) {
    thread->join();
    delete thread;
  }
}
962
// android_mallopt must reject unknown option values with ENOTSUP.
TEST(android_mallopt, error_on_unexpected_option) {
#if defined(__BIONIC__)
  const int unrecognized_option = -1;
  errno = 0;
  EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
  EXPECT_EQ(ENOTSUP, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
973
Christopher Ferrise4cdbc42019-02-08 17:30:58 -0800974bool IsDynamic() {
975#if defined(__LP64__)
976 Elf64_Ehdr ehdr;
977#else
978 Elf32_Ehdr ehdr;
979#endif
980 std::string path(android::base::GetExecutablePath());
981
982 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
983 if (fd == -1) {
984 // Assume dynamic on error.
985 return true;
986 }
987 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
988 close(fd);
989 // Assume dynamic in error cases.
990 return !read_completed || ehdr.e_type == ET_DYN;
991}
992
// M_INIT_ZYGOTE_CHILD_PROFILING should succeed in dynamic executables and
// fail with ENOTSUP in static ones; passing any argument is rejected with
// EINVAL (when supported at all).
TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(0, errno);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(ENOTSUP, errno);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    EXPECT_EQ(EINVAL, errno);
  } else {
    // Static executables fail with ENOTSUP before argument validation.
    EXPECT_EQ(ENOTSUP, errno);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001019
#if defined(__BIONIC__)
// Helper run inside a child process (via EXPECT_EXIT): installs a 128MB
// allocation limit, then requires |func| to succeed for a 20MB request and
// fail for a 128MB request. Exits 0 when both expectations hold, 1 otherwise.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif
1033
// Exercise M_SET_ALLOCATION_LIMIT_BYTES against every allocation entry
// point. Each lambda runs in its own child process (EXPECT_EXIT), so the
// limit installed by CheckAllocationFunction never leaks between cases.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
#if !defined(__LP64__)
  // pvalloc/valloc are legacy interfaces only present on 32-bit bionic.
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1068
// The allocation limit can only be installed once per process: the second
// M_SET_ALLOCATION_LIMIT_BYTES call must fail.
TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1080
1081#if defined(__BIONIC__)
1082static constexpr size_t kAllocationSize = 8 * 1024 * 1024;
1083
1084static size_t GetMaxAllocations() {
1085 size_t max_pointers = 0;
1086 void* ptrs[20];
1087 for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
1088 ptrs[i] = malloc(kAllocationSize);
1089 if (ptrs[i] == nullptr) {
1090 max_pointers = i;
1091 break;
1092 }
1093 }
1094 for (size_t i = 0; i < max_pointers; i++) {
1095 free(ptrs[i]);
1096 }
1097 return max_pointers;
1098}
1099
// Given the allocation count previously observed by GetMaxAllocations,
// verify the limit still behaves identically: the same number of
// kAllocationSize allocations succeed, the next one fails, then free all.
static void VerifyMaxPointers(size_t max_pointers) {
  // Now verify that we can allocate the same number as before.
  void* ptrs[20];
  for (size_t i = 0; i < max_pointers; i++) {
    ptrs[i] = malloc(kAllocationSize);
    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
  }

  // Make sure the next allocation still fails.
  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
}
1114#endif
1115
// Growing an allocation with realloc must count against the allocation
// limit: increases below the limit succeed, pushing past it fails, and the
// limit accounting stays consistent afterwards.
TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Increase size.
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  // Now push past limit.
  // On failure realloc returns nullptr and the 80MB block is still live;
  // the test relies on process teardown to reclaim it.
  memory = realloc(memory, 130 * 1024 * 1024);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1145
// Shrinking an allocation with realloc must keep the limit accounting
// consistent: every decrease succeeds and the original headroom is restored
// after freeing.
TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Decrease size.
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1173
// realloc(ptr, 0) frees the allocation; the freed bytes must be returned to
// the limit's headroom.
TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1193
1194#if defined(__BIONIC__)
// pthread entry point: busy-wait until *data (an atomic flag) is set, then
// race with sibling threads to install the allocation limit. Returns
// (void*)-1 if this thread's android_mallopt call won the race, nullptr
// otherwise, so the caller can count winners.
static void* SetAllocationLimit(void* data) {
  std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
  while (!go->load()) {
  }
  size_t limit = 500 * 1024 * 1024;
  if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
    return reinterpret_cast<void*>(-1);
  }
  return nullptr;
}
1205
// Runs in a forked child: start several threads that simultaneously try to
// install the allocation limit, fire the heapprofd signal to add contention
// from the profiler hook, and assert that exactly one thread succeeded.
// Exits 0 on success (the parent checks the exit status).
static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Exactly one android_mallopt call may succeed (non-null thread result).
  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  ASSERT_EQ(1U, num_successful);
  exit(0);
}
1235#endif
1236
// Stress the one-winner guarantee of M_SET_ALLOCATION_LIMIT_BYTES under
// thread and signal-handler contention, 100 forked children in a row.
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    // Enables the heapprofd hooks so the signal sent below has a handler.
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001259
// M_DISABLE_MEMORY_MITIGATIONS should turn off MTE tag checking
// (PR_MTE_TCF_NONE) for the calling thread AND for other already-running
// threads. A second thread blocks on a semaphore until after the mallopt
// call, then reports its own tagged-addr-ctrl state for comparison.
TEST(android_mallopt, disable_memory_mitigations) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

#ifdef ANDROID_EXPERIMENTAL_MTE
  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     // Wait until the main thread has disabled mitigations,
                     // then return this thread's tagged-addr-ctrl bits.
                     sem_wait(sem);
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  ASSERT_TRUE(android_mallopt(M_DISABLE_MEMORY_MITIGATIONS, nullptr, 0));
  ASSERT_EQ(0, sem_post(&sem));

  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(PR_MTE_TCF_NONE, my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  // NOTE(review): implicit uintptr_t -> int narrowing; fine as long as the
  // prctl result fits in an int, which the comparison below relies on.
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}