/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <elf.h>
#include <fcntl.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <atomic>
#include <thread>
#include <vector>

#include <tinyxml2.h>

#include <android-base/file.h>

#include "utils.h"

#if defined(__BIONIC__)

#include "platform/bionic/malloc.h"
#include "platform/bionic/reserved_signals.h"
#include "private/bionic_config.h"

#define HAVE_REALLOCARRAY 1

#else

#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)

#endif

TEST(malloc, malloc_std) {
  // Simple malloc test.
  void *ptr = malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  free(ptr);
}

TEST(malloc, malloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, malloc(SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
}

TEST(malloc, calloc_std) {
  // Simple calloc test.
  size_t alloc_len = 100;
  char *ptr = (char *)calloc(1, alloc_len);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(alloc_len, malloc_usable_size(ptr));
  for (size_t i = 0; i < alloc_len; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_illegal) {
  SKIP_WITH_HWASAN;
  errno = 0;
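  // The -1 converts to SIZE_MAX, so the nmemb * size computation overflows.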
  ASSERT_EQ(nullptr, calloc(-1, 100));
  ASSERT_EQ(ENOMEM, errno);
}

TEST(malloc, calloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  errno = 0;
  ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  errno = 0;
  ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  errno = 0;
  ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
  ASSERT_EQ(ENOMEM, errno);
}

TEST(malloc, memalign_multiple) {
  SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
  // Memalign test where the alignment is any value.
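  // For a non-power-of-two request, only verify alignment to the largest
  // power of two that does not exceed the requested value.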
  for (size_t i = 0; i <= 12; i++) {
    for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
      char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
      ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
      ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
      ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
          << "Failed at alignment " << alignment;
      free(ptr);
    }
  }
}

TEST(malloc, memalign_overflow) {
  SKIP_WITH_HWASAN;
  ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
}

TEST(malloc, memalign_non_power2) {
  SKIP_WITH_HWASAN;
  void* ptr;
  for (size_t align = 0; align <= 256; align++) {
    ptr = memalign(align, 1024);
    ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
    free(ptr);
  }
}

TEST(malloc, memalign_realloc) {
  // Memalign and then realloc the pointer a couple of times.
  for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
    char *ptr = (char*)memalign(alignment, 100);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(100U, malloc_usable_size(ptr));
    ASSERT_EQ(0U, (intptr_t)ptr % alignment);
    memset(ptr, 0x23, 100);

    ptr = (char*)realloc(ptr, 200);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(200U, malloc_usable_size(ptr));
    ASSERT_TRUE(ptr != nullptr);
    for (size_t i = 0; i < 100; i++) {
      ASSERT_EQ(0x23, ptr[i]);
    }
    memset(ptr, 0x45, 200);

    ptr = (char*)realloc(ptr, 300);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(300U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 200; i++) {
      ASSERT_EQ(0x45, ptr[i]);
    }
    memset(ptr, 0x67, 300);

    ptr = (char*)realloc(ptr, 250);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(250U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 250; i++) {
      ASSERT_EQ(0x67, ptr[i]);
    }
    free(ptr);
  }
}

TEST(malloc, malloc_realloc_larger) {
  // Realloc to a larger size, malloc is used for the original allocation.
  char *ptr = (char *)malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  memset(ptr, 67, 100);

  ptr = (char *)realloc(ptr, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, malloc_realloc_smaller) {
  // Realloc to a smaller size, malloc is used for the original allocation.
  char *ptr = (char *)malloc(200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 67, 200);

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, malloc_multiple_realloc) {
  // Multiple reallocs, malloc is used for the original allocation.
  char *ptr = (char *)malloc(200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 0x23, 200);

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  memset(ptr, 0x23, 150);

  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_realloc_larger) {
  // Realloc to a larger size, calloc is used for the original allocation.
  char *ptr = (char *)calloc(1, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_realloc_smaller) {
  // Realloc to a smaller size, calloc is used for the original allocation.
  char *ptr = (char *)calloc(1, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_multiple_realloc) {
  // Multiple reallocs, calloc is used for the original allocation.
  char *ptr = (char *)calloc(1, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  memset(ptr, 0, 150);

  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, realloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  void* ptr = malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  errno = 0;
  ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  free(ptr);
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif

TEST(malloc, pvalloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* ptr = pvalloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
  ASSERT_LE(pagesize, malloc_usable_size(ptr));
  free(ptr);
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}

TEST(malloc, pvalloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}

TEST(malloc, valloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* ptr = valloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
  free(ptr);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}

TEST(malloc, valloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  ASSERT_EQ(nullptr, valloc(SIZE_MAX));
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}

TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // Verify jemalloc version of this data.
    ASSERT_STREQ("jemalloc-1", root->Attribute("version"));

    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else {
    // Do not verify output for scudo or debug malloc.
    ASSERT_TRUE(version == "scudo-1" || version == "debug-malloc-1")
        << "Unknown version: " << version;
  }
#endif
}

TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // Verify jemalloc version of this data.
    ASSERT_STREQ("jemalloc-1", root->Attribute("version"));

    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else {
    // Do not verify output for scudo or debug malloc.
    ASSERT_TRUE(version == "scudo-1" || version == "debug-malloc-1")
        << "Unknown version: " << version;
  }
#endif
}

TEST(malloc, calloc_usable_size) {
  for (size_t size = 1; size <= 2048; size++) {
    void* pointer = malloc(size);
    ASSERT_TRUE(pointer != nullptr);
    memset(pointer, 0xeb, malloc_usable_size(pointer));
    free(pointer);

    // We should get a previous pointer that has been set to non-zero.
    // If calloc does not zero out all of the data, this will fail.
    uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
    ASSERT_TRUE(zero_mem != nullptr);
    size_t usable_size = malloc_usable_size(zero_mem);
    for (size_t i = 0; i < usable_size; i++) {
      ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
    }
    free(zero_mem);
  }
}

TEST(malloc, malloc_0) {
  void* p = malloc(0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, calloc_0_0) {
  void* p = calloc(0, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, calloc_0_1) {
  void* p = calloc(0, 1);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, calloc_1_0) {
  void* p = calloc(1, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, realloc_nullptr_0) {
  // realloc(nullptr, size) is actually malloc(size).
  void* p = realloc(nullptr, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, realloc_0) {
  void* p = malloc(1024);
  ASSERT_TRUE(p != nullptr);
  // realloc(p, 0) is actually free(p).
  void* p2 = realloc(p, 0);
  ASSERT_TRUE(p2 == nullptr);
}

constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}

TEST(malloc, mallopt_smoke) {
  errno = 0;
  ASSERT_EQ(0, mallopt(-1000, 1));
  // mallopt doesn't set errno.
  ASSERT_EQ(0, errno);
}

TEST(malloc, mallopt_decay) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
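  // M_DECAY_TIME controls whether freed memory is purged immediately (0) or
  // after a decay period (non-zero).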
  errno = 0;
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  errno = 0;
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(malloc, reallocarray_overflow) {
#if HAVE_REALLOCARRAY
  // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
  size_t a = static_cast<size_t>(INTPTR_MIN + 4);
  size_t b = 2;

  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
  ASSERT_EQ(ENOMEM, errno);

  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
  ASSERT_EQ(ENOMEM, errno);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}

TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  void* p = reallocarray(nullptr, 2, 32);
  ASSERT_TRUE(p != nullptr);
  ASSERT_GE(malloc_usable_size(p), 64U);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}

TEST(malloc, mallinfo) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  constexpr static size_t kMaxAllocs = 50;

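  // mallinfo().uordblks is the allocator's count of currently allocated bytes.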
  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations count the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}

template <typename Type>
void __attribute__((optnone)) VerifyAlignment(Type* floating) {
  size_t expected_alignment = alignof(Type);
  if (expected_alignment != 0) {
    ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
        << "Expected alignment " << expected_alignment << " ptr value " << floating;
  }
}

template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}

#if defined(__ANDROID__)
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
}
#endif

TEST(malloc, align_check) {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  // - Allocations of a size that rounds up to a multiple of 16 bytes
  //   must have at least 16 byte alignment.
  // - Allocations of a size that rounds up to a multiple of 8 bytes and
  //   not 16 bytes, are only required to have at least 8 byte alignment.
  // This is regardless of whether it is in a 32 bit or 64 bit environment.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  for (size_t i = 1; i <= 128; i++) {
    size_t rounded = (i + 7) & ~7;
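    // e.g. a request of 9 rounds to 16 and must be 16-byte aligned, while a
    // request of 20 rounds to 24 and only needs 8-byte alignment.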
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}

// Jemalloc doesn't pass this test right now, so leave it as disabled.
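// After fork() the child contains only the forking thread, so the allocator
// must not be left holding locks that another thread owned at fork time.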
TEST(malloc, DISABLED_alloc_after_fork) {
  // Both of these need to be a power of 2.
  static constexpr size_t kMinAllocationSize = 8;
  static constexpr size_t kMaxAllocationSize = 2097152;

  static constexpr size_t kNumAllocatingThreads = 5;
  static constexpr size_t kNumForkLoops = 100;

  std::atomic_bool stop;

  // Create threads that simply allocate and free different sizes.
  std::vector<std::thread*> threads;
  for (size_t i = 0; i < kNumAllocatingThreads; i++) {
    std::thread* t = new std::thread([&stop] {
      while (!stop) {
        for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
          void* ptr = malloc(size);
          if (ptr == nullptr) {
            return;
          }
          // Make sure this value is not optimized away.
          asm volatile("" : : "r,m"(ptr) : "memory");
          free(ptr);
        }
      }
    });
    threads.push_back(t);
  }

  // Repeatedly fork and allocate in the child.
  for (size_t i = 0; i < kNumForkLoops; i++) {
    pid_t pid;
    if ((pid = fork()) == 0) {
      for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
        void* ptr = malloc(size);
        ASSERT_TRUE(ptr != nullptr);
        // Make sure this value is not optimized away.
        asm volatile("" : : "r,m"(ptr) : "memory");
        // Make sure we can touch all of the allocation.
        memset(ptr, 0x1, size);
        ASSERT_LE(size, malloc_usable_size(ptr));
        free(ptr);
      }
      _exit(10);
    }
    ASSERT_NE(-1, pid);
    AssertChildExited(pid, 10);
  }

  stop = true;
  for (auto thread : threads) {
    thread->join();
    delete thread;
  }
}

TEST(android_mallopt, error_on_unexpected_option) {
#if defined(__BIONIC__)
  const int unrecognized_option = -1;
  errno = 0;
  EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
  EXPECT_EQ(ENOTSUP, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

bool IsDynamic() {
#if defined(__LP64__)
  Elf64_Ehdr ehdr;
#else
  Elf32_Ehdr ehdr;
#endif
  std::string path(android::base::GetExecutablePath());

  int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
  if (fd == -1) {
    // Assume dynamic on error.
    return true;
  }
  bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
  close(fd);
  // Assume dynamic in error cases.
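  // PIE executables also have an ELF type of ET_DYN, so they are treated as dynamic here.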
  return !read_completed || ehdr.e_type == ET_DYN;
}

TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(0, errno);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(ENOTSUP, errno);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    EXPECT_EQ(EINVAL, errno);
  } else {
    EXPECT_EQ(ENOTSUP, errno);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

#if defined(__BIONIC__)
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif

TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

#if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;
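// With the 100MB and 128MB limits used below, no more than 12 or 16 of these
// 8MB allocations can succeed, comfortably below the 20 slots reserved.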

static size_t GetMaxAllocations() {
  size_t max_pointers = 0;
  void* ptrs[20];
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(kAllocationSize);
    if (ptrs[i] == nullptr) {
      max_pointers = i;
      break;
    }
  }
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}

static void VerifyMaxPointers(size_t max_pointers) {
  // Now verify that we can allocate the same number as before.
  void* ptrs[20];
  for (size_t i = 0; i < max_pointers; i++) {
    ptrs[i] = malloc(kAllocationSize);
    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
  }

  // Make sure the next allocation still fails.
  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
}
#endif

TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Increase size.
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  // Now push past limit.
  memory = realloc(memory, 130 * 1024 * 1024);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Decrease size.
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

#if defined(__BIONIC__)
static void* SetAllocationLimit(void* data) {
  std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
  while (!go->load()) {
  }
  size_t limit = 500 * 1024 * 1024;
  if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
    return reinterpret_cast<void*>(-1);
  }
  return nullptr;
}

static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
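  // Presumably this makes heapprofd's hook installation race with the
  // limit-setting threads above.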
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  ASSERT_EQ(1U, num_successful);
  exit(0);
}
#endif

TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because error messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}