blob: d692cf95c7bd0139985d8c37c2fe1a35726b4d5f [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000023#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070024#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080025#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070026#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080027#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080028#include <sys/auxv.h>
29#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080030#include <sys/types.h>
31#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070032#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070033
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080034#include <atomic>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080035#include <thread>
36
Dan Albert4caa1f02014-08-20 09:16:57 -070037#include <tinyxml2.h>
38
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080039#include <android-base/file.h>
40
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080041#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000042
Elliott Hughesb1770852018-09-18 12:52:42 -070043#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080044
Peter Collingbourne45819dd2020-01-09 11:00:43 -080045#include "SignalUtils.h"
46
Christopher Ferrisb874c332020-01-21 16:39:05 -080047#include "platform/bionic/malloc.h"
Peter Collingbourne45819dd2020-01-09 11:00:43 -080048#include "platform/bionic/mte_kernel.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080049#include "platform/bionic/reserved_signals.h"
50#include "private/bionic_config.h"
51
Elliott Hughesb1770852018-09-18 12:52:42 -070052#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080053
Elliott Hughesb1770852018-09-18 12:52:42 -070054#else
Christopher Ferrisb874c332020-01-21 16:39:05 -080055
Elliott Hughesb1770852018-09-18 12:52:42 -070056#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080057
Elliott Hughesb1770852018-09-18 12:52:42 -070058#endif
59
Christopher Ferris885f3b92013-05-21 17:48:01 -070060TEST(malloc, malloc_std) {
61 // Simple malloc test.
62 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070063 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070064 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070065 free(ptr);
66}
67
Christopher Ferrisa4037802014-06-09 19:14:11 -070068TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080069 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070070 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070071 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -070072 ASSERT_EQ(ENOMEM, errno);
73}
74
Christopher Ferris885f3b92013-05-21 17:48:01 -070075TEST(malloc, calloc_std) {
76 // Simple calloc test.
77 size_t alloc_len = 100;
78 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070079 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070080 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
81 for (size_t i = 0; i < alloc_len; i++) {
82 ASSERT_EQ(0, ptr[i]);
83 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070084 free(ptr);
85}
86
Peter Collingbourne978eb162020-09-21 15:26:02 -070087TEST(malloc, calloc_mem_init_disabled) {
88#if defined(__BIONIC__)
89 // calloc should still zero memory if mem-init is disabled.
90 // With jemalloc the mallopts will fail but that shouldn't affect the
91 // execution of the test.
92 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
93 size_t alloc_len = 100;
94 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
95 for (size_t i = 0; i < alloc_len; i++) {
96 ASSERT_EQ(0, ptr[i]);
97 }
98 free(ptr);
99 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
100#else
101 GTEST_SKIP() << "bionic-only test";
102#endif
103}
104
Christopher Ferrisa4037802014-06-09 19:14:11 -0700105TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800106 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700107 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700108 ASSERT_EQ(nullptr, calloc(-1, 100));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700109 ASSERT_EQ(ENOMEM, errno);
110}
111
112TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800113 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700114 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700115 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700116 ASSERT_EQ(ENOMEM, errno);
117 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700118 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700119 ASSERT_EQ(ENOMEM, errno);
120 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700121 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700122 ASSERT_EQ(ENOMEM, errno);
123 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700124 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700125 ASSERT_EQ(ENOMEM, errno);
126}
127
Christopher Ferris885f3b92013-05-21 17:48:01 -0700128TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800129 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700130 // Memalign test where the alignment is any value.
131 for (size_t i = 0; i <= 12; i++) {
132 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700133 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700134 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700135 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
136 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
137 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700138 free(ptr);
139 }
140 }
141}
142
Christopher Ferrisa4037802014-06-09 19:14:11 -0700143TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800144 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700145 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700146}
147
148TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800149 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700150 void* ptr;
151 for (size_t align = 0; align <= 256; align++) {
152 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700153 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700154 free(ptr);
155 }
156}
157
Christopher Ferris885f3b92013-05-21 17:48:01 -0700158TEST(malloc, memalign_realloc) {
159 // Memalign and then realloc the pointer a couple of times.
160 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
161 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700162 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700163 ASSERT_LE(100U, malloc_usable_size(ptr));
164 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
165 memset(ptr, 0x23, 100);
166
167 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700168 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700169 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700170 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700171 for (size_t i = 0; i < 100; i++) {
172 ASSERT_EQ(0x23, ptr[i]);
173 }
174 memset(ptr, 0x45, 200);
175
176 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700177 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700178 ASSERT_LE(300U, malloc_usable_size(ptr));
179 for (size_t i = 0; i < 200; i++) {
180 ASSERT_EQ(0x45, ptr[i]);
181 }
182 memset(ptr, 0x67, 300);
183
184 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700185 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700186 ASSERT_LE(250U, malloc_usable_size(ptr));
187 for (size_t i = 0; i < 250; i++) {
188 ASSERT_EQ(0x67, ptr[i]);
189 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700190 free(ptr);
191 }
192}
193
194TEST(malloc, malloc_realloc_larger) {
195 // Realloc to a larger size, malloc is used for the original allocation.
196 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700197 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700198 ASSERT_LE(100U, malloc_usable_size(ptr));
199 memset(ptr, 67, 100);
200
201 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700202 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700203 ASSERT_LE(200U, malloc_usable_size(ptr));
204 for (size_t i = 0; i < 100; i++) {
205 ASSERT_EQ(67, ptr[i]);
206 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700207 free(ptr);
208}
209
210TEST(malloc, malloc_realloc_smaller) {
211 // Realloc to a smaller size, malloc is used for the original allocation.
212 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700213 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700214 ASSERT_LE(200U, malloc_usable_size(ptr));
215 memset(ptr, 67, 200);
216
217 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700218 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700219 ASSERT_LE(100U, malloc_usable_size(ptr));
220 for (size_t i = 0; i < 100; i++) {
221 ASSERT_EQ(67, ptr[i]);
222 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700223 free(ptr);
224}
225
226TEST(malloc, malloc_multiple_realloc) {
227 // Multiple reallocs, malloc is used for the original allocation.
228 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700229 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700230 ASSERT_LE(200U, malloc_usable_size(ptr));
231 memset(ptr, 0x23, 200);
232
233 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700234 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700235 ASSERT_LE(100U, malloc_usable_size(ptr));
236 for (size_t i = 0; i < 100; i++) {
237 ASSERT_EQ(0x23, ptr[i]);
238 }
239
240 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700241 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700242 ASSERT_LE(50U, malloc_usable_size(ptr));
243 for (size_t i = 0; i < 50; i++) {
244 ASSERT_EQ(0x23, ptr[i]);
245 }
246
247 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700248 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700249 ASSERT_LE(150U, malloc_usable_size(ptr));
250 for (size_t i = 0; i < 50; i++) {
251 ASSERT_EQ(0x23, ptr[i]);
252 }
253 memset(ptr, 0x23, 150);
254
255 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700256 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700257 ASSERT_LE(425U, malloc_usable_size(ptr));
258 for (size_t i = 0; i < 150; i++) {
259 ASSERT_EQ(0x23, ptr[i]);
260 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700261 free(ptr);
262}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700263
Christopher Ferris885f3b92013-05-21 17:48:01 -0700264TEST(malloc, calloc_realloc_larger) {
265 // Realloc to a larger size, calloc is used for the original allocation.
266 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700267 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700268 ASSERT_LE(100U, malloc_usable_size(ptr));
269
270 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700271 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700272 ASSERT_LE(200U, malloc_usable_size(ptr));
273 for (size_t i = 0; i < 100; i++) {
274 ASSERT_EQ(0, ptr[i]);
275 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700276 free(ptr);
277}
278
279TEST(malloc, calloc_realloc_smaller) {
280 // Realloc to a smaller size, calloc is used for the original allocation.
281 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700282 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700283 ASSERT_LE(200U, malloc_usable_size(ptr));
284
285 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700286 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700287 ASSERT_LE(100U, malloc_usable_size(ptr));
288 for (size_t i = 0; i < 100; i++) {
289 ASSERT_EQ(0, ptr[i]);
290 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700291 free(ptr);
292}
293
294TEST(malloc, calloc_multiple_realloc) {
295 // Multiple reallocs, calloc is used for the original allocation.
296 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700297 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700298 ASSERT_LE(200U, malloc_usable_size(ptr));
299
300 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700301 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700302 ASSERT_LE(100U, malloc_usable_size(ptr));
303 for (size_t i = 0; i < 100; i++) {
304 ASSERT_EQ(0, ptr[i]);
305 }
306
307 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700308 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700309 ASSERT_LE(50U, malloc_usable_size(ptr));
310 for (size_t i = 0; i < 50; i++) {
311 ASSERT_EQ(0, ptr[i]);
312 }
313
314 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700315 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700316 ASSERT_LE(150U, malloc_usable_size(ptr));
317 for (size_t i = 0; i < 50; i++) {
318 ASSERT_EQ(0, ptr[i]);
319 }
320 memset(ptr, 0, 150);
321
322 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700323 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700324 ASSERT_LE(425U, malloc_usable_size(ptr));
325 for (size_t i = 0; i < 150; i++) {
326 ASSERT_EQ(0, ptr[i]);
327 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700328 free(ptr);
329}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700330
Christopher Ferrisa4037802014-06-09 19:14:11 -0700331TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800332 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700333 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700334 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700335 ASSERT_EQ(ENOMEM, errno);
336 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700337 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700338 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700339 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700340 ASSERT_EQ(ENOMEM, errno);
341 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700342}
343
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
// These deprecated entry points are no longer declared in the headers, so
// declare them by hand for the tests below.
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000348
Christopher Ferrisa4037802014-06-09 19:14:11 -0700349TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700350#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700351 size_t pagesize = sysconf(_SC_PAGESIZE);
352 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700353 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700354 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
355 ASSERT_LE(pagesize, malloc_usable_size(ptr));
356 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700357#else
358 GTEST_SKIP() << "pvalloc not supported.";
359#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700360}
361
362TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700363#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700364 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700365#else
366 GTEST_SKIP() << "pvalloc not supported.";
367#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700368}
369
370TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700371#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700372 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700373 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700374 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700375 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
376 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700377#else
378 GTEST_SKIP() << "valloc not supported.";
379#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700380}
381
382TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700383#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700384 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700385#else
386 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000387#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700388}
Dan Albert4caa1f02014-08-20 09:16:57 -0700389
// Verifies that malloc_info() emits well-formed XML whose schema matches the
// active allocator (jemalloc, scudo, or debug malloc).
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  // malloc_info writes to a FILE*; capture it via a temp file so the
  // output can be read back as a string.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // The root element is always <malloc version="...">; the version
  // attribute selects which schema to validate against.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: one <heap nr=...> per arena, with per-arena totals and an
    // optional list of <bin> children.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size=... count=...> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800456
// Cross-checks the totals reported by malloc_info() against mallinfo().
// mallinfo is sampled immediately before and after the malloc_info call
// (which itself allocates), so the parsed total must land between the two
// samples; the ordering of these calls is deliberate.
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  // Sample allocated bytes just before and just after malloc_info runs.
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // Sum the allocated bytes reported in the XML, per allocator schema.
  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
524
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800525TEST(malloc, calloc_usable_size) {
526 for (size_t size = 1; size <= 2048; size++) {
527 void* pointer = malloc(size);
528 ASSERT_TRUE(pointer != nullptr);
529 memset(pointer, 0xeb, malloc_usable_size(pointer));
530 free(pointer);
531
532 // We should get a previous pointer that has been set to non-zero.
533 // If calloc does not zero out all of the data, this will fail.
534 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
535 ASSERT_TRUE(pointer != nullptr);
536 size_t usable_size = malloc_usable_size(zero_mem);
537 for (size_t i = 0; i < usable_size; i++) {
538 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
539 }
540 free(zero_mem);
541 }
542}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800543
544TEST(malloc, malloc_0) {
545 void* p = malloc(0);
546 ASSERT_TRUE(p != nullptr);
547 free(p);
548}
549
550TEST(malloc, calloc_0_0) {
551 void* p = calloc(0, 0);
552 ASSERT_TRUE(p != nullptr);
553 free(p);
554}
555
556TEST(malloc, calloc_0_1) {
557 void* p = calloc(0, 1);
558 ASSERT_TRUE(p != nullptr);
559 free(p);
560}
561
562TEST(malloc, calloc_1_0) {
563 void* p = calloc(1, 0);
564 ASSERT_TRUE(p != nullptr);
565 free(p);
566}
567
568TEST(malloc, realloc_nullptr_0) {
569 // realloc(nullptr, size) is actually malloc(size).
570 void* p = realloc(nullptr, 0);
571 ASSERT_TRUE(p != nullptr);
572 free(p);
573}
574
575TEST(malloc, realloc_0) {
576 void* p = malloc(1024);
577 ASSERT_TRUE(p != nullptr);
578 // realloc(p, 0) is actually free(p).
579 void* p2 = realloc(p, 0);
580 ASSERT_TRUE(p2 == nullptr);
581}
Christopher Ferris72df6702016-02-11 15:51:31 -0800582
// Number of allocations each pass of the alignment test performs.
constexpr size_t MAX_LOOPS = 200;
584
// Make sure that memory returned by malloc is aligned to allow these data types.
// The interleaved 1-byte "filler" allocations are deliberate: they try to steer
// the allocator toward handing back poorly aligned blocks, so the allocation
// ordering in each loop must not be changed.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    // Write and read back through the pointer to verify it is usable.
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    // Natural alignment check: low bits of the address must be zero.
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  // The value allocations are kept alive across all three loops and only
  // released here.
  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700652
653TEST(malloc, mallopt_smoke) {
654 errno = 0;
655 ASSERT_EQ(0, mallopt(-1000, 1));
656 // mallopt doesn't set errno.
657 ASSERT_EQ(0, errno);
658}
Elliott Hughesb1770852018-09-18 12:52:42 -0700659
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800660TEST(malloc, mallopt_decay) {
661#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800662 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800663 errno = 0;
664 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
665 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
666 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
667 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
668#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800669 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800670#endif
671}
672
673TEST(malloc, mallopt_purge) {
674#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800675 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800676 errno = 0;
677 ASSERT_EQ(1, mallopt(M_PURGE, 0));
678#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800679 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800680#endif
681}
682
#if defined(__BIONIC__)
// Writes malloc_info() output to a temp file, parses it, and reports whether
// the running allocator identifies itself as scudo.
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string xml;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &xml));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(xml.c_str()));

  // The version attribute on the <malloc> root names the allocator.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif
706
707TEST(malloc, mallopt_scudo_only_options) {
708#if defined(__BIONIC__)
709 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
710 bool allocator_scudo;
711 GetAllocatorVersion(&allocator_scudo);
712 if (!allocator_scudo) {
713 GTEST_SKIP() << "scudo allocator only test";
714 }
715 ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
716 ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
717 ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
718#else
719 GTEST_SKIP() << "bionic-only test";
720#endif
721}
722
Elliott Hughesb1770852018-09-18 12:52:42 -0700723TEST(malloc, reallocarray_overflow) {
724#if HAVE_REALLOCARRAY
725 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
726 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
727 size_t b = 2;
728
729 errno = 0;
730 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
731 ASSERT_EQ(ENOMEM, errno);
732
733 errno = 0;
734 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
735 ASSERT_EQ(ENOMEM, errno);
736#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800737 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700738#endif
739}
740
741TEST(malloc, reallocarray) {
742#if HAVE_REALLOCARRAY
743 void* p = reallocarray(nullptr, 2, 32);
744 ASSERT_TRUE(p != nullptr);
745 ASSERT_GE(malloc_usable_size(p), 64U);
746#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800747 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700748#endif
749}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800750
751TEST(malloc, mallinfo) {
752#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800753 SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800754 static size_t sizes[] = {
755 8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
756 };
757
758 constexpr static size_t kMaxAllocs = 50;
759
760 for (size_t size : sizes) {
761 // If some of these allocations are stuck in a thread cache, then keep
762 // looping until we make an allocation that changes the total size of the
763 // memory allocated.
764 // jemalloc implementations counts the thread cache allocations against
765 // total memory allocated.
766 void* ptrs[kMaxAllocs] = {};
767 bool pass = false;
768 for (size_t i = 0; i < kMaxAllocs; i++) {
769 size_t allocated = mallinfo().uordblks;
770 ptrs[i] = malloc(size);
771 ASSERT_TRUE(ptrs[i] != nullptr);
772 size_t new_allocated = mallinfo().uordblks;
773 if (allocated != new_allocated) {
774 size_t usable_size = malloc_usable_size(ptrs[i]);
Christopher Ferris4e562282019-02-07 14:20:03 -0800775 // Only check if the total got bigger by at least allocation size.
776 // Sometimes the mallinfo numbers can go backwards due to compaction
777 // and/or freeing of cached data.
778 if (new_allocated >= allocated + usable_size) {
779 pass = true;
780 break;
781 }
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800782 }
783 }
784 for (void* ptr : ptrs) {
785 free(ptr);
786 }
787 ASSERT_TRUE(pass)
788 << "For size " << size << " allocated bytes did not increase after "
789 << kMaxAllocs << " allocations.";
790 }
791#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800792 GTEST_SKIP() << "glibc is broken";
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800793#endif
794}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000795
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800796template <typename Type>
797void __attribute__((optnone)) VerifyAlignment(Type* floating) {
798 size_t expected_alignment = alignof(Type);
799 if (expected_alignment != 0) {
800 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
801 << "Expected alignment " << expected_alignment << " ptr value " << floating;
802 }
803}
804
805template <typename Type>
806void __attribute__((optnone)) TestAllocateType() {
807 // The number of allocations to do in a row. This is to attempt to
808 // expose the worst case alignment for native allocators that use
809 // bins.
810 static constexpr size_t kMaxConsecutiveAllocs = 100;
811
812 // Verify using new directly.
813 Type* types[kMaxConsecutiveAllocs];
814 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
815 types[i] = new Type;
816 VerifyAlignment(types[i]);
817 if (::testing::Test::HasFatalFailure()) {
818 return;
819 }
820 }
821 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
822 delete types[i];
823 }
824
825 // Verify using malloc.
826 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
827 types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
828 ASSERT_TRUE(types[i] != nullptr);
829 VerifyAlignment(types[i]);
830 if (::testing::Test::HasFatalFailure()) {
831 return;
832 }
833 }
834 for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
835 free(types[i]);
836 }
837
838 // Verify using a vector.
839 std::vector<Type> type_vector(kMaxConsecutiveAllocs);
840 for (size_t i = 0; i < type_vector.size(); i++) {
841 VerifyAlignment(&type_vector[i]);
842 if (::testing::Test::HasFatalFailure()) {
843 return;
844 }
845 }
846}
847
#if defined(__ANDROID__)
// Allocates |alloc_size| bytes 100 times and asserts every returned pointer
// is aligned to at least |aligned_bytes| (must be a power of two). Marked
// optnone so the compiler cannot reason about the allocations.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
  // Free the allocations on the success path: the caller invokes this
  // helper for ~128 different sizes, so leaking 100 live allocations per
  // call adds up. (A failed ASSERT returns early and still leaks, but the
  // test is failing at that point anyway.)
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    free(ptrs[i]);
  }
}
#endif
861
// Verify that allocations of the fundamental types meet the alignment the
// C/C++ standards require, and (on Android) the platform's stronger
// size-based 8/16-byte guarantees.
TEST(malloc, align_check) {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  // - Allocations of a size that rounds up to a multiple of 16 bytes
  //   must have at least 16 byte alignment.
  // - Allocations of a size that rounds up to a multiple of 8 bytes and
  //   not 16 bytes, are only required to have at least 8 byte alignment.
  // This is regardless of whether it is in a 32 bit or 64 bit environment.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  for (size_t i = 1; i <= 128; i++) {
    // Round the request up to the allocator's 8-byte granule, then pick the
    // alignment the rounded size is required to provide.
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
    // AndroidVerifyAlignment uses ASSERT_*, so bail out on fatal failure.
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}
908
Christopher Ferris201dcf42020-01-29 13:09:31 -0800909// Jemalloc doesn't pass this test right now, so leave it as disabled.
910TEST(malloc, DISABLED_alloc_after_fork) {
911 // Both of these need to be a power of 2.
912 static constexpr size_t kMinAllocationSize = 8;
913 static constexpr size_t kMaxAllocationSize = 2097152;
914
915 static constexpr size_t kNumAllocatingThreads = 5;
916 static constexpr size_t kNumForkLoops = 100;
917
918 std::atomic_bool stop;
919
920 // Create threads that simply allocate and free different sizes.
921 std::vector<std::thread*> threads;
922 for (size_t i = 0; i < kNumAllocatingThreads; i++) {
923 std::thread* t = new std::thread([&stop] {
924 while (!stop) {
925 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
926 void* ptr = malloc(size);
927 if (ptr == nullptr) {
928 return;
929 }
930 // Make sure this value is not optimized away.
931 asm volatile("" : : "r,m"(ptr) : "memory");
932 free(ptr);
933 }
934 }
935 });
936 threads.push_back(t);
937 }
938
939 // Create a thread to fork and allocate.
940 for (size_t i = 0; i < kNumForkLoops; i++) {
941 pid_t pid;
942 if ((pid = fork()) == 0) {
943 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
944 void* ptr = malloc(size);
945 ASSERT_TRUE(ptr != nullptr);
946 // Make sure this value is not optimized away.
947 asm volatile("" : : "r,m"(ptr) : "memory");
948 // Make sure we can touch all of the allocation.
949 memset(ptr, 0x1, size);
950 ASSERT_LE(size, malloc_usable_size(ptr));
951 free(ptr);
952 }
953 _exit(10);
954 }
955 ASSERT_NE(-1, pid);
956 AssertChildExited(pid, 10);
957 }
958
959 stop = true;
960 for (auto thread : threads) {
961 thread->join();
962 delete thread;
963 }
964}
965
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000966TEST(android_mallopt, error_on_unexpected_option) {
967#if defined(__BIONIC__)
968 const int unrecognized_option = -1;
969 errno = 0;
970 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
971 EXPECT_EQ(ENOTSUP, errno);
972#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800973 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000974#endif
975}
976
Christopher Ferrise4cdbc42019-02-08 17:30:58 -0800977bool IsDynamic() {
978#if defined(__LP64__)
979 Elf64_Ehdr ehdr;
980#else
981 Elf32_Ehdr ehdr;
982#endif
983 std::string path(android::base::GetExecutablePath());
984
985 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
986 if (fd == -1) {
987 // Assume dynamic on error.
988 return true;
989 }
990 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
991 close(fd);
992 // Assume dynamic in error cases.
993 return !read_completed || ehdr.e_type == ET_DYN;
994}
995
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000996TEST(android_mallopt, init_zygote_child_profiling) {
997#if defined(__BIONIC__)
998 // Successful call.
999 errno = 0;
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001000 if (IsDynamic()) {
1001 EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1002 EXPECT_EQ(0, errno);
1003 } else {
1004 // Not supported in static executables.
1005 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
1006 EXPECT_EQ(ENOTSUP, errno);
1007 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001008
1009 // Unexpected arguments rejected.
1010 errno = 0;
1011 char unexpected = 0;
1012 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001013 if (IsDynamic()) {
1014 EXPECT_EQ(EINVAL, errno);
1015 } else {
1016 EXPECT_EQ(ENOTSUP, errno);
1017 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001018#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001019 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001020#endif
1021}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001022
#if defined(__BIONIC__)
// Runs inside a child process (callers use EXPECT_EXIT): installs a 128MB
// allocation limit, then exits 0 iff |func| succeeds for a request under
// the limit (20MB) and fails for one at the limit (128MB).
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024)) {
    exit(1);
  }
  if (func(128 * 1024 * 1024)) {
    exit(1);
  }
  exit(0);
}
#endif
1036
// Verify that M_SET_ALLOCATION_LIMIT_BYTES is enforced by every allocation
// entry point. Each check runs in a forked child (EXPECT_EXIT) because a
// limit, once installed, cannot be removed from the current process.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc and valloc only exist on 32-bit bionic.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1071
1072TEST(android_mallopt, set_allocation_limit_multiple) {
1073#if defined(__BIONIC__)
1074 // Only the first set should work.
1075 size_t limit = 256 * 1024 * 1024;
1076 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1077 limit = 32 * 1024 * 1024;
1078 ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1079#else
Elliott Hughes10907202019-03-27 08:51:02 -07001080 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001081#endif
1082}
1083
#if defined(__BIONIC__)
// Size of each probe allocation used by the allocation-limit tests below.
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

// Allocates kAllocationSize chunks until malloc() fails (the allocation
// limit must already be installed), frees them, and returns how many
// succeeded. NOTE: if all 20 attempts succeed (limit never reached), this
// deliberately returns 0 and frees nothing — callers ASSERT on a non-zero
// result and treat 0 as "limit never reached".
static size_t GetMaxAllocations() {
  size_t max_pointers = 0;
  void* ptrs[20];
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(kAllocationSize);
    if (ptrs[i] == nullptr) {
      // Hit the limit: exactly i allocations succeeded.
      max_pointers = i;
      break;
    }
  }
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}

// Verifies the limit is still enforced identically: the same number of
// kAllocationSize allocations must succeed, and the next one must fail.
static void VerifyMaxPointers(size_t max_pointers) {
  // Now verify that we can allocate the same number as before.
  void* ptrs[20];
  for (size_t i = 0; i < max_pointers; i++) {
    ptrs[i] = malloc(kAllocationSize);
    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
  }

  // Make sure the next allocation still fails.
  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
}
#endif
1118
1119TEST(android_mallopt, set_allocation_limit_realloc_increase) {
1120#if defined(__BIONIC__)
1121 size_t limit = 128 * 1024 * 1024;
1122 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1123
1124 size_t max_pointers = GetMaxAllocations();
1125 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1126
1127 void* memory = malloc(10 * 1024 * 1024);
1128 ASSERT_TRUE(memory != nullptr);
1129
1130 // Increase size.
1131 memory = realloc(memory, 20 * 1024 * 1024);
1132 ASSERT_TRUE(memory != nullptr);
1133 memory = realloc(memory, 40 * 1024 * 1024);
1134 ASSERT_TRUE(memory != nullptr);
1135 memory = realloc(memory, 60 * 1024 * 1024);
1136 ASSERT_TRUE(memory != nullptr);
1137 memory = realloc(memory, 80 * 1024 * 1024);
1138 ASSERT_TRUE(memory != nullptr);
1139 // Now push past limit.
1140 memory = realloc(memory, 130 * 1024 * 1024);
1141 ASSERT_TRUE(memory == nullptr);
1142
1143 VerifyMaxPointers(max_pointers);
1144#else
Elliott Hughes10907202019-03-27 08:51:02 -07001145 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001146#endif
1147}
1148
1149TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
1150#if defined(__BIONIC__)
1151 size_t limit = 100 * 1024 * 1024;
1152 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1153
1154 size_t max_pointers = GetMaxAllocations();
1155 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1156
1157 void* memory = malloc(80 * 1024 * 1024);
1158 ASSERT_TRUE(memory != nullptr);
1159
1160 // Decrease size.
1161 memory = realloc(memory, 60 * 1024 * 1024);
1162 ASSERT_TRUE(memory != nullptr);
1163 memory = realloc(memory, 40 * 1024 * 1024);
1164 ASSERT_TRUE(memory != nullptr);
1165 memory = realloc(memory, 20 * 1024 * 1024);
1166 ASSERT_TRUE(memory != nullptr);
1167 memory = realloc(memory, 10 * 1024 * 1024);
1168 ASSERT_TRUE(memory != nullptr);
1169 free(memory);
1170
1171 VerifyMaxPointers(max_pointers);
1172#else
Elliott Hughes10907202019-03-27 08:51:02 -07001173 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001174#endif
1175}
1176
1177TEST(android_mallopt, set_allocation_limit_realloc_free) {
1178#if defined(__BIONIC__)
1179 size_t limit = 100 * 1024 * 1024;
1180 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1181
1182 size_t max_pointers = GetMaxAllocations();
1183 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1184
1185 void* memory = malloc(60 * 1024 * 1024);
1186 ASSERT_TRUE(memory != nullptr);
1187
1188 memory = realloc(memory, 0);
1189 ASSERT_TRUE(memory == nullptr);
1190
1191 VerifyMaxPointers(max_pointers);
1192#else
Elliott Hughes10907202019-03-27 08:51:02 -07001193 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001194#endif
1195}
1196
#if defined(__BIONIC__)
// pthread entry point: spins until the shared "go" flag (passed via |data|)
// becomes true, then races to install a 500MB allocation limit. Returns a
// non-null sentinel on success so the caller can count the winners.
static void* SetAllocationLimit(void* data) {
  std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
  while (!go->load()) {
  }
  size_t limit = 500 * 1024 * 1024;
  if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
    return reinterpret_cast<void*>(-1);
  }
  return nullptr;
}

// Runs inside a forked child (the caller checks the exit status): starts
// four threads that all attempt to set the allocation limit at the same
// moment, and asserts that exactly one of them succeeds. Calls exit(0) on
// success so the parent sees a clean exit code.
static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  // Exactly one thread must have won the race to install the limit.
  ASSERT_EQ(1U, num_successful);
  exit(0);
}
#endif
1239
// Stress test: repeatedly fork a child in which four threads race to set
// the allocation limit while the profiler signal is delivered (see
// SetAllocationLimitMultipleThreads above).
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    // M_INIT_ZYGOTE_CHILD_PROFILING is only available to dynamic
    // executables (see init_zygote_child_profiling above).
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}