blob: bddd9ab63b8b85743894b347f92df58b22176f99 [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
30#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080031#include <sys/types.h>
32#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070033#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070034
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080035#include <atomic>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080036#include <thread>
37
Dan Albert4caa1f02014-08-20 09:16:57 -070038#include <tinyxml2.h>
39
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080040#include <android-base/file.h>
41
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080042#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000043
Elliott Hughesb1770852018-09-18 12:52:42 -070044#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080045
Peter Collingbourne45819dd2020-01-09 11:00:43 -080046#include "SignalUtils.h"
47
Christopher Ferrisb874c332020-01-21 16:39:05 -080048#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070049#include "platform/bionic/mte.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080050#include "platform/bionic/reserved_signals.h"
51#include "private/bionic_config.h"
52
Elliott Hughesb1770852018-09-18 12:52:42 -070053#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080054
Elliott Hughesb1770852018-09-18 12:52:42 -070055#else
Christopher Ferrisb874c332020-01-21 16:39:05 -080056
Elliott Hughesb1770852018-09-18 12:52:42 -070057#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080058
Elliott Hughesb1770852018-09-18 12:52:42 -070059#endif
60
Christopher Ferris885f3b92013-05-21 17:48:01 -070061TEST(malloc, malloc_std) {
62 // Simple malloc test.
63 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070064 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070065 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070066 free(ptr);
67}
68
Christopher Ferrisa4037802014-06-09 19:14:11 -070069TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080070 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070071 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070072 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -070073 ASSERT_EQ(ENOMEM, errno);
74}
75
Christopher Ferris885f3b92013-05-21 17:48:01 -070076TEST(malloc, calloc_std) {
77 // Simple calloc test.
78 size_t alloc_len = 100;
79 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070080 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070081 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
82 for (size_t i = 0; i < alloc_len; i++) {
83 ASSERT_EQ(0, ptr[i]);
84 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070085 free(ptr);
86}
87
Peter Collingbourne978eb162020-09-21 15:26:02 -070088TEST(malloc, calloc_mem_init_disabled) {
89#if defined(__BIONIC__)
90 // calloc should still zero memory if mem-init is disabled.
91 // With jemalloc the mallopts will fail but that shouldn't affect the
92 // execution of the test.
93 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
94 size_t alloc_len = 100;
95 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
96 for (size_t i = 0; i < alloc_len; i++) {
97 ASSERT_EQ(0, ptr[i]);
98 }
99 free(ptr);
100 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
101#else
102 GTEST_SKIP() << "bionic-only test";
103#endif
104}
105
Christopher Ferrisa4037802014-06-09 19:14:11 -0700106TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800107 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700108 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700109 ASSERT_EQ(nullptr, calloc(-1, 100));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700110 ASSERT_EQ(ENOMEM, errno);
111}
112
113TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800114 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700115 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700116 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700117 ASSERT_EQ(ENOMEM, errno);
118 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700119 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700120 ASSERT_EQ(ENOMEM, errno);
121 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700122 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700123 ASSERT_EQ(ENOMEM, errno);
124 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700125 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700126 ASSERT_EQ(ENOMEM, errno);
127}
128
Christopher Ferris885f3b92013-05-21 17:48:01 -0700129TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800130 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700131 // Memalign test where the alignment is any value.
132 for (size_t i = 0; i <= 12; i++) {
133 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700134 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700135 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700136 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
137 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
138 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700139 free(ptr);
140 }
141 }
142}
143
Christopher Ferrisa4037802014-06-09 19:14:11 -0700144TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800145 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700146 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700147}
148
149TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800150 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700151 void* ptr;
152 for (size_t align = 0; align <= 256; align++) {
153 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700154 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700155 free(ptr);
156 }
157}
158
Christopher Ferris885f3b92013-05-21 17:48:01 -0700159TEST(malloc, memalign_realloc) {
160 // Memalign and then realloc the pointer a couple of times.
161 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
162 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700163 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700164 ASSERT_LE(100U, malloc_usable_size(ptr));
165 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
166 memset(ptr, 0x23, 100);
167
168 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700169 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700170 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700171 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700172 for (size_t i = 0; i < 100; i++) {
173 ASSERT_EQ(0x23, ptr[i]);
174 }
175 memset(ptr, 0x45, 200);
176
177 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700178 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700179 ASSERT_LE(300U, malloc_usable_size(ptr));
180 for (size_t i = 0; i < 200; i++) {
181 ASSERT_EQ(0x45, ptr[i]);
182 }
183 memset(ptr, 0x67, 300);
184
185 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700186 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700187 ASSERT_LE(250U, malloc_usable_size(ptr));
188 for (size_t i = 0; i < 250; i++) {
189 ASSERT_EQ(0x67, ptr[i]);
190 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700191 free(ptr);
192 }
193}
194
195TEST(malloc, malloc_realloc_larger) {
196 // Realloc to a larger size, malloc is used for the original allocation.
197 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700198 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700199 ASSERT_LE(100U, malloc_usable_size(ptr));
200 memset(ptr, 67, 100);
201
202 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700203 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700204 ASSERT_LE(200U, malloc_usable_size(ptr));
205 for (size_t i = 0; i < 100; i++) {
206 ASSERT_EQ(67, ptr[i]);
207 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700208 free(ptr);
209}
210
211TEST(malloc, malloc_realloc_smaller) {
212 // Realloc to a smaller size, malloc is used for the original allocation.
213 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700214 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700215 ASSERT_LE(200U, malloc_usable_size(ptr));
216 memset(ptr, 67, 200);
217
218 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700219 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700220 ASSERT_LE(100U, malloc_usable_size(ptr));
221 for (size_t i = 0; i < 100; i++) {
222 ASSERT_EQ(67, ptr[i]);
223 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700224 free(ptr);
225}
226
227TEST(malloc, malloc_multiple_realloc) {
228 // Multiple reallocs, malloc is used for the original allocation.
229 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700230 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700231 ASSERT_LE(200U, malloc_usable_size(ptr));
232 memset(ptr, 0x23, 200);
233
234 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700235 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700236 ASSERT_LE(100U, malloc_usable_size(ptr));
237 for (size_t i = 0; i < 100; i++) {
238 ASSERT_EQ(0x23, ptr[i]);
239 }
240
241 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700242 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700243 ASSERT_LE(50U, malloc_usable_size(ptr));
244 for (size_t i = 0; i < 50; i++) {
245 ASSERT_EQ(0x23, ptr[i]);
246 }
247
248 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700249 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700250 ASSERT_LE(150U, malloc_usable_size(ptr));
251 for (size_t i = 0; i < 50; i++) {
252 ASSERT_EQ(0x23, ptr[i]);
253 }
254 memset(ptr, 0x23, 150);
255
256 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700257 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700258 ASSERT_LE(425U, malloc_usable_size(ptr));
259 for (size_t i = 0; i < 150; i++) {
260 ASSERT_EQ(0x23, ptr[i]);
261 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700262 free(ptr);
263}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700264
Christopher Ferris885f3b92013-05-21 17:48:01 -0700265TEST(malloc, calloc_realloc_larger) {
266 // Realloc to a larger size, calloc is used for the original allocation.
267 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700268 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700269 ASSERT_LE(100U, malloc_usable_size(ptr));
270
271 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700272 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700273 ASSERT_LE(200U, malloc_usable_size(ptr));
274 for (size_t i = 0; i < 100; i++) {
275 ASSERT_EQ(0, ptr[i]);
276 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700277 free(ptr);
278}
279
280TEST(malloc, calloc_realloc_smaller) {
281 // Realloc to a smaller size, calloc is used for the original allocation.
282 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700283 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700284 ASSERT_LE(200U, malloc_usable_size(ptr));
285
286 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700287 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700288 ASSERT_LE(100U, malloc_usable_size(ptr));
289 for (size_t i = 0; i < 100; i++) {
290 ASSERT_EQ(0, ptr[i]);
291 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700292 free(ptr);
293}
294
295TEST(malloc, calloc_multiple_realloc) {
296 // Multiple reallocs, calloc is used for the original allocation.
297 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700298 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700299 ASSERT_LE(200U, malloc_usable_size(ptr));
300
301 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700302 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700303 ASSERT_LE(100U, malloc_usable_size(ptr));
304 for (size_t i = 0; i < 100; i++) {
305 ASSERT_EQ(0, ptr[i]);
306 }
307
308 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700309 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700310 ASSERT_LE(50U, malloc_usable_size(ptr));
311 for (size_t i = 0; i < 50; i++) {
312 ASSERT_EQ(0, ptr[i]);
313 }
314
315 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700316 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700317 ASSERT_LE(150U, malloc_usable_size(ptr));
318 for (size_t i = 0; i < 50; i++) {
319 ASSERT_EQ(0, ptr[i]);
320 }
321 memset(ptr, 0, 150);
322
323 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700324 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700325 ASSERT_LE(425U, malloc_usable_size(ptr));
326 for (size_t i = 0; i < 150; i++) {
327 ASSERT_EQ(0, ptr[i]);
328 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700329 free(ptr);
330}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700331
Christopher Ferrisa4037802014-06-09 19:14:11 -0700332TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800333 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700334 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700335 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700336 ASSERT_EQ(ENOMEM, errno);
337 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700338 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700339 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700340 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700341 ASSERT_EQ(ENOMEM, errno);
342 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700343}
344
Dan Alberte5fdaa42014-06-14 01:04:31 +0000345#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
346extern "C" void* pvalloc(size_t);
347extern "C" void* valloc(size_t);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700348#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000349
Christopher Ferrisa4037802014-06-09 19:14:11 -0700350TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700351#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700352 size_t pagesize = sysconf(_SC_PAGESIZE);
353 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700354 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700355 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
356 ASSERT_LE(pagesize, malloc_usable_size(ptr));
357 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700358#else
359 GTEST_SKIP() << "pvalloc not supported.";
360#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700361}
362
363TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700364#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700365 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700366#else
367 GTEST_SKIP() << "pvalloc not supported.";
368#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700369}
370
371TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700372#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700373 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700374 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700375 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700376 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
377 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700378#else
379 GTEST_SKIP() << "valloc not supported.";
380#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700381}
382
383TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700384#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700385 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700386#else
387 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000388#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700389}
Dan Albert4caa1f02014-08-20 09:16:57 -0700390
391TEST(malloc, malloc_info) {
392#ifdef __BIONIC__
Evgenii Stepanov8de6b462019-03-22 13:22:28 -0700393 SKIP_WITH_HWASAN; // hwasan does not implement malloc_info
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800394
395 TemporaryFile tf;
396 ASSERT_TRUE(tf.fd != -1);
397 FILE* fp = fdopen(tf.fd, "w+");
398 tf.release();
399 ASSERT_TRUE(fp != nullptr);
400 ASSERT_EQ(0, malloc_info(0, fp));
401 ASSERT_EQ(0, fclose(fp));
402
403 std::string contents;
404 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
Dan Albert4caa1f02014-08-20 09:16:57 -0700405
406 tinyxml2::XMLDocument doc;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800407 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
Dan Albert4caa1f02014-08-20 09:16:57 -0700408
409 auto root = doc.FirstChildElement();
410 ASSERT_NE(nullptr, root);
411 ASSERT_STREQ("malloc", root->Name());
Christopher Ferris85169652019-10-09 18:41:55 -0700412 std::string version(root->Attribute("version"));
413 if (version == "jemalloc-1") {
Christopher Ferris6c619a02019-03-01 17:59:51 -0800414 auto arena = root->FirstChildElement();
415 for (; arena != nullptr; arena = arena->NextSiblingElement()) {
416 int val;
Dan Albert4caa1f02014-08-20 09:16:57 -0700417
Christopher Ferris6c619a02019-03-01 17:59:51 -0800418 ASSERT_STREQ("heap", arena->Name());
419 ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
420 ASSERT_EQ(tinyxml2::XML_SUCCESS,
421 arena->FirstChildElement("allocated-large")->QueryIntText(&val));
422 ASSERT_EQ(tinyxml2::XML_SUCCESS,
423 arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
424 ASSERT_EQ(tinyxml2::XML_SUCCESS,
425 arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
426 ASSERT_EQ(tinyxml2::XML_SUCCESS,
427 arena->FirstChildElement("bins-total")->QueryIntText(&val));
Dan Albert4caa1f02014-08-20 09:16:57 -0700428
Christopher Ferris6c619a02019-03-01 17:59:51 -0800429 auto bin = arena->FirstChildElement("bin");
430 for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
431 if (strcmp(bin->Name(), "bin") == 0) {
432 ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
433 ASSERT_EQ(tinyxml2::XML_SUCCESS,
434 bin->FirstChildElement("allocated")->QueryIntText(&val));
435 ASSERT_EQ(tinyxml2::XML_SUCCESS,
436 bin->FirstChildElement("nmalloc")->QueryIntText(&val));
437 ASSERT_EQ(tinyxml2::XML_SUCCESS,
438 bin->FirstChildElement("ndalloc")->QueryIntText(&val));
439 }
Dan Albert4caa1f02014-08-20 09:16:57 -0700440 }
441 }
Christopher Ferriscce88c02020-02-12 17:41:01 -0800442 } else if (version == "scudo-1") {
443 auto element = root->FirstChildElement();
444 for (; element != nullptr; element = element->NextSiblingElement()) {
445 int val;
446
447 ASSERT_STREQ("alloc", element->Name());
448 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
449 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
450 }
Christopher Ferris6c619a02019-03-01 17:59:51 -0800451 } else {
Christopher Ferriscce88c02020-02-12 17:41:01 -0800452 // Do not verify output for debug malloc.
453 ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
Dan Albert4caa1f02014-08-20 09:16:57 -0700454 }
455#endif
456}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800457
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700458TEST(malloc, malloc_info_matches_mallinfo) {
459#ifdef __BIONIC__
460 SKIP_WITH_HWASAN; // hwasan does not implement malloc_info
461
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800462 TemporaryFile tf;
463 ASSERT_TRUE(tf.fd != -1);
464 FILE* fp = fdopen(tf.fd, "w+");
465 tf.release();
466 ASSERT_TRUE(fp != nullptr);
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700467 size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800468 ASSERT_EQ(0, malloc_info(0, fp));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700469 size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800470 ASSERT_EQ(0, fclose(fp));
471
472 std::string contents;
473 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700474
475 tinyxml2::XMLDocument doc;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800476 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700477
478 size_t total_allocated_bytes = 0;
479 auto root = doc.FirstChildElement();
480 ASSERT_NE(nullptr, root);
481 ASSERT_STREQ("malloc", root->Name());
Christopher Ferris85169652019-10-09 18:41:55 -0700482 std::string version(root->Attribute("version"));
483 if (version == "jemalloc-1") {
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700484 auto arena = root->FirstChildElement();
485 for (; arena != nullptr; arena = arena->NextSiblingElement()) {
486 int val;
487
488 ASSERT_STREQ("heap", arena->Name());
489 ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
490 ASSERT_EQ(tinyxml2::XML_SUCCESS,
491 arena->FirstChildElement("allocated-large")->QueryIntText(&val));
492 total_allocated_bytes += val;
493 ASSERT_EQ(tinyxml2::XML_SUCCESS,
494 arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
495 total_allocated_bytes += val;
496 ASSERT_EQ(tinyxml2::XML_SUCCESS,
497 arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
498 total_allocated_bytes += val;
499 ASSERT_EQ(tinyxml2::XML_SUCCESS,
500 arena->FirstChildElement("bins-total")->QueryIntText(&val));
501 }
502 // The total needs to be between the mallinfo call before and after
503 // since malloc_info allocates some memory.
504 EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
505 EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
Christopher Ferriscce88c02020-02-12 17:41:01 -0800506 } else if (version == "scudo-1") {
507 auto element = root->FirstChildElement();
508 for (; element != nullptr; element = element->NextSiblingElement()) {
509 ASSERT_STREQ("alloc", element->Name());
510 int size;
511 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
512 int count;
513 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
514 total_allocated_bytes += size * count;
515 }
516 // Scudo only gives the information on the primary, so simply make
517 // sure that the value is non-zero.
518 EXPECT_NE(0U, total_allocated_bytes);
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700519 } else {
Christopher Ferriscce88c02020-02-12 17:41:01 -0800520 // Do not verify output for debug malloc.
521 ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700522 }
523#endif
524}
525
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800526TEST(malloc, calloc_usable_size) {
527 for (size_t size = 1; size <= 2048; size++) {
528 void* pointer = malloc(size);
529 ASSERT_TRUE(pointer != nullptr);
530 memset(pointer, 0xeb, malloc_usable_size(pointer));
531 free(pointer);
532
533 // We should get a previous pointer that has been set to non-zero.
534 // If calloc does not zero out all of the data, this will fail.
535 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
536 ASSERT_TRUE(pointer != nullptr);
537 size_t usable_size = malloc_usable_size(zero_mem);
538 for (size_t i = 0; i < usable_size; i++) {
539 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
540 }
541 free(zero_mem);
542 }
543}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800544
545TEST(malloc, malloc_0) {
546 void* p = malloc(0);
547 ASSERT_TRUE(p != nullptr);
548 free(p);
549}
550
551TEST(malloc, calloc_0_0) {
552 void* p = calloc(0, 0);
553 ASSERT_TRUE(p != nullptr);
554 free(p);
555}
556
557TEST(malloc, calloc_0_1) {
558 void* p = calloc(0, 1);
559 ASSERT_TRUE(p != nullptr);
560 free(p);
561}
562
563TEST(malloc, calloc_1_0) {
564 void* p = calloc(1, 0);
565 ASSERT_TRUE(p != nullptr);
566 free(p);
567}
568
569TEST(malloc, realloc_nullptr_0) {
570 // realloc(nullptr, size) is actually malloc(size).
571 void* p = realloc(nullptr, 0);
572 ASSERT_TRUE(p != nullptr);
573 free(p);
574}
575
576TEST(malloc, realloc_0) {
577 void* p = malloc(1024);
578 ASSERT_TRUE(p != nullptr);
579 // realloc(p, 0) is actually free(p).
580 void* p2 = realloc(p, 0);
581 ASSERT_TRUE(p2 == nullptr);
582}
Christopher Ferris72df6702016-02-11 15:51:31 -0800583
584constexpr size_t MAX_LOOPS = 200;
585
586// Make sure that memory returned by malloc is aligned to allow these data types.
587TEST(malloc, verify_alignment) {
588 uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
589 uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
590 long double** values_ldouble = new long double*[MAX_LOOPS];
591 // Use filler to attempt to force the allocator to get potentially bad alignments.
592 void** filler = new void*[MAX_LOOPS];
593
594 for (size_t i = 0; i < MAX_LOOPS; i++) {
595 // Check uint32_t pointers.
596 filler[i] = malloc(1);
597 ASSERT_TRUE(filler[i] != nullptr);
598
599 values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
600 ASSERT_TRUE(values_32[i] != nullptr);
601 *values_32[i] = i;
602 ASSERT_EQ(*values_32[i], i);
603 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));
604
605 free(filler[i]);
606 }
607
608 for (size_t i = 0; i < MAX_LOOPS; i++) {
609 // Check uint64_t pointers.
610 filler[i] = malloc(1);
611 ASSERT_TRUE(filler[i] != nullptr);
612
613 values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
614 ASSERT_TRUE(values_64[i] != nullptr);
615 *values_64[i] = 0x1000 + i;
616 ASSERT_EQ(*values_64[i], 0x1000 + i);
617 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));
618
619 free(filler[i]);
620 }
621
622 for (size_t i = 0; i < MAX_LOOPS; i++) {
623 // Check long double pointers.
624 filler[i] = malloc(1);
625 ASSERT_TRUE(filler[i] != nullptr);
626
627 values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
628 ASSERT_TRUE(values_ldouble[i] != nullptr);
629 *values_ldouble[i] = 5.5 + i;
630 ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
631 // 32 bit glibc has a long double size of 12 bytes, so hardcode the
632 // required alignment to 0x7.
633#if !defined(__BIONIC__) && !defined(__LP64__)
634 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
635#else
636 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
637#endif
638
639 free(filler[i]);
640 }
641
642 for (size_t i = 0; i < MAX_LOOPS; i++) {
643 free(values_32[i]);
644 free(values_64[i]);
645 free(values_ldouble[i]);
646 }
647
648 delete[] filler;
649 delete[] values_32;
650 delete[] values_64;
651 delete[] values_ldouble;
652}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700653
654TEST(malloc, mallopt_smoke) {
655 errno = 0;
656 ASSERT_EQ(0, mallopt(-1000, 1));
657 // mallopt doesn't set errno.
658 ASSERT_EQ(0, errno);
659}
Elliott Hughesb1770852018-09-18 12:52:42 -0700660
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800661TEST(malloc, mallopt_decay) {
662#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800663 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800664 errno = 0;
665 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
666 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
667 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
668 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
669#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800670 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800671#endif
672}
673
674TEST(malloc, mallopt_purge) {
675#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800676 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800677 errno = 0;
678 ASSERT_EQ(1, mallopt(M_PURGE, 0));
679#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800680 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800681#endif
682}
683
#if defined(__BIONIC__)
// Runs malloc_info and reports through *allocator_scudo whether the
// current native allocator identifies itself as scudo.
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // The root <malloc version="..."> attribute names the allocator.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif
707
TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  // These mallopt options are scudo-specific; each must report success (1).
  ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
  ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
  ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
723
Elliott Hughesb1770852018-09-18 12:52:42 -0700724TEST(malloc, reallocarray_overflow) {
725#if HAVE_REALLOCARRAY
726 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
727 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
728 size_t b = 2;
729
730 errno = 0;
731 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
732 ASSERT_EQ(ENOMEM, errno);
733
734 errno = 0;
735 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
736 ASSERT_EQ(ENOMEM, errno);
737#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800738 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700739#endif
740}
741
742TEST(malloc, reallocarray) {
743#if HAVE_REALLOCARRAY
744 void* p = reallocarray(nullptr, 2, 32);
745 ASSERT_TRUE(p != nullptr);
746 ASSERT_GE(malloc_usable_size(p), 64U);
747#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800748 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700749#endif
750}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800751
TEST(malloc, mallinfo) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  // Sizes chosen to span many allocator size classes, from tiny bins up to
  // very large allocations.
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      // uordblks reports the total in-use bytes according to the allocator.
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Free everything that was allocated this round (unused slots are null,
    // and free(nullptr) is a no-op).
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000796
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800797template <typename Type>
798void __attribute__((optnone)) VerifyAlignment(Type* floating) {
799 size_t expected_alignment = alignof(Type);
800 if (expected_alignment != 0) {
801 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
802 << "Expected alignment " << expected_alignment << " ptr value " << floating;
803 }
804}
805
// Allocates objects of Type three different ways (operator new, malloc, and
// std::vector) and verifies that every resulting pointer is aligned to
// alignof(Type). Returns early on the first fatal failure so the caller's
// ASSERT_NO_FATAL_FAILURE wrapper can report it.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
848
#if defined(__ANDROID__)
// Performs 100 consecutive allocations of alloc_size bytes and asserts each
// result is aligned to at least aligned_bytes (which must be a power of two).
// Note: the pointers are intentionally not freed so that consecutive calls
// keep exercising fresh allocator state.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
}
#endif
862
// Verifies that allocations of every fundamental type satisfy that type's
// alignment requirement, via new, malloc, and std::vector.
TEST(malloc, align_check) {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  // - Allocations of a size that rounds up to a multiple of 16 bytes
  //   must have at least 16 byte alignment.
  // - Allocations of a size that rounds up to a multiple of 8 bytes and
  //   not 16 bytes, are only required to have at least 8 byte alignment.
  // This is regardless of whether it is in a 32 bit or 64 bit environment.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  for (size_t i = 1; i <= 128; i++) {
    // Round the request up to the next multiple of 8 to decide which
    // alignment guarantee applies.
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}
909
// Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc, DISABLED_alloc_after_fork) {
  // Both of these need to be a power of 2.
  static constexpr size_t kMinAllocationSize = 8;
  static constexpr size_t kMaxAllocationSize = 2097152;

  static constexpr size_t kNumAllocatingThreads = 5;
  static constexpr size_t kNumForkLoops = 100;

  std::atomic_bool stop;

  // Create threads that simply allocate and free different sizes.
  std::vector<std::thread*> threads;
  for (size_t i = 0; i < kNumAllocatingThreads; i++) {
    std::thread* t = new std::thread([&stop] {
      while (!stop) {
        for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
          void* ptr;
          DoNotOptimize(ptr = malloc(size));
          free(ptr);
        }
      }
    });
    threads.push_back(t);
  }

  // Fork repeatedly while the threads above are mid-allocation; the child
  // must still be able to allocate, touch, and free memory of every size.
  for (size_t i = 0; i < kNumForkLoops; i++) {
    pid_t pid;
    if ((pid = fork()) == 0) {
      for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
        void* ptr;
        DoNotOptimize(ptr = malloc(size));
        ASSERT_TRUE(ptr != nullptr);
        // Make sure we can touch all of the allocation.
        memset(ptr, 0x1, size);
        ASSERT_LE(size, malloc_usable_size(ptr));
        free(ptr);
      }
      // Distinctive exit code lets the parent tell success from a crash.
      _exit(10);
    }
    ASSERT_NE(-1, pid);
    AssertChildExited(pid, 10);
  }

  stop = true;
  for (auto thread : threads) {
    thread->join();
    delete thread;
  }
}
961
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000962TEST(android_mallopt, error_on_unexpected_option) {
963#if defined(__BIONIC__)
964 const int unrecognized_option = -1;
965 errno = 0;
966 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
967 EXPECT_EQ(ENOTSUP, errno);
968#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800969 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000970#endif
971}
972
Christopher Ferrise4cdbc42019-02-08 17:30:58 -0800973bool IsDynamic() {
974#if defined(__LP64__)
975 Elf64_Ehdr ehdr;
976#else
977 Elf32_Ehdr ehdr;
978#endif
979 std::string path(android::base::GetExecutablePath());
980
981 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
982 if (fd == -1) {
983 // Assume dynamic on error.
984 return true;
985 }
986 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
987 close(fd);
988 // Assume dynamic in error cases.
989 return !read_completed || ehdr.e_type == ET_DYN;
990}
991
TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(0, errno);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(ENOTSUP, errno);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    EXPECT_EQ(EINVAL, errno);
  } else {
    // Static executables still report ENOTSUP rather than EINVAL.
    EXPECT_EQ(ENOTSUP, errno);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001018
#if defined(__BIONIC__)
// Runs in a child process (under EXPECT_EXIT): installs a 128MB allocation
// limit, then verifies func succeeds for a request below the limit and fails
// for one at the limit. Exits 0 on success, 1 if either expectation fails.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif
1032
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  // Each allocation entry point must honor the limit installed by
  // CheckAllocationFunction. Every check runs in its own child process
  // (EXPECT_EXIT) because the limit cannot be changed once set.
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc and valloc are legacy interfaces only present on 32-bit bionic.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1067
TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  // A second attempt to change the limit must be rejected.
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1079
1080#if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

// Allocates kAllocationSize chunks until malloc fails (or the local array is
// exhausted), frees everything that was successfully allocated, and returns
// the number of allocations that succeeded before the first failure.
// Returns 0 when no failure was observed within 20 attempts, which callers
// interpret as "the allocation limit never kicked in".
static size_t GetMaxAllocations() {
  void* ptrs[20] = {};
  size_t max_pointers = 0;
  size_t allocated = 0;
  while (allocated < sizeof(ptrs) / sizeof(void*)) {
    ptrs[allocated] = malloc(kAllocationSize);
    if (ptrs[allocated] == nullptr) {
      max_pointers = allocated;
      break;
    }
    allocated++;
  }
  // Free every successful allocation, including the "limit never reached"
  // case; the previous version leaked all 20 allocations on that path
  // because it only freed up to max_pointers (0).
  for (size_t i = 0; i < allocated; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}
1098
// Verifies the allocation limit is still being enforced: exactly max_pointers
// allocations of kAllocationSize must succeed, and one more must fail.
static void VerifyMaxPointers(size_t max_pointers) {
  // Now verify that we can allocate the same number as before.
  void* ptrs[20];
  for (size_t i = 0; i < max_pointers; i++) {
    ptrs[i] = malloc(kAllocationSize);
    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
  }

  // Make sure the next allocation still fails.
  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
}
1113#endif
1114
1115TEST(android_mallopt, set_allocation_limit_realloc_increase) {
1116#if defined(__BIONIC__)
1117 size_t limit = 128 * 1024 * 1024;
1118 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1119
1120 size_t max_pointers = GetMaxAllocations();
1121 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1122
1123 void* memory = malloc(10 * 1024 * 1024);
1124 ASSERT_TRUE(memory != nullptr);
1125
1126 // Increase size.
1127 memory = realloc(memory, 20 * 1024 * 1024);
1128 ASSERT_TRUE(memory != nullptr);
1129 memory = realloc(memory, 40 * 1024 * 1024);
1130 ASSERT_TRUE(memory != nullptr);
1131 memory = realloc(memory, 60 * 1024 * 1024);
1132 ASSERT_TRUE(memory != nullptr);
1133 memory = realloc(memory, 80 * 1024 * 1024);
1134 ASSERT_TRUE(memory != nullptr);
1135 // Now push past limit.
1136 memory = realloc(memory, 130 * 1024 * 1024);
1137 ASSERT_TRUE(memory == nullptr);
1138
1139 VerifyMaxPointers(max_pointers);
1140#else
Elliott Hughes10907202019-03-27 08:51:02 -07001141 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001142#endif
1143}
1144
1145TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
1146#if defined(__BIONIC__)
1147 size_t limit = 100 * 1024 * 1024;
1148 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1149
1150 size_t max_pointers = GetMaxAllocations();
1151 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1152
1153 void* memory = malloc(80 * 1024 * 1024);
1154 ASSERT_TRUE(memory != nullptr);
1155
1156 // Decrease size.
1157 memory = realloc(memory, 60 * 1024 * 1024);
1158 ASSERT_TRUE(memory != nullptr);
1159 memory = realloc(memory, 40 * 1024 * 1024);
1160 ASSERT_TRUE(memory != nullptr);
1161 memory = realloc(memory, 20 * 1024 * 1024);
1162 ASSERT_TRUE(memory != nullptr);
1163 memory = realloc(memory, 10 * 1024 * 1024);
1164 ASSERT_TRUE(memory != nullptr);
1165 free(memory);
1166
1167 VerifyMaxPointers(max_pointers);
1168#else
Elliott Hughes10907202019-03-27 08:51:02 -07001169 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001170#endif
1171}
1172
TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // realloc to size 0 frees the allocation and returns nullptr; the bytes
  // must be credited back so the same number of allocations succeeds below.
  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1192
1193#if defined(__BIONIC__)
// Thread body: spins until the shared flag (*data) becomes true, then races
// the sibling threads to install an allocation limit. Returns (void*)-1 if
// this thread's android_mallopt call won the race, nullptr if it lost.
static void* SetAllocationLimit(void* data) {
  std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
  while (!go->load()) {
  }
  size_t limit = 500 * 1024 * 1024;
  if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
    return reinterpret_cast<void*>(-1);
  }
  return nullptr;
}
1204
// Starts several threads that all try to set the allocation limit at the same
// moment and checks that exactly one of them succeeds. Runs in a forked child
// (see caller) and reports success via exit(0).
static void SetAllocationLimitMultipleThreads() {
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Exactly one thread must have won the race to set the limit.
  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  ASSERT_EQ(1U, num_successful);
  // exit(0) signals success to the parent, which checks the exit status.
  exit(0);
}
1234#endif
1235
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    // Enable the profiling hooks so the BIONIC_SIGNAL_PROFILER sent by the
    // child is handled; only supported in dynamic executables.
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    // The child exits 0 only if exactly one thread set the limit.
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001258
TEST(android_mallopt, disable_memory_mitigations) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  // Helper thread created *before* mitigations are disabled; it waits on the
  // semaphore, then reports its own tagged-address-control state.
  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  ASSERT_TRUE(android_mallopt(M_DISABLE_MEMORY_MITIGATIONS, nullptr, 0));
  ASSERT_EQ(0, sem_post(&sem));

  // Disabling mitigations must turn MTE tag checking off (TCF_NONE) for the
  // current thread...
  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(PR_MTE_TCF_NONE, my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // ...and for threads that already existed when the call was made.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}