blob: db814dc51f2053e97b7d1144d14b5ea9c929dc9a [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
Colin Cross4c5595c2021-08-16 15:51:59 -070030#include <sys/cdefs.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080031#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080032#include <sys/types.h>
33#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070034#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070035
Mitch Phillips9cad8422021-01-20 16:03:27 -080036#include <algorithm>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080037#include <atomic>
Christopher Ferris02b6bbc2022-06-02 15:20:23 -070038#include <functional>
Christopher Ferrisd86eb862023-02-28 12:45:54 -080039#include <string>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080040#include <thread>
Christopher Ferrisd86eb862023-02-28 12:45:54 -080041#include <unordered_map>
42#include <utility>
Mitch Phillips9cad8422021-01-20 16:03:27 -080043#include <vector>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080044
Dan Albert4caa1f02014-08-20 09:16:57 -070045#include <tinyxml2.h>
46
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080047#include <android-base/file.h>
Florian Mayer750dcd32022-04-15 15:54:47 -070048#include <android-base/test_utils.h>
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080049
Christopher Ferrisdc9b0fd2024-09-30 20:05:18 +000050#include "DoNotOptimize.h"
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080051#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000052
Elliott Hughesb1770852018-09-18 12:52:42 -070053#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080054
Peter Collingbourne45819dd2020-01-09 11:00:43 -080055#include "SignalUtils.h"
Steven Moreland6731e502024-12-06 20:06:11 +000056#include "dlext_private_tests.h"
Peter Collingbourne45819dd2020-01-09 11:00:43 -080057
Christopher Ferrisb874c332020-01-21 16:39:05 -080058#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070059#include "platform/bionic/mte.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080060#include "platform/bionic/reserved_signals.h"
61#include "private/bionic_config.h"
62
Elliott Hughesb1770852018-09-18 12:52:42 -070063#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080064
Colin Cross7da20342021-07-28 11:18:11 -070065#elif defined(__GLIBC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080066
Elliott Hughesb1770852018-09-18 12:52:42 -070067#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080068
Colin Cross4c5595c2021-08-16 15:51:59 -070069#elif defined(ANDROID_HOST_MUSL)
Colin Cross7da20342021-07-28 11:18:11 -070070
71#define HAVE_REALLOCARRAY 1
72
Elliott Hughesb1770852018-09-18 12:52:42 -070073#endif
74
Christopher Ferris885f3b92013-05-21 17:48:01 -070075TEST(malloc, malloc_std) {
76 // Simple malloc test.
77 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070078 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070079 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070080 free(ptr);
81}
82
Christopher Ferrisa4037802014-06-09 19:14:11 -070083TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080084 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070085 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070086 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -070087 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -070088}
89
Christopher Ferris885f3b92013-05-21 17:48:01 -070090TEST(malloc, calloc_std) {
91 // Simple calloc test.
92 size_t alloc_len = 100;
93 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070094 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070095 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
96 for (size_t i = 0; i < alloc_len; i++) {
97 ASSERT_EQ(0, ptr[i]);
98 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070099 free(ptr);
100}
101
Peter Collingbourne978eb162020-09-21 15:26:02 -0700102TEST(malloc, calloc_mem_init_disabled) {
103#if defined(__BIONIC__)
104 // calloc should still zero memory if mem-init is disabled.
105 // With jemalloc the mallopts will fail but that shouldn't affect the
106 // execution of the test.
107 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
108 size_t alloc_len = 100;
109 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
110 for (size_t i = 0; i < alloc_len; i++) {
111 ASSERT_EQ(0, ptr[i]);
112 }
113 free(ptr);
114 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
115#else
116 GTEST_SKIP() << "bionic-only test";
117#endif
118}
119
Christopher Ferrisa4037802014-06-09 19:14:11 -0700120TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800121 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700122 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700123 ASSERT_EQ(nullptr, calloc(-1, 100));
Elliott Hughes95646e62023-09-21 14:11:19 -0700124 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700125}
126
127TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800128 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700129 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700130 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700131 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700132 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700133 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700134 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700135 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700136 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700137 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700138 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700139 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Elliott Hughes95646e62023-09-21 14:11:19 -0700140 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700141}
142
Christopher Ferris885f3b92013-05-21 17:48:01 -0700143TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800144 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700145 // Memalign test where the alignment is any value.
146 for (size_t i = 0; i <= 12; i++) {
147 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700148 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700149 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700150 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
151 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
152 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700153 free(ptr);
154 }
155 }
156}
157
Christopher Ferrisa4037802014-06-09 19:14:11 -0700158TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800159 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700160 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700161}
162
163TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800164 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700165 void* ptr;
166 for (size_t align = 0; align <= 256; align++) {
167 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700168 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700169 free(ptr);
170 }
171}
172
Christopher Ferris885f3b92013-05-21 17:48:01 -0700173TEST(malloc, memalign_realloc) {
174 // Memalign and then realloc the pointer a couple of times.
175 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
176 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700177 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700178 ASSERT_LE(100U, malloc_usable_size(ptr));
179 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
180 memset(ptr, 0x23, 100);
181
182 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700183 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700184 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700185 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700186 for (size_t i = 0; i < 100; i++) {
187 ASSERT_EQ(0x23, ptr[i]);
188 }
189 memset(ptr, 0x45, 200);
190
191 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700192 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700193 ASSERT_LE(300U, malloc_usable_size(ptr));
194 for (size_t i = 0; i < 200; i++) {
195 ASSERT_EQ(0x45, ptr[i]);
196 }
197 memset(ptr, 0x67, 300);
198
199 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700200 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700201 ASSERT_LE(250U, malloc_usable_size(ptr));
202 for (size_t i = 0; i < 250; i++) {
203 ASSERT_EQ(0x67, ptr[i]);
204 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700205 free(ptr);
206 }
207}
208
209TEST(malloc, malloc_realloc_larger) {
210 // Realloc to a larger size, malloc is used for the original allocation.
211 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700212 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700213 ASSERT_LE(100U, malloc_usable_size(ptr));
214 memset(ptr, 67, 100);
215
216 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700217 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700218 ASSERT_LE(200U, malloc_usable_size(ptr));
219 for (size_t i = 0; i < 100; i++) {
220 ASSERT_EQ(67, ptr[i]);
221 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700222 free(ptr);
223}
224
225TEST(malloc, malloc_realloc_smaller) {
226 // Realloc to a smaller size, malloc is used for the original allocation.
227 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700228 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700229 ASSERT_LE(200U, malloc_usable_size(ptr));
230 memset(ptr, 67, 200);
231
232 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700233 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700234 ASSERT_LE(100U, malloc_usable_size(ptr));
235 for (size_t i = 0; i < 100; i++) {
236 ASSERT_EQ(67, ptr[i]);
237 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700238 free(ptr);
239}
240
241TEST(malloc, malloc_multiple_realloc) {
242 // Multiple reallocs, malloc is used for the original allocation.
243 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700244 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700245 ASSERT_LE(200U, malloc_usable_size(ptr));
246 memset(ptr, 0x23, 200);
247
248 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700249 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700250 ASSERT_LE(100U, malloc_usable_size(ptr));
251 for (size_t i = 0; i < 100; i++) {
252 ASSERT_EQ(0x23, ptr[i]);
253 }
254
255 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700256 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700257 ASSERT_LE(50U, malloc_usable_size(ptr));
258 for (size_t i = 0; i < 50; i++) {
259 ASSERT_EQ(0x23, ptr[i]);
260 }
261
262 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700263 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700264 ASSERT_LE(150U, malloc_usable_size(ptr));
265 for (size_t i = 0; i < 50; i++) {
266 ASSERT_EQ(0x23, ptr[i]);
267 }
268 memset(ptr, 0x23, 150);
269
270 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700271 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700272 ASSERT_LE(425U, malloc_usable_size(ptr));
273 for (size_t i = 0; i < 150; i++) {
274 ASSERT_EQ(0x23, ptr[i]);
275 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700276 free(ptr);
277}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700278
Christopher Ferris885f3b92013-05-21 17:48:01 -0700279TEST(malloc, calloc_realloc_larger) {
280 // Realloc to a larger size, calloc is used for the original allocation.
281 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700282 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700283 ASSERT_LE(100U, malloc_usable_size(ptr));
284
285 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700286 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700287 ASSERT_LE(200U, malloc_usable_size(ptr));
288 for (size_t i = 0; i < 100; i++) {
289 ASSERT_EQ(0, ptr[i]);
290 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700291 free(ptr);
292}
293
294TEST(malloc, calloc_realloc_smaller) {
295 // Realloc to a smaller size, calloc is used for the original allocation.
296 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700297 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700298 ASSERT_LE(200U, malloc_usable_size(ptr));
299
300 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700301 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700302 ASSERT_LE(100U, malloc_usable_size(ptr));
303 for (size_t i = 0; i < 100; i++) {
304 ASSERT_EQ(0, ptr[i]);
305 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700306 free(ptr);
307}
308
309TEST(malloc, calloc_multiple_realloc) {
310 // Multiple reallocs, calloc is used for the original allocation.
311 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700312 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700313 ASSERT_LE(200U, malloc_usable_size(ptr));
314
315 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700316 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700317 ASSERT_LE(100U, malloc_usable_size(ptr));
318 for (size_t i = 0; i < 100; i++) {
319 ASSERT_EQ(0, ptr[i]);
320 }
321
322 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700323 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700324 ASSERT_LE(50U, malloc_usable_size(ptr));
325 for (size_t i = 0; i < 50; i++) {
326 ASSERT_EQ(0, ptr[i]);
327 }
328
329 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700330 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700331 ASSERT_LE(150U, malloc_usable_size(ptr));
332 for (size_t i = 0; i < 50; i++) {
333 ASSERT_EQ(0, ptr[i]);
334 }
335 memset(ptr, 0, 150);
336
337 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700338 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700339 ASSERT_LE(425U, malloc_usable_size(ptr));
340 for (size_t i = 0; i < 150; i++) {
341 ASSERT_EQ(0, ptr[i]);
342 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700343 free(ptr);
344}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700345
Christopher Ferrisa4037802014-06-09 19:14:11 -0700346TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800347 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700348 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700349 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700350 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700351 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700352 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700353 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700354 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700355 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700356 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700357}
358
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
// pvalloc/valloc are deprecated and no longer declared in <malloc.h>, so
// declare them manually for the tests below.
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000363
Christopher Ferrisa4037802014-06-09 19:14:11 -0700364TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700365#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700366 size_t pagesize = sysconf(_SC_PAGESIZE);
367 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700368 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700369 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
370 ASSERT_LE(pagesize, malloc_usable_size(ptr));
371 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700372#else
373 GTEST_SKIP() << "pvalloc not supported.";
374#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700375}
376
377TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700378#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700379 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700380#else
381 GTEST_SKIP() << "pvalloc not supported.";
382#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700383}
384
385TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700386#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700387 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700388 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700389 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700390 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
391 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700392#else
393 GTEST_SKIP() << "valloc not supported.";
394#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700395}
396
397TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700398#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700399 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700400#else
401 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000402#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700403}
Dan Albert4caa1f02014-08-20 09:16:57 -0700404
// Dump malloc_info(3) output to a temp file and verify it is well-formed XML
// matching the schema of whichever allocator is in use (jemalloc, scudo, or
// debug malloc). Only structure is checked, not specific values.
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN;  // hwasan does not implement malloc_info

  // Write the report through a FILE* backed by a temp file, then read it back.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fp now owns the fd; fclose below releases it.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // The root element is always <malloc version="...">; the version attribute
  // selects which schema to validate against.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: one <heap nr=...> per arena, each with summary counters and
    // optional per-size-class <bin> children.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
        // Siblings may include non-bin elements; only validate <bin> nodes.
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size=... count=...> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
  printf("Allocator version: %s\n", version.c_str());
#endif
}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800472
// Cross-check malloc_info(3) totals against mallinfo(3): the sum of allocated
// bytes reported in the XML should be bracketed by mallinfo().uordblks taken
// immediately before and after the malloc_info call (malloc_info itself
// allocates, so the two snapshots differ).
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fp now owns the fd; fclose below releases it.
  ASSERT_TRUE(fp != nullptr);
  // Snapshot allocated bytes around the malloc_info call to bound the total.
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: accumulate large + huge + bin allocations across all arenas.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    // scudo: accumulate size * count over every <alloc> element.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
540
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800541TEST(malloc, calloc_usable_size) {
542 for (size_t size = 1; size <= 2048; size++) {
543 void* pointer = malloc(size);
544 ASSERT_TRUE(pointer != nullptr);
545 memset(pointer, 0xeb, malloc_usable_size(pointer));
546 free(pointer);
547
548 // We should get a previous pointer that has been set to non-zero.
549 // If calloc does not zero out all of the data, this will fail.
550 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
551 ASSERT_TRUE(pointer != nullptr);
552 size_t usable_size = malloc_usable_size(zero_mem);
553 for (size_t i = 0; i < usable_size; i++) {
554 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
555 }
556 free(zero_mem);
557 }
558}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800559
560TEST(malloc, malloc_0) {
561 void* p = malloc(0);
562 ASSERT_TRUE(p != nullptr);
563 free(p);
564}
565
566TEST(malloc, calloc_0_0) {
567 void* p = calloc(0, 0);
568 ASSERT_TRUE(p != nullptr);
569 free(p);
570}
571
572TEST(malloc, calloc_0_1) {
573 void* p = calloc(0, 1);
574 ASSERT_TRUE(p != nullptr);
575 free(p);
576}
577
578TEST(malloc, calloc_1_0) {
579 void* p = calloc(1, 0);
580 ASSERT_TRUE(p != nullptr);
581 free(p);
582}
583
584TEST(malloc, realloc_nullptr_0) {
585 // realloc(nullptr, size) is actually malloc(size).
586 void* p = realloc(nullptr, 0);
587 ASSERT_TRUE(p != nullptr);
588 free(p);
589}
590
591TEST(malloc, realloc_0) {
592 void* p = malloc(1024);
593 ASSERT_TRUE(p != nullptr);
594 // realloc(p, 0) is actually free(p).
595 void* p2 = realloc(p, 0);
596 ASSERT_TRUE(p2 == nullptr);
597}
Christopher Ferris72df6702016-02-11 15:51:31 -0800598
// Number of allocation rounds per data type in verify_alignment.
constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
// Each round interleaves a 1-byte "filler" allocation with the typed allocation
// to push the allocator into varied states that could surface bad alignment.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    // Write/read through the pointer so a misaligned pointer would fault on
    // strict-alignment targets, then check the address bits directly.
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  // Typed allocations were intentionally kept live until all rounds finished;
  // release them now.
  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700668
669TEST(malloc, mallopt_smoke) {
Christopher Ferris2ef59372023-01-18 15:08:37 -0800670#if defined(__BIONIC__)
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700671 errno = 0;
672 ASSERT_EQ(0, mallopt(-1000, 1));
673 // mallopt doesn't set errno.
Elliott Hughes95646e62023-09-21 14:11:19 -0700674 ASSERT_ERRNO(0);
Colin Cross7da20342021-07-28 11:18:11 -0700675#else
Christopher Ferris2ef59372023-01-18 15:08:37 -0800676 GTEST_SKIP() << "bionic-only test";
Colin Cross7da20342021-07-28 11:18:11 -0700677#endif
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700678}
Elliott Hughesb1770852018-09-18 12:52:42 -0700679
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800680TEST(malloc, mallopt_decay) {
681#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800682 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Chia-hung Duan6abb4062024-04-17 19:08:48 -0700683 ASSERT_EQ(1, mallopt(M_DECAY_TIME, -1));
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800684 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
685 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
686 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
687 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
Chia-hung Duan6abb4062024-04-17 19:08:48 -0700688 ASSERT_EQ(1, mallopt(M_DECAY_TIME, -1));
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800689#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800690 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800691#endif
692}
693
694TEST(malloc, mallopt_purge) {
695#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800696 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800697 ASSERT_EQ(1, mallopt(M_PURGE, 0));
698#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800699 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800700#endif
701}
702
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800703TEST(malloc, mallopt_purge_all) {
704#if defined(__BIONIC__)
705 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800706 ASSERT_EQ(1, mallopt(M_PURGE_ALL, 0));
707#else
708 GTEST_SKIP() << "bionic-only test";
709#endif
710}
711
Christopher Ferrise9a7b812023-05-11 15:36:27 -0700712TEST(malloc, mallopt_log_stats) {
713#if defined(__BIONIC__)
714 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
715 ASSERT_EQ(1, mallopt(M_LOG_STATS, 0));
716#else
717 GTEST_SKIP() << "bionic-only test";
718#endif
719}
720
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800721// Verify that all of the mallopt values are unique.
722TEST(malloc, mallopt_unique_params) {
723#if defined(__BIONIC__)
724 std::vector<std::pair<int, std::string>> params{
725 std::make_pair(M_DECAY_TIME, "M_DECAY_TIME"),
726 std::make_pair(M_PURGE, "M_PURGE"),
727 std::make_pair(M_PURGE_ALL, "M_PURGE_ALL"),
728 std::make_pair(M_MEMTAG_TUNING, "M_MEMTAG_TUNING"),
729 std::make_pair(M_THREAD_DISABLE_MEM_INIT, "M_THREAD_DISABLE_MEM_INIT"),
730 std::make_pair(M_CACHE_COUNT_MAX, "M_CACHE_COUNT_MAX"),
731 std::make_pair(M_CACHE_SIZE_MAX, "M_CACHE_SIZE_MAX"),
732 std::make_pair(M_TSDS_COUNT_MAX, "M_TSDS_COUNT_MAX"),
733 std::make_pair(M_BIONIC_ZERO_INIT, "M_BIONIC_ZERO_INIT"),
734 std::make_pair(M_BIONIC_SET_HEAP_TAGGING_LEVEL, "M_BIONIC_SET_HEAP_TAGGING_LEVEL"),
Christopher Ferrise9a7b812023-05-11 15:36:27 -0700735 std::make_pair(M_LOG_STATS, "M_LOG_STATS"),
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800736 };
737
738 std::unordered_map<int, std::string> all_params;
739 for (const auto& param : params) {
740 EXPECT_TRUE(all_params.count(param.first) == 0)
741 << "mallopt params " << all_params[param.first] << " and " << param.second
742 << " have the same value " << param.first;
743 all_params.insert(param);
744 }
745#else
746 GTEST_SKIP() << "bionic-only test";
747#endif
748}
749
#if defined(__BIONIC__)
// Determines whether the active malloc implementation is scudo by parsing
// the XML produced by malloc_info(3): sets *allocator_scudo to true only if
// the root <malloc> element reports version "scudo-1".
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  // The fd is now owned by fp; release it from tf so it isn't closed twice.
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  // malloc_info may be unimplemented by the active allocator; treat any
  // failure as "not scudo" rather than a test failure.
  if (malloc_info(0, fp) != 0) {
    *allocator_scudo = false;
    return;
  }
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // The document root is expected to be <malloc version="...">.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif
776
777TEST(malloc, mallopt_scudo_only_options) {
778#if defined(__BIONIC__)
779 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
780 bool allocator_scudo;
781 GetAllocatorVersion(&allocator_scudo);
782 if (!allocator_scudo) {
783 GTEST_SKIP() << "scudo allocator only test";
784 }
785 ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
786 ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
787 ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
788#else
789 GTEST_SKIP() << "bionic-only test";
790#endif
791}
792
Elliott Hughesb1770852018-09-18 12:52:42 -0700793TEST(malloc, reallocarray_overflow) {
794#if HAVE_REALLOCARRAY
795 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
796 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
797 size_t b = 2;
798
799 errno = 0;
800 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
Elliott Hughes95646e62023-09-21 14:11:19 -0700801 ASSERT_ERRNO(ENOMEM);
Elliott Hughesb1770852018-09-18 12:52:42 -0700802
803 errno = 0;
804 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
Elliott Hughes95646e62023-09-21 14:11:19 -0700805 ASSERT_ERRNO(ENOMEM);
Elliott Hughesb1770852018-09-18 12:52:42 -0700806#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800807 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700808#endif
809}
810
811TEST(malloc, reallocarray) {
812#if HAVE_REALLOCARRAY
813 void* p = reallocarray(nullptr, 2, 32);
814 ASSERT_TRUE(p != nullptr);
815 ASSERT_GE(malloc_usable_size(p), 64U);
816#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800817 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700818#endif
819}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800820
// mallinfo().uordblks must grow when an allocation succeeds. Thread caches
// can absorb allocations without changing the reported totals, so retry up
// to kMaxAllocs times per size before declaring failure.
TEST(malloc, mallinfo) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  static constexpr size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Unused slots are nullptr (zero-initialized above); free(nullptr) is a
    // no-op, so the whole array can be freed unconditionally.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000865
// Same allocation-growth check as malloc.mallinfo, but additionally verifies
// field-by-field that mallinfo() and mallinfo2() report identical values
// (mallinfo2's fields are size_t, hence the casts on the mallinfo side).
TEST(malloc, mallinfo2) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
  static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};

  static constexpr size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      struct mallinfo info = mallinfo();
      struct mallinfo2 info2 = mallinfo2();
      // Verify that mallinfo and mallinfo2 are exactly the same.
      ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
      ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
      ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
      ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
      ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
      ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
      ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
      ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
      ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
      ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);

      size_t allocated = info2.uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      // Re-read both after the allocation; they must still agree.
      info = mallinfo();
      info2 = mallinfo2();
      // Verify that mallinfo and mallinfo2 are exactly the same.
      ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
      ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
      ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
      ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
      ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
      ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
      ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
      ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
      ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
      ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);

      size_t new_allocated = info2.uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo2 numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // free(nullptr) is a no-op, so unused (zeroed) slots are safe to free.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after "
                      << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
936
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800937template <typename Type>
938void __attribute__((optnone)) VerifyAlignment(Type* floating) {
939 size_t expected_alignment = alignof(Type);
940 if (expected_alignment != 0) {
941 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
Ryan Prichardf40f2582024-01-09 16:29:20 -0800942 << "Expected alignment " << expected_alignment << " ptr value "
943 << static_cast<void*>(floating);
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800944 }
945}
946
// Allocates Type objects via operator new, via malloc, and inside a
// std::vector, checking that every resulting pointer is aligned for Type.
// optnone keeps the compiler from optimizing away or specializing the
// allocations under test.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    // VerifyAlignment's ASSERT only aborts that helper; check the recorded
    // fatal failure here so this function stops too.
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
989
#if defined(__ANDROID__)
// Allocate |alloc_size| bytes 100 times in a row and require every result to
// be aligned to at least |aligned_bytes| (which must be a power of two).
// Note: the allocations are not freed, so every malloc here is a fresh,
// consecutive allocation. optnone keeps the compiler from optimizing based
// on the visible allocation pattern.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  const uintptr_t alignment_mask = aligned_bytes - 1;
  for (auto& ptr : ptrs) {
    ptr = malloc(alloc_size);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) & alignment_mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptr;
  }
}
#endif
1003
// Checks allocation alignment for all the fundamental types, then (on
// Android) verifies the minimum alignment guarantees for every raw malloc
// size from 1 to 128 bytes.
void AlignCheck() {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  //  1. Allocations of a size that rounds up to a multiple of 16 bytes
  //     must have at least 16 byte alignment.
  //  2. Allocations of a size that rounds up to a multiple of 8 bytes and
  //     not 16 bytes, are only required to have at least 8 byte alignment.
  // In addition, on Android clang has been configured for 64 bit such that:
  //  3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
  //  4. Allocations > 8 bytes must be aligned to at least 16 bytes.
  // For 32 bit environments, only the first two requirements must be met.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  // As mentioned before, for 64 bit this will enforce the higher
  // requirement since clang expects this behavior on Android now.
  for (size_t i = 1; i <= 128; i++) {
#if defined(__LP64__)
    if (i <= 8) {
      AndroidVerifyAlignment(i, 8);
    } else {
      AndroidVerifyAlignment(i, 16);
    }
#else
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
#endif
    // AndroidVerifyAlignment's ASSERTs only abort that helper; stop here if
    // it recorded a fatal failure.
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}
1063
// Runs the full set of alignment checks (fundamental types plus, on Android,
// raw malloc sizes 1-128).
TEST(malloc, align_check) {
  AlignCheck();
}
1067
Christopher Ferris201dcf42020-01-29 13:09:31 -08001068// Jemalloc doesn't pass this test right now, so leave it as disabled.
1069TEST(malloc, DISABLED_alloc_after_fork) {
1070 // Both of these need to be a power of 2.
1071 static constexpr size_t kMinAllocationSize = 8;
1072 static constexpr size_t kMaxAllocationSize = 2097152;
1073
1074 static constexpr size_t kNumAllocatingThreads = 5;
1075 static constexpr size_t kNumForkLoops = 100;
1076
1077 std::atomic_bool stop;
1078
1079 // Create threads that simply allocate and free different sizes.
1080 std::vector<std::thread*> threads;
1081 for (size_t i = 0; i < kNumAllocatingThreads; i++) {
1082 std::thread* t = new std::thread([&stop] {
1083 while (!stop) {
1084 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001085 void* ptr;
1086 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001087 free(ptr);
1088 }
1089 }
1090 });
1091 threads.push_back(t);
1092 }
1093
1094 // Create a thread to fork and allocate.
1095 for (size_t i = 0; i < kNumForkLoops; i++) {
1096 pid_t pid;
1097 if ((pid = fork()) == 0) {
1098 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001099 void* ptr;
1100 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001101 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris201dcf42020-01-29 13:09:31 -08001102 // Make sure we can touch all of the allocation.
1103 memset(ptr, 0x1, size);
1104 ASSERT_LE(size, malloc_usable_size(ptr));
1105 free(ptr);
1106 }
1107 _exit(10);
1108 }
1109 ASSERT_NE(-1, pid);
1110 AssertChildExited(pid, 10);
1111 }
1112
1113 stop = true;
1114 for (auto thread : threads) {
1115 thread->join();
1116 delete thread;
1117 }
1118}
1119
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001120TEST(android_mallopt, error_on_unexpected_option) {
1121#if defined(__BIONIC__)
1122 const int unrecognized_option = -1;
1123 errno = 0;
1124 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
Elliott Hughes95646e62023-09-21 14:11:19 -07001125 EXPECT_ERRNO(ENOTSUP);
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001126#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001127 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001128#endif
1129}
1130
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001131bool IsDynamic() {
1132#if defined(__LP64__)
1133 Elf64_Ehdr ehdr;
1134#else
1135 Elf32_Ehdr ehdr;
1136#endif
1137 std::string path(android::base::GetExecutablePath());
1138
1139 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
1140 if (fd == -1) {
1141 // Assume dynamic on error.
1142 return true;
1143 }
1144 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
1145 close(fd);
1146 // Assume dynamic in error cases.
1147 return !read_completed || ehdr.e_type == ET_DYN;
1148}
1149
// M_INIT_ZYGOTE_CHILD_PROFILING takes no arguments, succeeds only in dynamic
// executables (static ones get ENOTSUP), and rejects unexpected arguments
// with EINVAL.
TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_ERRNO(0);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_ERRNO(ENOTSUP);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    EXPECT_ERRNO(EINVAL);
  } else {
    // Static executables fail the option check before the argument check.
    EXPECT_ERRNO(ENOTSUP);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001176
#if defined(__BIONIC__)
// Runs inside an EXPECT_EXIT child: installs a 128MB allocation limit, then
// requires |allocate| (which returns true on success) to succeed below the
// limit and fail at the limit. Exits with 0 on success and 1 on failure;
// the parent checks the exit code.
template <typename FuncType>
void CheckAllocationFunction(FuncType allocate) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  // Below the limit the allocation must succeed.
  bool ok = allocate(20 * 1024 * 1024);
  if (ok) {
    // At the limit the allocation must fail.
    ok = !allocate(128 * 1024 * 1024);
  }
  exit(ok ? 0 : 1);
}
#endif
1190
// Every allocation entry point (calloc both ways, malloc, memalign,
// posix_memalign, aligned_alloc, realloc, and on 32 bit pvalloc/valloc) must
// honor the M_SET_ALLOCATION_LIMIT_BYTES limit. Each case runs in its own
// EXPECT_EXIT child so it starts from a process with no limit set.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc and valloc are obsolete and only provided for 32 bit compatibility.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1225
1226TEST(android_mallopt, set_allocation_limit_multiple) {
1227#if defined(__BIONIC__)
1228 // Only the first set should work.
1229 size_t limit = 256 * 1024 * 1024;
1230 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1231 limit = 32 * 1024 * 1024;
1232 ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1233#else
Elliott Hughes10907202019-03-27 08:51:02 -07001234 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001235#endif
1236}
1237
1238#if defined(__BIONIC__)
1239static constexpr size_t kAllocationSize = 8 * 1024 * 1024;
1240
1241static size_t GetMaxAllocations() {
1242 size_t max_pointers = 0;
1243 void* ptrs[20];
1244 for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
1245 ptrs[i] = malloc(kAllocationSize);
1246 if (ptrs[i] == nullptr) {
1247 max_pointers = i;
1248 break;
1249 }
1250 }
1251 for (size_t i = 0; i < max_pointers; i++) {
1252 free(ptrs[i]);
1253 }
1254 return max_pointers;
1255}
1256
1257static void VerifyMaxPointers(size_t max_pointers) {
1258 // Now verify that we can allocate the same number as before.
1259 void* ptrs[20];
1260 for (size_t i = 0; i < max_pointers; i++) {
1261 ptrs[i] = malloc(kAllocationSize);
1262 ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
1263 }
1264
1265 // Make sure the next allocation still fails.
1266 ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
1267 for (size_t i = 0; i < max_pointers; i++) {
1268 free(ptrs[i]);
1269 }
1270}
1271#endif
1272
1273TEST(android_mallopt, set_allocation_limit_realloc_increase) {
1274#if defined(__BIONIC__)
1275 size_t limit = 128 * 1024 * 1024;
1276 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1277
1278 size_t max_pointers = GetMaxAllocations();
1279 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1280
1281 void* memory = malloc(10 * 1024 * 1024);
1282 ASSERT_TRUE(memory != nullptr);
1283
1284 // Increase size.
1285 memory = realloc(memory, 20 * 1024 * 1024);
1286 ASSERT_TRUE(memory != nullptr);
1287 memory = realloc(memory, 40 * 1024 * 1024);
1288 ASSERT_TRUE(memory != nullptr);
1289 memory = realloc(memory, 60 * 1024 * 1024);
1290 ASSERT_TRUE(memory != nullptr);
1291 memory = realloc(memory, 80 * 1024 * 1024);
1292 ASSERT_TRUE(memory != nullptr);
1293 // Now push past limit.
1294 memory = realloc(memory, 130 * 1024 * 1024);
1295 ASSERT_TRUE(memory == nullptr);
1296
1297 VerifyMaxPointers(max_pointers);
1298#else
Elliott Hughes10907202019-03-27 08:51:02 -07001299 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001300#endif
1301}
1302
1303TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
1304#if defined(__BIONIC__)
1305 size_t limit = 100 * 1024 * 1024;
1306 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1307
1308 size_t max_pointers = GetMaxAllocations();
1309 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1310
1311 void* memory = malloc(80 * 1024 * 1024);
1312 ASSERT_TRUE(memory != nullptr);
1313
1314 // Decrease size.
1315 memory = realloc(memory, 60 * 1024 * 1024);
1316 ASSERT_TRUE(memory != nullptr);
1317 memory = realloc(memory, 40 * 1024 * 1024);
1318 ASSERT_TRUE(memory != nullptr);
1319 memory = realloc(memory, 20 * 1024 * 1024);
1320 ASSERT_TRUE(memory != nullptr);
1321 memory = realloc(memory, 10 * 1024 * 1024);
1322 ASSERT_TRUE(memory != nullptr);
1323 free(memory);
1324
1325 VerifyMaxPointers(max_pointers);
1326#else
Elliott Hughes10907202019-03-27 08:51:02 -07001327 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001328#endif
1329}
1330
1331TEST(android_mallopt, set_allocation_limit_realloc_free) {
1332#if defined(__BIONIC__)
1333 size_t limit = 100 * 1024 * 1024;
1334 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1335
1336 size_t max_pointers = GetMaxAllocations();
1337 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1338
1339 void* memory = malloc(60 * 1024 * 1024);
1340 ASSERT_TRUE(memory != nullptr);
1341
1342 memory = realloc(memory, 0);
1343 ASSERT_TRUE(memory == nullptr);
1344
1345 VerifyMaxPointers(max_pointers);
1346#else
Elliott Hughes10907202019-03-27 08:51:02 -07001347 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001348#endif
1349}
1350
#if defined(__BIONIC__)
// Child-process body for set_allocation_limit_multiple_threads: four threads
// race to set the allocation limit while a BIONIC_SIGNAL_PROFILER signal is
// delivered to exercise the interaction with the profiling signal handler.
// Exactly one thread must win. Terminates the (child) process via _exit(0)
// on success.
static void SetAllocationLimitMultipleThreads() {
  static constexpr size_t kNumThreads = 4;
  std::atomic_bool start_running = false;
  // Explicitly zero-initialize these counters: before C++20 a
  // default-constructed std::atomic holds an indeterminate value, so the
  // reads below would otherwise be undefined behavior.
  std::atomic<size_t> num_running = 0;
  std::atomic<size_t> num_successful = 0;
  std::unique_ptr<std::thread> threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    threads[i].reset(new std::thread([&num_running, &start_running, &num_successful] {
      // Announce readiness, then busy-wait for the go signal so all threads
      // call android_mallopt as close to simultaneously as possible.
      ++num_running;
      while (!start_running) {
      }
      size_t limit = 500 * 1024 * 1024;
      if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
        ++num_successful;
      }
    }));
  }

  // Wait until all of the threads have started.
  while (num_running != kNumThreads)
    ;

  // Now start all of the threads setting the mallopt at once.
  start_running = true;

  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler. This will verify that changing the limit while
  // the allocation handlers are being changed at the same time works,
  // or that the limit handler is changed first and this also works properly.
  union sigval signal_value {};
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Wait for all of the threads to finish.
  for (size_t i = 0; i < kNumThreads; i++) {
    threads[i]->join();
  }
  ASSERT_EQ(1U, num_successful) << "Only one thread should be able to set the limit.";
  _exit(0);
}
#endif
1392
// Stress test: forks a child 100 times; each child races several threads
// (plus a profiling signal) to set the allocation limit via
// SetAllocationLimitMultipleThreads, which _exits(0) on success.
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  // Enable the profiling handlers first (dynamic executables only) so the
  // children exercise the handler/limit interaction.
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      // On success the helper never returns (it calls _exit(0)).
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001415
#if defined(__BIONIC__)
using Mode = android_mallopt_gwp_asan_options_t::Mode;
// Run indirectly via RunGwpAsanTest (see multiple_enable_gwp_asan below);
// the DISABLED_ prefix keeps it out of a normal test pass.
TEST(android_mallopt, DISABLED_multiple_enable_gwp_asan) {
  android_mallopt_gwp_asan_options_t options;
  options.program_name = "";  // Don't infer GWP-ASan options from sysprops.
  options.mode = Mode::APP_MANIFEST_NEVER;
  // GWP-ASan should already be enabled. Trying to enable or disable it should
  // always pass.
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
  options.mode = Mode::APP_MANIFEST_DEFAULT;
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
}
#endif  // defined(__BIONIC__)
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001429
Mitch Phillipse6997d52020-11-30 15:04:14 -08001430TEST(android_mallopt, multiple_enable_gwp_asan) {
1431#if defined(__BIONIC__)
1432 // Always enable GWP-Asan, with default options.
1433 RunGwpAsanTest("*.DISABLED_multiple_enable_gwp_asan");
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001434#else
1435 GTEST_SKIP() << "bionic extension";
1436#endif
1437}
1438
Florian Mayercc61ad82022-08-31 11:43:30 -07001439TEST(android_mallopt, memtag_stack_is_on) {
1440#if defined(__BIONIC__)
1441 bool memtag_stack;
1442 EXPECT_TRUE(android_mallopt(M_MEMTAG_STACK_IS_ON, &memtag_stack, sizeof(memtag_stack)));
1443#else
1444 GTEST_SKIP() << "bionic extension";
1445#endif
1446}
1447
Mitch Phillips9cad8422021-01-20 16:03:27 -08001448void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1449 std::vector<void*> allocs;
1450 constexpr int kMaxBytesToCheckZero = 64;
1451 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1452
1453 for (int i = 0; i < num_iterations; ++i) {
1454 int size = get_alloc_size(i);
1455 allocs.push_back(malloc(size));
1456 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1457 }
1458
1459 for (void* alloc : allocs) {
1460 free(alloc);
1461 }
1462 allocs.clear();
1463
1464 for (int i = 0; i < num_iterations; ++i) {
1465 int size = get_alloc_size(i);
1466 allocs.push_back(malloc(size));
1467 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1468 }
1469
1470 for (void* alloc : allocs) {
1471 free(alloc);
1472 }
1473}
1474
// Verify that enabling M_BIONIC_ZERO_INIT makes reused heap memory come back
// zeroed, for both small allocations and large (scudo secondary) allocations.
TEST(malloc, zero_init) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Turn on zero-initialization of freed/reused memory.
  mallopt(M_BIONIC_ZERO_INIT, 1);

  // Test using a block of 4K small (1-32 byte) allocations.
  TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
    return 1 + iteration % 32;
  });

  // Also test large allocations that land in the scudo secondary, as this is
  // the only part of Scudo that's changed by enabling zero initialization with
  // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
  // release secondary allocations back to the OS) was modified to 0ms/1ms by
  // mallopt_decay. Ensure that we delay for at least a second before releasing
  // pages to the OS in order to avoid implicit zeroing by the kernel.
  mallopt(M_DECAY_TIME, 1);
  TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
    return 1 << (19 + iteration % 4);
  });

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1506
// Note that MTE is enabled on cc_tests on devices that support MTE.
// Verify that disabling heap tagging (M_HEAP_TAGGING_LEVEL_NONE) clears the
// MTE tag-check-fault mode not just for the calling thread but also for other
// already-running threads in the process.
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  // The worker thread blocks on the semaphore and only reads its
  // PR_GET_TAGGED_ADDR_CTRL state after the main thread has disabled heap
  // tagging below.
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  // Disable tagging, then release the worker so it samples its state.
  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  // The calling thread must now have tag checks disabled...
  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(static_cast<unsigned long>(PR_MTE_TCF_NONE), my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // ...and the worker thread must observe the identical control word.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne2659d7b2021-03-05 13:31:41 -08001541
// Verify the compatibility behavior for apps targeting SDK <= 29: reading a
// few bytes past the end of a large allocation must not crash.
TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end of
  // a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  // volatile keeps the deliberate one-past-the-end read from being optimized
  // away; the test passes simply by not crashing.
  volatile char *vp = p.get();
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Evgenii Stepanovf0d7a342021-11-16 17:34:39 -08001562
1563// Regression test for b/206701345 -- scudo bug, MTE only.
1564// Fix: https://reviews.llvm.org/D105261
1565// Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
1566TEST(malloc, realloc_mte_crash_b206701345) {
1567 // We want to hit in-place realloc at the very end of an mmap-ed region. Not
1568 // all size classes allow such placement - mmap size has to be divisible by
1569 // the block size. At the time of writing this could only be reproduced with
1570 // 64 byte size class (i.e. 48 byte allocations), but that may change in the
1571 // future. Try several different classes at the lower end.
1572 std::vector<void*> ptrs(10000);
1573 for (int i = 1; i < 32; ++i) {
1574 size_t sz = 16 * i - 1;
1575 for (void*& p : ptrs) {
1576 p = realloc(malloc(sz), sz + 1);
1577 }
1578
1579 for (void* p : ptrs) {
1580 free(p);
1581 }
1582 }
1583}
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001584
1585void VerifyAllocationsAreZero(std::function<void*(size_t)> alloc_func, std::string function_name,
1586 std::vector<size_t>& test_sizes, size_t max_allocations) {
1587 // Vector of zero'd data used for comparisons. Make it twice the largest size.
1588 std::vector<char> zero(test_sizes.back() * 2, 0);
1589
1590 SCOPED_TRACE(testing::Message() << function_name << " failed to zero memory");
1591
1592 for (size_t test_size : test_sizes) {
1593 std::vector<void*> ptrs(max_allocations);
1594 for (size_t i = 0; i < ptrs.size(); i++) {
1595 SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
1596 ptrs[i] = alloc_func(test_size);
1597 ASSERT_TRUE(ptrs[i] != nullptr);
1598 size_t alloc_size = malloc_usable_size(ptrs[i]);
1599 ASSERT_LE(alloc_size, zero.size());
1600 ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
1601
1602 // Set the memory to non-zero to make sure if the pointer
1603 // is reused it's still zero.
1604 memset(ptrs[i], 0xab, alloc_size);
1605 }
1606 // Free the pointers.
1607 for (size_t i = 0; i < ptrs.size(); i++) {
1608 free(ptrs[i]);
1609 }
1610 for (size_t i = 0; i < ptrs.size(); i++) {
1611 SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
1612 ptrs[i] = malloc(test_size);
1613 ASSERT_TRUE(ptrs[i] != nullptr);
1614 size_t alloc_size = malloc_usable_size(ptrs[i]);
1615 ASSERT_LE(alloc_size, zero.size());
1616 ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
1617 }
1618 // Free all of the pointers later to maximize the chance of reusing from
1619 // the first loop.
1620 for (size_t i = 0; i < ptrs.size(); i++) {
1621 free(ptrs[i]);
1622 }
1623 }
1624}
1625
// Verify that small and medium allocations are always zero.
// @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_small_medium_sizes) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif
  SKIP_WITH_HWASAN << "Only test system allocator, not hwasan allocator.";

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  // Sizes are listed ascending: VerifyAllocationsAreZero sizes its comparison
  // buffer from the last element. Each allocator entry point (malloc,
  // memalign, posix_memalign) is checked separately.
  constexpr size_t kMaxAllocations = 1024;
  std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
  VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
                           kMaxAllocations);

  VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
                           test_sizes, kMaxAllocations);

  VerifyAllocationsAreZero(
      [](size_t size) -> void* {
        void* ptr;
        if (posix_memalign(&ptr, 64, size) == 0) {
          return ptr;
        }
        return nullptr;
      },
      "posix_memalign", test_sizes, kMaxAllocations);
}
1656
// Verify that large allocations are always zero.
// @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_large_sizes) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif
  SKIP_WITH_HWASAN << "Only test system allocator, not hwasan allocator.";

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  // Same structure as the small/medium test above, but with multi-megabyte
  // sizes and fewer allocations to keep memory use bounded. Sizes are listed
  // ascending: VerifyAllocationsAreZero sizes its buffer from the last one.
  constexpr size_t kMaxAllocations = 20;
  std::vector<size_t> test_sizes = {1000000, 2000000, 3000000, 4000000};
  VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
                           kMaxAllocations);

  VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
                           test_sizes, kMaxAllocations);

  VerifyAllocationsAreZero(
      [](size_t size) -> void* {
        void* ptr;
        if (posix_memalign(&ptr, 64, size) == 0) {
          return ptr;
        }
        return nullptr;
      },
      "posix_memalign", test_sizes, kMaxAllocations);
}
1687
Christopher Ferris59075562023-04-04 14:37:26 -07001688// Verify that reallocs are zeroed when expanded.
1689// @CddTest = 9.7/C-4-1
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001690TEST(malloc, zeroed_allocations_realloc) {
1691#if !defined(__BIONIC__)
1692 GTEST_SKIP() << "Only valid on bionic";
1693#endif
Christopher Ferrisfb4b87b2024-07-31 23:43:31 +00001694 SKIP_WITH_HWASAN << "Only test system allocator, not hwasan allocator.";
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001695
1696 if (IsLowRamDevice()) {
1697 GTEST_SKIP() << "Skipped on low memory devices.";
1698 }
1699
1700 // Vector of zero'd data used for comparisons.
1701 constexpr size_t kMaxMemorySize = 131072;
1702 std::vector<char> zero(kMaxMemorySize, 0);
1703
1704 constexpr size_t kMaxAllocations = 1024;
1705 std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
1706 // Do a number of allocations and set them to non-zero.
1707 for (size_t test_size : test_sizes) {
1708 std::vector<void*> ptrs(kMaxAllocations);
1709 for (size_t i = 0; i < kMaxAllocations; i++) {
1710 ptrs[i] = malloc(test_size);
1711 ASSERT_TRUE(ptrs[i] != nullptr);
1712
1713 // Set the memory to non-zero to make sure if the pointer
1714 // is reused it's still zero.
1715 memset(ptrs[i], 0xab, malloc_usable_size(ptrs[i]));
1716 }
1717 // Free the pointers.
1718 for (size_t i = 0; i < kMaxAllocations; i++) {
1719 free(ptrs[i]);
1720 }
1721 }
1722
1723 // Do the reallocs to a larger size and verify the rest of the allocation
1724 // is zero.
1725 constexpr size_t kInitialSize = 8;
1726 for (size_t test_size : test_sizes) {
1727 std::vector<void*> ptrs(kMaxAllocations);
1728 for (size_t i = 0; i < kMaxAllocations; i++) {
1729 ptrs[i] = malloc(kInitialSize);
1730 ASSERT_TRUE(ptrs[i] != nullptr);
1731 size_t orig_alloc_size = malloc_usable_size(ptrs[i]);
1732
1733 ptrs[i] = realloc(ptrs[i], test_size);
1734 ASSERT_TRUE(ptrs[i] != nullptr);
1735 size_t new_alloc_size = malloc_usable_size(ptrs[i]);
1736 char* ptr = reinterpret_cast<char*>(ptrs[i]);
1737 ASSERT_EQ(0, memcmp(&ptr[orig_alloc_size], zero.data(), new_alloc_size - orig_alloc_size))
1738 << "realloc from " << kInitialSize << " to size " << test_size << " at iteration " << i;
1739 }
1740 for (size_t i = 0; i < kMaxAllocations; i++) {
1741 free(ptrs[i]);
1742 }
1743 }
1744}
Christopher Ferrisb4e560e2023-10-26 17:00:00 -07001745
// Verify the error cases of M_GET_DECAY_TIME_ENABLED: both a null output
// pointer and a wrong-sized output buffer must fail with EINVAL.
TEST(android_mallopt, get_decay_time_enabled_errors) {
#if defined(__BIONIC__)
  // Null result pointer is rejected.
  errno = 0;
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, nullptr, sizeof(bool)));
  EXPECT_ERRNO(EINVAL);

  // sizeof(int) is rejected -- presumably only sizeof(bool) is accepted.
  errno = 0;
  int value;
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_ERRNO(EINVAL);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1760
// Verify that M_GET_DECAY_TIME_ENABLED tracks the state set through
// mallopt(M_DECAY_TIME, ...).
TEST(android_mallopt, get_decay_time_enabled) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";

  // Disabling decay (0) must be reflected by the query.
  EXPECT_EQ(1, mallopt(M_DECAY_TIME, 0));

  bool value;
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_FALSE(value);

  // Enabling decay (1) must be reflected as well.
  EXPECT_EQ(1, mallopt(M_DECAY_TIME, 1));
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_TRUE(value);

  // A value of -1 also reads back as disabled.
  EXPECT_EQ(1, mallopt(M_DECAY_TIME, -1));
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_FALSE(value);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
Christopher Ferris0b231992023-10-31 15:09:37 -07001782
// Helper for decay_time_set_using_env_variable: it is exec'd in a child
// process (via --gtest_also_run_disabled_tests) to check that decay time was
// enabled there. Intentionally no GTEST_SKIP on non-bionic: the body is
// simply empty and the test passes trivially.
TEST(android_mallopt, DISABLED_verify_decay_time_on) {
#if defined(__BIONIC__)
  bool value;
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_TRUE(value) << "decay time did not get enabled properly.";
#endif
}
1790
1791TEST(android_mallopt, decay_time_set_using_env_variable) {
1792#if defined(__BIONIC__)
1793 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
1794
1795 bool value;
1796 ASSERT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
1797 ASSERT_FALSE(value) << "decay time did not get disabled properly.";
1798
1799 // Verify that setting the environment variable here will be carried into
1800 // fork'd and exec'd processes.
1801 ASSERT_EQ(0, setenv("MALLOC_USE_APP_DEFAULTS", "1", 1));
1802 ExecTestHelper eth;
Christopher Ferris10111be2025-03-13 17:55:39 +00001803 std::string executable(testing::internal::GetArgvs()[0]);
1804 eth.SetArgs({executable.c_str(), "--gtest_also_run_disabled_tests",
Christopher Ferris0b231992023-10-31 15:09:37 -07001805 "--gtest_filter=android_mallopt.DISABLED_verify_decay_time_on", nullptr});
Christopher Ferris10111be2025-03-13 17:55:39 +00001806 eth.Run([&]() { execv(executable.c_str(), eth.GetArgs()); }, 0, R"(\[ PASSED \] 1 test)");
Christopher Ferris0b231992023-10-31 15:09:37 -07001807#else
1808 GTEST_SKIP() << "bionic-only test";
1809#endif
1810}