blob: 3f1ba7959aea34448cc63c0bdc24521436ca9552 [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
Colin Cross4c5595c2021-08-16 15:51:59 -070030#include <sys/cdefs.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080031#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080032#include <sys/types.h>
33#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070034#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070035
Mitch Phillips9cad8422021-01-20 16:03:27 -080036#include <algorithm>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080037#include <atomic>
Christopher Ferris02b6bbc2022-06-02 15:20:23 -070038#include <functional>
Christopher Ferrisd86eb862023-02-28 12:45:54 -080039#include <string>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080040#include <thread>
Christopher Ferrisd86eb862023-02-28 12:45:54 -080041#include <unordered_map>
42#include <utility>
Mitch Phillips9cad8422021-01-20 16:03:27 -080043#include <vector>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080044
Dan Albert4caa1f02014-08-20 09:16:57 -070045#include <tinyxml2.h>
46
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080047#include <android-base/file.h>
Florian Mayer750dcd32022-04-15 15:54:47 -070048#include <android-base/test_utils.h>
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080049
Christopher Ferrisaad7abb2024-09-27 23:51:57 +000050#include "DoNotOptimize.h"
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080051#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000052
Elliott Hughesb1770852018-09-18 12:52:42 -070053#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080054
Peter Collingbourne45819dd2020-01-09 11:00:43 -080055#include "SignalUtils.h"
Peter Collingbourne2659d7b2021-03-05 13:31:41 -080056#include "dlext_private.h"
Peter Collingbourne45819dd2020-01-09 11:00:43 -080057
Christopher Ferrisb874c332020-01-21 16:39:05 -080058#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070059#include "platform/bionic/mte.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080060#include "platform/bionic/reserved_signals.h"
61#include "private/bionic_config.h"
62
Elliott Hughesb1770852018-09-18 12:52:42 -070063#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080064
Colin Cross7da20342021-07-28 11:18:11 -070065#elif defined(__GLIBC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080066
Elliott Hughesb1770852018-09-18 12:52:42 -070067#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080068
Colin Cross4c5595c2021-08-16 15:51:59 -070069#elif defined(ANDROID_HOST_MUSL)
Colin Cross7da20342021-07-28 11:18:11 -070070
71#define HAVE_REALLOCARRAY 1
72
Elliott Hughesb1770852018-09-18 12:52:42 -070073#endif
74
Christopher Ferris885f3b92013-05-21 17:48:01 -070075TEST(malloc, malloc_std) {
76 // Simple malloc test.
77 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070078 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070079 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070080 free(ptr);
81}
82
Christopher Ferrisa4037802014-06-09 19:14:11 -070083TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080084 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070085 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070086 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -070087 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -070088}
89
Christopher Ferris885f3b92013-05-21 17:48:01 -070090TEST(malloc, calloc_std) {
91 // Simple calloc test.
92 size_t alloc_len = 100;
93 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070094 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070095 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
96 for (size_t i = 0; i < alloc_len; i++) {
97 ASSERT_EQ(0, ptr[i]);
98 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070099 free(ptr);
100}
101
Peter Collingbourne978eb162020-09-21 15:26:02 -0700102TEST(malloc, calloc_mem_init_disabled) {
103#if defined(__BIONIC__)
104 // calloc should still zero memory if mem-init is disabled.
105 // With jemalloc the mallopts will fail but that shouldn't affect the
106 // execution of the test.
107 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
108 size_t alloc_len = 100;
109 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
110 for (size_t i = 0; i < alloc_len; i++) {
111 ASSERT_EQ(0, ptr[i]);
112 }
113 free(ptr);
114 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
115#else
116 GTEST_SKIP() << "bionic-only test";
117#endif
118}
119
Christopher Ferrisa4037802014-06-09 19:14:11 -0700120TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800121 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700122 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700123 ASSERT_EQ(nullptr, calloc(-1, 100));
Elliott Hughes95646e62023-09-21 14:11:19 -0700124 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700125}
126
127TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800128 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700129 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700130 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700131 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700132 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700133 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700134 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700135 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700136 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700137 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700138 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700139 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Elliott Hughes95646e62023-09-21 14:11:19 -0700140 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700141}
142
Christopher Ferris885f3b92013-05-21 17:48:01 -0700143TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800144 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700145 // Memalign test where the alignment is any value.
146 for (size_t i = 0; i <= 12; i++) {
147 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700148 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700149 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700150 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
151 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
152 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700153 free(ptr);
154 }
155 }
156}
157
Christopher Ferrisa4037802014-06-09 19:14:11 -0700158TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800159 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700160 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700161}
162
163TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800164 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700165 void* ptr;
166 for (size_t align = 0; align <= 256; align++) {
167 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700168 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700169 free(ptr);
170 }
171}
172
Christopher Ferris885f3b92013-05-21 17:48:01 -0700173TEST(malloc, memalign_realloc) {
174 // Memalign and then realloc the pointer a couple of times.
175 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
176 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700177 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700178 ASSERT_LE(100U, malloc_usable_size(ptr));
179 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
180 memset(ptr, 0x23, 100);
181
182 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700183 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700184 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700185 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700186 for (size_t i = 0; i < 100; i++) {
187 ASSERT_EQ(0x23, ptr[i]);
188 }
189 memset(ptr, 0x45, 200);
190
191 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700192 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700193 ASSERT_LE(300U, malloc_usable_size(ptr));
194 for (size_t i = 0; i < 200; i++) {
195 ASSERT_EQ(0x45, ptr[i]);
196 }
197 memset(ptr, 0x67, 300);
198
199 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700200 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700201 ASSERT_LE(250U, malloc_usable_size(ptr));
202 for (size_t i = 0; i < 250; i++) {
203 ASSERT_EQ(0x67, ptr[i]);
204 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700205 free(ptr);
206 }
207}
208
209TEST(malloc, malloc_realloc_larger) {
210 // Realloc to a larger size, malloc is used for the original allocation.
211 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700212 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700213 ASSERT_LE(100U, malloc_usable_size(ptr));
214 memset(ptr, 67, 100);
215
216 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700217 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700218 ASSERT_LE(200U, malloc_usable_size(ptr));
219 for (size_t i = 0; i < 100; i++) {
220 ASSERT_EQ(67, ptr[i]);
221 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700222 free(ptr);
223}
224
225TEST(malloc, malloc_realloc_smaller) {
226 // Realloc to a smaller size, malloc is used for the original allocation.
227 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700228 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700229 ASSERT_LE(200U, malloc_usable_size(ptr));
230 memset(ptr, 67, 200);
231
232 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700233 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700234 ASSERT_LE(100U, malloc_usable_size(ptr));
235 for (size_t i = 0; i < 100; i++) {
236 ASSERT_EQ(67, ptr[i]);
237 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700238 free(ptr);
239}
240
241TEST(malloc, malloc_multiple_realloc) {
242 // Multiple reallocs, malloc is used for the original allocation.
243 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700244 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700245 ASSERT_LE(200U, malloc_usable_size(ptr));
246 memset(ptr, 0x23, 200);
247
248 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700249 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700250 ASSERT_LE(100U, malloc_usable_size(ptr));
251 for (size_t i = 0; i < 100; i++) {
252 ASSERT_EQ(0x23, ptr[i]);
253 }
254
255 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700256 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700257 ASSERT_LE(50U, malloc_usable_size(ptr));
258 for (size_t i = 0; i < 50; i++) {
259 ASSERT_EQ(0x23, ptr[i]);
260 }
261
262 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700263 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700264 ASSERT_LE(150U, malloc_usable_size(ptr));
265 for (size_t i = 0; i < 50; i++) {
266 ASSERT_EQ(0x23, ptr[i]);
267 }
268 memset(ptr, 0x23, 150);
269
270 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700271 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700272 ASSERT_LE(425U, malloc_usable_size(ptr));
273 for (size_t i = 0; i < 150; i++) {
274 ASSERT_EQ(0x23, ptr[i]);
275 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700276 free(ptr);
277}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700278
Christopher Ferris885f3b92013-05-21 17:48:01 -0700279TEST(malloc, calloc_realloc_larger) {
280 // Realloc to a larger size, calloc is used for the original allocation.
281 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700282 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700283 ASSERT_LE(100U, malloc_usable_size(ptr));
284
285 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700286 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700287 ASSERT_LE(200U, malloc_usable_size(ptr));
288 for (size_t i = 0; i < 100; i++) {
289 ASSERT_EQ(0, ptr[i]);
290 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700291 free(ptr);
292}
293
294TEST(malloc, calloc_realloc_smaller) {
295 // Realloc to a smaller size, calloc is used for the original allocation.
296 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700297 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700298 ASSERT_LE(200U, malloc_usable_size(ptr));
299
300 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700301 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700302 ASSERT_LE(100U, malloc_usable_size(ptr));
303 for (size_t i = 0; i < 100; i++) {
304 ASSERT_EQ(0, ptr[i]);
305 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700306 free(ptr);
307}
308
309TEST(malloc, calloc_multiple_realloc) {
310 // Multiple reallocs, calloc is used for the original allocation.
311 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700312 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700313 ASSERT_LE(200U, malloc_usable_size(ptr));
314
315 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700316 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700317 ASSERT_LE(100U, malloc_usable_size(ptr));
318 for (size_t i = 0; i < 100; i++) {
319 ASSERT_EQ(0, ptr[i]);
320 }
321
322 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700323 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700324 ASSERT_LE(50U, malloc_usable_size(ptr));
325 for (size_t i = 0; i < 50; i++) {
326 ASSERT_EQ(0, ptr[i]);
327 }
328
329 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700330 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700331 ASSERT_LE(150U, malloc_usable_size(ptr));
332 for (size_t i = 0; i < 50; i++) {
333 ASSERT_EQ(0, ptr[i]);
334 }
335 memset(ptr, 0, 150);
336
337 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700338 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700339 ASSERT_LE(425U, malloc_usable_size(ptr));
340 for (size_t i = 0; i < 150; i++) {
341 ASSERT_EQ(0, ptr[i]);
342 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700343 free(ptr);
344}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700345
Christopher Ferrisa4037802014-06-09 19:14:11 -0700346TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800347 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700348 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700349 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700350 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700351 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700352 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700353 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700354 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700355 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700356 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700357}
358
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
// pvalloc/valloc are deprecated and not declared by the headers included
// above, so declare them manually for the tests that exercise them.
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700364TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700365#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700366 size_t pagesize = sysconf(_SC_PAGESIZE);
367 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700368 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700369 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
370 ASSERT_LE(pagesize, malloc_usable_size(ptr));
371 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700372#else
373 GTEST_SKIP() << "pvalloc not supported.";
374#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700375}
376
377TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700378#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700379 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700380#else
381 GTEST_SKIP() << "pvalloc not supported.";
382#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700383}
384
385TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700386#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700387 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700388 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700389 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700390 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
391 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700392#else
393 GTEST_SKIP() << "valloc not supported.";
394#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700395}
396
397TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700398#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700399 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700400#else
401 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000402#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700403}
Dan Albert4caa1f02014-08-20 09:16:57 -0700404
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  // Write the malloc_info XML to a temporary file so it can be read back
  // and parsed.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fclose() below owns the fd now; avoid a double close.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // The document layout depends on which allocator produced it; the root
  // element's "version" attribute identifies the schema.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: one <heap nr=...> per arena, with per-arena totals and a
    // list of <bin> children.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size=... count=...> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800471
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fclose() below owns the fd now; avoid a double close.
  ASSERT_TRUE(fp != nullptr);
  // Sample mallinfo immediately before and after malloc_info, so the XML
  // totals can be bounded by the two snapshots (malloc_info itself
  // allocates, so the totals are not expected to be exact).
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // Sum all allocated byte counts reported in the XML, per allocator schema.
  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
539
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800540TEST(malloc, calloc_usable_size) {
541 for (size_t size = 1; size <= 2048; size++) {
542 void* pointer = malloc(size);
543 ASSERT_TRUE(pointer != nullptr);
544 memset(pointer, 0xeb, malloc_usable_size(pointer));
545 free(pointer);
546
547 // We should get a previous pointer that has been set to non-zero.
548 // If calloc does not zero out all of the data, this will fail.
549 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
550 ASSERT_TRUE(pointer != nullptr);
551 size_t usable_size = malloc_usable_size(zero_mem);
552 for (size_t i = 0; i < usable_size; i++) {
553 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
554 }
555 free(zero_mem);
556 }
557}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800558
559TEST(malloc, malloc_0) {
560 void* p = malloc(0);
561 ASSERT_TRUE(p != nullptr);
562 free(p);
563}
564
565TEST(malloc, calloc_0_0) {
566 void* p = calloc(0, 0);
567 ASSERT_TRUE(p != nullptr);
568 free(p);
569}
570
571TEST(malloc, calloc_0_1) {
572 void* p = calloc(0, 1);
573 ASSERT_TRUE(p != nullptr);
574 free(p);
575}
576
577TEST(malloc, calloc_1_0) {
578 void* p = calloc(1, 0);
579 ASSERT_TRUE(p != nullptr);
580 free(p);
581}
582
583TEST(malloc, realloc_nullptr_0) {
584 // realloc(nullptr, size) is actually malloc(size).
585 void* p = realloc(nullptr, 0);
586 ASSERT_TRUE(p != nullptr);
587 free(p);
588}
589
590TEST(malloc, realloc_0) {
591 void* p = malloc(1024);
592 ASSERT_TRUE(p != nullptr);
593 // realloc(p, 0) is actually free(p).
594 void* p2 = realloc(p, 0);
595 ASSERT_TRUE(p2 == nullptr);
596}
Christopher Ferris72df6702016-02-11 15:51:31 -0800597
// Number of interleaved allocations per data type in verify_alignment.
constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    // The allocation must be aligned for a uint32_t access.
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    // The allocation must be aligned for a uint64_t access.
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  // All typed allocations were kept live simultaneously; release them now.
  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700667
TEST(malloc, mallopt_smoke) {
#if defined(__BIONIC__)
  // An unrecognized mallopt option must fail (return 0) without touching errno.
  errno = 0;
  ASSERT_EQ(0, mallopt(-1000, 1));
  // mallopt doesn't set errno.
  ASSERT_ERRNO(0);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
Elliott Hughesb1770852018-09-18 12:52:42 -0700678
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800679TEST(malloc, mallopt_decay) {
680#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800681 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Chia-hung Duan6abb4062024-04-17 19:08:48 -0700682 ASSERT_EQ(1, mallopt(M_DECAY_TIME, -1));
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800683 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
684 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
685 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
686 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
Chia-hung Duan6abb4062024-04-17 19:08:48 -0700687 ASSERT_EQ(1, mallopt(M_DECAY_TIME, -1));
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800688#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800689 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800690#endif
691}
692
// M_PURGE must be accepted (return 1); the argument is ignored.
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
701
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800702TEST(malloc, mallopt_purge_all) {
703#if defined(__BIONIC__)
704 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800705 ASSERT_EQ(1, mallopt(M_PURGE_ALL, 0));
706#else
707 GTEST_SKIP() << "bionic-only test";
708#endif
709}
710
Christopher Ferrise9a7b812023-05-11 15:36:27 -0700711TEST(malloc, mallopt_log_stats) {
712#if defined(__BIONIC__)
713 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
714 ASSERT_EQ(1, mallopt(M_LOG_STATS, 0));
715#else
716 GTEST_SKIP() << "bionic-only test";
717#endif
718}
719
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800720// Verify that all of the mallopt values are unique.
721TEST(malloc, mallopt_unique_params) {
722#if defined(__BIONIC__)
723 std::vector<std::pair<int, std::string>> params{
724 std::make_pair(M_DECAY_TIME, "M_DECAY_TIME"),
725 std::make_pair(M_PURGE, "M_PURGE"),
726 std::make_pair(M_PURGE_ALL, "M_PURGE_ALL"),
727 std::make_pair(M_MEMTAG_TUNING, "M_MEMTAG_TUNING"),
728 std::make_pair(M_THREAD_DISABLE_MEM_INIT, "M_THREAD_DISABLE_MEM_INIT"),
729 std::make_pair(M_CACHE_COUNT_MAX, "M_CACHE_COUNT_MAX"),
730 std::make_pair(M_CACHE_SIZE_MAX, "M_CACHE_SIZE_MAX"),
731 std::make_pair(M_TSDS_COUNT_MAX, "M_TSDS_COUNT_MAX"),
732 std::make_pair(M_BIONIC_ZERO_INIT, "M_BIONIC_ZERO_INIT"),
733 std::make_pair(M_BIONIC_SET_HEAP_TAGGING_LEVEL, "M_BIONIC_SET_HEAP_TAGGING_LEVEL"),
Christopher Ferrise9a7b812023-05-11 15:36:27 -0700734 std::make_pair(M_LOG_STATS, "M_LOG_STATS"),
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800735 };
736
737 std::unordered_map<int, std::string> all_params;
738 for (const auto& param : params) {
739 EXPECT_TRUE(all_params.count(param.first) == 0)
740 << "mallopt params " << all_params[param.first] << " and " << param.second
741 << " have the same value " << param.first;
742 all_params.insert(param);
743 }
744#else
745 GTEST_SKIP() << "bionic-only test";
746#endif
747}
748
Christopher Ferris88448792020-07-28 14:15:31 -0700749#if defined(__BIONIC__)
750static void GetAllocatorVersion(bool* allocator_scudo) {
751 TemporaryFile tf;
752 ASSERT_TRUE(tf.fd != -1);
753 FILE* fp = fdopen(tf.fd, "w+");
754 tf.release();
755 ASSERT_TRUE(fp != nullptr);
Evgenii Stepanov4edbcee2021-09-17 14:59:15 -0700756 if (malloc_info(0, fp) != 0) {
757 *allocator_scudo = false;
758 return;
759 }
Christopher Ferris88448792020-07-28 14:15:31 -0700760 ASSERT_EQ(0, fclose(fp));
761
762 std::string contents;
763 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
764
765 tinyxml2::XMLDocument doc;
766 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
767
768 auto root = doc.FirstChildElement();
769 ASSERT_NE(nullptr, root);
770 ASSERT_STREQ("malloc", root->Name());
771 std::string version(root->Attribute("version"));
772 *allocator_scudo = (version == "scudo-1");
773}
774#endif
775
// The scudo-specific mallopt options must be accepted when scudo is the
// active allocator; the test is skipped on any other allocator.
TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
  ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
  ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
791
Elliott Hughesb1770852018-09-18 12:52:42 -0700792TEST(malloc, reallocarray_overflow) {
793#if HAVE_REALLOCARRAY
794 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
795 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
796 size_t b = 2;
797
798 errno = 0;
799 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
Elliott Hughes95646e62023-09-21 14:11:19 -0700800 ASSERT_ERRNO(ENOMEM);
Elliott Hughesb1770852018-09-18 12:52:42 -0700801
802 errno = 0;
803 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
Elliott Hughes95646e62023-09-21 14:11:19 -0700804 ASSERT_ERRNO(ENOMEM);
Elliott Hughesb1770852018-09-18 12:52:42 -0700805#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800806 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700807#endif
808}
809
810TEST(malloc, reallocarray) {
811#if HAVE_REALLOCARRAY
812 void* p = reallocarray(nullptr, 2, 32);
813 ASSERT_TRUE(p != nullptr);
814 ASSERT_GE(malloc_usable_size(p), 64U);
815#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800816 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700817#endif
818}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800819
// Verify that mallinfo().uordblks (total allocated space) grows when a new
// allocation is made, for a range of allocation sizes.
TEST(malloc, mallinfo) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  // Upper bound on allocations kept live per size while waiting for
  // uordblks to move.
  static constexpr size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      // Sample uordblks immediately before and after the allocation.
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // free(nullptr) is a no-op, so unused slots are harmless.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000864
Christopher Ferris8248e622021-12-03 13:55:57 -0800865TEST(malloc, mallinfo2) {
Colin Crossfdced952022-01-24 18:15:07 -0800866#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
Christopher Ferris8248e622021-12-03 13:55:57 -0800867 SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
868 static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};
869
Elliott Hughes30088842023-09-14 18:35:11 +0000870 static constexpr size_t kMaxAllocs = 50;
Christopher Ferris8248e622021-12-03 13:55:57 -0800871
872 for (size_t size : sizes) {
873 // If some of these allocations are stuck in a thread cache, then keep
874 // looping until we make an allocation that changes the total size of the
875 // memory allocated.
876 // jemalloc implementations counts the thread cache allocations against
877 // total memory allocated.
878 void* ptrs[kMaxAllocs] = {};
879 bool pass = false;
880 for (size_t i = 0; i < kMaxAllocs; i++) {
881 struct mallinfo info = mallinfo();
882 struct mallinfo2 info2 = mallinfo2();
883 // Verify that mallinfo and mallinfo2 are exactly the same.
Colin Crossfdced952022-01-24 18:15:07 -0800884 ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
885 ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
886 ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
887 ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
888 ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
889 ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
890 ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
891 ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
892 ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
893 ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);
Christopher Ferris8248e622021-12-03 13:55:57 -0800894
895 size_t allocated = info2.uordblks;
896 ptrs[i] = malloc(size);
897 ASSERT_TRUE(ptrs[i] != nullptr);
898
899 info = mallinfo();
900 info2 = mallinfo2();
901 // Verify that mallinfo and mallinfo2 are exactly the same.
Colin Crossfdced952022-01-24 18:15:07 -0800902 ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
903 ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
904 ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
905 ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
906 ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
907 ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
908 ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
909 ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
910 ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
911 ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);
Christopher Ferris8248e622021-12-03 13:55:57 -0800912
913 size_t new_allocated = info2.uordblks;
914 if (allocated != new_allocated) {
915 size_t usable_size = malloc_usable_size(ptrs[i]);
916 // Only check if the total got bigger by at least allocation size.
917 // Sometimes the mallinfo2 numbers can go backwards due to compaction
918 // and/or freeing of cached data.
919 if (new_allocated >= allocated + usable_size) {
920 pass = true;
921 break;
922 }
923 }
924 }
925 for (void* ptr : ptrs) {
926 free(ptr);
927 }
928 ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after "
929 << kMaxAllocs << " allocations.";
930 }
931#else
932 GTEST_SKIP() << "glibc is broken";
933#endif
934}
935
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800936template <typename Type>
937void __attribute__((optnone)) VerifyAlignment(Type* floating) {
938 size_t expected_alignment = alignof(Type);
939 if (expected_alignment != 0) {
940 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
Ryan Prichardf40f2582024-01-09 16:29:20 -0800941 << "Expected alignment " << expected_alignment << " ptr value "
942 << static_cast<void*>(floating);
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800943 }
944}
945
// Allocates many objects of Type via operator new, malloc, and std::vector,
// verifying that every returned pointer satisfies alignof(Type). Returns
// early (leaving remaining checks unrun) on the first fatal failure.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
988
#if defined(__ANDROID__)
// Makes 100 consecutive allocations of |alloc_size| bytes and asserts each
// result is aligned to at least |aligned_bytes| (which must be a power of
// two). NOTE(review): the allocations are never freed — presumably so every
// iteration gets a fresh chunk rather than a recycled one; confirm intent.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
}
#endif
1002
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001003void AlignCheck() {
Christopher Ferrisf32494c2020-01-08 14:19:10 -08001004 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
1005 // for a discussion of type alignment.
1006 ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
1007 ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
1008 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());
1009
1010 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
1011 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
1012 ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
1013 ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
1014 ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
1015 ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
1016 ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
1017 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
1018 ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
1019 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
1020 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
1021 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
1022 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
1023 ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());
1024
1025#if defined(__ANDROID__)
1026 // On Android, there is a lot of code that expects certain alignments:
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001027 // 1. Allocations of a size that rounds up to a multiple of 16 bytes
1028 // must have at least 16 byte alignment.
1029 // 2. Allocations of a size that rounds up to a multiple of 8 bytes and
1030 // not 16 bytes, are only required to have at least 8 byte alignment.
1031 // In addition, on Android clang has been configured for 64 bit such that:
1032 // 3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
1033 // 4. Allocations > 8 bytes must be aligned to at least 16 bytes.
1034 // For 32 bit environments, only the first two requirements must be met.
Christopher Ferrisf32494c2020-01-08 14:19:10 -08001035
1036 // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
1037 // a discussion of this alignment mess. The code below is enforcing
1038 // strong-alignment, since who knows what code depends on this behavior now.
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001039 // As mentioned before, for 64 bit this will enforce the higher
1040 // requirement since clang expects this behavior on Android now.
Christopher Ferrisf32494c2020-01-08 14:19:10 -08001041 for (size_t i = 1; i <= 128; i++) {
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001042#if defined(__LP64__)
1043 if (i <= 8) {
1044 AndroidVerifyAlignment(i, 8);
1045 } else {
1046 AndroidVerifyAlignment(i, 16);
1047 }
1048#else
Christopher Ferrisf32494c2020-01-08 14:19:10 -08001049 size_t rounded = (i + 7) & ~7;
1050 if ((rounded % 16) == 0) {
1051 AndroidVerifyAlignment(i, 16);
1052 } else {
1053 AndroidVerifyAlignment(i, 8);
1054 }
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001055#endif
Christopher Ferrisf32494c2020-01-08 14:19:10 -08001056 if (::testing::Test::HasFatalFailure()) {
1057 return;
1058 }
1059 }
1060#endif
1061}
1062
Christopher Ferrisb3cac0f2021-09-21 10:32:40 -07001063TEST(malloc, align_check) {
1064 AlignCheck();
1065}
1066
Christopher Ferris201dcf42020-01-29 13:09:31 -08001067// Jemalloc doesn't pass this test right now, so leave it as disabled.
1068TEST(malloc, DISABLED_alloc_after_fork) {
1069 // Both of these need to be a power of 2.
1070 static constexpr size_t kMinAllocationSize = 8;
1071 static constexpr size_t kMaxAllocationSize = 2097152;
1072
1073 static constexpr size_t kNumAllocatingThreads = 5;
1074 static constexpr size_t kNumForkLoops = 100;
1075
1076 std::atomic_bool stop;
1077
1078 // Create threads that simply allocate and free different sizes.
1079 std::vector<std::thread*> threads;
1080 for (size_t i = 0; i < kNumAllocatingThreads; i++) {
1081 std::thread* t = new std::thread([&stop] {
1082 while (!stop) {
1083 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001084 void* ptr;
1085 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001086 free(ptr);
1087 }
1088 }
1089 });
1090 threads.push_back(t);
1091 }
1092
1093 // Create a thread to fork and allocate.
1094 for (size_t i = 0; i < kNumForkLoops; i++) {
1095 pid_t pid;
1096 if ((pid = fork()) == 0) {
1097 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
Elliott Hughes7cda75f2020-10-22 13:22:35 -07001098 void* ptr;
1099 DoNotOptimize(ptr = malloc(size));
Christopher Ferris201dcf42020-01-29 13:09:31 -08001100 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris201dcf42020-01-29 13:09:31 -08001101 // Make sure we can touch all of the allocation.
1102 memset(ptr, 0x1, size);
1103 ASSERT_LE(size, malloc_usable_size(ptr));
1104 free(ptr);
1105 }
1106 _exit(10);
1107 }
1108 ASSERT_NE(-1, pid);
1109 AssertChildExited(pid, 10);
1110 }
1111
1112 stop = true;
1113 for (auto thread : threads) {
1114 thread->join();
1115 delete thread;
1116 }
1117}
1118
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001119TEST(android_mallopt, error_on_unexpected_option) {
1120#if defined(__BIONIC__)
1121 const int unrecognized_option = -1;
1122 errno = 0;
1123 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
Elliott Hughes95646e62023-09-21 14:11:19 -07001124 EXPECT_ERRNO(ENOTSUP);
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001125#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001126 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001127#endif
1128}
1129
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001130bool IsDynamic() {
1131#if defined(__LP64__)
1132 Elf64_Ehdr ehdr;
1133#else
1134 Elf32_Ehdr ehdr;
1135#endif
1136 std::string path(android::base::GetExecutablePath());
1137
1138 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
1139 if (fd == -1) {
1140 // Assume dynamic on error.
1141 return true;
1142 }
1143 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
1144 close(fd);
1145 // Assume dynamic in error cases.
1146 return !read_completed || ehdr.e_type == ET_DYN;
1147}
1148
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001149TEST(android_mallopt, init_zygote_child_profiling) {
1150#if defined(__BIONIC__)
1151 // Successful call.
1152 errno = 0;
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001153 if (IsDynamic()) {
1154 EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
Elliott Hughes95646e62023-09-21 14:11:19 -07001155 EXPECT_ERRNO(0);
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001156 } else {
1157 // Not supported in static executables.
1158 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
Elliott Hughes95646e62023-09-21 14:11:19 -07001159 EXPECT_ERRNO(ENOTSUP);
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001160 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001161
1162 // Unexpected arguments rejected.
1163 errno = 0;
1164 char unexpected = 0;
1165 EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001166 if (IsDynamic()) {
Elliott Hughes95646e62023-09-21 14:11:19 -07001167 EXPECT_ERRNO(EINVAL);
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001168 } else {
Elliott Hughes95646e62023-09-21 14:11:19 -07001169 EXPECT_ERRNO(ENOTSUP);
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001170 }
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001171#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001172 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001173#endif
1174}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001175
#if defined(__BIONIC__)
// Runs in an EXPECT_EXIT child: installs a 128MB allocation limit, then
// checks that |func| succeeds for a request below the limit and fails for
// one at the limit. Exits 0 on success, 1 on any unexpected outcome.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  const bool below_limit_succeeded = func(20 * 1024 * 1024);
  if (!below_limit_succeeded) {
    exit(1);
  }
  const bool at_limit_succeeded = func(128 * 1024 * 1024);
  exit(at_limit_succeeded ? 1 : 0);
}
#endif
1189
// Every allocation entry point (calloc, malloc, memalign, posix_memalign,
// aligned_alloc, realloc, and the legacy pvalloc/valloc on 32 bit) must
// honor M_SET_ALLOCATION_LIMIT_BYTES. Each check runs in its own
// EXPECT_EXIT child so the limit does not leak between cases.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
#if !defined(__LP64__)
  // pvalloc/valloc are only kept for 32-bit backwards compatibility.
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1224
// The allocation limit can only be set once per process: the second call
// must fail even with a different value.
TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1236
#if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

// Allocates kAllocationSize chunks until malloc fails, returning how many
// succeeded before the failure. Returns 0 if the limit was never hit within
// the fixed number of attempts (callers treat 0 as "limit never reached").
// All successfully allocated chunks are freed before returning.
static size_t GetMaxAllocations() {
  static constexpr size_t kMaxAttempts = 20;
  void* ptrs[kMaxAttempts];
  size_t max_pointers = 0;
  for (size_t i = 0; i < kMaxAttempts; i++) {
    void* ptr = malloc(kAllocationSize);
    if (ptr == nullptr) {
      // The limit kicked in after i successful allocations.
      max_pointers = i;
      break;
    }
    ptrs[i] = ptr;
  }
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}

// Verifies the limit is still in force: exactly |max_pointers| allocations
// of kAllocationSize must succeed and the next one must fail.
static void VerifyMaxPointers(size_t max_pointers) {
  void* ptrs[20];
  for (size_t i = 0; i < max_pointers; i++) {
    ptrs[i] = malloc(kAllocationSize);
    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
  }

  // Make sure the next allocation still fails.
  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
}
#endif
1271
1272TEST(android_mallopt, set_allocation_limit_realloc_increase) {
1273#if defined(__BIONIC__)
1274 size_t limit = 128 * 1024 * 1024;
1275 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1276
1277 size_t max_pointers = GetMaxAllocations();
1278 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1279
1280 void* memory = malloc(10 * 1024 * 1024);
1281 ASSERT_TRUE(memory != nullptr);
1282
1283 // Increase size.
1284 memory = realloc(memory, 20 * 1024 * 1024);
1285 ASSERT_TRUE(memory != nullptr);
1286 memory = realloc(memory, 40 * 1024 * 1024);
1287 ASSERT_TRUE(memory != nullptr);
1288 memory = realloc(memory, 60 * 1024 * 1024);
1289 ASSERT_TRUE(memory != nullptr);
1290 memory = realloc(memory, 80 * 1024 * 1024);
1291 ASSERT_TRUE(memory != nullptr);
1292 // Now push past limit.
1293 memory = realloc(memory, 130 * 1024 * 1024);
1294 ASSERT_TRUE(memory == nullptr);
1295
1296 VerifyMaxPointers(max_pointers);
1297#else
Elliott Hughes10907202019-03-27 08:51:02 -07001298 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001299#endif
1300}
1301
1302TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
1303#if defined(__BIONIC__)
1304 size_t limit = 100 * 1024 * 1024;
1305 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1306
1307 size_t max_pointers = GetMaxAllocations();
1308 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1309
1310 void* memory = malloc(80 * 1024 * 1024);
1311 ASSERT_TRUE(memory != nullptr);
1312
1313 // Decrease size.
1314 memory = realloc(memory, 60 * 1024 * 1024);
1315 ASSERT_TRUE(memory != nullptr);
1316 memory = realloc(memory, 40 * 1024 * 1024);
1317 ASSERT_TRUE(memory != nullptr);
1318 memory = realloc(memory, 20 * 1024 * 1024);
1319 ASSERT_TRUE(memory != nullptr);
1320 memory = realloc(memory, 10 * 1024 * 1024);
1321 ASSERT_TRUE(memory != nullptr);
1322 free(memory);
1323
1324 VerifyMaxPointers(max_pointers);
1325#else
Elliott Hughes10907202019-03-27 08:51:02 -07001326 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001327#endif
1328}
1329
// realloc(ptr, 0) frees the allocation and returns nullptr; the freed bytes
// must be credited back so the limit accounting stays correct.
TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // realloc to zero acts as free and returns nullptr in bionic.
  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1349
#if defined(__BIONIC__)
// Runs in a forked child: races several threads all trying to set the
// allocation limit while a profiler signal fires, then verifies exactly one
// thread succeeded. Calls _exit(0) on success so the parent can check the
// child's exit status.
static void SetAllocationLimitMultipleThreads() {
  static constexpr size_t kNumThreads = 4;
  std::atomic_bool start_running = false;
  // Initialize the counters explicitly: a default-constructed std::atomic
  // holds an indeterminate value before C++20, and both are read/modified
  // immediately by the threads below.
  std::atomic<size_t> num_running(0);
  std::atomic<size_t> num_successful(0);
  std::unique_ptr<std::thread> threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    threads[i].reset(new std::thread([&num_running, &start_running, &num_successful] {
      // Announce readiness, then spin until all threads are released at once.
      ++num_running;
      while (!start_running) {
      }
      size_t limit = 500 * 1024 * 1024;
      if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
        ++num_successful;
      }
    }));
  }

  // Wait until all of the threads have started.
  while (num_running != kNumThreads)
    ;

  // Now start all of the threads setting the mallopt at once.
  start_running = true;

  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler. This will verify that changing the limit while
  // the allocation handlers are being changed at the same time works,
  // or that the limit handler is changed first and this also works properly.
  union sigval signal_value {};
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Wait for all of the threads to finish.
  for (size_t i = 0; i < kNumThreads; i++) {
    threads[i]->join();
  }
  ASSERT_EQ(1U, num_successful) << "Only one thread should be able to set the limit.";
  _exit(0);
}
#endif
1391
// Stress test: repeatedly fork a child that races multiple threads setting
// the allocation limit (see SetAllocationLimitMultipleThreads) and require
// that every child exits cleanly with status 0.
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    // Enable the profiling signal handler so the child exercises the
    // limit-vs-handler race path.
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001414
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001415#if defined(__BIONIC__)
Mitch Phillipsebc2ac92024-05-02 13:25:46 +02001416using Mode = android_mallopt_gwp_asan_options_t::Mode;
Mitch Phillipse6997d52020-11-30 15:04:14 -08001417TEST(android_mallopt, DISABLED_multiple_enable_gwp_asan) {
1418 android_mallopt_gwp_asan_options_t options;
1419 options.program_name = ""; // Don't infer GWP-ASan options from sysprops.
Mitch Phillipsebc2ac92024-05-02 13:25:46 +02001420 options.mode = Mode::APP_MANIFEST_NEVER;
Mitch Phillipse6997d52020-11-30 15:04:14 -08001421 // GWP-ASan should already be enabled. Trying to enable or disable it should
1422 // always pass.
1423 ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
Mitch Phillipsebc2ac92024-05-02 13:25:46 +02001424 options.mode = Mode::APP_MANIFEST_DEFAULT;
Mitch Phillipse6997d52020-11-30 15:04:14 -08001425 ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
1426}
1427#endif // defined(__BIONIC__)
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001428
Mitch Phillipse6997d52020-11-30 15:04:14 -08001429TEST(android_mallopt, multiple_enable_gwp_asan) {
1430#if defined(__BIONIC__)
1431 // Always enable GWP-Asan, with default options.
1432 RunGwpAsanTest("*.DISABLED_multiple_enable_gwp_asan");
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001433#else
1434 GTEST_SKIP() << "bionic extension";
1435#endif
1436}
1437
Florian Mayercc61ad82022-08-31 11:43:30 -07001438TEST(android_mallopt, memtag_stack_is_on) {
1439#if defined(__BIONIC__)
1440 bool memtag_stack;
1441 EXPECT_TRUE(android_mallopt(M_MEMTAG_STACK_IS_ON, &memtag_stack, sizeof(memtag_stack)));
1442#else
1443 GTEST_SKIP() << "bionic extension";
1444#endif
1445}
1446
Mitch Phillips9cad8422021-01-20 16:03:27 -08001447void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1448 std::vector<void*> allocs;
1449 constexpr int kMaxBytesToCheckZero = 64;
1450 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1451
1452 for (int i = 0; i < num_iterations; ++i) {
1453 int size = get_alloc_size(i);
1454 allocs.push_back(malloc(size));
1455 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1456 }
1457
1458 for (void* alloc : allocs) {
1459 free(alloc);
1460 }
1461 allocs.clear();
1462
1463 for (int i = 0; i < num_iterations; ++i) {
1464 int size = get_alloc_size(i);
1465 allocs.push_back(malloc(size));
1466 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1467 }
1468
1469 for (void* alloc : allocs) {
1470 free(alloc);
1471 }
1472}
1473
1474TEST(malloc, zero_init) {
1475#if defined(__BIONIC__)
1476 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
1477 bool allocator_scudo;
1478 GetAllocatorVersion(&allocator_scudo);
1479 if (!allocator_scudo) {
1480 GTEST_SKIP() << "scudo allocator only test";
1481 }
1482
1483 mallopt(M_BIONIC_ZERO_INIT, 1);
1484
1485 // Test using a block of 4K small (1-32 byte) allocations.
1486 TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
1487 return 1 + iteration % 32;
1488 });
1489
1490 // Also test large allocations that land in the scudo secondary, as this is
1491 // the only part of Scudo that's changed by enabling zero initialization with
1492 // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
1493 // release secondary allocations back to the OS) was modified to 0ms/1ms by
1494 // mallopt_decay. Ensure that we delay for at least a second before releasing
1495 // pages to the OS in order to avoid implicit zeroing by the kernel.
Chia-hung Duan6abb4062024-04-17 19:08:48 -07001496 mallopt(M_DECAY_TIME, 1);
Mitch Phillips9cad8422021-01-20 16:03:27 -08001497 TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
1498 return 1 << (19 + iteration % 4);
1499 });
1500
1501#else
1502 GTEST_SKIP() << "bionic-only test";
1503#endif
1504}
1505
// Note that MTE is enabled on cc_tests on devices that support MTE.
// Verifies that disabling heap tagging via mallopt applies process-wide: a
// thread created *before* the mallopt call must also observe MTE turned off.
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  // The semaphore holds the helper thread until after heap tagging has been
  // disabled on the main thread.
  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     // Report this thread's tag-check-fault setting back to
                     // the joiner via the thread return value.
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  // Disable heap tagging, then release the helper thread so it samples its
  // own PR_GET_TAGGED_ADDR_CTRL state strictly afterwards.
  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  // The calling thread must now have tag-check faults disabled...
  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(static_cast<unsigned long>(PR_MTE_TCF_NONE), my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // ...and the pre-existing helper thread must match exactly.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne2659d7b2021-03-05 13:31:41 -08001540
// Verifies the scudo "allocation slack" compat behavior: with an old target
// SDK, reading a few bytes past the end of a large allocation must not crash.
TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE; // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end of
  // a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  // volatile keeps the compiler from optimizing away the deliberate
  // out-of-bounds read below; the test passes if this read does not fault.
  volatile char *vp = p.get();
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Evgenii Stepanovf0d7a342021-11-16 17:34:39 -08001561
1562// Regression test for b/206701345 -- scudo bug, MTE only.
1563// Fix: https://reviews.llvm.org/D105261
1564// Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
1565TEST(malloc, realloc_mte_crash_b206701345) {
1566 // We want to hit in-place realloc at the very end of an mmap-ed region. Not
1567 // all size classes allow such placement - mmap size has to be divisible by
1568 // the block size. At the time of writing this could only be reproduced with
1569 // 64 byte size class (i.e. 48 byte allocations), but that may change in the
1570 // future. Try several different classes at the lower end.
1571 std::vector<void*> ptrs(10000);
1572 for (int i = 1; i < 32; ++i) {
1573 size_t sz = 16 * i - 1;
1574 for (void*& p : ptrs) {
1575 p = realloc(malloc(sz), sz + 1);
1576 }
1577
1578 for (void* p : ptrs) {
1579 free(p);
1580 }
1581 }
1582}
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001583
1584void VerifyAllocationsAreZero(std::function<void*(size_t)> alloc_func, std::string function_name,
1585 std::vector<size_t>& test_sizes, size_t max_allocations) {
1586 // Vector of zero'd data used for comparisons. Make it twice the largest size.
1587 std::vector<char> zero(test_sizes.back() * 2, 0);
1588
1589 SCOPED_TRACE(testing::Message() << function_name << " failed to zero memory");
1590
1591 for (size_t test_size : test_sizes) {
1592 std::vector<void*> ptrs(max_allocations);
1593 for (size_t i = 0; i < ptrs.size(); i++) {
1594 SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
1595 ptrs[i] = alloc_func(test_size);
1596 ASSERT_TRUE(ptrs[i] != nullptr);
1597 size_t alloc_size = malloc_usable_size(ptrs[i]);
1598 ASSERT_LE(alloc_size, zero.size());
1599 ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
1600
1601 // Set the memory to non-zero to make sure if the pointer
1602 // is reused it's still zero.
1603 memset(ptrs[i], 0xab, alloc_size);
1604 }
1605 // Free the pointers.
1606 for (size_t i = 0; i < ptrs.size(); i++) {
1607 free(ptrs[i]);
1608 }
1609 for (size_t i = 0; i < ptrs.size(); i++) {
1610 SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
1611 ptrs[i] = malloc(test_size);
1612 ASSERT_TRUE(ptrs[i] != nullptr);
1613 size_t alloc_size = malloc_usable_size(ptrs[i]);
1614 ASSERT_LE(alloc_size, zero.size());
1615 ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
1616 }
1617 // Free all of the pointers later to maximize the chance of reusing from
1618 // the first loop.
1619 for (size_t i = 0; i < ptrs.size(); i++) {
1620 free(ptrs[i]);
1621 }
1622 }
1623}
1624
1625// Verify that small and medium allocations are always zero.
Christopher Ferris59075562023-04-04 14:37:26 -07001626// @CddTest = 9.7/C-4-1
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001627TEST(malloc, zeroed_allocations_small_medium_sizes) {
1628#if !defined(__BIONIC__)
1629 GTEST_SKIP() << "Only valid on bionic";
1630#endif
Christopher Ferrisfb4b87b2024-07-31 23:43:31 +00001631 SKIP_WITH_HWASAN << "Only test system allocator, not hwasan allocator.";
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001632
1633 if (IsLowRamDevice()) {
1634 GTEST_SKIP() << "Skipped on low memory devices.";
1635 }
1636
1637 constexpr size_t kMaxAllocations = 1024;
1638 std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
1639 VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
1640 kMaxAllocations);
1641
1642 VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
1643 test_sizes, kMaxAllocations);
1644
1645 VerifyAllocationsAreZero(
1646 [](size_t size) -> void* {
1647 void* ptr;
1648 if (posix_memalign(&ptr, 64, size) == 0) {
1649 return ptr;
1650 }
1651 return nullptr;
1652 },
1653 "posix_memalign", test_sizes, kMaxAllocations);
1654}
1655
1656// Verify that large allocations are always zero.
Christopher Ferris59075562023-04-04 14:37:26 -07001657// @CddTest = 9.7/C-4-1
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001658TEST(malloc, zeroed_allocations_large_sizes) {
1659#if !defined(__BIONIC__)
1660 GTEST_SKIP() << "Only valid on bionic";
1661#endif
Christopher Ferrisfb4b87b2024-07-31 23:43:31 +00001662 SKIP_WITH_HWASAN << "Only test system allocator, not hwasan allocator.";
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001663
1664 if (IsLowRamDevice()) {
1665 GTEST_SKIP() << "Skipped on low memory devices.";
1666 }
1667
1668 constexpr size_t kMaxAllocations = 20;
1669 std::vector<size_t> test_sizes = {1000000, 2000000, 3000000, 4000000};
1670 VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
1671 kMaxAllocations);
1672
1673 VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
1674 test_sizes, kMaxAllocations);
1675
1676 VerifyAllocationsAreZero(
1677 [](size_t size) -> void* {
1678 void* ptr;
1679 if (posix_memalign(&ptr, 64, size) == 0) {
1680 return ptr;
1681 }
1682 return nullptr;
1683 },
1684 "posix_memalign", test_sizes, kMaxAllocations);
1685}
1686
Christopher Ferris59075562023-04-04 14:37:26 -07001687// Verify that reallocs are zeroed when expanded.
1688// @CddTest = 9.7/C-4-1
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001689TEST(malloc, zeroed_allocations_realloc) {
1690#if !defined(__BIONIC__)
1691 GTEST_SKIP() << "Only valid on bionic";
1692#endif
Christopher Ferrisfb4b87b2024-07-31 23:43:31 +00001693 SKIP_WITH_HWASAN << "Only test system allocator, not hwasan allocator.";
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001694
1695 if (IsLowRamDevice()) {
1696 GTEST_SKIP() << "Skipped on low memory devices.";
1697 }
1698
1699 // Vector of zero'd data used for comparisons.
1700 constexpr size_t kMaxMemorySize = 131072;
1701 std::vector<char> zero(kMaxMemorySize, 0);
1702
1703 constexpr size_t kMaxAllocations = 1024;
1704 std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
1705 // Do a number of allocations and set them to non-zero.
1706 for (size_t test_size : test_sizes) {
1707 std::vector<void*> ptrs(kMaxAllocations);
1708 for (size_t i = 0; i < kMaxAllocations; i++) {
1709 ptrs[i] = malloc(test_size);
1710 ASSERT_TRUE(ptrs[i] != nullptr);
1711
1712 // Set the memory to non-zero to make sure if the pointer
1713 // is reused it's still zero.
1714 memset(ptrs[i], 0xab, malloc_usable_size(ptrs[i]));
1715 }
1716 // Free the pointers.
1717 for (size_t i = 0; i < kMaxAllocations; i++) {
1718 free(ptrs[i]);
1719 }
1720 }
1721
1722 // Do the reallocs to a larger size and verify the rest of the allocation
1723 // is zero.
1724 constexpr size_t kInitialSize = 8;
1725 for (size_t test_size : test_sizes) {
1726 std::vector<void*> ptrs(kMaxAllocations);
1727 for (size_t i = 0; i < kMaxAllocations; i++) {
1728 ptrs[i] = malloc(kInitialSize);
1729 ASSERT_TRUE(ptrs[i] != nullptr);
1730 size_t orig_alloc_size = malloc_usable_size(ptrs[i]);
1731
1732 ptrs[i] = realloc(ptrs[i], test_size);
1733 ASSERT_TRUE(ptrs[i] != nullptr);
1734 size_t new_alloc_size = malloc_usable_size(ptrs[i]);
1735 char* ptr = reinterpret_cast<char*>(ptrs[i]);
1736 ASSERT_EQ(0, memcmp(&ptr[orig_alloc_size], zero.data(), new_alloc_size - orig_alloc_size))
1737 << "realloc from " << kInitialSize << " to size " << test_size << " at iteration " << i;
1738 }
1739 for (size_t i = 0; i < kMaxAllocations; i++) {
1740 free(ptrs[i]);
1741 }
1742 }
1743}
Christopher Ferrisb4e560e2023-10-26 17:00:00 -07001744
// M_GET_DECAY_TIME_ENABLED must reject invalid argument buffers with EINVAL.
TEST(android_mallopt, get_decay_time_enabled_errors) {
#if defined(__BIONIC__)
  // A null output pointer is invalid.
  errno = 0;
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, nullptr, sizeof(bool)));
  EXPECT_ERRNO(EINVAL);

  // An output buffer of the wrong size (int instead of bool) is also invalid.
  errno = 0;
  int value;
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_ERRNO(EINVAL);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1759
1760TEST(android_mallopt, get_decay_time_enabled) {
1761#if defined(__BIONIC__)
1762 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
1763
1764 EXPECT_EQ(1, mallopt(M_DECAY_TIME, 0));
1765
1766 bool value;
1767 EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
1768 EXPECT_FALSE(value);
1769
1770 EXPECT_EQ(1, mallopt(M_DECAY_TIME, 1));
1771 EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
1772 EXPECT_TRUE(value);
Chia-hung Duan6abb4062024-04-17 19:08:48 -07001773
1774 EXPECT_EQ(1, mallopt(M_DECAY_TIME, -1));
1775 EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
1776 EXPECT_FALSE(value);
Christopher Ferrisb4e560e2023-10-26 17:00:00 -07001777#else
1778 GTEST_SKIP() << "bionic-only test";
1779#endif
1780}