blob: 26e869f462efd8c19f276e6278f39dcc987538e3 [file] [log] [blame]
Christopher Ferris885f3b92013-05-21 17:48:01 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080019#include <elf.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070020#include <limits.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000021#include <malloc.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080022#include <pthread.h>
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070023#include <semaphore.h>
Ryan Savitski175c8862020-01-02 19:54:57 +000024#include <signal.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070025#include <stdint.h>
Christopher Ferris6c619a02019-03-01 17:59:51 -080026#include <stdio.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070027#include <stdlib.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080028#include <string.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080029#include <sys/auxv.h>
Colin Cross4c5595c2021-08-16 15:51:59 -070030#include <sys/cdefs.h>
Peter Collingbourne45819dd2020-01-09 11:00:43 -080031#include <sys/prctl.h>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080032#include <sys/types.h>
33#include <sys/wait.h>
Christopher Ferrisa4037802014-06-09 19:14:11 -070034#include <unistd.h>
Christopher Ferris885f3b92013-05-21 17:48:01 -070035
Mitch Phillips9cad8422021-01-20 16:03:27 -080036#include <algorithm>
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -080037#include <atomic>
Christopher Ferris02b6bbc2022-06-02 15:20:23 -070038#include <functional>
Christopher Ferrisd86eb862023-02-28 12:45:54 -080039#include <string>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080040#include <thread>
Christopher Ferrisd86eb862023-02-28 12:45:54 -080041#include <unordered_map>
42#include <utility>
Mitch Phillips9cad8422021-01-20 16:03:27 -080043#include <vector>
Christopher Ferrisf32494c2020-01-08 14:19:10 -080044
Dan Albert4caa1f02014-08-20 09:16:57 -070045#include <tinyxml2.h>
46
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080047#include <android-base/file.h>
Florian Mayer750dcd32022-04-15 15:54:47 -070048#include <android-base/test_utils.h>
Christopher Ferrise4cdbc42019-02-08 17:30:58 -080049
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080050#include "utils.h"
Dan Alberte5fdaa42014-06-14 01:04:31 +000051
Elliott Hughesb1770852018-09-18 12:52:42 -070052#if defined(__BIONIC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080053
Peter Collingbourne45819dd2020-01-09 11:00:43 -080054#include "SignalUtils.h"
Peter Collingbourne2659d7b2021-03-05 13:31:41 -080055#include "dlext_private.h"
Peter Collingbourne45819dd2020-01-09 11:00:43 -080056
Christopher Ferrisb874c332020-01-21 16:39:05 -080057#include "platform/bionic/malloc.h"
Peter Collingbourne5d3aa862020-09-11 15:05:17 -070058#include "platform/bionic/mte.h"
Christopher Ferrisb874c332020-01-21 16:39:05 -080059#include "platform/bionic/reserved_signals.h"
60#include "private/bionic_config.h"
61
Elliott Hughesb1770852018-09-18 12:52:42 -070062#define HAVE_REALLOCARRAY 1
Christopher Ferrisb874c332020-01-21 16:39:05 -080063
Colin Cross7da20342021-07-28 11:18:11 -070064#elif defined(__GLIBC__)
Christopher Ferrisb874c332020-01-21 16:39:05 -080065
Elliott Hughesb1770852018-09-18 12:52:42 -070066#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
Christopher Ferrisb874c332020-01-21 16:39:05 -080067
Colin Cross4c5595c2021-08-16 15:51:59 -070068#elif defined(ANDROID_HOST_MUSL)
Colin Cross7da20342021-07-28 11:18:11 -070069
70#define HAVE_REALLOCARRAY 1
71
Elliott Hughesb1770852018-09-18 12:52:42 -070072#endif
73
Christopher Ferris885f3b92013-05-21 17:48:01 -070074TEST(malloc, malloc_std) {
75 // Simple malloc test.
76 void *ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -070077 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070078 ASSERT_LE(100U, malloc_usable_size(ptr));
Christopher Ferris885f3b92013-05-21 17:48:01 -070079 free(ptr);
80}
81
Christopher Ferrisa4037802014-06-09 19:14:11 -070082TEST(malloc, malloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -080083 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -070084 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -070085 ASSERT_EQ(nullptr, malloc(SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -070086 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -070087}
88
Christopher Ferris885f3b92013-05-21 17:48:01 -070089TEST(malloc, calloc_std) {
90 // Simple calloc test.
91 size_t alloc_len = 100;
92 char *ptr = (char *)calloc(1, alloc_len);
Yi Kong32bc0fc2018-08-02 17:31:13 -070093 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -070094 ASSERT_LE(alloc_len, malloc_usable_size(ptr));
95 for (size_t i = 0; i < alloc_len; i++) {
96 ASSERT_EQ(0, ptr[i]);
97 }
Christopher Ferris885f3b92013-05-21 17:48:01 -070098 free(ptr);
99}
100
Peter Collingbourne978eb162020-09-21 15:26:02 -0700101TEST(malloc, calloc_mem_init_disabled) {
102#if defined(__BIONIC__)
103 // calloc should still zero memory if mem-init is disabled.
104 // With jemalloc the mallopts will fail but that shouldn't affect the
105 // execution of the test.
106 mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
107 size_t alloc_len = 100;
108 char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
109 for (size_t i = 0; i < alloc_len; i++) {
110 ASSERT_EQ(0, ptr[i]);
111 }
112 free(ptr);
113 mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
114#else
115 GTEST_SKIP() << "bionic-only test";
116#endif
117}
118
Christopher Ferrisa4037802014-06-09 19:14:11 -0700119TEST(malloc, calloc_illegal) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800120 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700121 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700122 ASSERT_EQ(nullptr, calloc(-1, 100));
Elliott Hughes95646e62023-09-21 14:11:19 -0700123 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700124}
125
126TEST(malloc, calloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800127 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700128 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700129 ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700130 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700131 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700132 ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700133 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700134 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700135 ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700136 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700137 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700138 ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
Elliott Hughes95646e62023-09-21 14:11:19 -0700139 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700140}
141
Christopher Ferris885f3b92013-05-21 17:48:01 -0700142TEST(malloc, memalign_multiple) {
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800143 SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
Christopher Ferris885f3b92013-05-21 17:48:01 -0700144 // Memalign test where the alignment is any value.
145 for (size_t i = 0; i <= 12; i++) {
146 for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
Christopher Ferrisa4037802014-06-09 19:14:11 -0700147 char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700148 ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700149 ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
150 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
151 << "Failed at alignment " << alignment;
Christopher Ferris885f3b92013-05-21 17:48:01 -0700152 free(ptr);
153 }
154 }
155}
156
Christopher Ferrisa4037802014-06-09 19:14:11 -0700157TEST(malloc, memalign_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800158 SKIP_WITH_HWASAN;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700159 ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
Christopher Ferrisa4037802014-06-09 19:14:11 -0700160}
161
162TEST(malloc, memalign_non_power2) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800163 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700164 void* ptr;
165 for (size_t align = 0; align <= 256; align++) {
166 ptr = memalign(align, 1024);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700167 ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700168 free(ptr);
169 }
170}
171
Christopher Ferris885f3b92013-05-21 17:48:01 -0700172TEST(malloc, memalign_realloc) {
173 // Memalign and then realloc the pointer a couple of times.
174 for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
175 char *ptr = (char*)memalign(alignment, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700176 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700177 ASSERT_LE(100U, malloc_usable_size(ptr));
178 ASSERT_EQ(0U, (intptr_t)ptr % alignment);
179 memset(ptr, 0x23, 100);
180
181 ptr = (char*)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700182 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700183 ASSERT_LE(200U, malloc_usable_size(ptr));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700184 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700185 for (size_t i = 0; i < 100; i++) {
186 ASSERT_EQ(0x23, ptr[i]);
187 }
188 memset(ptr, 0x45, 200);
189
190 ptr = (char*)realloc(ptr, 300);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700191 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700192 ASSERT_LE(300U, malloc_usable_size(ptr));
193 for (size_t i = 0; i < 200; i++) {
194 ASSERT_EQ(0x45, ptr[i]);
195 }
196 memset(ptr, 0x67, 300);
197
198 ptr = (char*)realloc(ptr, 250);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700199 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700200 ASSERT_LE(250U, malloc_usable_size(ptr));
201 for (size_t i = 0; i < 250; i++) {
202 ASSERT_EQ(0x67, ptr[i]);
203 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700204 free(ptr);
205 }
206}
207
208TEST(malloc, malloc_realloc_larger) {
209 // Realloc to a larger size, malloc is used for the original allocation.
210 char *ptr = (char *)malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700211 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700212 ASSERT_LE(100U, malloc_usable_size(ptr));
213 memset(ptr, 67, 100);
214
215 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700216 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700217 ASSERT_LE(200U, malloc_usable_size(ptr));
218 for (size_t i = 0; i < 100; i++) {
219 ASSERT_EQ(67, ptr[i]);
220 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700221 free(ptr);
222}
223
224TEST(malloc, malloc_realloc_smaller) {
225 // Realloc to a smaller size, malloc is used for the original allocation.
226 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700227 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700228 ASSERT_LE(200U, malloc_usable_size(ptr));
229 memset(ptr, 67, 200);
230
231 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700232 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700233 ASSERT_LE(100U, malloc_usable_size(ptr));
234 for (size_t i = 0; i < 100; i++) {
235 ASSERT_EQ(67, ptr[i]);
236 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700237 free(ptr);
238}
239
240TEST(malloc, malloc_multiple_realloc) {
241 // Multiple reallocs, malloc is used for the original allocation.
242 char *ptr = (char *)malloc(200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700243 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700244 ASSERT_LE(200U, malloc_usable_size(ptr));
245 memset(ptr, 0x23, 200);
246
247 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700248 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700249 ASSERT_LE(100U, malloc_usable_size(ptr));
250 for (size_t i = 0; i < 100; i++) {
251 ASSERT_EQ(0x23, ptr[i]);
252 }
253
254 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700255 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700256 ASSERT_LE(50U, malloc_usable_size(ptr));
257 for (size_t i = 0; i < 50; i++) {
258 ASSERT_EQ(0x23, ptr[i]);
259 }
260
261 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700262 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700263 ASSERT_LE(150U, malloc_usable_size(ptr));
264 for (size_t i = 0; i < 50; i++) {
265 ASSERT_EQ(0x23, ptr[i]);
266 }
267 memset(ptr, 0x23, 150);
268
269 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700270 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700271 ASSERT_LE(425U, malloc_usable_size(ptr));
272 for (size_t i = 0; i < 150; i++) {
273 ASSERT_EQ(0x23, ptr[i]);
274 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700275 free(ptr);
276}
Christopher Ferrisa4037802014-06-09 19:14:11 -0700277
Christopher Ferris885f3b92013-05-21 17:48:01 -0700278TEST(malloc, calloc_realloc_larger) {
279 // Realloc to a larger size, calloc is used for the original allocation.
280 char *ptr = (char *)calloc(1, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700281 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700282 ASSERT_LE(100U, malloc_usable_size(ptr));
283
284 ptr = (char *)realloc(ptr, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700285 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700286 ASSERT_LE(200U, malloc_usable_size(ptr));
287 for (size_t i = 0; i < 100; i++) {
288 ASSERT_EQ(0, ptr[i]);
289 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700290 free(ptr);
291}
292
293TEST(malloc, calloc_realloc_smaller) {
294 // Realloc to a smaller size, calloc is used for the original allocation.
295 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700296 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700297 ASSERT_LE(200U, malloc_usable_size(ptr));
298
299 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700300 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700301 ASSERT_LE(100U, malloc_usable_size(ptr));
302 for (size_t i = 0; i < 100; i++) {
303 ASSERT_EQ(0, ptr[i]);
304 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700305 free(ptr);
306}
307
308TEST(malloc, calloc_multiple_realloc) {
309 // Multiple reallocs, calloc is used for the original allocation.
310 char *ptr = (char *)calloc(1, 200);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700311 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700312 ASSERT_LE(200U, malloc_usable_size(ptr));
313
314 ptr = (char *)realloc(ptr, 100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700315 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700316 ASSERT_LE(100U, malloc_usable_size(ptr));
317 for (size_t i = 0; i < 100; i++) {
318 ASSERT_EQ(0, ptr[i]);
319 }
320
321 ptr = (char*)realloc(ptr, 50);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700322 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700323 ASSERT_LE(50U, malloc_usable_size(ptr));
324 for (size_t i = 0; i < 50; i++) {
325 ASSERT_EQ(0, ptr[i]);
326 }
327
328 ptr = (char*)realloc(ptr, 150);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700329 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700330 ASSERT_LE(150U, malloc_usable_size(ptr));
331 for (size_t i = 0; i < 50; i++) {
332 ASSERT_EQ(0, ptr[i]);
333 }
334 memset(ptr, 0, 150);
335
336 ptr = (char*)realloc(ptr, 425);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700337 ASSERT_TRUE(ptr != nullptr);
Christopher Ferris885f3b92013-05-21 17:48:01 -0700338 ASSERT_LE(425U, malloc_usable_size(ptr));
339 for (size_t i = 0; i < 150; i++) {
340 ASSERT_EQ(0, ptr[i]);
341 }
Christopher Ferris885f3b92013-05-21 17:48:01 -0700342 free(ptr);
343}
Christopher Ferris72bbd422014-05-08 11:14:03 -0700344
Christopher Ferrisa4037802014-06-09 19:14:11 -0700345TEST(malloc, realloc_overflow) {
Evgenii Stepanovacd6f4f2018-11-06 16:48:27 -0800346 SKIP_WITH_HWASAN;
Christopher Ferrisa4037802014-06-09 19:14:11 -0700347 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700348 ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700349 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700350 void* ptr = malloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700351 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700352 errno = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700353 ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
Elliott Hughes95646e62023-09-21 14:11:19 -0700354 ASSERT_ERRNO(ENOMEM);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700355 free(ptr);
Christopher Ferris72bbd422014-05-08 11:14:03 -0700356}
357
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
// pvalloc and valloc are obsolete and no longer declared in the headers,
// so declare them manually for the tests below.
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif
Dan Alberte5fdaa42014-06-14 01:04:31 +0000362
Christopher Ferrisa4037802014-06-09 19:14:11 -0700363TEST(malloc, pvalloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700364#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700365 size_t pagesize = sysconf(_SC_PAGESIZE);
366 void* ptr = pvalloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700367 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700368 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
369 ASSERT_LE(pagesize, malloc_usable_size(ptr));
370 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700371#else
372 GTEST_SKIP() << "pvalloc not supported.";
373#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700374}
375
376TEST(malloc, pvalloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700377#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700378 ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700379#else
380 GTEST_SKIP() << "pvalloc not supported.";
381#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700382}
383
384TEST(malloc, valloc_std) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700385#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Christopher Ferrisa4037802014-06-09 19:14:11 -0700386 size_t pagesize = sysconf(_SC_PAGESIZE);
Christopher Ferrisd5ab0a52019-06-19 12:03:57 -0700387 void* ptr = valloc(100);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700388 ASSERT_TRUE(ptr != nullptr);
Christopher Ferrisa4037802014-06-09 19:14:11 -0700389 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
390 free(ptr);
Christopher Ferris804cebe2019-06-20 08:50:23 -0700391#else
392 GTEST_SKIP() << "valloc not supported.";
393#endif
Christopher Ferrisa4037802014-06-09 19:14:11 -0700394}
395
396TEST(malloc, valloc_overflow) {
Christopher Ferris804cebe2019-06-20 08:50:23 -0700397#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
Yi Kong32bc0fc2018-08-02 17:31:13 -0700398 ASSERT_EQ(nullptr, valloc(SIZE_MAX));
Christopher Ferris804cebe2019-06-20 08:50:23 -0700399#else
400 GTEST_SKIP() << "valloc not supported.";
Dan Alberte5fdaa42014-06-14 01:04:31 +0000401#endif
Christopher Ferris804cebe2019-06-20 08:50:23 -0700402}
Dan Albert4caa1f02014-08-20 09:16:57 -0700403
404TEST(malloc, malloc_info) {
405#ifdef __BIONIC__
Evgenii Stepanov8de6b462019-03-22 13:22:28 -0700406 SKIP_WITH_HWASAN; // hwasan does not implement malloc_info
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800407
408 TemporaryFile tf;
409 ASSERT_TRUE(tf.fd != -1);
410 FILE* fp = fdopen(tf.fd, "w+");
411 tf.release();
412 ASSERT_TRUE(fp != nullptr);
413 ASSERT_EQ(0, malloc_info(0, fp));
414 ASSERT_EQ(0, fclose(fp));
415
416 std::string contents;
417 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
Dan Albert4caa1f02014-08-20 09:16:57 -0700418
419 tinyxml2::XMLDocument doc;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800420 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
Dan Albert4caa1f02014-08-20 09:16:57 -0700421
422 auto root = doc.FirstChildElement();
423 ASSERT_NE(nullptr, root);
424 ASSERT_STREQ("malloc", root->Name());
Christopher Ferris85169652019-10-09 18:41:55 -0700425 std::string version(root->Attribute("version"));
426 if (version == "jemalloc-1") {
Christopher Ferris6c619a02019-03-01 17:59:51 -0800427 auto arena = root->FirstChildElement();
428 for (; arena != nullptr; arena = arena->NextSiblingElement()) {
429 int val;
Dan Albert4caa1f02014-08-20 09:16:57 -0700430
Christopher Ferris6c619a02019-03-01 17:59:51 -0800431 ASSERT_STREQ("heap", arena->Name());
432 ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
433 ASSERT_EQ(tinyxml2::XML_SUCCESS,
434 arena->FirstChildElement("allocated-large")->QueryIntText(&val));
435 ASSERT_EQ(tinyxml2::XML_SUCCESS,
436 arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
437 ASSERT_EQ(tinyxml2::XML_SUCCESS,
438 arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
439 ASSERT_EQ(tinyxml2::XML_SUCCESS,
440 arena->FirstChildElement("bins-total")->QueryIntText(&val));
Dan Albert4caa1f02014-08-20 09:16:57 -0700441
Christopher Ferris6c619a02019-03-01 17:59:51 -0800442 auto bin = arena->FirstChildElement("bin");
443 for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
444 if (strcmp(bin->Name(), "bin") == 0) {
445 ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
446 ASSERT_EQ(tinyxml2::XML_SUCCESS,
447 bin->FirstChildElement("allocated")->QueryIntText(&val));
448 ASSERT_EQ(tinyxml2::XML_SUCCESS,
449 bin->FirstChildElement("nmalloc")->QueryIntText(&val));
450 ASSERT_EQ(tinyxml2::XML_SUCCESS,
451 bin->FirstChildElement("ndalloc")->QueryIntText(&val));
452 }
Dan Albert4caa1f02014-08-20 09:16:57 -0700453 }
454 }
Christopher Ferriscce88c02020-02-12 17:41:01 -0800455 } else if (version == "scudo-1") {
456 auto element = root->FirstChildElement();
457 for (; element != nullptr; element = element->NextSiblingElement()) {
458 int val;
459
460 ASSERT_STREQ("alloc", element->Name());
461 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
462 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
463 }
Christopher Ferris6c619a02019-03-01 17:59:51 -0800464 } else {
Christopher Ferriscce88c02020-02-12 17:41:01 -0800465 // Do not verify output for debug malloc.
466 ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
Dan Albert4caa1f02014-08-20 09:16:57 -0700467 }
468#endif
469}
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800470
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700471TEST(malloc, malloc_info_matches_mallinfo) {
472#ifdef __BIONIC__
473 SKIP_WITH_HWASAN; // hwasan does not implement malloc_info
474
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800475 TemporaryFile tf;
476 ASSERT_TRUE(tf.fd != -1);
477 FILE* fp = fdopen(tf.fd, "w+");
478 tf.release();
479 ASSERT_TRUE(fp != nullptr);
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700480 size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800481 ASSERT_EQ(0, malloc_info(0, fp));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700482 size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800483 ASSERT_EQ(0, fclose(fp));
484
485 std::string contents;
486 ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700487
488 tinyxml2::XMLDocument doc;
Christopher Ferrisff88fb02019-11-04 18:40:00 -0800489 ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700490
491 size_t total_allocated_bytes = 0;
492 auto root = doc.FirstChildElement();
493 ASSERT_NE(nullptr, root);
494 ASSERT_STREQ("malloc", root->Name());
Christopher Ferris85169652019-10-09 18:41:55 -0700495 std::string version(root->Attribute("version"));
496 if (version == "jemalloc-1") {
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700497 auto arena = root->FirstChildElement();
498 for (; arena != nullptr; arena = arena->NextSiblingElement()) {
499 int val;
500
501 ASSERT_STREQ("heap", arena->Name());
502 ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
503 ASSERT_EQ(tinyxml2::XML_SUCCESS,
504 arena->FirstChildElement("allocated-large")->QueryIntText(&val));
505 total_allocated_bytes += val;
506 ASSERT_EQ(tinyxml2::XML_SUCCESS,
507 arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
508 total_allocated_bytes += val;
509 ASSERT_EQ(tinyxml2::XML_SUCCESS,
510 arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
511 total_allocated_bytes += val;
512 ASSERT_EQ(tinyxml2::XML_SUCCESS,
513 arena->FirstChildElement("bins-total")->QueryIntText(&val));
514 }
515 // The total needs to be between the mallinfo call before and after
516 // since malloc_info allocates some memory.
517 EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
518 EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
Christopher Ferriscce88c02020-02-12 17:41:01 -0800519 } else if (version == "scudo-1") {
520 auto element = root->FirstChildElement();
521 for (; element != nullptr; element = element->NextSiblingElement()) {
522 ASSERT_STREQ("alloc", element->Name());
523 int size;
524 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
525 int count;
526 ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
527 total_allocated_bytes += size * count;
528 }
529 // Scudo only gives the information on the primary, so simply make
530 // sure that the value is non-zero.
531 EXPECT_NE(0U, total_allocated_bytes);
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700532 } else {
Christopher Ferriscce88c02020-02-12 17:41:01 -0800533 // Do not verify output for debug malloc.
534 ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
Christopher Ferrisdb9706a2019-05-02 18:33:11 -0700535 }
536#endif
537}
538
Christopher Ferrisad33ebe2015-12-16 12:07:25 -0800539TEST(malloc, calloc_usable_size) {
540 for (size_t size = 1; size <= 2048; size++) {
541 void* pointer = malloc(size);
542 ASSERT_TRUE(pointer != nullptr);
543 memset(pointer, 0xeb, malloc_usable_size(pointer));
544 free(pointer);
545
546 // We should get a previous pointer that has been set to non-zero.
547 // If calloc does not zero out all of the data, this will fail.
548 uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
549 ASSERT_TRUE(pointer != nullptr);
550 size_t usable_size = malloc_usable_size(zero_mem);
551 for (size_t i = 0; i < usable_size; i++) {
552 ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
553 }
554 free(zero_mem);
555 }
556}
Elliott Hughes884f76e2016-02-10 20:43:22 -0800557
558TEST(malloc, malloc_0) {
559 void* p = malloc(0);
560 ASSERT_TRUE(p != nullptr);
561 free(p);
562}
563
564TEST(malloc, calloc_0_0) {
565 void* p = calloc(0, 0);
566 ASSERT_TRUE(p != nullptr);
567 free(p);
568}
569
570TEST(malloc, calloc_0_1) {
571 void* p = calloc(0, 1);
572 ASSERT_TRUE(p != nullptr);
573 free(p);
574}
575
576TEST(malloc, calloc_1_0) {
577 void* p = calloc(1, 0);
578 ASSERT_TRUE(p != nullptr);
579 free(p);
580}
581
582TEST(malloc, realloc_nullptr_0) {
583 // realloc(nullptr, size) is actually malloc(size).
584 void* p = realloc(nullptr, 0);
585 ASSERT_TRUE(p != nullptr);
586 free(p);
587}
588
589TEST(malloc, realloc_0) {
590 void* p = malloc(1024);
591 ASSERT_TRUE(p != nullptr);
592 // realloc(p, 0) is actually free(p).
593 void* p2 = realloc(p, 0);
594 ASSERT_TRUE(p2 == nullptr);
595}
Christopher Ferris72df6702016-02-11 15:51:31 -0800596
// Number of iterations used by the alignment checks below.
constexpr size_t MAX_LOOPS = 200;
598
599// Make sure that memory returned by malloc is aligned to allow these data types.
600TEST(malloc, verify_alignment) {
601 uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
602 uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
603 long double** values_ldouble = new long double*[MAX_LOOPS];
604 // Use filler to attempt to force the allocator to get potentially bad alignments.
605 void** filler = new void*[MAX_LOOPS];
606
607 for (size_t i = 0; i < MAX_LOOPS; i++) {
608 // Check uint32_t pointers.
609 filler[i] = malloc(1);
610 ASSERT_TRUE(filler[i] != nullptr);
611
612 values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
613 ASSERT_TRUE(values_32[i] != nullptr);
614 *values_32[i] = i;
615 ASSERT_EQ(*values_32[i], i);
616 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));
617
618 free(filler[i]);
619 }
620
621 for (size_t i = 0; i < MAX_LOOPS; i++) {
622 // Check uint64_t pointers.
623 filler[i] = malloc(1);
624 ASSERT_TRUE(filler[i] != nullptr);
625
626 values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
627 ASSERT_TRUE(values_64[i] != nullptr);
628 *values_64[i] = 0x1000 + i;
629 ASSERT_EQ(*values_64[i], 0x1000 + i);
630 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));
631
632 free(filler[i]);
633 }
634
635 for (size_t i = 0; i < MAX_LOOPS; i++) {
636 // Check long double pointers.
637 filler[i] = malloc(1);
638 ASSERT_TRUE(filler[i] != nullptr);
639
640 values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
641 ASSERT_TRUE(values_ldouble[i] != nullptr);
642 *values_ldouble[i] = 5.5 + i;
643 ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
644 // 32 bit glibc has a long double size of 12 bytes, so hardcode the
645 // required alignment to 0x7.
646#if !defined(__BIONIC__) && !defined(__LP64__)
647 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
648#else
649 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
650#endif
651
652 free(filler[i]);
653 }
654
655 for (size_t i = 0; i < MAX_LOOPS; i++) {
656 free(values_32[i]);
657 free(values_64[i]);
658 free(values_ldouble[i]);
659 }
660
661 delete[] filler;
662 delete[] values_32;
663 delete[] values_64;
664 delete[] values_ldouble;
665}
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700666
667TEST(malloc, mallopt_smoke) {
Christopher Ferris2ef59372023-01-18 15:08:37 -0800668#if defined(__BIONIC__)
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700669 errno = 0;
670 ASSERT_EQ(0, mallopt(-1000, 1));
671 // mallopt doesn't set errno.
Elliott Hughes95646e62023-09-21 14:11:19 -0700672 ASSERT_ERRNO(0);
Colin Cross7da20342021-07-28 11:18:11 -0700673#else
Christopher Ferris2ef59372023-01-18 15:08:37 -0800674 GTEST_SKIP() << "bionic-only test";
Colin Cross7da20342021-07-28 11:18:11 -0700675#endif
Christopher Ferrisa1c0d2f2017-05-15 15:50:19 -0700676}
Elliott Hughesb1770852018-09-18 12:52:42 -0700677
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800678TEST(malloc, mallopt_decay) {
679#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800680 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800681 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
682 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
683 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
684 ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
685#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800686 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800687#endif
688}
689
690TEST(malloc, mallopt_purge) {
691#if defined(__BIONIC__)
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800692 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800693 ASSERT_EQ(1, mallopt(M_PURGE, 0));
694#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800695 GTEST_SKIP() << "bionic-only test";
Christopher Ferrisaf1b8dd2018-11-07 15:28:16 -0800696#endif
697}
698
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800699TEST(malloc, mallopt_purge_all) {
700#if defined(__BIONIC__)
701 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800702 ASSERT_EQ(1, mallopt(M_PURGE_ALL, 0));
703#else
704 GTEST_SKIP() << "bionic-only test";
705#endif
706}
707
Christopher Ferrise9a7b812023-05-11 15:36:27 -0700708TEST(malloc, mallopt_log_stats) {
709#if defined(__BIONIC__)
710 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
711 ASSERT_EQ(1, mallopt(M_LOG_STATS, 0));
712#else
713 GTEST_SKIP() << "bionic-only test";
714#endif
715}
716
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800717// Verify that all of the mallopt values are unique.
718TEST(malloc, mallopt_unique_params) {
719#if defined(__BIONIC__)
720 std::vector<std::pair<int, std::string>> params{
721 std::make_pair(M_DECAY_TIME, "M_DECAY_TIME"),
722 std::make_pair(M_PURGE, "M_PURGE"),
723 std::make_pair(M_PURGE_ALL, "M_PURGE_ALL"),
724 std::make_pair(M_MEMTAG_TUNING, "M_MEMTAG_TUNING"),
725 std::make_pair(M_THREAD_DISABLE_MEM_INIT, "M_THREAD_DISABLE_MEM_INIT"),
726 std::make_pair(M_CACHE_COUNT_MAX, "M_CACHE_COUNT_MAX"),
727 std::make_pair(M_CACHE_SIZE_MAX, "M_CACHE_SIZE_MAX"),
728 std::make_pair(M_TSDS_COUNT_MAX, "M_TSDS_COUNT_MAX"),
729 std::make_pair(M_BIONIC_ZERO_INIT, "M_BIONIC_ZERO_INIT"),
730 std::make_pair(M_BIONIC_SET_HEAP_TAGGING_LEVEL, "M_BIONIC_SET_HEAP_TAGGING_LEVEL"),
Christopher Ferrise9a7b812023-05-11 15:36:27 -0700731 std::make_pair(M_LOG_STATS, "M_LOG_STATS"),
Christopher Ferrisd86eb862023-02-28 12:45:54 -0800732 };
733
734 std::unordered_map<int, std::string> all_params;
735 for (const auto& param : params) {
736 EXPECT_TRUE(all_params.count(param.first) == 0)
737 << "mallopt params " << all_params[param.first] << " and " << param.second
738 << " have the same value " << param.first;
739 all_params.insert(param);
740 }
741#else
742 GTEST_SKIP() << "bionic-only test";
743#endif
744}
745
#if defined(__BIONIC__)
// Determines whether the current native allocator is scudo by parsing the
// XML emitted by malloc_info(). If malloc_info() fails, the allocator is
// reported as not scudo.
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  // fp now owns the fd; release it from the TemporaryFile.
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  if (malloc_info(0, fp) != 0) {
    *allocator_scudo = false;
    // Bug fix: fp (and the fd released from tf) used to leak on this
    // early-return path.
    fclose(fp);
    return;
  }
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // The root element of the malloc_info() output is <malloc version="...">.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif
772
773TEST(malloc, mallopt_scudo_only_options) {
774#if defined(__BIONIC__)
775 SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
776 bool allocator_scudo;
777 GetAllocatorVersion(&allocator_scudo);
778 if (!allocator_scudo) {
779 GTEST_SKIP() << "scudo allocator only test";
780 }
781 ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
782 ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
783 ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
784#else
785 GTEST_SKIP() << "bionic-only test";
786#endif
787}
788
Elliott Hughesb1770852018-09-18 12:52:42 -0700789TEST(malloc, reallocarray_overflow) {
790#if HAVE_REALLOCARRAY
791 // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
792 size_t a = static_cast<size_t>(INTPTR_MIN + 4);
793 size_t b = 2;
794
795 errno = 0;
796 ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
Elliott Hughes95646e62023-09-21 14:11:19 -0700797 ASSERT_ERRNO(ENOMEM);
Elliott Hughesb1770852018-09-18 12:52:42 -0700798
799 errno = 0;
800 ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
Elliott Hughes95646e62023-09-21 14:11:19 -0700801 ASSERT_ERRNO(ENOMEM);
Elliott Hughesb1770852018-09-18 12:52:42 -0700802#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800803 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700804#endif
805}
806
807TEST(malloc, reallocarray) {
808#if HAVE_REALLOCARRAY
809 void* p = reallocarray(nullptr, 2, 32);
810 ASSERT_TRUE(p != nullptr);
811 ASSERT_GE(malloc_usable_size(p), 64U);
812#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -0800813 GTEST_SKIP() << "reallocarray not available";
Elliott Hughesb1770852018-09-18 12:52:42 -0700814#endif
815}
Christopher Ferris09a19aa2018-11-16 13:28:56 -0800816
// Checks that mallinfo().uordblks (total allocated space) increases when new
// allocations are made. Because a thread cache may absorb small allocations,
// each size is retried up to kMaxAllocs times until the counter moves by at
// least the usable size of the new allocation.
TEST(malloc, mallinfo) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  // A spread of sizes from small bins up to mmap-sized allocations.
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  // Upper bound on retries (and on live pointers) per size.
  static constexpr size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Release every pointer allocated for this size before moving on.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
Ryan Savitskiecc37e32018-12-14 15:57:21 +0000861
// Same structure as the mallinfo test above, but additionally verifies that
// every field of mallinfo() matches the corresponding (wider) field of
// mallinfo2() both before and after each allocation.
TEST(malloc, mallinfo2) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
  static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};

  // Upper bound on retries (and on live pointers) per size.
  static constexpr size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      struct mallinfo info = mallinfo();
      struct mallinfo2 info2 = mallinfo2();
      // Verify that mallinfo and mallinfo2 are exactly the same.
      ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
      ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
      ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
      ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
      ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
      ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
      ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
      ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
      ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
      ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);

      size_t allocated = info2.uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      // Re-read both structures after the allocation and re-check agreement.
      info = mallinfo();
      info2 = mallinfo2();
      // Verify that mallinfo and mallinfo2 are exactly the same.
      ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
      ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
      ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
      ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
      ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
      ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
      ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
      ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
      ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
      ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);

      size_t new_allocated = info2.uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo2 numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Release every pointer allocated for this size before moving on.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after "
                      << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
932
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800933template <typename Type>
934void __attribute__((optnone)) VerifyAlignment(Type* floating) {
935 size_t expected_alignment = alignof(Type);
936 if (expected_alignment != 0) {
937 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
Ryan Prichardf40f2582024-01-09 16:29:20 -0800938 << "Expected alignment " << expected_alignment << " ptr value "
939 << static_cast<void*>(floating);
Christopher Ferrisf32494c2020-01-08 14:19:10 -0800940 }
941}
942
// Allocates Type many times via operator new, malloc, and std::vector, and
// verifies each resulting pointer meets Type's natural alignment. Returns
// early (propagating the fatal failure) as soon as one check fails.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
985
#if defined(__ANDROID__)
// Allocates `alloc_size` bytes 100 times and asserts each returned pointer
// is aligned to at least `aligned_bytes` (must be a power of two). Marked
// optnone so the checks cannot be optimized away.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  const uintptr_t mask = aligned_bytes - 1;
  for (auto& ptr : ptrs) {
    ptr = malloc(alloc_size);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptr;
  }
}
#endif
999
// Exercises alignment for every fundamental type through new/malloc/vector,
// then (on Android) checks the platform's minimum-alignment guarantees for
// raw malloc sizes 1..128.
void AlignCheck() {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  // 1. Allocations of a size that rounds up to a multiple of 16 bytes
  //    must have at least 16 byte alignment.
  // 2. Allocations of a size that rounds up to a multiple of 8 bytes and
  //    not 16 bytes, are only required to have at least 8 byte alignment.
  // In addition, on Android clang has been configured for 64 bit such that:
  // 3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
  // 4. Allocations > 8 bytes must be aligned to at least 16 bytes.
  // For 32 bit environments, only the first two requirements must be met.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  // As mentioned before, for 64 bit this will enforce the higher
  // requirement since clang expects this behavior on Android now.
  for (size_t i = 1; i <= 128; i++) {
#if defined(__LP64__)
    if (i <= 8) {
      AndroidVerifyAlignment(i, 8);
    } else {
      AndroidVerifyAlignment(i, 16);
    }
#else
    // Round the requested size up to the next multiple of 8 to pick the
    // required alignment class (rule 1 vs rule 2 above).
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
#endif
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}
1059
// Verifies malloc/new alignment guarantees for all fundamental types; the
// actual work is in AlignCheck() so it can also be reused elsewhere.
TEST(malloc, align_check) {
  AlignCheck();
}
1063
// Jemalloc doesn't pass this test right now, so leave it as disabled.
// Stress test: while several threads continuously malloc/free a range of
// sizes, repeatedly fork and verify the child can still allocate and touch
// memory of every size (i.e. the allocator's fork handlers leave it usable).
TEST(malloc, DISABLED_alloc_after_fork) {
  // Both of these need to be a power of 2.
  static constexpr size_t kMinAllocationSize = 8;
  static constexpr size_t kMaxAllocationSize = 2097152;

  static constexpr size_t kNumAllocatingThreads = 5;
  static constexpr size_t kNumForkLoops = 100;

  std::atomic_bool stop;

  // Create threads that simply allocate and free different sizes.
  std::vector<std::thread*> threads;
  for (size_t i = 0; i < kNumAllocatingThreads; i++) {
    std::thread* t = new std::thread([&stop] {
      while (!stop) {
        for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
          void* ptr;
          // DoNotOptimize keeps the allocation from being elided.
          DoNotOptimize(ptr = malloc(size));
          free(ptr);
        }
      }
    });
    threads.push_back(t);
  }

  // Create a thread to fork and allocate.
  for (size_t i = 0; i < kNumForkLoops; i++) {
    pid_t pid;
    if ((pid = fork()) == 0) {
      for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
        void* ptr;
        DoNotOptimize(ptr = malloc(size));
        ASSERT_TRUE(ptr != nullptr);
        // Make sure we can touch all of the allocation.
        memset(ptr, 0x1, size);
        ASSERT_LE(size, malloc_usable_size(ptr));
        free(ptr);
      }
      // Child exits with a known status so the parent can check it.
      _exit(10);
    }
    ASSERT_NE(-1, pid);
    AssertChildExited(pid, 10);
  }

  // Shut down the allocating threads.
  stop = true;
  for (auto thread : threads) {
    thread->join();
    delete thread;
  }
}
1115
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001116TEST(android_mallopt, error_on_unexpected_option) {
1117#if defined(__BIONIC__)
1118 const int unrecognized_option = -1;
1119 errno = 0;
1120 EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
Elliott Hughes95646e62023-09-21 14:11:19 -07001121 EXPECT_ERRNO(ENOTSUP);
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001122#else
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001123 GTEST_SKIP() << "bionic-only test";
Ryan Savitskiecc37e32018-12-14 15:57:21 +00001124#endif
1125}
1126
Christopher Ferrise4cdbc42019-02-08 17:30:58 -08001127bool IsDynamic() {
1128#if defined(__LP64__)
1129 Elf64_Ehdr ehdr;
1130#else
1131 Elf32_Ehdr ehdr;
1132#endif
1133 std::string path(android::base::GetExecutablePath());
1134
1135 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
1136 if (fd == -1) {
1137 // Assume dynamic on error.
1138 return true;
1139 }
1140 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
1141 close(fd);
1142 // Assume dynamic in error cases.
1143 return !read_completed || ehdr.e_type == ET_DYN;
1144}
1145
// M_INIT_ZYGOTE_CHILD_PROFILING should succeed for dynamic executables,
// fail with ENOTSUP for static ones, and reject unexpected arguments.
TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_ERRNO(0);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_ERRNO(ENOTSUP);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    // Dynamic executables reach the argument check and report EINVAL.
    EXPECT_ERRNO(EINVAL);
  } else {
    // Static executables fail earlier with ENOTSUP regardless of arguments.
    EXPECT_ERRNO(ENOTSUP);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001172
#if defined(__BIONIC__)
// Sets a 128MB allocation limit, then exits 0 iff `func` succeeds for a
// request below the limit and fails for one at the limit. Used with
// EXPECT_EXIT by the set_allocation_limit test.
// Assumes that no more than 108MB of memory is allocated before this.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  // Below the limit: must succeed.
  if (!func(20 * 1024 * 1024)) {
    exit(1);
  }
  // At the limit: must fail.
  if (func(128 * 1024 * 1024)) {
    exit(1);
  }
  exit(0);
}
#endif
1186
// Verifies that every allocation entry point (calloc, malloc, memalign,
// posix_memalign, aligned_alloc, realloc, and the legacy pvalloc/valloc on
// 32 bit) honors the M_SET_ALLOCATION_LIMIT_BYTES limit. Each case runs in
// a child process (EXPECT_EXIT) because the limit can only be set once.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc/valloc not exported in 64 bit bionic.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1221
1222TEST(android_mallopt, set_allocation_limit_multiple) {
1223#if defined(__BIONIC__)
1224 // Only the first set should work.
1225 size_t limit = 256 * 1024 * 1024;
1226 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1227 limit = 32 * 1024 * 1024;
1228 ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1229#else
Elliott Hughes10907202019-03-27 08:51:02 -07001230 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001231#endif
1232}
1233
1234#if defined(__BIONIC__)
1235static constexpr size_t kAllocationSize = 8 * 1024 * 1024;
1236
1237static size_t GetMaxAllocations() {
1238 size_t max_pointers = 0;
1239 void* ptrs[20];
1240 for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
1241 ptrs[i] = malloc(kAllocationSize);
1242 if (ptrs[i] == nullptr) {
1243 max_pointers = i;
1244 break;
1245 }
1246 }
1247 for (size_t i = 0; i < max_pointers; i++) {
1248 free(ptrs[i]);
1249 }
1250 return max_pointers;
1251}
1252
1253static void VerifyMaxPointers(size_t max_pointers) {
1254 // Now verify that we can allocate the same number as before.
1255 void* ptrs[20];
1256 for (size_t i = 0; i < max_pointers; i++) {
1257 ptrs[i] = malloc(kAllocationSize);
1258 ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
1259 }
1260
1261 // Make sure the next allocation still fails.
1262 ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
1263 for (size_t i = 0; i < max_pointers; i++) {
1264 free(ptrs[i]);
1265 }
1266}
1267#endif
1268
1269TEST(android_mallopt, set_allocation_limit_realloc_increase) {
1270#if defined(__BIONIC__)
1271 size_t limit = 128 * 1024 * 1024;
1272 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1273
1274 size_t max_pointers = GetMaxAllocations();
1275 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1276
1277 void* memory = malloc(10 * 1024 * 1024);
1278 ASSERT_TRUE(memory != nullptr);
1279
1280 // Increase size.
1281 memory = realloc(memory, 20 * 1024 * 1024);
1282 ASSERT_TRUE(memory != nullptr);
1283 memory = realloc(memory, 40 * 1024 * 1024);
1284 ASSERT_TRUE(memory != nullptr);
1285 memory = realloc(memory, 60 * 1024 * 1024);
1286 ASSERT_TRUE(memory != nullptr);
1287 memory = realloc(memory, 80 * 1024 * 1024);
1288 ASSERT_TRUE(memory != nullptr);
1289 // Now push past limit.
1290 memory = realloc(memory, 130 * 1024 * 1024);
1291 ASSERT_TRUE(memory == nullptr);
1292
1293 VerifyMaxPointers(max_pointers);
1294#else
Elliott Hughes10907202019-03-27 08:51:02 -07001295 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001296#endif
1297}
1298
1299TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
1300#if defined(__BIONIC__)
1301 size_t limit = 100 * 1024 * 1024;
1302 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1303
1304 size_t max_pointers = GetMaxAllocations();
1305 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1306
1307 void* memory = malloc(80 * 1024 * 1024);
1308 ASSERT_TRUE(memory != nullptr);
1309
1310 // Decrease size.
1311 memory = realloc(memory, 60 * 1024 * 1024);
1312 ASSERT_TRUE(memory != nullptr);
1313 memory = realloc(memory, 40 * 1024 * 1024);
1314 ASSERT_TRUE(memory != nullptr);
1315 memory = realloc(memory, 20 * 1024 * 1024);
1316 ASSERT_TRUE(memory != nullptr);
1317 memory = realloc(memory, 10 * 1024 * 1024);
1318 ASSERT_TRUE(memory != nullptr);
1319 free(memory);
1320
1321 VerifyMaxPointers(max_pointers);
1322#else
Elliott Hughes10907202019-03-27 08:51:02 -07001323 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001324#endif
1325}
1326
1327TEST(android_mallopt, set_allocation_limit_realloc_free) {
1328#if defined(__BIONIC__)
1329 size_t limit = 100 * 1024 * 1024;
1330 ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
1331
1332 size_t max_pointers = GetMaxAllocations();
1333 ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
1334
1335 void* memory = malloc(60 * 1024 * 1024);
1336 ASSERT_TRUE(memory != nullptr);
1337
1338 memory = realloc(memory, 0);
1339 ASSERT_TRUE(memory == nullptr);
1340
1341 VerifyMaxPointers(max_pointers);
1342#else
Elliott Hughes10907202019-03-27 08:51:02 -07001343 GTEST_SKIP() << "bionic extension";
Christopher Ferris1fc5ccf2019-02-15 18:06:15 -08001344#endif
1345}
1346
#if defined(__BIONIC__)
// Races kNumThreads threads all trying to set the allocation limit at once,
// while the heapprofd signal handler is being installed, and asserts that
// exactly one thread wins. Runs in a forked child (callers expect _exit(0)).
static void SetAllocationLimitMultipleThreads() {
  static constexpr size_t kNumThreads = 4;
  std::atomic_bool start_running = false;
  // Bug fix: these atomics were default-initialized, which leaves their
  // values indeterminate before C++20; initialize them to zero explicitly.
  std::atomic<size_t> num_running = 0;
  std::atomic<size_t> num_successful = 0;
  std::unique_ptr<std::thread> threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    threads[i].reset(new std::thread([&num_running, &start_running, &num_successful] {
      ++num_running;
      // Spin until the main thread releases everyone at once.
      while (!start_running) {
      }
      size_t limit = 500 * 1024 * 1024;
      if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
        ++num_successful;
      }
    }));
  }

  // Wait until all of the threads have started.
  while (num_running != kNumThreads)
    ;

  // Now start all of the threads setting the mallopt at once.
  start_running = true;

  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler. This will verify that changing the limit while
  // the allocation handlers are being changed at the same time works,
  // or that the limit handler is changed first and this also works properly.
  union sigval signal_value {};
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Wait for all of the threads to finish.
  for (size_t i = 0; i < kNumThreads; i++) {
    threads[i]->join();
  }
  ASSERT_EQ(1U, num_successful) << "Only one thread should be able to set the limit.";
  _exit(0);
}
#endif
1388
// Stress test: repeatedly forks a child that races several threads setting
// the allocation limit (see SetAllocationLimitMultipleThreads) and checks
// the child exits cleanly each time.
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  // Install the profiling signal handler first so the child races against it.
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001411
#if defined(__BIONIC__)
using Mode = android_mallopt_gwp_asan_options_t::Mode;
// Helper test, only run indirectly via multiple_enable_gwp_asan below, in a
// subprocess where GWP-ASan is already force-enabled.
TEST(android_mallopt, DISABLED_multiple_enable_gwp_asan) {
  android_mallopt_gwp_asan_options_t options;
  options.program_name = "";  // Don't infer GWP-ASan options from sysprops.
  options.mode = Mode::APP_MANIFEST_NEVER;
  // GWP-ASan should already be enabled. Trying to enable or disable it should
  // always pass.
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
  options.mode = Mode::APP_MANIFEST_DEFAULT;
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
}
#endif  // defined(__BIONIC__)
Christopher Ferris8f9713e2021-09-20 17:25:46 -07001425
// Runs DISABLED_multiple_enable_gwp_asan (above) in a subprocess where
// GWP-ASan has been force-enabled with default options.
TEST(android_mallopt, multiple_enable_gwp_asan) {
#if defined(__BIONIC__)
  // Always enable GWP-Asan, with default options.
  RunGwpAsanTest("*.DISABLED_multiple_enable_gwp_asan");
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1434
// Only checks that the M_MEMTAG_STACK_IS_ON query itself succeeds; the value
// it reports depends on device/process configuration, so it is not asserted.
TEST(android_mallopt, memtag_stack_is_on) {
#if defined(__BIONIC__)
  bool memtag_stack;  // Output parameter; written by android_mallopt on success.
  EXPECT_TRUE(android_mallopt(M_MEMTAG_STACK_IS_ON, &memtag_stack, sizeof(memtag_stack)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1443
Mitch Phillips9cad8422021-01-20 16:03:27 -08001444void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1445 std::vector<void*> allocs;
1446 constexpr int kMaxBytesToCheckZero = 64;
1447 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1448
1449 for (int i = 0; i < num_iterations; ++i) {
1450 int size = get_alloc_size(i);
1451 allocs.push_back(malloc(size));
1452 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1453 }
1454
1455 for (void* alloc : allocs) {
1456 free(alloc);
1457 }
1458 allocs.clear();
1459
1460 for (int i = 0; i < num_iterations; ++i) {
1461 int size = get_alloc_size(i);
1462 allocs.push_back(malloc(size));
1463 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1464 }
1465
1466 for (void* alloc : allocs) {
1467 free(alloc);
1468 }
1469}
1470
// Verifies that M_BIONIC_ZERO_INIT makes Scudo return zeroed memory for both
// primary (small) and secondary (large) allocations.
TEST(malloc, zero_init) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Turn on zero-initialization of heap memory.
  mallopt(M_BIONIC_ZERO_INIT, 1);

  // Test using a block of 4K small (1-32 byte) allocations.
  TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
    return 1 + iteration % 32;
  });

  // Also test large allocations that land in the scudo secondary, as this is
  // the only part of Scudo that's changed by enabling zero initialization with
  // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
  // release secondary allocations back to the OS) was modified to 0ms/1ms by
  // mallopt_decay. Ensure that we delay for at least a second before releasing
  // pages to the OS in order to avoid implicit zeroing by the kernel.
  mallopt(M_DECAY_TIME, 1000);
  TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
    return 1 << (19 + iteration % 4);  // 512KiB .. 4MiB per allocation
  });

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1502
1503// Note that MTE is enabled on cc_tests on devices that support MTE.
1504TEST(malloc, disable_mte) {
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001505#if defined(__BIONIC__)
1506 if (!mte_supported()) {
1507 GTEST_SKIP() << "This function can only be tested with MTE";
1508 }
1509
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001510 sem_t sem;
1511 ASSERT_EQ(0, sem_init(&sem, 0, 0));
1512
1513 pthread_t thread;
1514 ASSERT_EQ(0, pthread_create(
1515 &thread, nullptr,
1516 [](void* ptr) -> void* {
1517 auto* sem = reinterpret_cast<sem_t*>(ptr);
1518 sem_wait(sem);
1519 return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
1520 },
1521 &sem));
1522
Mitch Phillips9cad8422021-01-20 16:03:27 -08001523 ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001524 ASSERT_EQ(0, sem_post(&sem));
1525
1526 int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
Christopher Ferris2abfa9e2021-11-01 16:26:06 -07001527 ASSERT_EQ(static_cast<unsigned long>(PR_MTE_TCF_NONE), my_tagged_addr_ctrl & PR_MTE_TCF_MASK);
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001528
1529 void* retval;
1530 ASSERT_EQ(0, pthread_join(thread, &retval));
1531 int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
1532 ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
Peter Collingbourne5d3aa862020-09-11 15:05:17 -07001533#else
1534 GTEST_SKIP() << "bionic extension";
1535#endif
1536}
Peter Collingbourne2659d7b2021-03-05 13:31:41 -08001537
// Verifies the Scudo "allocation slack" compatibility behavior: with an older
// target SDK level set, reading a few bytes past the end of a large
// allocation must not crash the process.
TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end of
  // a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  volatile char *vp = p.get();
  // Deliberate one-byte out-of-bounds read; volatile keeps the compiler from
  // optimizing the access away. The test passes if this does not crash.
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
Evgenii Stepanovf0d7a342021-11-16 17:34:39 -08001558
1559// Regression test for b/206701345 -- scudo bug, MTE only.
1560// Fix: https://reviews.llvm.org/D105261
1561// Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
1562TEST(malloc, realloc_mte_crash_b206701345) {
1563 // We want to hit in-place realloc at the very end of an mmap-ed region. Not
1564 // all size classes allow such placement - mmap size has to be divisible by
1565 // the block size. At the time of writing this could only be reproduced with
1566 // 64 byte size class (i.e. 48 byte allocations), but that may change in the
1567 // future. Try several different classes at the lower end.
1568 std::vector<void*> ptrs(10000);
1569 for (int i = 1; i < 32; ++i) {
1570 size_t sz = 16 * i - 1;
1571 for (void*& p : ptrs) {
1572 p = realloc(malloc(sz), sz + 1);
1573 }
1574
1575 for (void* p : ptrs) {
1576 free(p);
1577 }
1578 }
1579}
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001580
1581void VerifyAllocationsAreZero(std::function<void*(size_t)> alloc_func, std::string function_name,
1582 std::vector<size_t>& test_sizes, size_t max_allocations) {
1583 // Vector of zero'd data used for comparisons. Make it twice the largest size.
1584 std::vector<char> zero(test_sizes.back() * 2, 0);
1585
1586 SCOPED_TRACE(testing::Message() << function_name << " failed to zero memory");
1587
1588 for (size_t test_size : test_sizes) {
1589 std::vector<void*> ptrs(max_allocations);
1590 for (size_t i = 0; i < ptrs.size(); i++) {
1591 SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
1592 ptrs[i] = alloc_func(test_size);
1593 ASSERT_TRUE(ptrs[i] != nullptr);
1594 size_t alloc_size = malloc_usable_size(ptrs[i]);
1595 ASSERT_LE(alloc_size, zero.size());
1596 ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
1597
1598 // Set the memory to non-zero to make sure if the pointer
1599 // is reused it's still zero.
1600 memset(ptrs[i], 0xab, alloc_size);
1601 }
1602 // Free the pointers.
1603 for (size_t i = 0; i < ptrs.size(); i++) {
1604 free(ptrs[i]);
1605 }
1606 for (size_t i = 0; i < ptrs.size(); i++) {
1607 SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
1608 ptrs[i] = malloc(test_size);
1609 ASSERT_TRUE(ptrs[i] != nullptr);
1610 size_t alloc_size = malloc_usable_size(ptrs[i]);
1611 ASSERT_LE(alloc_size, zero.size());
1612 ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
1613 }
1614 // Free all of the pointers later to maximize the chance of reusing from
1615 // the first loop.
1616 for (size_t i = 0; i < ptrs.size(); i++) {
1617 free(ptrs[i]);
1618 }
1619 }
1620}
1621
1622// Verify that small and medium allocations are always zero.
Christopher Ferris59075562023-04-04 14:37:26 -07001623// @CddTest = 9.7/C-4-1
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001624TEST(malloc, zeroed_allocations_small_medium_sizes) {
1625#if !defined(__BIONIC__)
1626 GTEST_SKIP() << "Only valid on bionic";
1627#endif
1628
1629 if (IsLowRamDevice()) {
1630 GTEST_SKIP() << "Skipped on low memory devices.";
1631 }
1632
1633 constexpr size_t kMaxAllocations = 1024;
1634 std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
1635 VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
1636 kMaxAllocations);
1637
1638 VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
1639 test_sizes, kMaxAllocations);
1640
1641 VerifyAllocationsAreZero(
1642 [](size_t size) -> void* {
1643 void* ptr;
1644 if (posix_memalign(&ptr, 64, size) == 0) {
1645 return ptr;
1646 }
1647 return nullptr;
1648 },
1649 "posix_memalign", test_sizes, kMaxAllocations);
1650}
1651
1652// Verify that large allocations are always zero.
Christopher Ferris59075562023-04-04 14:37:26 -07001653// @CddTest = 9.7/C-4-1
Christopher Ferris02b6bbc2022-06-02 15:20:23 -07001654TEST(malloc, zeroed_allocations_large_sizes) {
1655#if !defined(__BIONIC__)
1656 GTEST_SKIP() << "Only valid on bionic";
1657#endif
1658
1659 if (IsLowRamDevice()) {
1660 GTEST_SKIP() << "Skipped on low memory devices.";
1661 }
1662
1663 constexpr size_t kMaxAllocations = 20;
1664 std::vector<size_t> test_sizes = {1000000, 2000000, 3000000, 4000000};
1665 VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
1666 kMaxAllocations);
1667
1668 VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
1669 test_sizes, kMaxAllocations);
1670
1671 VerifyAllocationsAreZero(
1672 [](size_t size) -> void* {
1673 void* ptr;
1674 if (posix_memalign(&ptr, 64, size) == 0) {
1675 return ptr;
1676 }
1677 return nullptr;
1678 },
1679 "posix_memalign", test_sizes, kMaxAllocations);
1680}
1681
// Verify that reallocs are zeroed when expanded.
// @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_realloc) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  // Vector of zero'd data used for comparisons.
  constexpr size_t kMaxMemorySize = 131072;
  std::vector<char> zero(kMaxMemorySize, 0);

  constexpr size_t kMaxAllocations = 1024;
  std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
  // Do a number of allocations and set them to non-zero. This primes the heap
  // with dirtied, freed blocks so the realloc expansions below are likely to
  // land on reused (previously non-zero) memory.
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      // Set the memory to non-zero to make sure if the pointer
      // is reused it's still zero.
      memset(ptrs[i], 0xab, malloc_usable_size(ptrs[i]));
    }
    // Free the pointers.
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }

  // Do the reallocs to a larger size and verify the rest of the allocation
  // is zero.
  constexpr size_t kInitialSize = 8;
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(kInitialSize);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t orig_alloc_size = malloc_usable_size(ptrs[i]);

      ptrs[i] = realloc(ptrs[i], test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_alloc_size = malloc_usable_size(ptrs[i]);
      char* ptr = reinterpret_cast<char*>(ptrs[i]);
      // Only the bytes past the original usable size must be zero; realloc
      // preserves the first orig_alloc_size bytes.
      ASSERT_EQ(0, memcmp(&ptr[orig_alloc_size], zero.data(), new_alloc_size - orig_alloc_size))
          << "realloc from " << kInitialSize << " to size " << test_size << " at iteration " << i;
    }
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }
}
Christopher Ferrisb4e560e2023-10-26 17:00:00 -07001738
// Error cases for the M_GET_DECAY_TIME_ENABLED query: a null output pointer
// or a wrongly-sized output buffer must fail with EINVAL.
TEST(android_mallopt, get_decay_time_enabled_errors) {
#if defined(__BIONIC__)
  errno = 0;
  // Null output pointer is rejected.
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, nullptr, sizeof(bool)));
  EXPECT_ERRNO(EINVAL);

  errno = 0;
  // An int-sized buffer is rejected (the query writes a bool; see the
  // get_decay_time_enabled test below).
  int value;
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_ERRNO(EINVAL);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1753
// Verify that M_GET_DECAY_TIME_ENABLED reflects the state most recently set
// via mallopt(M_DECAY_TIME, ...).
TEST(android_mallopt, get_decay_time_enabled) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";

  // Disable decay and confirm the query reports it off.
  EXPECT_EQ(1, mallopt(M_DECAY_TIME, 0));

  bool value;
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_FALSE(value);

  // Re-enable decay and confirm the query reports it on.
  EXPECT_EQ(1, mallopt(M_DECAY_TIME, 1));
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_TRUE(value);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}