/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

// The real <stdatomic.h> checks for the availability of C++'s <atomic> and
// uses that instead if present. We want to test the C interfaces, so we
// include <bits/stdatomic.h> directly instead. This doesn't entirely work,
// because gtest also (transitively) pulls in <atomic>. It's not clear there's
// a good fix for this, other than switching to a non-C++ unit test framework
// for bionic.
#include <bits/stdatomic.h>

#include <pthread.h>
#include <stdint.h>
#include <unistd.h>  // For usleep(), used in the ordering test below.

TEST(stdatomic, LOCK_FREE) {
  ASSERT_TRUE(ATOMIC_BOOL_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR16_T_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR32_T_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_INT_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_LLONG_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_LONG_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_POINTER_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_SHORT_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_WCHAR_T_LOCK_FREE);
}

TEST(stdatomic, init) {
  // ATOMIC_VAR_INIT was removed in C23, but is still in POSIX 2024. Even if
  // it is removed from there too, we should probably keep it indefinitely for
  // source compatibility. libc++'s <atomic> (which we can't entirely avoid:
  // see above) marks the macro deprecated, so we need to silence that.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-pragma"
  atomic_int v = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_load(&v));
#pragma clang diagnostic pop

  atomic_store_explicit(&v, 456, memory_order_relaxed);
  ASSERT_EQ(456, atomic_load(&v));

  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
}

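// A short sketch (ours, not part of the original suite) of the replacement
// for ATOMIC_VAR_INIT: plain initialization, which is why C17 deprecated the
// macro and C23 removed it.
TEST(stdatomic, init_without_ATOMIC_VAR_INIT) {
  atomic_int v = 123;  // No macro needed.
  ASSERT_EQ(123, atomic_load(&v));
}
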
TEST(stdatomic, atomic_thread_fence) {
  atomic_thread_fence(memory_order_relaxed);
  atomic_thread_fence(memory_order_consume);
  atomic_thread_fence(memory_order_acquire);
  atomic_thread_fence(memory_order_release);
  atomic_thread_fence(memory_order_acq_rel);
  atomic_thread_fence(memory_order_seq_cst);
}

TEST(stdatomic, atomic_signal_fence) {
  atomic_signal_fence(memory_order_relaxed);
  atomic_signal_fence(memory_order_consume);
  atomic_signal_fence(memory_order_acquire);
  atomic_signal_fence(memory_order_release);
  atomic_signal_fence(memory_order_acq_rel);
  atomic_signal_fence(memory_order_seq_cst);
}

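// A single-threaded sketch (ours, not from the original suite) of the shape
// fences are used in: a release fence before a relaxed store publishes prior
// writes, and an acquire fence after a relaxed load pairs with it. With only
// one thread this trivially passes; it just illustrates the placement.
TEST(stdatomic, fence_usage_sketch) {
  atomic_int data;
  atomic_int ready;
  atomic_store_explicit(&data, 42, memory_order_relaxed);
  atomic_thread_fence(memory_order_release);
  atomic_store_explicit(&ready, 1, memory_order_relaxed);

  if (atomic_load_explicit(&ready, memory_order_relaxed) == 1) {
    atomic_thread_fence(memory_order_acquire);
    ASSERT_EQ(42, atomic_load_explicit(&data, memory_order_relaxed));
  }
}
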
TEST(stdatomic, atomic_is_lock_free) {
  atomic_char small;
  ASSERT_TRUE(atomic_is_lock_free(&small));
  atomic_intmax_t big;
  ASSERT_TRUE(atomic_is_lock_free(&big));
}

TEST(stdatomic, atomic_flag) {
  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
  ASSERT_TRUE(atomic_flag_test_and_set(&f));

  atomic_flag_clear(&f);

  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
  ASSERT_TRUE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));

  atomic_flag_clear_explicit(&f, memory_order_relaxed);
  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
}

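// A brief sketch (ours, not part of the original suite) of the classic use of
// atomic_flag: a test-and-set spinlock. Acquire ordering on lock and release
// ordering on unlock give the usual critical-section guarantees.
static atomic_flag spin_lock_flag = ATOMIC_FLAG_INIT;

static void spin_lock() {
  // Spin until the previous value was clear, i.e. we took the lock.
  while (atomic_flag_test_and_set_explicit(&spin_lock_flag, memory_order_acquire)) {
  }
}

static void spin_unlock() {
  atomic_flag_clear_explicit(&spin_lock_flag, memory_order_release);
}

TEST(stdatomic, atomic_flag_spinlock_sketch) {
  spin_lock();
  // While the lock is held, another test_and_set must observe it set.
  ASSERT_TRUE(atomic_flag_test_and_set_explicit(&spin_lock_flag, memory_order_acquire));
  spin_unlock();
  spin_lock();  // Can be taken again after unlock.
  spin_unlock();
}
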
TEST(stdatomic, atomic_store) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_load(&i));
  atomic_store_explicit(&i, 123, memory_order_relaxed);
  ASSERT_EQ(123, atomic_load_explicit(&i, memory_order_relaxed));
}

TEST(stdatomic, atomic_exchange) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_exchange(&i, 456));
  ASSERT_EQ(456, atomic_exchange_explicit(&i, 123, memory_order_relaxed));
}

TEST(stdatomic, atomic_compare_exchange) {
  atomic_int i;
  int expected;

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
                                                      memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
                                                       memory_order_relaxed));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  int iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);  // Arbitrary limit on spurious compare_exchange failures.
    ASSERT_EQ(expected, 123);
  } while (!atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);
    ASSERT_EQ(expected, 123);
  } while (!atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
                                                  memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
                                                     memory_order_relaxed));
  ASSERT_EQ(456, expected);
}

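// A minimal sketch (ours, not part of the original suite) of the idiomatic
// compare_exchange_weak retry loop, which is why the spurious failures
// tolerated above are harmless in practice: on failure, `expected` is
// reloaded with the current value, so the desired value is recomputed and the
// exchange simply retried.
static int add_one_with_cas(atomic_int* v) {
  int expected = atomic_load(v);
  while (!atomic_compare_exchange_weak(v, &expected, expected + 1)) {
    // `expected` now holds the freshly observed value; just go round again.
  }
  return expected;  // The value we replaced.
}

TEST(stdatomic, compare_exchange_weak_retry_loop) {
  atomic_int i = 100;
  ASSERT_EQ(100, add_one_with_cas(&i));
  ASSERT_EQ(101, atomic_load(&i));
}
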
TEST(stdatomic, atomic_fetch_add) {
  atomic_int i = 123;
  ASSERT_EQ(123, atomic_fetch_add(&i, 1));
  ASSERT_EQ(124, atomic_fetch_add_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(125, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_sub) {
  atomic_int i = 123;
  ASSERT_EQ(123, atomic_fetch_sub(&i, 1));
  ASSERT_EQ(122, atomic_fetch_sub_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(121, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_or) {
  atomic_int i = 0x100;
  ASSERT_EQ(0x100, atomic_fetch_or(&i, 0x020));
  ASSERT_EQ(0x120, atomic_fetch_or_explicit(&i, 0x003, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_xor) {
  atomic_int i = 0x100;
  ASSERT_EQ(0x100, atomic_fetch_xor(&i, 0x120));
  ASSERT_EQ(0x020, atomic_fetch_xor_explicit(&i, 0x103, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_and) {
  atomic_int i = 0x123;
  ASSERT_EQ(0x123, atomic_fetch_and(&i, 0x00f));
  ASSERT_EQ(0x003, atomic_fetch_and_explicit(&i, 0x2, memory_order_relaxed));
  ASSERT_EQ(0x002, atomic_load(&i));
}

// And a rudimentary test of acquire-release memory ordering:

static constexpr uint_least32_t BIG = 30'000'000ul;
static_assert((BIG % 2) == 0);  // Assumed below.

struct three_atomics {
  atomic_uint_least32_t x;
  char a[123];  // Padding to put everything in different cache lines, and to
                // increase the chance of exposing any compiler alignment mistakes.
  atomic_uint_least32_t y;
  char b[4013];
  atomic_uint_least32_t z;
};

atomic_bool read_enough(false);

// Very simple acquire/release memory ordering smoke test.
static void* writer(void* arg) {
  three_atomics* a = reinterpret_cast<three_atomics*>(arg);
  for (uint_least32_t i = 0; i <= BIG; i += 2) {
    atomic_store_explicit(&a->x, i, memory_order_relaxed);
    atomic_store_explicit(&a->z, i, memory_order_relaxed);
    atomic_store_explicit(&a->y, i, memory_order_release);

    // Force stores to be visible in spite of being overwritten below.
    asm volatile("" ::: "memory");

    atomic_store_explicit(&a->x, i + 1, memory_order_relaxed);
    atomic_store_explicit(&a->z, i + 1, memory_order_relaxed);
    atomic_store_explicit(&a->y, i + 1, memory_order_release);
    if (i >= BIG - 1000 && !atomic_load(&read_enough)) {
      // Give the reader a chance to catch up, at the expense of making the
      // test less effective.
      usleep(1000);
    }
  }
  return nullptr;
}

static void* reader(void* arg) {
  three_atomics* a = reinterpret_cast<three_atomics*>(arg);
  uint_least32_t xval = 0, yval = 0, zval = 0;
  size_t repeat = 0;
  size_t repeat_limit = 1000;
  while (yval != BIG + 1) {
    yval = atomic_load_explicit(&a->y, memory_order_acquire);
    zval = atomic_load_explicit(&a->z, memory_order_relaxed);
    xval = atomic_load_explicit(&a->x, memory_order_relaxed);
    // If we see a given value of y, the immediately preceding
    // stores to z and x, or later ones, should also be visible.
    if (zval < yval) {
      // Can't just ASSERT, since we are in a non-void function.
      ADD_FAILURE() << "acquire-release ordering violation: "
                    << zval << " < " << yval << ", " << xval << "\n";
      return nullptr;  // Only report once.
    }
    if (xval < yval) {
      // Can't just ASSERT, since we are in a non-void function.
      ADD_FAILURE() << "acquire-release ordering violation: "
                    << xval << " < " << yval << ", " << zval << "\n";
      return nullptr;  // Only report once.
    }
    if (repeat < repeat_limit) {
      ++repeat;
    } else if (!atomic_load_explicit(&read_enough, memory_order_relaxed)) {
      atomic_store_explicit(&read_enough, true, memory_order_relaxed);
    }
  }
  // The following assertion is not technically guaranteed to hold.
  // But if it fails to hold, this test was useless, and we have a
  // serious scheduling issue that we should probably know about.
  EXPECT_EQ(repeat, repeat_limit);
  return nullptr;
}

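// A minimal sketch (ours, not part of the original suite) of the
// message-passing pattern the test below stresses: a release store publishes
// all earlier writes, and an acquire load that observes it is guaranteed to
// observe those writes too.
static atomic_int mp_payload;
static atomic_bool mp_ready(false);

static void* mp_writer(void*) {
  atomic_store_explicit(&mp_payload, 42, memory_order_relaxed);
  atomic_store_explicit(&mp_ready, true, memory_order_release);
  return nullptr;
}

static void* mp_reader(void*) {
  while (!atomic_load_explicit(&mp_ready, memory_order_acquire)) {
  }
  // The acquire load synchronized with the release store above, so this
  // relaxed load must see 42.
  EXPECT_EQ(42, atomic_load_explicit(&mp_payload, memory_order_relaxed));
  return nullptr;
}

TEST(stdatomic, message_passing_sketch) {
  atomic_store_explicit(&mp_payload, 0, memory_order_relaxed);
  pthread_t t1, t2;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, mp_reader, nullptr));
  ASSERT_EQ(0, pthread_create(&t2, nullptr, mp_writer, nullptr));
  ASSERT_EQ(0, pthread_join(t1, nullptr));
  ASSERT_EQ(0, pthread_join(t2, nullptr));
}
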
TEST(stdatomic, ordering) {
  // Run a memory ordering smoke test.
  void* result;
  three_atomics a;
  atomic_store_explicit(&a.x, 0ul, memory_order_relaxed);
  atomic_store_explicit(&a.y, 0ul, memory_order_relaxed);
  atomic_store_explicit(&a.z, 0ul, memory_order_relaxed);
  pthread_t t1, t2;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, reader, &a));
  ASSERT_EQ(0, pthread_create(&t2, nullptr, writer, &a));
  ASSERT_EQ(0, pthread_join(t1, &result));
  EXPECT_EQ(nullptr, result);
  ASSERT_EQ(0, pthread_join(t2, &result));
  EXPECT_EQ(nullptr, result);
  EXPECT_EQ(atomic_load_explicit(&a.x, memory_order_consume), BIG + 1);
  EXPECT_EQ(atomic_load_explicit(&a.y, memory_order_seq_cst), BIG + 1);
  EXPECT_EQ(atomic_load(&a.z), BIG + 1);
}
289}