/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>  // for memset and strerror, used below
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <functional>  // for std::function, used by RwlockWakeupHelperArg
#include <future>
#include <vector>

#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/scopeguard.h>
#include <android-base/silent_death_test.h>
#include <android-base/strings.h>
#include <android-base/test_utils.h>

#include "private/bionic_constants.h"
#include "SignalUtils.h"
#include "utils.h"

using pthread_DeathTest = SilentDeathTest;

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}
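
// The key tests in this file all pass a null destructor. As a reminder of the
// full lifecycle, here's a minimal sketch (illustrative only, assuming plain
// POSIX TLS semantics; FreeValue is not a helper in this suite):
//
//   static void FreeValue(void* value) { free(value); }   // runs at thread exit
//
//   pthread_key_t key;
//   pthread_key_create(&key, FreeValue);     // the destructor argument is optional
//   pthread_setspecific(key, malloc(16));    // each thread has its own value
//   void* value = pthread_getspecific(key);  // reads this thread's value
//   // On pthread_exit, FreeValue is called with any non-null stored value.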

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = android::base::make_scope_guard([&keys] {
    for (const auto& key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, nullptr)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, nullptr);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (const auto& key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns nullptr.
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  AssertChildExited(pid, 99);

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));

  size_t stack_size = 640 * 1024;
  void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* FnWithStackFrame(void*) {
  // Force a write to this frame so the thread actually touches its stack.
  int x;
  *const_cast<volatile int*>(&x) = 1;
  return nullptr;
}

TEST(pthread, pthread_heap_allocated_stack) {
  SKIP_WITH_HWASAN; // TODO(b/148982147): Re-enable when fixed.

  size_t stack_size = 640 * 1024;
  std::vector<char> stack_vec(stack_size, '\xff');
  void* stack = stack_vec.data();

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, FnWithStackFrame, nullptr));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So this tests whether the static/global default value 0 is detected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }

  ~SpinFunctionHelper() {
    UnSpin();
  }

  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return nullptr;
  }
  static std::atomic<bool> spin_flag_;
};

// It doesn't matter if spin_flag_ is used in several tests,
// because it is always set to false after each test. Each thread
// that loops on spin_flag_ will eventually see it become false.
std::atomic<bool> SpinFunctionHelper::spin_flag_;
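
// Typical use of SpinFunctionHelper in the tests below: keep a target thread
// alive and busy until the test has finished poking at it. A minimal sketch:
//
//   SpinFunctionHelper spin_helper;
//   pthread_t t;
//   pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr);
//   // ... inspect or signal `t` while it spins ...
//   spin_helper.UnSpin();      // lets SpinFn return (the destructor also unspins)
//   pthread_join(t, nullptr);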

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), nullptr));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, nullptr));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), nullptr));
}

struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, nullptr));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, nullptr, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(nullptr);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Unlocking data->mutex will cause the main thread to exit, invalidating *data. Save the handle.
    pthread_t main_thread = data->main_thread;

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(main_thread, nullptr);

    return nullptr;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.
TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset64_t wait_set;
  sigfillset64(&wait_set);
  return reinterpret_cast<void*>(sigwait64(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, nullptr));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, nullptr));
}
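
// The test above uses the standard "block, then sigwait" pattern for taking
// signals synchronously on a dedicated thread. A minimal sketch of the pattern
// on its own (hypothetical standalone code, not a helper in this file):
//
//   sigset_t set;
//   sigemptyset(&set);
//   sigaddset(&set, SIGUSR1);
//   pthread_sigmask(SIG_BLOCK, &set, nullptr);  // block first, before any delivery
//   int sig;
//   sigwait(&set, &sig);  // atomically waits for delivery; no handler races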

TEST(pthread, pthread_sigmask64_SIGRTMIN) {
  // Check that SIGRTMIN isn't blocked.
  sigset64_t original_set;
  sigemptyset64(&original_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember64(&original_set, SIGRTMIN));

  // Block SIGRTMIN.
  sigset64_t set;
  sigemptyset64(&set);
  sigaddset64(&set, SIGRTMIN);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, &set, nullptr));

  // Check that SIGRTMIN is blocked.
  sigset64_t final_set;
  sigemptyset64(&final_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));
  // ...and that sigprocmask64 agrees with pthread_sigmask64.
  sigemptyset64(&final_set);
  ASSERT_EQ(0, sigprocmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));

  // Spawn a thread that calls sigwait64 and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGRTMIN.
  pthread_kill(signal_thread, SIGRTMIN);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGRTMIN, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask64(SIG_SETMASK, &original_set, nullptr));
}

static void test_pthread_setname_np__pthread_getname_np(pthread_t t) {
  ASSERT_EQ(0, pthread_setname_np(t, "short"));
  char name[32];
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("short", name);

  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(t, "123456789012345"));
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("123456789012345", name);

  ASSERT_EQ(ERANGE, pthread_setname_np(t, "1234567890123456"));

  // The passed-in buffer should be at least 16 bytes.
  ASSERT_EQ(0, pthread_getname_np(t, name, 16));
  ASSERT_EQ(ERANGE, pthread_getname_np(t, name, 15));
}

TEST(pthread, pthread_setname_np__pthread_getname_np__self) {
  test_pthread_setname_np__pthread_getname_np(pthread_self());
}

TEST(pthread, pthread_setname_np__pthread_getname_np__other) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

// http://b/28051133: a kernel misfeature means that you can't change the
// name of another thread if you've set PR_SET_DUMPABLE to 0.
TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
  ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0)) << strerror(errno);

  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}
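
// On Linux the name set above is the kernel's 16-byte comm field (15 chars
// plus a NUL), also visible as /proc/<pid>/task/<tid>/comm -- which is why the
// PR_SET_DUMPABLE state, which restricts /proc access, can get in the way of
// renaming *another* thread. For the current thread only, a sketch of the
// prctl equivalent:
//
//   prctl(PR_SET_NAME, "worker");  // same 15-character limit applies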

TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"),
               "invalid pthread_t (.*) passed to pthread_setname_np");
}

TEST_F(pthread_DeathTest, pthread_setname_np__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ENOENT, pthread_setname_np(null_thread, "short 3"));
}

TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  char name[64];
  EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)),
               "invalid pthread_t (.*) passed to pthread_getname_np");
}

TEST_F(pthread_DeathTest, pthread_getname_np__null_thread) {
  pthread_t null_thread = 0;

  char name[64];
  EXPECT_EQ(ENOENT, pthread_getname_np(null_thread, name, sizeof(name)));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_kill__exited_thread) {
  static std::promise<pid_t> tid_promise;
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              [](void*) -> void* {
                                tid_promise.set_value(gettid());
                                return nullptr;
                              },
                              nullptr));

  pid_t tid = tid_promise.get_future().get();
  while (TEMP_FAILURE_RETRY(syscall(__NR_tgkill, getpid(), tid, 0)) != -1) {
    continue;
  }
  ASSERT_EQ(ESRCH, errno);

  ASSERT_EQ(ESRCH, pthread_kill(thread, 0));
}

TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_detach(dead_thread),
               "invalid pthread_t (.*) passed to pthread_detach");
}

TEST_F(pthread_DeathTest, pthread_detach__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_detach(null_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c),
               "invalid pthread_t (.*) passed to pthread_getcpuclockid");
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__null_thread) {
  pthread_t null_thread = 0;
  clockid_t c;
  EXPECT_EQ(ESRCH, pthread_getcpuclockid(null_thread, &c));
}

TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, &param),
               "invalid pthread_t (.*) passed to pthread_getschedparam");
}

TEST_F(pthread_DeathTest, pthread_getschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_getschedparam(null_thread, &policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, &param),
               "invalid pthread_t (.*) passed to pthread_setschedparam");
}

TEST_F(pthread_DeathTest, pthread_setschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy = 0;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_setschedparam(null_thread, policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedprio__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setschedprio(dead_thread, 123),
               "invalid pthread_t (.*) passed to pthread_setschedprio");
}

TEST_F(pthread_DeathTest, pthread_setschedprio__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_setschedprio(null_thread, 123));
}

TEST_F(pthread_DeathTest, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_join(dead_thread, nullptr),
               "invalid pthread_t (.*) passed to pthread_join");
}

TEST_F(pthread_DeathTest, pthread_join__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_join(null_thread, nullptr));
}

TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_kill(dead_thread, 0),
               "invalid pthread_t (.*) passed to pthread_kill");
}

TEST_F(pthread_DeathTest, pthread_kill__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_kill(null_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 640*1024;
    void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, nullptr));
    ASSERT_EQ(0, pthread_join(t, nullptr));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

TEST(pthread, pthread_attr_setguardsize_tiny) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_reasonable) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);
  ASSERT_EQ(32*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_needs_rounding) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough but not a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
  ASSERT_EQ(36*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_enormous) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Larger than the stack itself. (Historically we mistakenly carved
  // the guard out of the stack itself, rather than adding it after the
  // end.)
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024*1024U, guard_size);
  ASSERT_EQ(32*1024*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}
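
// The rounding checked in the guard- and stack-size tests above is the usual
// power-of-two alignment arithmetic. A minimal sketch (assuming the page size
// is a power of two; `requested` is a placeholder):
//
//   size_t page_size = getpagesize();
//   size_t rounded = (requested + page_size - 1) & ~(page_size - 1);
//   // e.g. requesting 32*1024 + 1 with 4KiB pages rounds up to 36*1024.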

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

#if !defined(ANDROID_HOST_MUSL)
  // musl doesn't have pthread_rwlockattr_setkind_np
  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }
#endif

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}

TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, nullptr));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, nullptr));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED,
    LOCK_TIMEDOUT,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
  std::function<int (pthread_rwlock_t*)> trylock_function;
  std::function<int (pthread_rwlock_t*)> lock_function;
  std::function<int (pthread_rwlock_t*, const timespec*)> timed_lock_function;
  clockid_t clock;
};

static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));
  ASSERT_EQ(0, arg->lock_function(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

static void test_pthread_rwlock_reader_wakeup_writer(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock);
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedwrlock(lock, &ts);
  });
}
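
// All the timed- and clock- lock variants in this file take an *absolute*
// deadline rather than a relative timeout, which is why each test samples the
// clock and then adds to it. A minimal sketch for "wait up to 100ms"
// (illustrative; `lock` is a placeholder):
//
//   timespec ts;
//   clock_gettime(CLOCK_REALTIME, &ts);  // pthread_rwlock_timedwrlock measures against CLOCK_REALTIME
//   ts.tv_nsec += 100 * 1000 * 1000;
//   if (ts.tv_nsec >= NS_PER_S) { ts.tv_nsec -= NS_PER_S; ts.tv_sec += 1; }
//   int result = pthread_rwlock_timedwrlock(&lock, &ts);  // 0, or ETIMEDOUT on timeout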
1008
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001009TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait_monotonic_np) {
1010#if defined(__BIONIC__)
1011 timespec ts;
1012 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
1013 ts.tv_sec += 1;
1014 test_pthread_rwlock_reader_wakeup_writer(
1015 [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedwrlock_monotonic_np(lock, &ts); });
1016#else // __BIONIC__
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001017 GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001018#endif // __BIONIC__
1019}
1020
Tom Cherry69010802019-05-07 20:33:05 -07001021TEST(pthread, pthread_rwlock_reader_wakeup_writer_clockwait) {
1022#if defined(__BIONIC__)
1023 timespec ts;
1024 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
1025 ts.tv_sec += 1;
1026 test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
1027 return pthread_rwlock_clockwrlock(lock, CLOCK_MONOTONIC, &ts);
1028 });
1029
1030 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1031 ts.tv_sec += 1;
1032 test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
1033 return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, &ts);
1034 });
1035#else // __BIONIC__
1036 GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
1037#endif // __BIONIC__
1038}
1039
Yabin Cuic9a659c2015-11-05 15:36:08 -08001040static void test_pthread_rwlock_writer_wakeup_reader(std::function<int (pthread_rwlock_t*)> lock_function) {
Yabin Cui08ee8d22015-02-11 17:04:36 -08001041 RwlockWakeupHelperArg wakeup_arg;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001042 ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
Yabin Cui08ee8d22015-02-11 17:04:36 -08001043 ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
1044 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
Yabin Cuif7969852015-04-02 17:47:48 -07001045 wakeup_arg.tid = 0;
Tom Cherry60ddedf2018-02-20 15:40:02 -08001046 wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
Yabin Cuic9a659c2015-11-05 15:36:08 -08001047 wakeup_arg.lock_function = lock_function;
Yabin Cui08ee8d22015-02-11 17:04:36 -08001048
1049 pthread_t thread;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001050 ASSERT_EQ(0, pthread_create(&thread, nullptr,
Yabin Cuic9a659c2015-11-05 15:36:08 -08001051 reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
Yabin Cuif7969852015-04-02 17:47:48 -07001052 WaitUntilThreadSleep(wakeup_arg.tid);
1053 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);
1054
Yabin Cui08ee8d22015-02-11 17:04:36 -08001055 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
1056 ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
1057
Yi Kong32bc0fc2018-08-02 17:31:13 -07001058 ASSERT_EQ(0, pthread_join(thread, nullptr));
Yabin Cui08ee8d22015-02-11 17:04:36 -08001059 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
1060 ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
1061}
1062
Yabin Cuic9a659c2015-11-05 15:36:08 -08001063TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
1064 test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock);
1065}
1066
1067TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) {
1068 timespec ts;
1069 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1070 ts.tv_sec += 1;
1071 test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
1072 return pthread_rwlock_timedrdlock(lock, &ts);
1073 });
1074}
1075
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001076TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait_monotonic_np) {
1077#if defined(__BIONIC__)
1078 timespec ts;
1079 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
1080 ts.tv_sec += 1;
1081 test_pthread_rwlock_writer_wakeup_reader(
1082 [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedrdlock_monotonic_np(lock, &ts); });
1083#else // __BIONIC__
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001084 GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001085#endif // __BIONIC__
1086}
1087
Tom Cherry69010802019-05-07 20:33:05 -07001088TEST(pthread, pthread_rwlock_writer_wakeup_reader_clockwait) {
1089#if defined(__BIONIC__)
1090 timespec ts;
1091 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
1092 ts.tv_sec += 1;
1093 test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
1094 return pthread_rwlock_clockrdlock(lock, CLOCK_MONOTONIC, &ts);
1095 });
1096
1097 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1098 ts.tv_sec += 1;
1099 test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
1100 return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, &ts);
1101 });
1102#else // __BIONIC__
1103 GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
1104#endif // __BIONIC__
1105}
1106
Yabin Cuic9a659c2015-11-05 15:36:08 -08001107static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) {
1108 arg->tid = gettid();
1109 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
1110 arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;
1111
1112 ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));
1113
1114 timespec ts;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001115 ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001116 ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
1117 ts.tv_nsec = -1;
1118 ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
1119 ts.tv_nsec = NS_PER_S;
1120 ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
1121 ts.tv_nsec = NS_PER_S - 1;
1122 ts.tv_sec = -1;
1123 ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001124 ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001125 ts.tv_sec += 1;
1126 ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
1127 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress);
1128 arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT;
1129}
1130
static void pthread_rwlock_timedrdlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedrdlock_timeout) {
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedrdlock);
}

TEST(pthread, pthread_rwlock_timedrdlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedrdlock_monotonic_np);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

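// pthread_rwlock_clockrdlock/clockwrlock only accept CLOCK_MONOTONIC and
// CLOCK_REALTIME; any other clock id (here CLOCK_PROCESS_CPUTIME_ID) is
// rejected with EINVAL. The tests below pass an uninitialized timespec,
// relying on the clock id being rejected regardless of the timeout value.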
TEST(pthread, pthread_rwlock_clockrdlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockrdlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

static void pthread_rwlock_timedwrlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedwrlock_timeout) {
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedwrlock);
}

TEST(pthread, pthread_rwlock_timedwrlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedwrlock_monotonic_np);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockwrlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

#if !defined(ANDROID_HOST_MUSL)
// musl doesn't have pthread_rwlockattr_setkind_np
class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  explicit RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};
#endif

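// With PTHREAD_RWLOCK_PREFER_READER_NP, a new reader can take the lock even
// while a writer is queued, so in the first test the reader thread finishes
// before the main thread releases its read lock. With
// PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, a queued writer blocks new
// readers, so both threads stay blocked until the main thread unlocks.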
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
#if !defined(ANDROID_HOST_MUSL)
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
#else
  GTEST_SKIP() << "musl doesn't have pthread_rwlockattr_setkind_np";
#endif
}

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
#if !defined(ANDROID_HOST_MUSL)
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));
#else
  GTEST_SKIP() << "musl doesn't have pthread_rwlockattr_setkind_np";
#endif
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }

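// Each handler appends its digit in base 10, so the final counter value
// encodes the exact call order: 12 means handler 1 ran before handler 2, and
// 21 means the reverse. This is how the test below distinguishes the
// registration-order parent/child callbacks from the reverse-order prepare
// callbacks.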
TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(21, g_atfork_prepare_calls);
  AssertChildExited(pid, 0);
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

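// Bionic keeps a condition variable's attribute flags (clock choice and
// process-shared bit) in the first word of the opaque __private state; the
// test below reinterprets that word as a pthread_condattr_t to check that
// signal/broadcast don't clobber the flags. This is deliberately
// implementation-specific, hence bionic-only.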
TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else  // !defined(__BIONIC__)
  GTEST_SKIP() << "bionic-only test";
#endif  // !defined(__BIONIC__)
}

class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;
  timespec ts;
  std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function;

 protected:
  void SetUp() override {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  }

  void InitCond(clockid_t clock=CLOCK_REALTIME) {
    pthread_condattr_t attr;
    ASSERT_EQ(0, pthread_condattr_init(&attr));
    ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock));
    ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
    ASSERT_EQ(0, pthread_condattr_destroy(&attr));
  }

  void StartWaitingThread(
      std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function) {
    progress = INITIALIZED;
    this->wait_function = wait_function;
    ASSERT_EQ(0, pthread_create(&thread, nullptr, reinterpret_cast<void* (*)(void*)>(WaitThreadFn),
                                this));
    while (progress != WAITING) {
      usleep(5000);
    }
    usleep(5000);
  }

  void RunTimedTest(
      clockid_t clock,
      std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* timeout)>
          wait_function) {
    ASSERT_EQ(0, clock_gettime(clock, &ts));
    ts.tv_sec += 1;

    StartWaitingThread([&wait_function, this](pthread_cond_t* cond, pthread_mutex_t* mutex) {
      return wait_function(cond, mutex, &ts);
    });

    progress = SIGNALED;
    ASSERT_EQ(0, pthread_cond_signal(&cond));
  }

  void RunTimedTest(clockid_t clock, std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex,
                                                       clockid_t clock, const timespec* timeout)>
                                         wait_function) {
    RunTimedTest(clock, [clock, &wait_function](pthread_cond_t* cond, pthread_mutex_t* mutex,
                                                const timespec* timeout) {
      return wait_function(cond, mutex, clock, timeout);
    });
  }

  void TearDown() override {
    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

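// The fixture's protocol: the waiter thread takes the mutex, publishes
// WAITING, and loops on wait_function until the test flips progress to
// SIGNALED and wakes it (the loop also absorbs spurious wakeups). TearDown
// joins the waiter and requires FINISHED, so every test below implicitly
// verifies that the wakeup was actually delivered.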
TEST_F(pthread_CondWakeupTest, signal_wait) {
  InitCond();
  StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_wait(cond, mutex);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_signal(&cond));
}

TEST_F(pthread_CondWakeupTest, broadcast_wait) {
  InitCond();
  StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_wait(cond, mutex);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_broadcast(&cond));
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) {
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_REALTIME, pthread_cond_timedwait);
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) {
  InitCond(CLOCK_MONOTONIC);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait);
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC_np) {
#if defined(__BIONIC__)
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
#endif  // __BIONIC__
}

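// pthread_cond_clockwait takes the wait clock as an explicit argument, so the
// four tests below cover the full 2x2 matrix: a condition variable initialized
// with CLOCK_MONOTONIC or CLOCK_REALTIME, waited on with either clock. The
// wait clock is allowed to differ from the clock the condvar was initialized
// with.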
TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_monotonic) {
#if defined(__BIONIC__)
  InitCond(CLOCK_MONOTONIC);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_realtime) {
#if defined(__BIONIC__)
  InitCond(CLOCK_MONOTONIC);
  RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_monotonic) {
#if defined(__BIONIC__)
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_realtime) {
#if defined(__BIONIC__)
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

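// init_monotonic covers the case where the condition variable itself was
// created with pthread_condattr_setclock(CLOCK_MONOTONIC): the timeout
// behavior of pthread_cond_timedwait_monotonic_np and pthread_cond_clockwait
// should be the same whichever clock the condvar was initialized with.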
static void pthread_cond_timedwait_timeout_helper(bool init_monotonic, clockid_t clock,
                                                  int (*wait_function)(pthread_cond_t* __cond,
                                                                       pthread_mutex_t* __mutex,
                                                                       const timespec* __timeout)) {
  pthread_mutex_t mutex;
  ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  pthread_cond_t cond;

  if (init_monotonic) {
    pthread_condattr_t attr;
    pthread_condattr_init(&attr);

    ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
    clockid_t clock;
    ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
    ASSERT_EQ(CLOCK_MONOTONIC, clock);

    ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
  } else {
    ASSERT_EQ(0, pthread_cond_init(&cond, nullptr));
  }
  ASSERT_EQ(0, pthread_mutex_lock(&mutex));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(clock, &ts));
  ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
  ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
}

TEST(pthread, pthread_cond_timedwait_timeout) {
  pthread_cond_timedwait_timeout_helper(false, CLOCK_REALTIME, pthread_cond_timedwait);
}

TEST(pthread, pthread_cond_timedwait_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_cond_timedwait_timeout_helper(false, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
  pthread_cond_timedwait_timeout_helper(true, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_cond_clockwait_timeout) {
#if defined(__BIONIC__)
  pthread_cond_timedwait_timeout_helper(
      false, CLOCK_MONOTONIC,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_cond_timedwait_timeout_helper(
      true, CLOCK_MONOTONIC,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_cond_timedwait_timeout_helper(
      false, CLOCK_REALTIME,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
      });
  pthread_cond_timedwait_timeout_helper(
      true, CLOCK_REALTIME,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
      });
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_cond_clockwait_invalid) {
#if defined(__BIONIC__)
  pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_cond_clockwait(&cond, &mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

#if defined(__BIONIC__)
  // Find stack in /proc/self/maps using a pointer to the stack.
  //
  // We do not use "[stack]" label because in native-bridge environment it is not
  // guaranteed to point to the right stack. A native bridge implementation may
  // keep separate stack for the guest code.
  void* maps_stack_hi = nullptr;
  std::vector<map_record> maps;
  ASSERT_TRUE(Maps::parse_maps(&maps));
  uintptr_t stack_address = reinterpret_cast<uintptr_t>(untag_address(&maps_stack_hi));
  for (const auto& map : maps) {
    if (map.addr_start <= stack_address && map.addr_end > stack_address){
      maps_stack_hi = reinterpret_cast<void*>(map.addr_end);
      break;
    }
  }

  // The high address of the /proc/self/maps stack region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = android::base::make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
#endif
}

struct GetStackSignalHandlerArg {
  volatile bool done;
  void* signal_stack_base;
  size_t signal_stack_size;
  void* main_stack_base;
  size_t main_stack_size;
};

static GetStackSignalHandlerArg getstack_signal_handler_arg;

static void getstack_signal_handler(int sig) {
  ASSERT_EQ(SIGUSR1, sig);
  // Use sleep() to get the current thread switched out by the kernel, provoking the error.
  sleep(1);
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));

  // Verify that the stack used by the signal handler is the alternate stack just registered.
  ASSERT_LE(getstack_signal_handler_arg.signal_stack_base, &attr);
  ASSERT_LT(static_cast<void*>(untag_address(&attr)),
            static_cast<char*>(getstack_signal_handler_arg.signal_stack_base) +
                getstack_signal_handler_arg.signal_stack_size);

  // Verify that the main thread's stack obtained in the signal handler is correct.
  ASSERT_EQ(getstack_signal_handler_arg.main_stack_base, stack_base);
  ASSERT_LE(getstack_signal_handler_arg.main_stack_size, stack_size);

  getstack_signal_handler_arg.done = true;
}

// The previous code obtained the main thread's stack by reading the entry in
// /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64, the kernel
// relies on sp0 in the task state segment (tss) to label the stack map with [stack]. If the kernel
// switches a process while the main thread is on an alternate stack, then the kernel will label
// the wrong map with [stack]. This test verifies that when the above situation happens, the main
// thread's stack is found correctly.
TEST(pthread, pthread_attr_getstack_in_signal_handler) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  const size_t sig_stack_size = 16 * 1024;
  void* sig_stack = mmap(nullptr, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                         -1, 0);
  ASSERT_NE(MAP_FAILED, sig_stack);
  stack_t ss;
  ss.ss_sp = sig_stack;
  ss.ss_size = sig_stack_size;
  ss.ss_flags = 0;
  stack_t oss;
  ASSERT_EQ(0, sigaltstack(&ss, &oss));

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* main_stack_base;
  size_t main_stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &main_stack_base, &main_stack_size));

  ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
  getstack_signal_handler_arg.done = false;
  getstack_signal_handler_arg.signal_stack_base = sig_stack;
  getstack_signal_handler_arg.signal_stack_size = sig_stack_size;
  getstack_signal_handler_arg.main_stack_base = main_stack_base;
  getstack_signal_handler_arg.main_stack_size = main_stack_size;
  kill(getpid(), SIGUSR1);
  ASSERT_EQ(true, getstack_signal_handler_arg.done);

  ASSERT_EQ(0, sigaltstack(&oss, nullptr));
  ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(untag_address(&local_variable), reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check whether something on the stack is in the range of
// [stack_base, stack_base + stack_size). See b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr,
            reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
            nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

#if defined(__BIONIC__)
static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER;

static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();

  // Wait for our parent to call pthread_gettid_np on us before exiting.
  pthread_mutex_lock(&pthread_gettid_np_mutex);
  pthread_mutex_unlock(&pthread_gettid_np_mutex);
  return nullptr;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  // Ensure the other thread doesn't exit until after we've called
  // pthread_gettid_np on it.
  pthread_mutex_lock(&pthread_gettid_np_mutex);

  pid_t t_gettid_result;
  pthread_t t;
  pthread_create(&t, nullptr, pthread_gettid_np_helper, &t_gettid_result);

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);

  // Release the other thread and wait for it to exit.
  pthread_mutex_unlock(&pthread_gettid_np_mutex);
  ASSERT_EQ(0, pthread_join(t, nullptr));

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_SKIP() << "pthread_gettid_np not available";
#endif
}

static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, nullptr);
  pthread_cleanup_push(CountCleanupRoutine, nullptr);
  pthread_cleanup_push(AbortCleanupRoutine, nullptr);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(nullptr);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return nullptr;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, PthreadCleanupStartRoutine, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
  ASSERT_EQ(2U, cleanup_counter);
}

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

TEST(pthread, pthread_mutexattr_protocol) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int protocol;
  ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
  ASSERT_EQ(PTHREAD_PRIO_NONE, protocol);
  for (size_t repeat = 0; repeat < 2; ++repeat) {
    for (int set_protocol : {PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT}) {
      ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, set_protocol));
      ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
      ASSERT_EQ(protocol, set_protocol);
    }
  }
}

struct PthreadMutex {
  pthread_mutex_t lock;

  explicit PthreadMutex(int mutex_type, int protocol = PTHREAD_PRIO_NONE) {
    init(mutex_type, protocol);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type, int protocol) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, protocol));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};

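// Unlocking from a thread that doesn't own the mutex should fail with EPERM
// for error-checking and PI mutexes. The helper below attempts the unlock on a
// brand-new thread and funnels pthread_mutex_unlock's return value back
// through the thread's exit value, so callers can simply assert on it.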
static int UnlockFromAnotherThread(pthread_mutex_t* mutex) {
  pthread_t thread;
  pthread_create(&thread, nullptr, [](void* mutex_voidp) -> void* {
    pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(mutex_voidp);
    intptr_t result = pthread_mutex_unlock(mutex);
    return reinterpret_cast<void*>(result);
  }, mutex);
  void* result;
  EXPECT_EQ(0, pthread_join(thread, &result));
  return reinterpret_cast<intptr_t>(result);
}

static void TestPthreadMutexLockNormal(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  if (protocol == PTHREAD_PRIO_INHERIT) {
    ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  }
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

static void TestPthreadMutexLockErrorCheck(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  if (protocol == PTHREAD_PRIO_NONE) {
    ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  } else {
    ASSERT_EQ(EDEADLK, pthread_mutex_trylock(&m.lock));
  }
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

static void TestPthreadMutexLockRecursive(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_NORMAL) {
  TestPthreadMutexLockNormal(PTHREAD_PRIO_NONE);
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_NONE);
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  TestPthreadMutexLockRecursive(PTHREAD_PRIO_NONE);
}

TEST(pthread, pthread_mutex_lock_pi) {
  TestPthreadMutexLockNormal(PTHREAD_PRIO_INHERIT);
  TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_INHERIT);
  TestPthreadMutexLockRecursive(PTHREAD_PRIO_INHERIT);
}

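// In 32-bit bionic, PI mutexes don't fit their owner bookkeeping into the
// mutex word itself; they're backed by entries in a global table addressed by
// a 16-bit id, which is what caps a 32-bit process at 65536 PI mutexes. The
// test below relies on that implementation detail.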
TEST(pthread, pthread_mutex_pi_count_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // Bionic only supports 65536 pi mutexes in 32-bit programs.
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
  ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT));
  std::vector<pthread_mutex_t> mutexes(65536);
  // Test if we can use 65536 pi mutexes at the same time.
  // Run 2 times to check if freed pi mutexes can be recycled.
  for (int repeat = 0; repeat < 2; ++repeat) {
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_init(&m, &attr));
    }
    pthread_mutex_t m;
    ASSERT_EQ(ENOMEM, pthread_mutex_init(&m, &attr));
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_lock(&m));
    }
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_unlock(&m));
    }
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_destroy(&m));
    }
  }
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
#else
  GTEST_SKIP() << "pi mutex count not limited to 64Ki";
#endif
}

TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_normal);

#if !defined(ANDROID_HOST_MUSL)
  // musl doesn't support PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP or
  // PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP.
  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_errorcheck);

  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
#endif
}

class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;

  static void thread_fn(MutexWakeupHelper* helper) {
    helper->tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  explicit MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;
    tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
        reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(tid);
    ASSERT_EQ(LOCK_WAITING, progress);

    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}

static int GetThreadPriority(pid_t tid) {
  // sched_getparam() returns the static priority of a thread, which can't reflect a thread's
  // priority after priority inheritance. So read /proc/<pid>/stat to get the dynamic priority.
  std::string filename = android::base::StringPrintf("/proc/%d/stat", tid);
  std::string content;
  int result = INT_MAX;
  if (!android::base::ReadFileToString(filename, &content)) {
    return result;
  }
  std::vector<std::string> strs = android::base::Split(content, " ");
  if (strs.size() < 18) {
    return result;
  }
  if (!android::base::ParseInt(strs[17], &result)) {
    return INT_MAX;
  }
  return result;
}

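// For SCHED_OTHER threads, the priority field read above is reported by the
// kernel as 20 + nice. The helper below runs its child at nice +1 (priority
// 21) while the main thread stays at 20; with PTHREAD_PRIO_INHERIT, the child
// should be boosted to 20 while the main thread is blocked on the mutex the
// child holds.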
class PIMutexWakeupHelper {
private:
  PthreadMutex m;
  int protocol;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_CHILD_READY,
    LOCK_WAITING,
    LOCK_RELEASED,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> main_tid;
  std::atomic<pid_t> child_tid;
  PthreadMutex start_thread_m;

  static void thread_fn(PIMutexWakeupHelper* helper) {
    helper->child_tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    ASSERT_EQ(0, setpriority(PRIO_PROCESS, gettid(), 1));
    ASSERT_EQ(21, GetThreadPriority(gettid()));
    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    helper->progress = LOCK_CHILD_READY;
    ASSERT_EQ(0, pthread_mutex_lock(&helper->start_thread_m.lock));

    ASSERT_EQ(0, pthread_mutex_unlock(&helper->start_thread_m.lock));
    WaitUntilThreadSleep(helper->main_tid);
    ASSERT_EQ(LOCK_WAITING, helper->progress);

    if (helper->protocol == PTHREAD_PRIO_INHERIT) {
      ASSERT_EQ(20, GetThreadPriority(gettid()));
    } else {
      ASSERT_EQ(21, GetThreadPriority(gettid()));
    }
    helper->progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
  }

public:
  explicit PIMutexWakeupHelper(int mutex_type, int protocol)
      : m(mutex_type, protocol), protocol(protocol), start_thread_m(PTHREAD_MUTEX_NORMAL) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&start_thread_m.lock));
    main_tid = gettid();
    ASSERT_EQ(20, GetThreadPriority(main_tid));
    progress = LOCK_INITIALIZED;
    child_tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
              reinterpret_cast<void* (*)(void*)>(PIMutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(child_tid);
    ASSERT_EQ(LOCK_CHILD_READY, progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&start_thread_m.lock));
    progress = LOCK_WAITING;
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));

    ASSERT_EQ(LOCK_RELEASED, progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
    ASSERT_EQ(0, pthread_join(thread, nullptr));
  }
};

TEST(pthread, pthread_mutex_pi_wakeup) {
  for (int type : {PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK}) {
    for (int protocol : {PTHREAD_PRIO_INHERIT}) {
      PIMutexWakeupHelper helper(type, protocol);
      helper.test();
    }
  }
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != nullptr);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
  ASSERT_LE(pid_max, 65536);
#else
  GTEST_SKIP() << "pthread_mutex supports 32-bit tid";
#endif
}

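// Exercise a timedlock-style function: it should time out on a mutex that is
// already held, reject invalid timespecs with EINVAL, and succeed once the
// mutex is free.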
static void pthread_mutex_timedlock_helper(clockid_t clock,
                                           int (*lock_function)(pthread_mutex_t* __mutex,
                                                                const timespec* __timeout)) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(clock, &ts));
  ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, lock_function(&m, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, lock_function(&m, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(clock, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(0, lock_function(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_timedlock_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
}

TEST(pthread, pthread_mutex_timedlock_monotonic_np) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_helper(
      CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_mutex_timedlock_helper(
      CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
      });
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif  // __BIONIC__
}

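// Priority-inheritance mutexes take a different code path in bionic (PI
// futexes), so repeat the timeout checks for them.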
static void pthread_mutex_timedlock_pi_helper(clockid_t clock,
                                              int (*lock_function)(pthread_mutex_t* __mutex,
                                                                   const timespec* __timeout)) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL, PTHREAD_PRIO_INHERIT);

  timespec ts;
  clock_gettime(clock, &ts);
  ts.tv_sec += 1;
  ASSERT_EQ(0, lock_function(&m.lock, &ts));

  struct ThreadArgs {
    clockid_t clock;
    int (*lock_function)(pthread_mutex_t* __mutex, const timespec* __timeout);
    PthreadMutex& m;
  };

  ThreadArgs thread_args = {
    .clock = clock,
    .lock_function = lock_function,
    .m = m,
  };

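  // The second thread should time out: the PI mutex is held for the entire
  // wait. Its error code comes back through the thread's exit value.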
  auto ThreadFn = [](void* arg) -> void* {
    auto args = static_cast<ThreadArgs*>(arg);
    timespec ts;
    clock_gettime(args->clock, &ts);
    ts.tv_sec += 1;
    intptr_t result = args->lock_function(&args->m.lock, &ts);
    return reinterpret_cast<void*>(result);
  };

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr, ThreadFn, &thread_args));
  void* result;
  ASSERT_EQ(0, pthread_join(thread, &result));
  ASSERT_EQ(ETIMEDOUT, reinterpret_cast<intptr_t>(result));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_timedlock_pi) {
  pthread_mutex_timedlock_pi_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
}

TEST(pthread, pthread_mutex_timedlock_monotonic_np_pi) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_pi_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock_pi) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_pi_helper(
      CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_mutex_timedlock_pi_helper(
      CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
      });
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock_invalid) {
#if defined(__BIONIC__)
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_mutex_clocklock(&mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else  // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif  // __BIONIC__
}

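// Bionic marks a mutex as destroyed in pthread_mutex_destroy(), so any later
// use should abort with a clear message rather than silently corrupt state.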
TEST_F(pthread_DeathTest, pthread_mutex_using_destroyed_mutex) {
#if defined(__BIONIC__)
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
  ASSERT_EXIT(pthread_mutex_lock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_lock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_unlock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_unlock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_trylock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_trylock called on a destroyed mutex");
  timespec ts;
  ASSERT_EXIT(pthread_mutex_timedlock(&m, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_timedlock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_timedlock_monotonic_np(&m, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_timedlock_monotonic_np called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_MONOTONIC, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_clocklock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_REALTIME, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_clocklock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_PROCESS_CPUTIME_ID, &ts),
              ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_clocklock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_destroy(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_destroy called on a destroyed mutex");
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

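// Hands out pointers that are aligned to `alignment` but deliberately
// misaligned for `alignment * 2`, so a test can check that 4-byte-aligned
// (but not 8-byte-aligned) objects still work.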
class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (const auto& p : allocated_array) {
      delete[] p;
    }
  }

 private:
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old versions, we need to allow 4-byte aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
      allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, nullptr));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
      allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, nullptr));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
      allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(pthread, pthread_mutex_lock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_lock(null_value));
#else
  GTEST_SKIP() << "32-bit bionic-only test";
#endif
}

TEST(pthread, pthread_mutex_unlock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_unlock(null_value));
#else
  GTEST_SKIP() << "32-bit bionic-only test";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_SKIP() << "64-bit bionic-only test";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_SKIP() << "64-bit bionic-only test";
#endif
}

extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);

static volatile bool signal_handler_on_altstack_done;

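// Each handler below checks that the signal stack leaves enough room for a
// common operation: unwinding, logging, and snprintf into a large buffer.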
__attribute__((__noinline__))
static void signal_handler_backtrace() {
  // Check if we have enough stack space for unwinding.
  int count = 0;
  _Unwind_Backtrace(FrameCounter, &count);
  ASSERT_GT(count, 0);
}

__attribute__((__noinline__))
static void signal_handler_logging() {
  // Check if we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}

__attribute__((__noinline__))
static void signal_handler_snprintf() {
  // Check if we have enough stack space for snprintf to a PATH_MAX buffer, plus some extra.
  char buf[PATH_MAX + 2048];
  ASSERT_GT(snprintf(buf, sizeof(buf), "/proc/%d/status", getpid()), 0);
}

static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
  ASSERT_EQ(SIGUSR1, signo);
  signal_handler_backtrace();
  signal_handler_logging();
  signal_handler_snprintf();
}

TEST(pthread, big_enough_signal_stack) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}

TEST(pthread, pthread_barrierattr_smoke) {
  pthread_barrierattr_t attr;
  ASSERT_EQ(0, pthread_barrierattr_init(&attr));
  int pshared;
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
  ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
  ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
}

struct BarrierTestHelperData {
  size_t thread_count;
  pthread_barrier_t barrier;
  std::atomic<int> finished_mask;
  std::atomic<int> serial_thread_count;
  size_t iteration_count;
  std::atomic<size_t> finished_iteration_count;

  BarrierTestHelperData(size_t thread_count, size_t iteration_count)
      : thread_count(thread_count), finished_mask(0), serial_thread_count(0),
        iteration_count(iteration_count), finished_iteration_count(0) {
  }
};

struct BarrierTestHelperArg {
  int id;
  BarrierTestHelperData* data;
};

static void BarrierTestHelper(BarrierTestHelperArg* arg) {
  for (size_t i = 0; i < arg->data->iteration_count; ++i) {
    int result = pthread_barrier_wait(&arg->data->barrier);
    if (result == PTHREAD_BARRIER_SERIAL_THREAD) {
      arg->data->serial_thread_count++;
    } else {
      ASSERT_EQ(0, result);
    }
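    // The last thread to set its bit in finished_mask verifies that exactly
    // one thread saw PTHREAD_BARRIER_SERIAL_THREAD this iteration, then resets
    // the shared state for the next round.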
    int mask = arg->data->finished_mask.fetch_or(1 << arg->id);
    mask |= 1 << arg->id;
    if (mask == ((1 << arg->data->thread_count) - 1)) {
      ASSERT_EQ(1, arg->data->serial_thread_count);
      arg->data->finished_iteration_count++;
      arg->data->finished_mask = 0;
      arg->data->serial_thread_count = 0;
    }
  }
}

TEST(pthread, pthread_barrier_smoke) {
  const size_t BARRIER_ITERATION_COUNT = 10;
  const size_t BARRIER_THREAD_COUNT = 10;
  BarrierTestHelperData data(BARRIER_THREAD_COUNT, BARRIER_ITERATION_COUNT);
  ASSERT_EQ(0, pthread_barrier_init(&data.barrier, nullptr, data.thread_count));
  std::vector<pthread_t> threads(data.thread_count);
  std::vector<BarrierTestHelperArg> args(threads.size());
  for (size_t i = 0; i < threads.size(); ++i) {
    args[i].id = i;
    args[i].data = &data;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
  }
  for (size_t i = 0; i < threads.size(); ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
  ASSERT_EQ(data.iteration_count, data.finished_iteration_count);
  ASSERT_EQ(0, pthread_barrier_destroy(&data.barrier));
}

struct BarrierDestroyTestArg {
  std::atomic<int> tid;
  pthread_barrier_t* barrier;
};

static void BarrierDestroyTestHelper(BarrierDestroyTestArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
}

TEST(pthread, pthread_barrier_destroy) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  pthread_t thread;
  BarrierDestroyTestArg arg;
  arg.tid = 0;
  arg.barrier = &barrier;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              reinterpret_cast<void* (*)(void*)>(BarrierDestroyTestHelper), &arg));
  WaitUntilThreadSleep(arg.tid);
  ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  // Verify that the barrier can be destroyed directly after pthread_barrier_wait().
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(0, pthread_join(thread, nullptr));
#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier));
#endif
}

struct BarrierOrderingTestHelperArg {
  pthread_barrier_t* barrier;
  size_t* array;
  size_t array_length;
  size_t id;
};

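// Each thread writes its own slot, waits at the barrier, and then checks that
// every thread's write is visible; pthread_barrier_wait() must therefore also
// order memory. The second wait stops anyone racing into the next iteration.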
void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) {
  const size_t ITERATION_COUNT = 10000;
  for (size_t i = 1; i <= ITERATION_COUNT; ++i) {
    arg->array[arg->id] = i;
    int result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
    for (size_t j = 0; j < arg->array_length; ++j) {
      ASSERT_EQ(i, arg->array[j]);
    }
    result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
  }
}

TEST(pthread, pthread_barrier_check_ordering) {
  const size_t THREAD_COUNT = 4;
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT));
  size_t array[THREAD_COUNT];
  std::vector<pthread_t> threads(THREAD_COUNT);
  std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT);
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    args[i].barrier = &barrier;
    args[i].array = array;
    args[i].array_length = THREAD_COUNT;
    args[i].id = i;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper),
                                &args[i]));
  }
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
}

TEST(pthread, pthread_barrier_init_zero_count) {
  pthread_barrier_t barrier;
  ASSERT_EQ(EINVAL, pthread_barrier_init(&barrier, nullptr, 0));
}

TEST(pthread, pthread_spinlock_smoke) {
  pthread_spinlock_t lock;
  ASSERT_EQ(0, pthread_spin_init(&lock, 0));
  ASSERT_EQ(0, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_lock(&lock));
  ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_destroy(&lock));
}

TEST(pthread, pthread_attr_getdetachstate__pthread_attr_setdetachstate) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_DETACHED, state);

  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);

  ASSERT_EQ(EINVAL, pthread_attr_setdetachstate(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);
}

TEST(pthread, pthread_create__mmap_failures) {
  // After a thread is successfully created, native_bridge might need more memory to run it.
  SKIP_WITH_NATIVE_BRIDGE;

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

  const auto kPageSize = sysconf(_SC_PAGE_SIZE);

  // Use up all the VMAs. By default a process is allowed 64Ki (though some
  // will already be in use).
  std::vector<void*> pages;
  pages.reserve(64 * 1024);
  int prot = PROT_NONE;
  while (true) {
    void* page = mmap(nullptr, kPageSize, prot, MAP_ANON|MAP_PRIVATE, -1, 0);
    if (page == MAP_FAILED) break;
    pages.push_back(page);
    // Alternating the protection stops the kernel from merging adjacent mappings.
    prot = (prot == PROT_NONE) ? PROT_READ : PROT_NONE;
  }

  // Try creating threads, freeing up a page each time we fail.
  size_t EAGAIN_count = 0;
  size_t i = 0;
  for (; i < pages.size(); ++i) {
    pthread_t t;
    int status = pthread_create(&t, &attr, IdFn, nullptr);
    if (status != EAGAIN) break;
    ++EAGAIN_count;
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }

  // Creating a thread uses at least three VMAs: the combined stack and TLS, and a guard on each
  // side. So we should have seen at least three failures.
  ASSERT_GE(EAGAIN_count, 3U);

  for (; i < pages.size(); ++i) {
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }
}

TEST(pthread, pthread_setschedparam) {
  sched_param p = { .sched_priority = INT_MIN };
  ASSERT_EQ(EINVAL, pthread_setschedparam(pthread_self(), INT_MIN, &p));
}

TEST(pthread, pthread_setschedprio) {
  ASSERT_EQ(EINVAL, pthread_setschedprio(pthread_self(), INT_MIN));
}

TEST(pthread, pthread_attr_getinheritsched__pthread_attr_setinheritsched) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_INHERIT_SCHED, state);

  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);

  ASSERT_EQ(EINVAL, pthread_attr_setinheritsched(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);
}

TEST(pthread, pthread_attr_setinheritsched__PTHREAD_INHERIT_SCHED__PTHREAD_EXPLICIT_SCHED) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  // If we set invalid scheduling attributes but choose to inherit, everything's fine...
  sched_param param = { .sched_priority = sched_get_priority_max(SCHED_FIFO) + 1 };
  ASSERT_EQ(0, pthread_attr_setschedparam(&attr, &param));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));

#if defined(__LP64__)
  // If we ask to use them, though, we'll see a failure...
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(EINVAL, pthread_create(&t, &attr, IdFn, nullptr));
#else
  // For backwards compatibility with broken apps, we just ignore failures
  // to set scheduler attributes on LP32.
#endif
}

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_INHERIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_EXPLICIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_OTHER));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_OTHER, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched__takes_effect_despite_SCHED_RESET_ON_FORK) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO | SCHED_RESET_ON_FORK, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO | SCHED_RESET_ON_FORK, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

extern "C" bool android_run_on_all_threads(bool (*func)(void*), void* arg);

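// android_run_on_all_threads() must succeed even while threads are being
// created and destroyed: a helper thread churns both detached and joined
// threads while the main thread repeatedly runs the callback on all threads.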
TEST(pthread, run_on_all_threads) {
#if defined(__BIONIC__)
  pthread_t t;
  ASSERT_EQ(
      0, pthread_create(
             &t, nullptr,
             [](void*) -> void* {
               pthread_attr_t detached;
               if (pthread_attr_init(&detached) != 0 ||
                   pthread_attr_setdetachstate(&detached, PTHREAD_CREATE_DETACHED) != 0) {
                 return reinterpret_cast<void*>(errno);
               }

               for (int i = 0; i != 1000; ++i) {
                 pthread_t t1, t2;
                 if (pthread_create(
                         &t1, &detached, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
                     pthread_create(
                         &t2, nullptr, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
                     pthread_join(t2, nullptr) != 0) {
                   return reinterpret_cast<void*>(errno);
                 }
               }

               if (pthread_attr_destroy(&detached) != 0) {
                 return reinterpret_cast<void*>(errno);
               }
               return nullptr;
             },
             nullptr));

  for (int i = 0; i != 1000; ++i) {
    ASSERT_TRUE(android_run_on_all_threads([](void* arg) { return arg == nullptr; }, nullptr));
  }

  void* retval;
  ASSERT_EQ(0, pthread_join(t, &retval));
  ASSERT_EQ(nullptr, retval);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}