blob: 0918d0e33b0485a26dad2e442a921e96633c7568 [file] [log] [blame]
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <future>
#include <vector>

#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/scopeguard.h>
#include <android-base/strings.h>

#include "private/bionic_constants.h"
#include "BionicDeathTest.h"
#include "SignalUtils.h"
#include "utils.h"
46
Elliott Hughesbfeab1b2012-09-05 17:47:37 -070047TEST(pthread, pthread_key_create) {
48 pthread_key_t key;
Yi Kong32bc0fc2018-08-02 17:31:13 -070049 ASSERT_EQ(0, pthread_key_create(&key, nullptr));
Elliott Hughesbfeab1b2012-09-05 17:47:37 -070050 ASSERT_EQ(0, pthread_key_delete(key));
51 // Can't delete a key that's already been deleted.
52 ASSERT_EQ(EINVAL, pthread_key_delete(key));
53}
Elliott Hughes4d014e12012-09-07 16:47:54 -070054
Dan Albertc4bcc752014-09-30 11:48:24 -070055TEST(pthread, pthread_keys_max) {
Yabin Cui6c238f22014-12-11 20:50:41 -080056 // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
57 ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
Dan Albertc4bcc752014-09-30 11:48:24 -070058}
Elliott Hughes718a5b52014-01-28 17:02:03 -080059
Yabin Cui6c238f22014-12-11 20:50:41 -080060TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
Dan Albertc4bcc752014-09-30 11:48:24 -070061 int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
Yabin Cui6c238f22014-12-11 20:50:41 -080062 ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
Dan Albertc4bcc752014-09-30 11:48:24 -070063}
64
65TEST(pthread, pthread_key_many_distinct) {
Yabin Cui6c238f22014-12-11 20:50:41 -080066 // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
67 // pthread keys, but We should be able to allocate at least this many keys.
68 int nkeys = PTHREAD_KEYS_MAX / 2;
Dan Albertc4bcc752014-09-30 11:48:24 -070069 std::vector<pthread_key_t> keys;
70
Tom Cherryb8ab6182017-04-05 16:20:29 -070071 auto scope_guard = android::base::make_scope_guard([&keys] {
Elliott Hughes0b2acdf2015-10-02 18:25:19 -070072 for (const auto& key : keys) {
Dan Albertc4bcc752014-09-30 11:48:24 -070073 EXPECT_EQ(0, pthread_key_delete(key));
74 }
75 });
76
77 for (int i = 0; i < nkeys; ++i) {
78 pthread_key_t key;
Elliott Hughes61706932015-03-31 10:56:58 -070079 // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
Yi Kong32bc0fc2018-08-02 17:31:13 -070080 ASSERT_EQ(0, pthread_key_create(&key, nullptr)) << i << " of " << nkeys;
Dan Albertc4bcc752014-09-30 11:48:24 -070081 keys.push_back(key);
82 ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
83 }
84
85 for (int i = keys.size() - 1; i >= 0; --i) {
86 ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
87 pthread_key_t key = keys.back();
88 keys.pop_back();
89 ASSERT_EQ(0, pthread_key_delete(key));
90 }
91}
92
Yabin Cui6c238f22014-12-11 20:50:41 -080093TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
Elliott Hughes44b53ad2013-02-11 20:18:47 +000094 std::vector<pthread_key_t> keys;
Dan Albertc4bcc752014-09-30 11:48:24 -070095 int rv = 0;
Yabin Cui6c238f22014-12-11 20:50:41 -080096
97 // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
98 // be more than we are allowed to allocate now.
99 for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
Elliott Hughes44b53ad2013-02-11 20:18:47 +0000100 pthread_key_t key;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700101 rv = pthread_key_create(&key, nullptr);
Dan Albertc4bcc752014-09-30 11:48:24 -0700102 if (rv == EAGAIN) {
103 break;
104 }
105 EXPECT_EQ(0, rv);
Elliott Hughes44b53ad2013-02-11 20:18:47 +0000106 keys.push_back(key);
107 }
108
Dan Albertc4bcc752014-09-30 11:48:24 -0700109 // Don't leak keys.
Elliott Hughes0b2acdf2015-10-02 18:25:19 -0700110 for (const auto& key : keys) {
Dan Albertc4bcc752014-09-30 11:48:24 -0700111 EXPECT_EQ(0, pthread_key_delete(key));
Elliott Hughes44b53ad2013-02-11 20:18:47 +0000112 }
Dan Albertc4bcc752014-09-30 11:48:24 -0700113 keys.clear();
114
115 // We should have eventually reached the maximum number of keys and received
116 // EAGAIN.
117 ASSERT_EQ(EAGAIN, rv);
Elliott Hughes44b53ad2013-02-11 20:18:47 +0000118}
119
Elliott Hughesebb770f2014-06-25 13:46:46 -0700120TEST(pthread, pthread_key_delete) {
121 void* expected = reinterpret_cast<void*>(1234);
122 pthread_key_t key;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700123 ASSERT_EQ(0, pthread_key_create(&key, nullptr));
Elliott Hughesebb770f2014-06-25 13:46:46 -0700124 ASSERT_EQ(0, pthread_setspecific(key, expected));
125 ASSERT_EQ(expected, pthread_getspecific(key));
126 ASSERT_EQ(0, pthread_key_delete(key));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700127 // After deletion, pthread_getspecific returns nullptr.
128 ASSERT_EQ(nullptr, pthread_getspecific(key));
Elliott Hughesebb770f2014-06-25 13:46:46 -0700129 // And you can't use pthread_setspecific with the deleted key.
130 ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
131}
132
Elliott Hughes40a52172014-07-30 14:48:10 -0700133TEST(pthread, pthread_key_fork) {
134 void* expected = reinterpret_cast<void*>(1234);
135 pthread_key_t key;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700136 ASSERT_EQ(0, pthread_key_create(&key, nullptr));
Elliott Hughes40a52172014-07-30 14:48:10 -0700137 ASSERT_EQ(0, pthread_setspecific(key, expected));
138 ASSERT_EQ(expected, pthread_getspecific(key));
139
140 pid_t pid = fork();
141 ASSERT_NE(-1, pid) << strerror(errno);
142
143 if (pid == 0) {
144 // The surviving thread inherits all the forking thread's TLS values...
145 ASSERT_EQ(expected, pthread_getspecific(key));
146 _exit(99);
147 }
148
Elliott Hughes33697a02016-01-26 13:04:57 -0800149 AssertChildExited(pid, 99);
Elliott Hughes40a52172014-07-30 14:48:10 -0700150
151 ASSERT_EQ(expected, pthread_getspecific(key));
Dan Albert1d53ae22014-09-02 15:24:26 -0700152 ASSERT_EQ(0, pthread_key_delete(key));
Elliott Hughes40a52172014-07-30 14:48:10 -0700153}
154
// Thread body: reads back the calling thread's value for the pthread key
// whose address is passed in |key|.
static void* DirtyKeyFn(void* key) {
  pthread_key_t* key_ptr = reinterpret_cast<pthread_key_t*>(key);
  return pthread_getspecific(*key_ptr);
}
158
159TEST(pthread, pthread_key_dirty) {
160 pthread_key_t key;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700161 ASSERT_EQ(0, pthread_key_create(&key, nullptr));
Elliott Hughes40a52172014-07-30 14:48:10 -0700162
Yabin Cuia36158a2015-11-16 21:06:16 -0800163 size_t stack_size = 640 * 1024;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700164 void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
Elliott Hughes40a52172014-07-30 14:48:10 -0700165 ASSERT_NE(MAP_FAILED, stack);
166 memset(stack, 0xff, stack_size);
167
168 pthread_attr_t attr;
169 ASSERT_EQ(0, pthread_attr_init(&attr));
170 ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));
171
172 pthread_t t;
173 ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));
174
175 void* result;
176 ASSERT_EQ(0, pthread_join(t, &result));
177 ASSERT_EQ(nullptr, result); // Not ~0!
178
179 ASSERT_EQ(0, munmap(stack, stack_size));
Dan Albert1d53ae22014-09-02 15:24:26 -0700180 ASSERT_EQ(0, pthread_key_delete(key));
Elliott Hughes40a52172014-07-30 14:48:10 -0700181}
182
Yabin Cui5ddbb3f2015-03-05 20:35:32 -0800183TEST(pthread, static_pthread_key_used_before_creation) {
184#if defined(__BIONIC__)
185 // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
186 // So here tests if the static/global default value 0 can be detected as invalid key.
187 static pthread_key_t key;
188 ASSERT_EQ(nullptr, pthread_getspecific(key));
189 ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
190 ASSERT_EQ(EINVAL, pthread_key_delete(key));
191#else
192 GTEST_LOG_(INFO) << "This test tests bionic pthread key implementation detail.\n";
193#endif
194}
195
// Trivial thread body: echoes its argument straight back as the return value.
static void* IdFn(void* arg) {
  return arg;
}
199
// RAII helper that hands out a thread function which busy-waits until
// UnSpin() is called (or the helper itself is destroyed).
class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }

  // Destruction releases any threads still spinning.
  ~SpinFunctionHelper() {
    UnSpin();
  }

  // Returns a pthread-compatible start routine that spins on spin_flag_.
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return nullptr;
  }
  static std::atomic<bool> spin_flag_;
};
225
226// It doesn't matter if spin_flag_ is used in several tests,
227// because it is always set to false after each test. Each thread
228// loops on spin_flag_ can find it becomes false at some time.
Yabin Cuia36158a2015-11-16 21:06:16 -0800229std::atomic<bool> SpinFunctionHelper::spin_flag_;
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400230
// Thread body: joins the thread whose handle is smuggled in through |arg|,
// returning pthread_join's result as the thread exit value.
static void* JoinFn(void* arg) {
  pthread_t target = reinterpret_cast<pthread_t>(arg);
  return reinterpret_cast<void*>(pthread_join(target, nullptr));
}
234
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400235static void AssertDetached(pthread_t t, bool is_detached) {
236 pthread_attr_t attr;
237 ASSERT_EQ(0, pthread_getattr_np(t, &attr));
238 int detach_state;
239 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
240 pthread_attr_destroy(&attr);
241 ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
242}
243
Elliott Hughes7484c212017-02-02 02:41:38 +0000244static void MakeDeadThread(pthread_t& t) {
Yi Kong32bc0fc2018-08-02 17:31:13 -0700245 ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, nullptr));
246 ASSERT_EQ(0, pthread_join(t, nullptr));
Elliott Hughes7484c212017-02-02 02:41:38 +0000247}
248
Elliott Hughes4d014e12012-09-07 16:47:54 -0700249TEST(pthread, pthread_create) {
250 void* expected_result = reinterpret_cast<void*>(123);
251 // Can we create a thread?
252 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700253 ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, expected_result));
Elliott Hughes4d014e12012-09-07 16:47:54 -0700254 // If we join, do we get the expected value back?
255 void* result;
256 ASSERT_EQ(0, pthread_join(t, &result));
257 ASSERT_EQ(expected_result, result);
258}
259
Elliott Hughes3e898472013-02-12 16:40:24 +0000260TEST(pthread, pthread_create_EAGAIN) {
261 pthread_attr_t attributes;
262 ASSERT_EQ(0, pthread_attr_init(&attributes));
263 ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));
264
265 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700266 ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, nullptr));
Elliott Hughes3e898472013-02-12 16:40:24 +0000267}
268
Elliott Hughes4d014e12012-09-07 16:47:54 -0700269TEST(pthread, pthread_no_join_after_detach) {
Elliott Hughes725b2a92016-03-23 11:20:47 -0700270 SpinFunctionHelper spin_helper;
Yabin Cui63481602014-12-01 17:41:04 -0800271
Elliott Hughes4d014e12012-09-07 16:47:54 -0700272 pthread_t t1;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700273 ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));
Elliott Hughes4d014e12012-09-07 16:47:54 -0700274
275 // After a pthread_detach...
276 ASSERT_EQ(0, pthread_detach(t1));
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400277 AssertDetached(t1, true);
Elliott Hughes4d014e12012-09-07 16:47:54 -0700278
279 // ...pthread_join should fail.
Yi Kong32bc0fc2018-08-02 17:31:13 -0700280 ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));
Elliott Hughes4d014e12012-09-07 16:47:54 -0700281}
282
283TEST(pthread, pthread_no_op_detach_after_join) {
Elliott Hughes725b2a92016-03-23 11:20:47 -0700284 SpinFunctionHelper spin_helper;
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400285
Elliott Hughes4d014e12012-09-07 16:47:54 -0700286 pthread_t t1;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700287 ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));
Elliott Hughes4d014e12012-09-07 16:47:54 -0700288
289 // If thread 2 is already waiting to join thread 1...
290 pthread_t t2;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700291 ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));
Elliott Hughes4d014e12012-09-07 16:47:54 -0700292
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400293 sleep(1); // (Give t2 a chance to call pthread_join.)
Elliott Hughes4d014e12012-09-07 16:47:54 -0700294
Yabin Cuibbb04322015-03-19 15:19:25 -0700295#if defined(__BIONIC__)
296 ASSERT_EQ(EINVAL, pthread_detach(t1));
297#else
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400298 ASSERT_EQ(0, pthread_detach(t1));
Yabin Cuibbb04322015-03-19 15:19:25 -0700299#endif
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400300 AssertDetached(t1, false);
301
Elliott Hughes725b2a92016-03-23 11:20:47 -0700302 spin_helper.UnSpin();
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400303
304 // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
Elliott Hughes4d014e12012-09-07 16:47:54 -0700305 void* join_result;
306 ASSERT_EQ(0, pthread_join(t2, &join_result));
Elliott Hughes5b9310e2013-10-02 16:59:05 -0700307 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
Elliott Hughes4d014e12012-09-07 16:47:54 -0700308}
Elliott Hughes14f19592012-10-29 10:19:44 -0700309
310TEST(pthread, pthread_join_self) {
Yi Kong32bc0fc2018-08-02 17:31:13 -0700311 ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), nullptr));
Elliott Hughes14f19592012-10-29 10:19:44 -0700312}
Elliott Hughes4f251be2012-11-01 16:33:29 -0700313
Elliott Hughes877ec6d2013-11-15 17:40:18 -0800314struct TestBug37410 {
315 pthread_t main_thread;
316 pthread_mutex_t mutex;
Elliott Hughes4f251be2012-11-01 16:33:29 -0700317
Elliott Hughes877ec6d2013-11-15 17:40:18 -0800318 static void main() {
319 TestBug37410 data;
320 data.main_thread = pthread_self();
Yi Kong32bc0fc2018-08-02 17:31:13 -0700321 ASSERT_EQ(0, pthread_mutex_init(&data.mutex, nullptr));
Elliott Hughes877ec6d2013-11-15 17:40:18 -0800322 ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
323
324 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700325 ASSERT_EQ(0, pthread_create(&t, nullptr, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));
Elliott Hughes877ec6d2013-11-15 17:40:18 -0800326
327 // Wait for the thread to be running...
328 ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
329 ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));
330
331 // ...and exit.
Yi Kong32bc0fc2018-08-02 17:31:13 -0700332 pthread_exit(nullptr);
Elliott Hughes877ec6d2013-11-15 17:40:18 -0800333 }
334
335 private:
336 static void* thread_fn(void* arg) {
337 TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);
338
Evgenii Stepanov352853a2019-02-05 17:37:37 -0800339 // Unlocking data->mutex will cause the main thread to exit, invalidating *data. Save the handle.
340 pthread_t main_thread = data->main_thread;
341
Elliott Hughes877ec6d2013-11-15 17:40:18 -0800342 // Let the main thread know we're running.
343 pthread_mutex_unlock(&data->mutex);
344
345 // And wait for the main thread to exit.
Evgenii Stepanov352853a2019-02-05 17:37:37 -0800346 pthread_join(main_thread, nullptr);
Elliott Hughes877ec6d2013-11-15 17:40:18 -0800347
Yi Kong32bc0fc2018-08-02 17:31:13 -0700348 return nullptr;
Elliott Hughes877ec6d2013-11-15 17:40:18 -0800349 }
350};
Elliott Hughes4f251be2012-11-01 16:33:29 -0700351
Elliott Hughes7fd803c2013-02-14 16:33:52 -0800352// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
353// run this test (which exits normally) in its own process.
Yabin Cui9df70402014-11-05 18:01:01 -0800354
355class pthread_DeathTest : public BionicDeathTest {};
356
357TEST_F(pthread_DeathTest, pthread_bug_37410) {
Elliott Hughes4f251be2012-11-01 16:33:29 -0700358 // http://code.google.com/p/android/issues/detail?id=37410
Elliott Hughes877ec6d2013-11-15 17:40:18 -0800359 ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
Elliott Hughes4f251be2012-11-01 16:33:29 -0700360}
Elliott Hughesc5d028f2013-01-10 14:42:14 -0800361
362static void* SignalHandlerFn(void* arg) {
Elliott Hughes5905d6f2018-01-30 15:09:51 -0800363 sigset64_t wait_set;
364 sigfillset64(&wait_set);
365 return reinterpret_cast<void*>(sigwait64(&wait_set, reinterpret_cast<int*>(arg)));
Elliott Hughesc5d028f2013-01-10 14:42:14 -0800366}
367
368TEST(pthread, pthread_sigmask) {
Elliott Hughes19e62322013-10-15 11:23:57 -0700369 // Check that SIGUSR1 isn't blocked.
370 sigset_t original_set;
371 sigemptyset(&original_set);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700372 ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &original_set));
Elliott Hughes19e62322013-10-15 11:23:57 -0700373 ASSERT_FALSE(sigismember(&original_set, SIGUSR1));
374
Elliott Hughesc5d028f2013-01-10 14:42:14 -0800375 // Block SIGUSR1.
376 sigset_t set;
377 sigemptyset(&set);
378 sigaddset(&set, SIGUSR1);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700379 ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, nullptr));
Elliott Hughesc5d028f2013-01-10 14:42:14 -0800380
Elliott Hughes19e62322013-10-15 11:23:57 -0700381 // Check that SIGUSR1 is blocked.
382 sigset_t final_set;
383 sigemptyset(&final_set);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700384 ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &final_set));
Elliott Hughes19e62322013-10-15 11:23:57 -0700385 ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
386 // ...and that sigprocmask agrees with pthread_sigmask.
387 sigemptyset(&final_set);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700388 ASSERT_EQ(0, sigprocmask(SIG_BLOCK, nullptr, &final_set));
Elliott Hughes19e62322013-10-15 11:23:57 -0700389 ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
390
Elliott Hughesc5d028f2013-01-10 14:42:14 -0800391 // Spawn a thread that calls sigwait and tells us what it received.
392 pthread_t signal_thread;
393 int received_signal = -1;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700394 ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));
Elliott Hughesc5d028f2013-01-10 14:42:14 -0800395
396 // Send that thread SIGUSR1.
397 pthread_kill(signal_thread, SIGUSR1);
398
399 // See what it got.
400 void* join_result;
401 ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
402 ASSERT_EQ(SIGUSR1, received_signal);
Elliott Hughes5b9310e2013-10-02 16:59:05 -0700403 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
Elliott Hughes19e62322013-10-15 11:23:57 -0700404
405 // Restore the original signal mask.
Yi Kong32bc0fc2018-08-02 17:31:13 -0700406 ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, nullptr));
Elliott Hughesc5d028f2013-01-10 14:42:14 -0800407}
Elliott Hughes5e3fc432013-02-11 16:36:48 -0800408
Elliott Hughes5905d6f2018-01-30 15:09:51 -0800409TEST(pthread, pthread_sigmask64_SIGTRMIN) {
410 // Check that SIGRTMIN isn't blocked.
411 sigset64_t original_set;
412 sigemptyset64(&original_set);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700413 ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &original_set));
Elliott Hughes5905d6f2018-01-30 15:09:51 -0800414 ASSERT_FALSE(sigismember64(&original_set, SIGRTMIN));
415
416 // Block SIGRTMIN.
417 sigset64_t set;
418 sigemptyset64(&set);
419 sigaddset64(&set, SIGRTMIN);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700420 ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, &set, nullptr));
Elliott Hughes5905d6f2018-01-30 15:09:51 -0800421
422 // Check that SIGRTMIN is blocked.
423 sigset64_t final_set;
424 sigemptyset64(&final_set);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700425 ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &final_set));
Elliott Hughes5905d6f2018-01-30 15:09:51 -0800426 ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));
427 // ...and that sigprocmask64 agrees with pthread_sigmask64.
428 sigemptyset64(&final_set);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700429 ASSERT_EQ(0, sigprocmask64(SIG_BLOCK, nullptr, &final_set));
Elliott Hughes5905d6f2018-01-30 15:09:51 -0800430 ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));
431
432 // Spawn a thread that calls sigwait64 and tells us what it received.
433 pthread_t signal_thread;
434 int received_signal = -1;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700435 ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));
Elliott Hughes5905d6f2018-01-30 15:09:51 -0800436
437 // Send that thread SIGRTMIN.
438 pthread_kill(signal_thread, SIGRTMIN);
439
440 // See what it got.
441 void* join_result;
442 ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
443 ASSERT_EQ(SIGRTMIN, received_signal);
444 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
445
446 // Restore the original signal mask.
Yi Kong32bc0fc2018-08-02 17:31:13 -0700447 ASSERT_EQ(0, pthread_sigmask64(SIG_SETMASK, &original_set, nullptr));
Elliott Hughes5905d6f2018-01-30 15:09:51 -0800448}
449
Elliott Hughes725b2a92016-03-23 11:20:47 -0700450static void test_pthread_setname_np__pthread_getname_np(pthread_t t) {
451 ASSERT_EQ(0, pthread_setname_np(t, "short"));
452 char name[32];
453 ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
454 ASSERT_STREQ("short", name);
455
Elliott Hughesd1aea302015-04-25 10:05:24 -0700456 // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
Elliott Hughes725b2a92016-03-23 11:20:47 -0700457 ASSERT_EQ(0, pthread_setname_np(t, "123456789012345"));
458 ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
459 ASSERT_STREQ("123456789012345", name);
460
461 ASSERT_EQ(ERANGE, pthread_setname_np(t, "1234567890123456"));
462
463 // The passed-in buffer should be at least 16 bytes.
464 ASSERT_EQ(0, pthread_getname_np(t, name, 16));
465 ASSERT_EQ(ERANGE, pthread_getname_np(t, name, 15));
Elliott Hughes3e898472013-02-12 16:40:24 +0000466}
467
Elliott Hughes725b2a92016-03-23 11:20:47 -0700468TEST(pthread, pthread_setname_np__pthread_getname_np__self) {
469 test_pthread_setname_np__pthread_getname_np(pthread_self());
Elliott Hughes3e898472013-02-12 16:40:24 +0000470}
471
Elliott Hughes725b2a92016-03-23 11:20:47 -0700472TEST(pthread, pthread_setname_np__pthread_getname_np__other) {
473 SpinFunctionHelper spin_helper;
Yabin Cui63481602014-12-01 17:41:04 -0800474
Elliott Hughes725b2a92016-03-23 11:20:47 -0700475 pthread_t t;
Elliott Hughes4d098ca2016-04-11 12:43:05 -0700476 ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
477 test_pthread_setname_np__pthread_getname_np(t);
478 spin_helper.UnSpin();
479 ASSERT_EQ(0, pthread_join(t, nullptr));
480}
481
482// http://b/28051133: a kernel misfeature means that you can't change the
483// name of another thread if you've set PR_SET_DUMPABLE to 0.
484TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
485 ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0)) << strerror(errno);
486
487 SpinFunctionHelper spin_helper;
488
489 pthread_t t;
490 ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
Elliott Hughes725b2a92016-03-23 11:20:47 -0700491 test_pthread_setname_np__pthread_getname_np(t);
492 spin_helper.UnSpin();
493 ASSERT_EQ(0, pthread_join(t, nullptr));
Elliott Hughes3e898472013-02-12 16:40:24 +0000494}
495
Elliott Hughes11859d42017-02-13 17:59:29 -0800496TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) {
Elliott Hughesbcb15292017-02-07 21:05:30 +0000497 pthread_t dead_thread;
498 MakeDeadThread(dead_thread);
499
Elliott Hughes6ce686c2017-02-21 13:15:20 -0800500 EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"), "invalid pthread_t");
501}
502
503TEST_F(pthread_DeathTest, pthread_setname_np__null_thread) {
504 pthread_t null_thread = 0;
505 EXPECT_EQ(ENOENT, pthread_setname_np(null_thread, "short 3"));
Elliott Hughes11859d42017-02-13 17:59:29 -0800506}
507
508TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) {
509 pthread_t dead_thread;
510 MakeDeadThread(dead_thread);
511
Elliott Hughesbcb15292017-02-07 21:05:30 +0000512 char name[64];
Elliott Hughes6ce686c2017-02-21 13:15:20 -0800513 EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)), "invalid pthread_t");
514}
515
516TEST_F(pthread_DeathTest, pthread_getname_np__null_thread) {
517 pthread_t null_thread = 0;
518
519 char name[64];
520 EXPECT_EQ(ENOENT, pthread_getname_np(null_thread, name, sizeof(name)));
Elliott Hughesbcb15292017-02-07 21:05:30 +0000521}
522
Elliott Hughes9d23e042013-02-15 19:21:51 -0800523TEST(pthread, pthread_kill__0) {
524 // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
525 ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
526}
527
528TEST(pthread, pthread_kill__invalid_signal) {
529 ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
530}
531
Elliott Hughesfae89fc2013-02-21 11:22:23 -0800532static void pthread_kill__in_signal_handler_helper(int signal_number) {
533 static int count = 0;
534 ASSERT_EQ(SIGALRM, signal_number);
535 if (++count == 1) {
536 // Can we call pthread_kill from a signal handler?
537 ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
538 }
539}
540
541TEST(pthread, pthread_kill__in_signal_handler) {
Elliott Hughes4b558f52014-03-04 15:58:02 -0800542 ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
Elliott Hughesfae89fc2013-02-21 11:22:23 -0800543 ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
544}
545
Josh Gaoddf757e2018-10-17 15:23:03 -0700546TEST(pthread, pthread_kill__exited_thread) {
547 static std::promise<pid_t> tid_promise;
548 pthread_t thread;
549 ASSERT_EQ(0, pthread_create(&thread, nullptr,
550 [](void*) -> void* {
551 tid_promise.set_value(gettid());
552 return nullptr;
553 },
554 nullptr));
555
556 pid_t tid = tid_promise.get_future().get();
557 while (TEMP_FAILURE_RETRY(syscall(__NR_tgkill, getpid(), tid, 0)) != -1) {
558 continue;
559 }
560 ASSERT_EQ(ESRCH, errno);
561
562 ASSERT_EQ(ESRCH, pthread_kill(thread, 0));
563}
564
Elliott Hughes11859d42017-02-13 17:59:29 -0800565TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) {
Elliott Hughes7484c212017-02-02 02:41:38 +0000566 pthread_t dead_thread;
567 MakeDeadThread(dead_thread);
568
Elliott Hughes6ce686c2017-02-21 13:15:20 -0800569 EXPECT_DEATH(pthread_detach(dead_thread), "invalid pthread_t");
570}
571
572TEST_F(pthread_DeathTest, pthread_detach__null_thread) {
573 pthread_t null_thread = 0;
574 EXPECT_EQ(ESRCH, pthread_detach(null_thread));
Elliott Hughes7484c212017-02-02 02:41:38 +0000575}
576
Jeff Hao9b06cc32013-08-15 14:51:16 -0700577TEST(pthread, pthread_getcpuclockid__clock_gettime) {
Elliott Hughes725b2a92016-03-23 11:20:47 -0700578 SpinFunctionHelper spin_helper;
Yabin Cui63481602014-12-01 17:41:04 -0800579
Jeff Hao9b06cc32013-08-15 14:51:16 -0700580 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700581 ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
Jeff Hao9b06cc32013-08-15 14:51:16 -0700582
583 clockid_t c;
584 ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
585 timespec ts;
586 ASSERT_EQ(0, clock_gettime(c, &ts));
Elliott Hughes725b2a92016-03-23 11:20:47 -0700587 spin_helper.UnSpin();
Yabin Cuia36158a2015-11-16 21:06:16 -0800588 ASSERT_EQ(0, pthread_join(t, nullptr));
Jeff Hao9b06cc32013-08-15 14:51:16 -0700589}
590
Elliott Hughes11859d42017-02-13 17:59:29 -0800591TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) {
Elliott Hughesbcb15292017-02-07 21:05:30 +0000592 pthread_t dead_thread;
593 MakeDeadThread(dead_thread);
594
595 clockid_t c;
Elliott Hughes6ce686c2017-02-21 13:15:20 -0800596 EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c), "invalid pthread_t");
597}
598
599TEST_F(pthread_DeathTest, pthread_getcpuclockid__null_thread) {
600 pthread_t null_thread = 0;
601 clockid_t c;
602 EXPECT_EQ(ESRCH, pthread_getcpuclockid(null_thread, &c));
Elliott Hughesbcb15292017-02-07 21:05:30 +0000603}
604
Elliott Hughes11859d42017-02-13 17:59:29 -0800605TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) {
Elliott Hughesbcb15292017-02-07 21:05:30 +0000606 pthread_t dead_thread;
607 MakeDeadThread(dead_thread);
608
609 int policy;
610 sched_param param;
Elliott Hughes6ce686c2017-02-21 13:15:20 -0800611 EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, &param), "invalid pthread_t");
612}
613
614TEST_F(pthread_DeathTest, pthread_getschedparam__null_thread) {
615 pthread_t null_thread = 0;
616 int policy;
617 sched_param param;
618 EXPECT_EQ(ESRCH, pthread_getschedparam(null_thread, &policy, &param));
Elliott Hughesbcb15292017-02-07 21:05:30 +0000619}
620
Elliott Hughes11859d42017-02-13 17:59:29 -0800621TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) {
Elliott Hughesbcb15292017-02-07 21:05:30 +0000622 pthread_t dead_thread;
623 MakeDeadThread(dead_thread);
624
625 int policy = 0;
626 sched_param param;
Elliott Hughes6ce686c2017-02-21 13:15:20 -0800627 EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, &param), "invalid pthread_t");
628}
629
630TEST_F(pthread_DeathTest, pthread_setschedparam__null_thread) {
631 pthread_t null_thread = 0;
632 int policy = 0;
633 sched_param param;
634 EXPECT_EQ(ESRCH, pthread_setschedparam(null_thread, policy, &param));
Elliott Hughesbcb15292017-02-07 21:05:30 +0000635}
636
Elliott Hughesdff08ce2017-10-16 09:58:45 -0700637TEST_F(pthread_DeathTest, pthread_setschedprio__no_such_thread) {
638 pthread_t dead_thread;
639 MakeDeadThread(dead_thread);
640
641 EXPECT_DEATH(pthread_setschedprio(dead_thread, 123), "invalid pthread_t");
642}
643
644TEST_F(pthread_DeathTest, pthread_setschedprio__null_thread) {
645 pthread_t null_thread = 0;
646 EXPECT_EQ(ESRCH, pthread_setschedprio(null_thread, 123));
647}
648
Elliott Hughes11859d42017-02-13 17:59:29 -0800649TEST_F(pthread_DeathTest, pthread_join__no_such_thread) {
Elliott Hughes7484c212017-02-02 02:41:38 +0000650 pthread_t dead_thread;
651 MakeDeadThread(dead_thread);
652
Yi Kong32bc0fc2018-08-02 17:31:13 -0700653 EXPECT_DEATH(pthread_join(dead_thread, nullptr), "invalid pthread_t");
Elliott Hughes6ce686c2017-02-21 13:15:20 -0800654}
655
656TEST_F(pthread_DeathTest, pthread_join__null_thread) {
657 pthread_t null_thread = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700658 EXPECT_EQ(ESRCH, pthread_join(null_thread, nullptr));
Elliott Hughes7484c212017-02-02 02:41:38 +0000659}
660
Elliott Hughes11859d42017-02-13 17:59:29 -0800661TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) {
Elliott Hughes7484c212017-02-02 02:41:38 +0000662 pthread_t dead_thread;
663 MakeDeadThread(dead_thread);
664
Elliott Hughes6ce686c2017-02-21 13:15:20 -0800665 EXPECT_DEATH(pthread_kill(dead_thread, 0), "invalid pthread_t");
666}
667
668TEST_F(pthread_DeathTest, pthread_kill__null_thread) {
669 pthread_t null_thread = 0;
670 EXPECT_EQ(ESRCH, pthread_kill(null_thread, 0));
Elliott Hughes7484c212017-02-02 02:41:38 +0000671}
672
// Checks that only one thread may join a given thread: a second concurrent
// join must fail with EINVAL while the first join still completes normally.
TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spin_helper;

  // t1 spins until we release it, so it stays joinable for the whole test.
  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // t2 blocks in pthread_join(t1).
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));

  // Let t1 finish so t2's pending join can complete.
  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  // JoinFn returns t1's join result, so 0 here means t2's join succeeded.
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700694
TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  // Many iterations to give the race a chance to manifest.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 640*1024;
    // Provide the thread's stack ourselves so we can unmap it immediately
    // after join returns; a premature join return would fault the exiting thread.
    void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, nullptr));
    ASSERT_EQ(0, pthread_join(t, nullptr));
    // After a correct join the thread is fully gone, so unmapping is safe.
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}
712
// Thread start routine: reports the guard size this thread actually got by
// querying its own attributes and writing through the size_t* passed as arg.
static void* GetActualGuardSizeFn(void* arg) {
  size_t* out = reinterpret_cast<size_t*>(arg);
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, out);
  return nullptr;
}
719
720static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
721 size_t result;
722 pthread_t t;
723 pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700724 pthread_join(t, nullptr);
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700725 return result;
726}
727
// Thread start routine: reports the stack size this thread actually got by
// querying its own attributes and writing through the size_t* passed as arg.
static void* GetActualStackSizeFn(void* arg) {
  size_t* out = reinterpret_cast<size_t*>(arg);
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, out);
  return nullptr;
}
734
735static size_t GetActualStackSize(const pthread_attr_t& attributes) {
736 size_t result;
737 pthread_t t;
738 pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700739 pthread_join(t, nullptr);
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700740 return result;
741}
742
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700743TEST(pthread, pthread_attr_setguardsize_tiny) {
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700744 pthread_attr_t attributes;
745 ASSERT_EQ(0, pthread_attr_init(&attributes));
746
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700747 // No such thing as too small: will be rounded up to one page by pthread_create.
748 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
749 size_t guard_size;
750 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
751 ASSERT_EQ(128U, guard_size);
752 ASSERT_EQ(4096U, GetActualGuardSize(attributes));
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700753}
754
755TEST(pthread, pthread_attr_setguardsize_reasonable) {
756 pthread_attr_t attributes;
757 ASSERT_EQ(0, pthread_attr_init(&attributes));
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700758
759 // Large enough and a multiple of the page size.
760 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700761 size_t guard_size;
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700762 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
763 ASSERT_EQ(32*1024U, guard_size);
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700764 ASSERT_EQ(32*1024U, GetActualGuardSize(attributes));
765}
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700766
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700767TEST(pthread, pthread_attr_setguardsize_needs_rounding) {
768 pthread_attr_t attributes;
769 ASSERT_EQ(0, pthread_attr_init(&attributes));
770
771 // Large enough but not a multiple of the page size.
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700772 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700773 size_t guard_size;
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700774 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
775 ASSERT_EQ(32*1024U + 1, guard_size);
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700776 ASSERT_EQ(36*1024U, GetActualGuardSize(attributes));
777}
778
779TEST(pthread, pthread_attr_setguardsize_enormous) {
780 pthread_attr_t attributes;
781 ASSERT_EQ(0, pthread_attr_init(&attributes));
782
783 // Larger than the stack itself. (Historically we mistakenly carved
784 // the guard out of the stack itself, rather than adding it after the
785 // end.)
786 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024*1024));
787 size_t guard_size;
788 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
789 ASSERT_EQ(32*1024*1024U, guard_size);
790 ASSERT_EQ(32*1024*1024U, GetActualGuardSize(attributes));
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700791}
792
// Exercises pthread_attr_setstacksize: rejection of too-small values, exact
// storage of valid requests, and the rounding behavior of pthread_create.
TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  // A failed set must leave the previous (default) value untouched.
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}
Elliott Hughesc3f11402013-10-30 14:40:09 -0700825
Yabin Cui76615da2015-03-17 14:22:09 -0700826TEST(pthread, pthread_rwlockattr_smoke) {
827 pthread_rwlockattr_t attr;
828 ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
829
830 int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
831 for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
832 ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
833 int pshared;
834 ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
835 ASSERT_EQ(pshared_value_array[i], pshared);
836 }
837
838 int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
839 PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
840 for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
841 ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
842 int kind;
843 ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
844 ASSERT_EQ(kind_array[i], kind);
845 }
846
847 ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
848}
849
850TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
851 pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
852 pthread_rwlock_t lock2;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700853 ASSERT_EQ(0, pthread_rwlock_init(&lock2, nullptr));
Yabin Cui76615da2015-03-17 14:22:09 -0700854 ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
855}
856
// Single-threaded smoke test of the basic rwlock state machine:
// read/write acquisition, try-variants, and EDEADLK self-deadlock detection.
TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, nullptr));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock: while write-held, both try-variants must report EBUSY.
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock: read locks stack, but a try-write must fail with EBUSY.
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}
904
// Shared state between a test's main thread and its rwlock helper thread.
// The main thread drives `progress` forward and the helper asserts it
// observes each stage in order.
struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED,
    LOCK_TIMEDOUT,
  };
  std::atomic<Progress> progress;
  // Helper thread's kernel tid, used by WaitUntilThreadSleep to detect that
  // the helper has actually blocked in the kernel.
  std::atomic<pid_t> tid;
  // The lock operations under test; the driver fills in the variants
  // (rd/wr, timed/untimed, REALTIME/MONOTONIC) it wants to exercise.
  std::function<int (pthread_rwlock_t*)> trylock_function;
  std::function<int (pthread_rwlock_t*)> lock_function;
  std::function<int (pthread_rwlock_t*, const timespec*)> timed_lock_function;
  // Clock used to form absolute timeouts for timed_lock_function.
  clockid_t clock;
};
921
// Helper-thread body: proves the lock is contended (trylock fails), blocks
// in lock_function until the main thread releases the lock, then records
// success. The progress assertions pin down the expected interleaving.
static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) {
  // Publish our tid so the main thread can wait for us to block.
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  // The main thread holds the lock, so a try must fail and a blocking
  // acquire must sleep until it is released.
  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));
  ASSERT_EQ(0, arg->lock_function(&arg->lock));
  // We must only wake after the main thread advanced to LOCK_RELEASED.
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}
934
// Drives one reader-holds/writer-waits scenario: the main thread takes a
// read lock, a helper blocks acquiring the given write-lock variant, and the
// main thread's unlock must wake it.
static void test_pthread_rwlock_reader_wakeup_writer(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  // Hold a read lock so the helper's write acquisition must block.
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  // Don't release the lock until the helper is actually asleep in the kernel.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  // Advance progress *before* unlocking so the helper can verify ordering.
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
957
// Untimed variant: a blocked pthread_rwlock_wrlock must succeed once the
// reader unlocks.
TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock);
}
961
Yabin Cuic9a659c2015-11-05 15:36:08 -0800962TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) {
963 timespec ts;
964 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
965 ts.tv_sec += 1;
966 test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
967 return pthread_rwlock_timedwrlock(lock, &ts);
968 });
969}
970
// Same as the timedwait test, but for the bionic-only CLOCK_MONOTONIC
// variant of timedwrlock.
TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait_monotonic_np) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer(
      [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedwrlock_monotonic_np(lock, &ts); });
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_rwlock_timedwrlock_monotonic_np is "
                      "only supported on bionic";
#endif // __BIONIC__
}
983
// Drives one writer-holds/reader-waits scenario: the main thread takes the
// write lock, a helper blocks acquiring the given read-lock variant, and the
// main thread's unlock must wake it.
static void test_pthread_rwlock_writer_wakeup_reader(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  // Hold the write lock so the helper's read acquisition must block.
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  // Don't release the lock until the helper is actually asleep in the kernel.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  // Advance progress *before* unlocking so the helper can verify ordering.
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
1006
// Untimed variant: a blocked pthread_rwlock_rdlock must succeed once the
// writer unlocks.
TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock);
}
1010
1011TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) {
1012 timespec ts;
1013 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1014 ts.tv_sec += 1;
1015 test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
1016 return pthread_rwlock_timedrdlock(lock, &ts);
1017 });
1018}
1019
// Same as the timedwait test, but for the bionic-only CLOCK_MONOTONIC
// variant of timedrdlock.
TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait_monotonic_np) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader(
      [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedrdlock_monotonic_np(lock, &ts); });
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_rwlock_timedrdlock_monotonic_np is "
                      "only supported on bionic";
#endif // __BIONIC__
}
1032
// Helper-thread body for timeout tests: the main thread holds the lock the
// whole time, so every timed acquisition must fail — with ETIMEDOUT for
// valid/past deadlines and EINVAL for malformed timespecs.
static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) {
  // Publish our tid so the main thread can wait for us to block.
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  // Lock is held by the main thread: try must fail immediately.
  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));

  timespec ts;
  // "Now" as an absolute deadline: already expired.
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  // Out-of-range tv_nsec values must be rejected with EINVAL.
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  // A valid timespec in the past times out rather than erroring.
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  // A future deadline actually sleeps, then times out while still contended.
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT;
}
1056
// Holds the write lock on the main thread while a helper exercises a timed
// read-lock function, verifying it times out (and validates its timespec).
static void pthread_rwlock_timedrdlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  // Write-hold the lock so all timed read attempts in the helper must block.
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  // Wait until the helper is blocked in its one-second timed wait.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  // Never release the lock: the helper must exit via timeout, not wakeup.
  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
1079
// CLOCK_REALTIME timed read lock must time out while write-held.
TEST(pthread, pthread_rwlock_timedrdlock_timeout) {
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedrdlock);
}
1083
// Same timeout behavior for the bionic-only CLOCK_MONOTONIC variant.
TEST(pthread, pthread_rwlock_timedrdlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedrdlock_monotonic_np);
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_rwlock_timedrdlock_monotonic_np is "
                      "only supported on bionic";
#endif // __BIONIC__
}
1093
// Holds a read lock on the main thread while a helper exercises a timed
// write-lock function, verifying it times out (and validates its timespec).
static void pthread_rwlock_timedwrlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  // Read-hold the lock so all timed write attempts in the helper must block.
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  // Wait until the helper is blocked in its one-second timed wait.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  // Never release the lock: the helper must exit via timeout, not wakeup.
  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
1116
// CLOCK_REALTIME timed write lock must time out while read-held.
TEST(pthread, pthread_rwlock_timedwrlock_timeout) {
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedwrlock);
}
Yabin Cuic9a659c2015-11-05 15:36:08 -08001120
// Same timeout behavior for the bionic-only CLOCK_MONOTONIC variant.
TEST(pthread, pthread_rwlock_timedwrlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedwrlock_monotonic_np);
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_rwlock_timedwrlock_monotonic_np is "
                      "only supported on bionic";
#endif // __BIONIC__
}
1130
// Fixture for testing rwlock kind (reader- vs writer-preference): owns a
// lock initialized with a given kind and spawns reader/writer threads that
// each take and immediately release it, reporting their tid on the way in.
class RwlockKindTestHelper {
 private:
  // Heap-allocated per-thread argument; the spawned thread deletes it.
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  explicit RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  // Starts a thread that wrlock/unlocks `lock`; `tid` is set by the thread
  // so callers can use WaitUntilThreadSleep to see it block.
  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  // Starts a thread that rdlock/unlocks `lock`; same tid protocol as above.
  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  // Builds the lock with the requested kind via a temporary attr object.
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    // Publish tid before blocking so the test can detect the block.
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;  // Thread owns its argument.
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    // Publish tid before blocking so the test can detect the block.
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;  // Thread owns its argument.
  }
};
1198
// With reader preference, a new reader may overtake a waiting writer: the
// reader thread must finish even while the writer is still blocked.
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  // Main thread holds a read lock so the writer must queue up behind it.
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // Under reader preference the new reader gets in despite the queued
  // writer — so we can join it while still holding our read lock.
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));

  // Only now release the writer.
  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
}
1216
// With writer preference, a new reader must queue behind a waiting writer:
// both block until the main thread releases its read lock.
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  // Main thread holds a read lock so the writer must queue up behind it.
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // The new reader must also block (it may not jump the queued writer).
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  // Releasing our read lock lets the writer, then the reader, proceed.
  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));
}
1235
// Counts how many times pthread_once actually invoked its init routine.
static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}
1240
1241TEST(pthread, pthread_once_smoke) {
1242 pthread_once_t once_control = PTHREAD_ONCE_INIT;
1243 ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
1244 ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
Elliott Hughes1728b232014-05-14 10:02:03 -07001245 ASSERT_EQ(1, g_once_fn_call_count);
Elliott Hughesc3f11402013-10-30 14:40:09 -07001246}
1247
// Records the order in which the nested pthread_once routines ran
// (expected "12": Routine1 first, then Routine2 from inside it).
static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}
1253
// pthread_once init routine that itself calls pthread_once (on a separate,
// function-local control) — the recursion case from http://b/1934122.
static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}
1259
1260TEST(pthread, pthread_once_1934122) {
1261 // Very old versions of Android couldn't call pthread_once from a
1262 // pthread_once init routine. http://b/1934122.
1263 pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
1264 ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
1265 ASSERT_EQ("12", pthread_once_1934122_result);
1266}
1267
// Each handler appends its digit in decimal (call_count*10 + n), so the
// final value encodes the exact order the handlers ran in: e.g. 12 means
// handler 1 ran before handler 2; 21 means the reverse.
static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }
Elliott Hughesc3f11402013-10-30 14:40:09 -07001277
// Verifies pthread_atfork handler ordering across a real fork():
// prepare handlers run in reverse registration order, parent/child handlers
// in registration order (per POSIX).
TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    // In the child: check its copy of the counter, then exit without
    // running atexit handlers (_exit, not exit).
    ASSERT_EQ(12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(21, g_atfork_prepare_calls);
  AssertChildExited(pid, 0);
}
1296
Elliott Hughesc3f11402013-10-30 14:40:09 -07001297TEST(pthread, pthread_attr_getscope) {
1298 pthread_attr_t attr;
1299 ASSERT_EQ(0, pthread_attr_init(&attr));
1300
1301 int scope;
1302 ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
1303 ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
1304}
Narayan Kamath51e6cb32014-03-03 15:38:51 +00001305
1306TEST(pthread, pthread_condattr_init) {
1307 pthread_condattr_t attr;
1308 pthread_condattr_init(&attr);
1309
1310 clockid_t clock;
1311 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1312 ASSERT_EQ(CLOCK_REALTIME, clock);
1313
1314 int pshared;
1315 ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
1316 ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
1317}
1318
1319TEST(pthread, pthread_condattr_setclock) {
1320 pthread_condattr_t attr;
1321 pthread_condattr_init(&attr);
1322
1323 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
1324 clockid_t clock;
1325 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1326 ASSERT_EQ(CLOCK_REALTIME, clock);
1327
1328 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1329 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1330 ASSERT_EQ(CLOCK_MONOTONIC, clock);
1331
1332 ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
1333}
1334
// Regression test for a bionic implementation detail: signaling/broadcasting a
// condition variable must not clobber the attribute flags (clock, pshared)
// that bionic stores in the condvar's private state word.
TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  // Signal and broadcast with no waiters; only the flag preservation matters.
  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  // Reconstruct a condattr from the first word of the condvar's private state.
  // NOTE: this reaches directly into bionic's pthread_cond_t layout.
  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif // !defined(__BIONIC__)
}
1360
// Fixture for condvar wakeup tests: starts a helper thread that blocks in a
// caller-supplied wait function, then lets the test wake it with
// pthread_cond_signal/pthread_cond_broadcast. The handshake is driven by the
// atomic 'progress' state machine below.
class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  // Handshake states, in the order they are passed through.
  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;
  // The wait primitive under test (e.g. pthread_cond_wait or a timedwait).
  std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function;

 protected:
  void SetUp() override {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  }

  // Initializes 'cond' with the given condattr clock (used by timedwait tests).
  void InitCond(clockid_t clock=CLOCK_REALTIME) {
    pthread_condattr_t attr;
    ASSERT_EQ(0, pthread_condattr_init(&attr));
    ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock));
    ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
    ASSERT_EQ(0, pthread_condattr_destroy(&attr));
  }

  // Spawns the waiter and returns once it has (very probably) blocked in
  // wait_function: we spin until it reports WAITING, then sleep once more to
  // give it time to enter the wait with the mutex held.
  void StartWaitingThread(std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function) {
    progress = INITIALIZED;
    this->wait_function = wait_function;
    ASSERT_EQ(0, pthread_create(&thread, nullptr, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
    while (progress != WAITING) {
      usleep(5000);
    }
    usleep(5000);
  }

  void TearDown() override {
    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    // Loop to tolerate spurious wakeups: keep waiting until the test has
    // moved progress past WAITING.
    while (test->progress == WAITING) {
      ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};
1418
Yabin Cuic9a659c2015-11-05 15:36:08 -08001419TEST_F(pthread_CondWakeupTest, signal_wait) {
1420 InitCond();
1421 StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1422 return pthread_cond_wait(cond, mutex);
1423 });
Yabin Cui32651b82015-03-13 20:30:00 -07001424 progress = SIGNALED;
Yabin Cuic9a659c2015-11-05 15:36:08 -08001425 ASSERT_EQ(0, pthread_cond_signal(&cond));
Yabin Cui32651b82015-03-13 20:30:00 -07001426}
1427
Yabin Cuic9a659c2015-11-05 15:36:08 -08001428TEST_F(pthread_CondWakeupTest, broadcast_wait) {
1429 InitCond();
1430 StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1431 return pthread_cond_wait(cond, mutex);
1432 });
Yabin Cui32651b82015-03-13 20:30:00 -07001433 progress = SIGNALED;
Yabin Cuic9a659c2015-11-05 15:36:08 -08001434 ASSERT_EQ(0, pthread_cond_broadcast(&cond));
Narayan Kamath51e6cb32014-03-03 15:38:51 +00001435}
Elliott Hughes0e714a52014-03-03 16:42:47 -08001436
// A thread blocked in pthread_cond_timedwait (CLOCK_REALTIME condattr) is
// signaled well before its one-second absolute deadline expires.
TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) {
  InitCond(CLOCK_REALTIME);
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;  // Absolute deadline one second from now.
  StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_timedwait(cond, mutex, &ts);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_signal(&cond));
}
Elliott Hughes0e714a52014-03-03 16:42:47 -08001448
// Same as above, but the condvar uses CLOCK_MONOTONIC, so the deadline is
// computed from the monotonic clock.
TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) {
  InitCond(CLOCK_MONOTONIC);
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;  // Absolute deadline one second from now.
  StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_timedwait(cond, mutex, &ts);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_signal(&cond));
}
Elliott Hughes0e714a52014-03-03 16:42:47 -08001460
TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC_np) {
#if defined(__BIONIC__)
  // The condvar is deliberately left on the default CLOCK_REALTIME while the
  // deadline is taken from CLOCK_MONOTONIC: pthread_cond_timedwait_monotonic_np
  // must use the monotonic clock regardless of the condattr's clock.
  InitCond(CLOCK_REALTIME);
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_signal(&cond));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_cond_timedwait_monotonic_np is only "
                      "supported on bionic";
#endif // __BIONIC__
}
1477
1478static void pthread_cond_timedwait_timeout_helper(clockid_t clock,
1479 int (*wait_function)(pthread_cond_t* __cond,
1480 pthread_mutex_t* __mutex,
1481 const timespec* __timeout)) {
Yabin Cuic9a659c2015-11-05 15:36:08 -08001482 pthread_mutex_t mutex;
1483 ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
1484 pthread_cond_t cond;
1485 ASSERT_EQ(0, pthread_cond_init(&cond, nullptr));
1486 ASSERT_EQ(0, pthread_mutex_lock(&mutex));
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001487
Yabin Cuic9a659c2015-11-05 15:36:08 -08001488 timespec ts;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001489 ASSERT_EQ(0, clock_gettime(clock, &ts));
1490 ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001491 ts.tv_nsec = -1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001492 ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001493 ts.tv_nsec = NS_PER_S;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001494 ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001495 ts.tv_nsec = NS_PER_S - 1;
1496 ts.tv_sec = -1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001497 ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001498 ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
Elliott Hughes0e714a52014-03-03 16:42:47 -08001499}
Elliott Hughes57b7a612014-08-25 17:26:50 -07001500
// Timeout/validation behavior of the standard CLOCK_REALTIME timedwait.
TEST(pthread, pthread_cond_timedwait_timeout) {
  pthread_cond_timedwait_timeout_helper(CLOCK_REALTIME, pthread_cond_timedwait);
}
1504
// Timeout/validation behavior of bionic's monotonic-clock timedwait variant.
TEST(pthread, pthread_cond_timedwait_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_cond_timedwait_timeout_helper(CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_cond_timedwait_monotonic_np is only "
                      "supported on bionic";
#endif // __BIONIC__
}
1513
// Checks what pthread_attr_getstack() reports for the main thread, whose stack
// (unlike a pthread-created thread's) grows on demand and is bounded by
// RLIMIT_STACK rather than a fixed mapping.
TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

#if defined(__BIONIC__)
  // Find stack in /proc/self/maps using a pointer to the stack.
  //
  // We do not use "[stack]" label because in native-bridge environment it is not
  // guaranteed to point to the right stack. A native bridge implementation may
  // keep separate stack for the guest code.
  void* maps_stack_hi = nullptr;
  std::vector<map_record> maps;
  ASSERT_TRUE(Maps::parse_maps(&maps));
  // &maps_stack_hi is a local, so its address is guaranteed to be on our stack.
  uintptr_t stack_address = reinterpret_cast<uintptr_t>(&maps_stack_hi);
  for (const auto& map : maps) {
    if (map.addr_start <= stack_address && map.addr_end > stack_address){
      maps_stack_hi = reinterpret_cast<void*>(map.addr_end);
      break;
    }
  }

  // The high address of the /proc/self/maps stack region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
  EXPECT_EQ(rl.rlim_cur, stack_size);

  // Restore the original soft limit however the test exits from here on.
  auto guard = android::base::make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  // Leave the hard limit unlimited so the later setrlimit calls can raise the
  // soft limit again.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
#endif
}
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001602
// Shared state between pthread_attr_getstack_in_signal_handler and its SIGUSR1
// handler (a signal handler can't be passed extra arguments directly).
struct GetStackSignalHandlerArg {
  volatile bool done;       // Set by the handler once all its checks have run.
  void* signal_stack_base;  // The sigaltstack region the handler should run on.
  size_t signal_stack_size;
  void* main_stack_base;    // Main thread's stack, captured before the signal.
  size_t main_stack_size;
};

static GetStackSignalHandlerArg getstack_signal_handler_arg;
1612
// SIGUSR1 handler: runs on the alternate signal stack and checks that
// pthread_getattr_np() still reports the main thread's real stack rather than
// the alternate stack we're currently executing on.
static void getstack_signal_handler(int sig) {
  ASSERT_EQ(SIGUSR1, sig);
  // Use sleep() to make current thread be switched out by the kernel to provoke the error.
  sleep(1);
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));

  // Verify if the stack used by the signal handler is the alternate stack just registered.
  // (&attr is a local variable, so its address shows which stack we're on.)
  ASSERT_LE(getstack_signal_handler_arg.signal_stack_base, &attr);
  ASSERT_LT(static_cast<void*>(&attr),
            static_cast<char*>(getstack_signal_handler_arg.signal_stack_base) +
            getstack_signal_handler_arg.signal_stack_size);

  // Verify if the main thread's stack got in the signal handler is correct.
  ASSERT_EQ(getstack_signal_handler_arg.main_stack_base, stack_base);
  ASSERT_LE(getstack_signal_handler_arg.main_stack_size, stack_size);

  getstack_signal_handler_arg.done = true;
}
1635
// The previous code obtained the main thread's stack by reading the entry in
// /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64, the kernel
// relies on sp0 in task state segment(tss) to label the stack map with [stack]. If the kernel
// switches a process while the main thread is in an alternate stack, then the kernel will label
// the wrong map with [stack]. This test verifies that when the above situation happens, the main
// thread's stack is found correctly.
TEST(pthread, pthread_attr_getstack_in_signal_handler) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Install a 16KiB alternate signal stack.
  const size_t sig_stack_size = 16 * 1024;
  void* sig_stack = mmap(nullptr, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                         -1, 0);
  ASSERT_NE(MAP_FAILED, sig_stack);
  stack_t ss;
  ss.ss_sp = sig_stack;
  ss.ss_size = sig_stack_size;
  ss.ss_flags = 0;
  stack_t oss;  // Previous alternate stack, restored at the end.
  ASSERT_EQ(0, sigaltstack(&ss, &oss));

  // Capture the main thread's stack before taking the signal, for comparison
  // with what the handler sees.
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* main_stack_base;
  size_t main_stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &main_stack_base, &main_stack_size));

  // SA_ONSTACK makes the handler run on the alternate stack installed above.
  ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
  getstack_signal_handler_arg.done = false;
  getstack_signal_handler_arg.signal_stack_base = sig_stack;
  getstack_signal_handler_arg.signal_stack_size = sig_stack_size;
  getstack_signal_handler_arg.main_stack_base = main_stack_base;
  getstack_signal_handler_arg.main_stack_size = main_stack_size;
  kill(getpid(), SIGUSR1);
  ASSERT_EQ(true, getstack_signal_handler_arg.done);

  ASSERT_EQ(0, sigaltstack(&oss, nullptr));
  ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
}
1675
Yabin Cui917d3902015-01-08 12:32:42 -08001676static void pthread_attr_getstack_18908062_helper(void*) {
1677 char local_variable;
1678 pthread_attr_t attributes;
1679 pthread_getattr_np(pthread_self(), &attributes);
1680 void* stack_base;
1681 size_t stack_size;
1682 pthread_attr_getstack(&attributes, &stack_base, &stack_size);
1683
1684 // Test whether &local_variable is in [stack_base, stack_base + stack_size).
1685 ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
1686 ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
1687}
1688
1689// Check whether something on stack is in the range of
1690// [stack_base, stack_base + stack_size). see b/18908062.
1691TEST(pthread, pthread_attr_getstack_18908062) {
1692 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001693 ASSERT_EQ(0, pthread_create(&t, nullptr,
Yabin Cui917d3902015-01-08 12:32:42 -08001694 reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
Yi Kong32bc0fc2018-08-02 17:31:13 -07001695 nullptr));
1696 ASSERT_EQ(0, pthread_join(t, nullptr));
Yabin Cui917d3902015-01-08 12:32:42 -08001697}
1698
#if defined(__BIONIC__)
// Held by the parent while it calls pthread_gettid_np on the helper thread;
// keeps the helper alive until the call has been made.
static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER;

// Thread body: publishes its kernel tid through 'arg', then blocks on the
// mutex until the parent releases it.
static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();

  // Wait for our parent to call pthread_gettid_np on us before exiting.
  pthread_mutex_lock(&pthread_gettid_np_mutex);
  pthread_mutex_unlock(&pthread_gettid_np_mutex);
  return nullptr;
}
#endif
1711
1712TEST(pthread, pthread_gettid_np) {
1713#if defined(__BIONIC__)
1714 ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));
1715
Elliott Hughesf2083612015-11-11 13:32:28 -08001716 // Ensure the other thread doesn't exit until after we've called
1717 // pthread_gettid_np on it.
1718 pthread_mutex_lock(&pthread_gettid_np_mutex);
1719
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001720 pid_t t_gettid_result;
1721 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001722 pthread_create(&t, nullptr, pthread_gettid_np_helper, &t_gettid_result);
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001723
1724 pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);
1725
Elliott Hughesf2083612015-11-11 13:32:28 -08001726 // Release the other thread and wait for it to exit.
1727 pthread_mutex_unlock(&pthread_gettid_np_mutex);
Yi Kong32bc0fc2018-08-02 17:31:13 -07001728 ASSERT_EQ(0, pthread_join(t, nullptr));
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001729
1730 ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
1731#else
1732 GTEST_LOG_(INFO) << "This test does nothing.\n";
1733#endif
1734}
Elliott Hughes34c987a2014-09-22 16:01:26 -07001735
// Number of times CountCleanupRoutine has executed; checked by the
// pthread_cleanup_push__pthread_cleanup_pop test.
static size_t cleanup_counter = 0;

// A cleanup handler that must never actually run.
static void AbortCleanupRoutine(void*) {
  abort();
}

// A cleanup handler that just records that it ran.
static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}
1745
// Pushes three cleanup handlers, pops two (executing only one), then exits the
// thread with one handler still pushed so pthread_exit() runs it.
// pthread_cleanup_push/pop are brace-introducing macros, so every push below
// must be balanced by a pop in the same function.
static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, nullptr);
  pthread_cleanup_push(CountCleanupRoutine, nullptr);
  pthread_cleanup_push(AbortCleanupRoutine, nullptr);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(nullptr);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}
1760
// Thread entry point; never actually returns because PthreadCleanupTester
// calls pthread_exit().
static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return nullptr;
}
1765
TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, PthreadCleanupStartRoutine, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
  // One handler ran via pthread_cleanup_pop(1), the other via pthread_exit().
  ASSERT_EQ(2U, cleanup_counter);
}
Derek Xue41996952014-09-25 11:05:32 +01001772
// POSIX leaves PTHREAD_MUTEX_DEFAULT's mapping implementation-defined; this
// pins it to PTHREAD_MUTEX_NORMAL.
TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}
1776
1777TEST(pthread, pthread_mutexattr_gettype) {
1778 pthread_mutexattr_t attr;
1779 ASSERT_EQ(0, pthread_mutexattr_init(&attr));
1780
1781 int attr_type;
1782
1783 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
1784 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1785 ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);
1786
1787 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
1788 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1789 ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);
1790
1791 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
1792 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1793 ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001794
1795 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
1796}
1797
// Checks the default mutex protocol (PTHREAD_PRIO_NONE) and that
// setprotocol/getprotocol round-trip for the supported protocols.
TEST(pthread, pthread_mutexattr_protocol) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int protocol;
  ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
  ASSERT_EQ(PTHREAD_PRIO_NONE, protocol);
  // Two passes verify that switching back and forth between protocols works.
  for (size_t repeat = 0; repeat < 2; ++repeat) {
    for (int set_protocol : {PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT}) {
      ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, set_protocol));
      ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
      ASSERT_EQ(protocol, set_protocol);
    }
  }
}
1813
// RAII wrapper around pthread_mutex_t: initializes the mutex with the given
// type (and, optionally, priority protocol) on construction and destroys it
// on destruction, asserting that every pthread call succeeds.
struct PthreadMutex {
  pthread_mutex_t lock;

  explicit PthreadMutex(int mutex_type, int protocol = PTHREAD_PRIO_NONE) {
    init(mutex_type, protocol);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type, int protocol) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, protocol));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};
Derek Xue41996952014-09-25 11:05:32 +01001841
// NORMAL mutex semantics: lock/unlock succeed, and trylock on a mutex we
// already hold fails with EBUSY (no owner tracking).
static void TestPthreadMutexLockNormal(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}
1851
// ERRORCHECK mutex semantics: relocking our own mutex reports EDEADLK, and
// unlocking a mutex we don't hold reports EPERM. With PTHREAD_PRIO_INHERIT,
// trylock on our own held mutex reports EDEADLK rather than EBUSY.
static void TestPthreadMutexLockErrorCheck(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  if (protocol == PTHREAD_PRIO_NONE) {
    ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  } else {
    ASSERT_EQ(EDEADLK, pthread_mutex_trylock(&m.lock));
  }
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}
1867
// RECURSIVE mutex semantics: lock and trylock both nest, each successful
// lock needs a matching unlock, and one unlock too many reports EPERM.
static void TestPthreadMutexLockRecursive(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}
1881
// NORMAL mutex behavior without priority inheritance.
TEST(pthread, pthread_mutex_lock_NORMAL) {
  TestPthreadMutexLockNormal(PTHREAD_PRIO_NONE);
}
1885
// ERRORCHECK mutex behavior without priority inheritance.
TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_NONE);
}
1889
// RECURSIVE mutex behavior without priority inheritance.
TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  TestPthreadMutexLockRecursive(PTHREAD_PRIO_NONE);
}
1893
// All three mutex types again, but with priority inheritance enabled.
TEST(pthread, pthread_mutex_lock_pi) {
  TestPthreadMutexLockNormal(PTHREAD_PRIO_INHERIT);
  TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_INHERIT);
  TestPthreadMutexLockRecursive(PTHREAD_PRIO_INHERIT);
}
1899
TEST(pthread, pthread_mutex_pi_count_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // Bionic only supports 65536 pi mutexes in 32-bit programs.
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
  ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT));
  std::vector<pthread_mutex_t> mutexes(65536);
  // Test if we can use 65536 pi mutexes at the same time.
  // Run 2 times to check if freed pi mutexes can be recycled.
  for (int repeat = 0; repeat < 2; ++repeat) {
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_init(&m, &attr));
    }
    // The 65537th pi mutex must be rejected with ENOMEM.
    pthread_mutex_t m;
    ASSERT_EQ(ENOMEM, pthread_mutex_init(&m, &attr));
    // All 65536 mutexes must be simultaneously usable.
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_lock(&m));
    }
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_unlock(&m));
    }
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_destroy(&m));
    }
  }
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
#else
  GTEST_LOG_(INFO) << "This test does nothing as pi mutex count isn't limited.\n";
#endif
}
1930
Yabin Cui17393b02015-03-21 15:08:25 -07001931TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
1932 pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
1933 PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
1934 ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
1935 pthread_mutex_destroy(&lock_normal);
1936
1937 pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
1938 PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
1939 ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
1940 pthread_mutex_destroy(&lock_errorcheck);
1941
1942 pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
1943 PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
1944 ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
1945 ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
Derek Xue41996952014-09-25 11:05:32 +01001946}
Yabin Cui5a00ba72018-01-26 17:32:31 -08001947
// Checks that unlocking a contended mutex actually wakes the thread blocked on
// it. The handshake is tracked by the atomic 'progress' state machine.
class MutexWakeupHelper {
 private:
  PthreadMutex m;
  // Handshake states, in the order they are passed through.
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;  // Helper thread's tid, used to wait until it blocks.

  static void thread_fn(MutexWakeupHelper* helper) {
    helper->tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    // Blocks here until the main thread releases the mutex.
    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  explicit MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;
    tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    // Wait until the helper thread is actually asleep in pthread_mutex_lock.
    WaitUntilThreadSleep(tid);
    ASSERT_EQ(LOCK_WAITING, progress);

    // Advance the state machine before unlocking so the helper sees
    // LOCK_RELEASED when it acquires the mutex.
    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};
1995
// Checks that unlocking wakes a waiter, for a normal mutex.
TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

// Checks that unlocking wakes a waiter, for an errorcheck mutex.
TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

// Checks that unlocking wakes a waiter, for a recursive mutex.
TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}
2010
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002011static int GetThreadPriority(pid_t tid) {
2012 // sched_getparam() returns the static priority of a thread, which can't reflect a thread's
2013 // priority after priority inheritance. So read /proc/<pid>/stat to get the dynamic priority.
2014 std::string filename = android::base::StringPrintf("/proc/%d/stat", tid);
2015 std::string content;
2016 int result = INT_MAX;
2017 if (!android::base::ReadFileToString(filename, &content)) {
2018 return result;
2019 }
2020 std::vector<std::string> strs = android::base::Split(content, " ");
2021 if (strs.size() < 18) {
2022 return result;
2023 }
2024 if (!android::base::ParseInt(strs[17], &result)) {
2025 return INT_MAX;
2026 }
2027 return result;
2028}
2029
// Checks the behavior of priority-inheritance mutexes: while a higher-priority
// thread is blocked on the mutex, the lower-priority owner's dynamic priority
// (read from /proc/<tid>/stat) is boosted to match — but only when the mutex
// was created with the PTHREAD_PRIO_INHERIT protocol.
class PIMutexWakeupHelper {
private:
  PthreadMutex m;
  int protocol;  // the protocol m was created with
  // Handshake states, advanced alternately by the two threads.
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_CHILD_READY,
    LOCK_WAITING,
    LOCK_RELEASED,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> main_tid;
  std::atomic<pid_t> child_tid;
  // Held by the main thread to park the child until the main thread is ready.
  PthreadMutex start_thread_m;

  // Runs in the child thread, which lowers its own priority (nice 1) before
  // taking m, then checks whether blocking the main thread boosts it back.
  static void thread_fn(PIMutexWakeupHelper* helper) {
    helper->child_tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    // /proc's "priority" field is 20 at the default nice level and 21 after
    // setpriority(..., 1), as asserted here and in test() below.
    ASSERT_EQ(0, setpriority(PRIO_PROCESS, gettid(), 1));
    ASSERT_EQ(21, GetThreadPriority(gettid()));
    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    helper->progress = LOCK_CHILD_READY;
    ASSERT_EQ(0, pthread_mutex_lock(&helper->start_thread_m.lock));

    ASSERT_EQ(0, pthread_mutex_unlock(&helper->start_thread_m.lock));
    // Wait for the main thread to block on m, then check whether our
    // priority was boosted to the main thread's priority.
    WaitUntilThreadSleep(helper->main_tid);
    ASSERT_EQ(LOCK_WAITING, helper->progress);

    if (helper->protocol == PTHREAD_PRIO_INHERIT) {
      ASSERT_EQ(20, GetThreadPriority(gettid()));
    } else {
      ASSERT_EQ(21, GetThreadPriority(gettid()));
    }
    helper->progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
  }

public:
  explicit PIMutexWakeupHelper(int mutex_type, int protocol)
      : m(mutex_type, protocol), protocol(protocol), start_thread_m(PTHREAD_MUTEX_NORMAL) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&start_thread_m.lock));
    main_tid = gettid();
    ASSERT_EQ(20, GetThreadPriority(main_tid));
    progress = LOCK_INITIALIZED;
    child_tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
              reinterpret_cast<void* (*)(void*)>(PIMutexWakeupHelper::thread_fn), this));

    // Let the child take m, then block on it ourselves.
    WaitUntilThreadSleep(child_tid);
    ASSERT_EQ(LOCK_CHILD_READY, progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&start_thread_m.lock));
    progress = LOCK_WAITING;
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));

    ASSERT_EQ(LOCK_RELEASED, progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
    ASSERT_EQ(0, pthread_join(thread, nullptr));
  }
};
2094
2095TEST(pthread, pthread_mutex_pi_wakeup) {
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002096 for (int type : {PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK}) {
2097 for (int protocol : {PTHREAD_PRIO_INHERIT}) {
2098 PIMutexWakeupHelper helper(type, protocol);
2099 helper.test();
2100 }
2101 }
2102}
2103
Yabin Cui140f3672015-02-03 10:32:00 -08002104TEST(pthread, pthread_mutex_owner_tid_limit) {
Yabin Cuie69c2452015-02-13 16:21:25 -08002105#if defined(__BIONIC__) && !defined(__LP64__)
Yabin Cui140f3672015-02-03 10:32:00 -08002106 FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
Yi Kong32bc0fc2018-08-02 17:31:13 -07002107 ASSERT_TRUE(fp != nullptr);
Yabin Cui140f3672015-02-03 10:32:00 -08002108 long pid_max;
2109 ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
2110 fclose(fp);
Yabin Cuie69c2452015-02-13 16:21:25 -08002111 // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
Yabin Cui140f3672015-02-03 10:32:00 -08002112 ASSERT_LE(pid_max, 65536);
Yabin Cuie69c2452015-02-13 16:21:25 -08002113#else
2114 GTEST_LOG_(INFO) << "This test does nothing as 32-bit tid is supported by pthread_mutex.\n";
2115#endif
Yabin Cui140f3672015-02-03 10:32:00 -08002116}
Yabin Cuib5845722015-03-16 22:46:42 -07002117
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002118static void pthread_mutex_timedlock_helper(clockid_t clock,
2119 int (*lock_function)(pthread_mutex_t* __mutex,
2120 const timespec* __timeout)) {
Yabin Cuic9a659c2015-11-05 15:36:08 -08002121 pthread_mutex_t m;
2122 ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
2123
2124 // If the mutex is already locked, pthread_mutex_timedlock should time out.
2125 ASSERT_EQ(0, pthread_mutex_lock(&m));
2126
2127 timespec ts;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002128 ASSERT_EQ(0, clock_gettime(clock, &ts));
2129 ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002130 ts.tv_nsec = -1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002131 ASSERT_EQ(EINVAL, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002132 ts.tv_nsec = NS_PER_S;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002133 ASSERT_EQ(EINVAL, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002134 ts.tv_nsec = NS_PER_S - 1;
2135 ts.tv_sec = -1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002136 ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002137
2138 // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
2139 ASSERT_EQ(0, pthread_mutex_unlock(&m));
2140
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002141 ASSERT_EQ(0, clock_gettime(clock, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002142 ts.tv_sec += 1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002143 ASSERT_EQ(0, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002144
2145 ASSERT_EQ(0, pthread_mutex_unlock(&m));
2146 ASSERT_EQ(0, pthread_mutex_destroy(&m));
2147}
2148
// Tests pthread_mutex_timedlock() against CLOCK_REALTIME.
TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_timedlock_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
}

// Tests the bionic-only CLOCK_MONOTONIC variant.
TEST(pthread, pthread_mutex_timedlock_monotonic_np) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else  // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_mutex_timedlock_monotonic_np is only "
                      "supported on bionic";
#endif  // __BIONIC__
}
2161
// Checks timed locking of a priority-inheritance mutex: the main thread takes
// the mutex, and a second thread's timed lock on it must fail with ETIMEDOUT.
static void pthread_mutex_timedlock_pi_helper(clockid_t clock,
                                              int (*lock_function)(pthread_mutex_t* __mutex,
                                                                   const timespec* __timeout)) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL, PTHREAD_PRIO_INHERIT);

  // The mutex is free, so the timed lock succeeds immediately.
  timespec ts;
  clock_gettime(clock, &ts);
  ts.tv_sec += 1;
  ASSERT_EQ(0, lock_function(&m.lock, &ts));

  // Everything the second thread needs, passed through the void* argument.
  struct ThreadArgs {
    clockid_t clock;
    int (*lock_function)(pthread_mutex_t* __mutex, const timespec* __timeout);
    PthreadMutex& m;
  };

  ThreadArgs thread_args = {
    .clock = clock,
    .lock_function = lock_function,
    .m = m,
  };

  // Tries to take the (held) mutex with a one-second deadline and returns the
  // lock result through the thread's exit value.
  auto ThreadFn = [](void* arg) -> void* {
    auto args = static_cast<ThreadArgs*>(arg);
    timespec ts;
    clock_gettime(args->clock, &ts);
    ts.tv_sec += 1;
    intptr_t result = args->lock_function(&args->m.lock, &ts);
    return reinterpret_cast<void*>(result);
  };

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr, ThreadFn, &thread_args));
  void* result;
  ASSERT_EQ(0, pthread_join(thread, &result));
  ASSERT_EQ(ETIMEDOUT, reinterpret_cast<intptr_t>(result));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}
2200
// Tests PI-mutex timed locking against CLOCK_REALTIME.
TEST(pthread, pthread_mutex_timedlock_pi) {
  pthread_mutex_timedlock_pi_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
}

// Tests PI-mutex timed locking with the bionic-only CLOCK_MONOTONIC variant.
TEST(pthread, pthread_mutex_timedlock_monotonic_np_pi) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_pi_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else  // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_mutex_timedlock_monotonic_np is only "
                      "supported on bionic";
#endif  // __BIONIC__
}
2213
// Checks that bionic aborts with a descriptive message when any pthread_mutex
// function is called on a mutex that has already been destroyed.
TEST(pthread, pthread_mutex_using_destroyed_mutex) {
#if defined(__BIONIC__)
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
  ASSERT_EXIT(pthread_mutex_lock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_lock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_unlock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_unlock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_trylock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_trylock called on a destroyed mutex");
  // ts is deliberately left uninitialized: the calls are expected to abort
  // before the timeout is examined.
  timespec ts;
  ASSERT_EXIT(pthread_mutex_timedlock(&m, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_timedlock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_timedlock_monotonic_np(&m, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_timedlock_monotonic_np called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_destroy(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_destroy called on a destroyed mutex");
#else
  GTEST_LOG_(INFO) << "This test tests bionic pthread mutex implementation details.";
#endif
}
2236
// Hands out buffers that are aligned to `alignment` but deliberately NOT to
// `alignment * 2`, so callers can verify code doesn't silently rely on
// stricter alignment than it asked for. All buffers are freed on destruction.
class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    // Over-allocate so a strictly-aligned address always exists inside.
    char* base = new char[size + alignment * 2];
    allocated_array.push_back(base);
    char* aligned = base;
    while (!is_strict_aligned(aligned, alignment)) {
      ++aligned;
    }
    return aligned;
  }

  ~StrictAlignmentAllocator() {
    for (const auto& buffer : allocated_array) {
      delete[] buffer;
    }
  }

 private:
  // True if p is aligned to `alignment` but misaligned for `alignment * 2`.
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;  // original pointers, for delete[]
};
2261
2262TEST(pthread, pthread_types_allow_four_bytes_alignment) {
2263#if defined(__BIONIC__)
2264 // For binary compatibility with old version, we need to allow 4-byte aligned data for pthread types.
2265 StrictAlignmentAllocator allocator;
2266 pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
2267 allocator.allocate(sizeof(pthread_mutex_t), 4));
Yi Kong32bc0fc2018-08-02 17:31:13 -07002268 ASSERT_EQ(0, pthread_mutex_init(mutex, nullptr));
Yabin Cuib5845722015-03-16 22:46:42 -07002269 ASSERT_EQ(0, pthread_mutex_lock(mutex));
2270 ASSERT_EQ(0, pthread_mutex_unlock(mutex));
2271 ASSERT_EQ(0, pthread_mutex_destroy(mutex));
2272
2273 pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
2274 allocator.allocate(sizeof(pthread_cond_t), 4));
Yi Kong32bc0fc2018-08-02 17:31:13 -07002275 ASSERT_EQ(0, pthread_cond_init(cond, nullptr));
Yabin Cuib5845722015-03-16 22:46:42 -07002276 ASSERT_EQ(0, pthread_cond_signal(cond));
2277 ASSERT_EQ(0, pthread_cond_broadcast(cond));
2278 ASSERT_EQ(0, pthread_cond_destroy(cond));
2279
2280 pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
2281 allocator.allocate(sizeof(pthread_rwlock_t), 4));
Yi Kong32bc0fc2018-08-02 17:31:13 -07002282 ASSERT_EQ(0, pthread_rwlock_init(rwlock, nullptr));
Yabin Cuib5845722015-03-16 22:46:42 -07002283 ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
2284 ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
2285 ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
2286 ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
2287 ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));
2288
2289#else
2290 GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
2291#endif
2292}
Christopher Ferris60907c72015-06-09 18:46:15 -07002293
// Checks the LP32 NULL-mutex compatibility behavior for pthread_mutex_lock().
TEST(pthread, pthread_mutex_lock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_lock(null_value));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}

// Checks the LP32 NULL-mutex compatibility behavior for pthread_mutex_unlock().
TEST(pthread, pthread_mutex_unlock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_unlock(null_value));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}
2323
// On LP64 there is no NULL-mutex compatibility behavior: locking a null mutex
// is expected to crash with SIGSEGV.
TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}

// As above, for pthread_mutex_unlock().
TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}
Yabin Cui33ac04a2015-09-22 11:16:15 -07002341
2342extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);
2343
2344static volatile bool signal_handler_on_altstack_done;
2345
// Each helper below checks that the signal stack has room for one common
// signal-handler activity; they're noinline so each frame is only allocated
// when the helper is actually called.
__attribute__((__noinline__))
static void signal_handler_backtrace() {
  // Check if we have enough stack space for unwinding.
  int count = 0;
  _Unwind_Backtrace(FrameCounter, &count);
  ASSERT_GT(count, 0);
}

__attribute__((__noinline__))
static void signal_handler_logging() {
  // Check if we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}

__attribute__((__noinline__))
static void signal_handler_snprintf() {
  // Check if we have enough stack space for snprintf to a PATH_MAX buffer, plus some extra.
  char buf[PATH_MAX + 2048];
  ASSERT_GT(snprintf(buf, sizeof(buf), "/proc/%d/status", getpid()), 0);
}

// SIGUSR1 handler for big_enough_signal_stack: runs several stack-hungry
// operations to verify the signal stack is large enough for each of them.
static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
  ASSERT_EQ(SIGUSR1, signo);
  signal_handler_backtrace();
  signal_handler_logging();
  signal_handler_snprintf();
}
2375
// Sends SIGUSR1 to ourselves and verifies that the handler (installed with
// SA_ONSTACK) had enough stack to unwind, log, and snprintf.
TEST(pthread, big_enough_signal_stack) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}
Yabin Cuie7c2fff2015-11-05 22:06:09 -08002382
2383TEST(pthread, pthread_barrierattr_smoke) {
2384 pthread_barrierattr_t attr;
2385 ASSERT_EQ(0, pthread_barrierattr_init(&attr));
2386 int pshared;
2387 ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
2388 ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
2389 ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
2390 ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
2391 ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
2392 ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
2393}
2394
// State shared by all threads in pthread_barrier_smoke.
struct BarrierTestHelperData {
  size_t thread_count;
  pthread_barrier_t barrier;
  std::atomic<int> finished_mask;        // bit i set once thread i finished the current iteration
  std::atomic<int> serial_thread_count;  // waiters that got PTHREAD_BARRIER_SERIAL_THREAD
  size_t iteration_count;
  std::atomic<size_t> finished_iteration_count;  // iterations fully completed by all threads

  BarrierTestHelperData(size_t thread_count, size_t iteration_count)
      : thread_count(thread_count), finished_mask(0), serial_thread_count(0),
        iteration_count(iteration_count), finished_iteration_count(0) {
  }
};

// Per-thread argument: the thread's index plus the shared test state.
struct BarrierTestHelperArg {
  int id;
  BarrierTestHelperData* data;
};
2413
// Body run by each thread in pthread_barrier_smoke: waits on the barrier
// iteration_count times, checking that exactly one waiter per iteration gets
// PTHREAD_BARRIER_SERIAL_THREAD and all the others get 0.
static void BarrierTestHelper(BarrierTestHelperArg* arg) {
  for (size_t i = 0; i < arg->data->iteration_count; ++i) {
    int result = pthread_barrier_wait(&arg->data->barrier);
    if (result == PTHREAD_BARRIER_SERIAL_THREAD) {
      arg->data->serial_thread_count++;
    } else {
      ASSERT_EQ(0, result);
    }
    // Atomically mark this thread finished; whichever thread completes the
    // mask validates the serial count and resets state for the next iteration.
    int mask = arg->data->finished_mask.fetch_or(1 << arg->id);
    mask |= 1 << arg->id;
    if (mask == ((1 << arg->data->thread_count) - 1)) {
      ASSERT_EQ(1, arg->data->serial_thread_count);
      arg->data->finished_iteration_count++;
      arg->data->finished_mask = 0;
      arg->data->serial_thread_count = 0;
    }
  }
}
2432
// Checks that pthread_barrier_wait() releases all waiters on each iteration
// and reports PTHREAD_BARRIER_SERIAL_THREAD to exactly one of them.
TEST(pthread, pthread_barrier_smoke) {
  const size_t BARRIER_ITERATION_COUNT = 10;
  const size_t BARRIER_THREAD_COUNT = 10;
  BarrierTestHelperData data(BARRIER_THREAD_COUNT, BARRIER_ITERATION_COUNT);
  ASSERT_EQ(0, pthread_barrier_init(&data.barrier, nullptr, data.thread_count));
  std::vector<pthread_t> threads(data.thread_count);
  std::vector<BarrierTestHelperArg> args(threads.size());
  for (size_t i = 0; i < threads.size(); ++i) {
    args[i].id = i;
    args[i].data = &data;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
      reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
  }
  for (size_t i = 0; i < threads.size(); ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
  // Every iteration must have completed with exactly one serial thread.
  ASSERT_EQ(data.iteration_count, data.finished_iteration_count);
  ASSERT_EQ(0, pthread_barrier_destroy(&data.barrier));
}
2452
// Argument for BarrierDestroyTestHelper: publishes the helper thread's tid
// and names the barrier to wait on.
struct BarrierDestroyTestArg {
  std::atomic<int> tid;
  pthread_barrier_t* barrier;
};

// Runs in a helper thread: records its tid, then blocks on the barrier.
static void BarrierDestroyTestHelper(BarrierDestroyTestArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
}
2462
// Checks that destroying a barrier fails with EBUSY while a thread is waiting
// on it, but succeeds immediately after the barrier trips.
TEST(pthread, pthread_barrier_destroy) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  pthread_t thread;
  BarrierDestroyTestArg arg;
  arg.tid = 0;
  arg.barrier = &barrier;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(BarrierDestroyTestHelper), &arg));
  // Wait until the helper thread is blocked in pthread_barrier_wait().
  WaitUntilThreadSleep(arg.tid);
  ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  // Verify if the barrier can be destroyed directly after pthread_barrier_wait().
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(0, pthread_join(thread, nullptr));
#if defined(__BIONIC__)
  // Bionic additionally detects destroying an already-destroyed barrier.
  ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier));
#endif
}
2482
// Argument for BarrierOrderingTestHelper: each thread owns one slot of a
// shared array and checks every slot between barrier waits.
struct BarrierOrderingTestHelperArg {
  pthread_barrier_t* barrier;
  size_t* array;
  size_t array_length;
  size_t id;
};

// Checks the memory-ordering guarantee of pthread_barrier_wait(): writes made
// by any thread before the barrier must be visible to every thread after it.
void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) {
  const size_t ITERATION_COUNT = 10000;
  for (size_t i = 1; i <= ITERATION_COUNT; ++i) {
    arg->array[arg->id] = i;
    int result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
    // After the barrier, every thread's write of i must be visible.
    for (size_t j = 0; j < arg->array_length; ++j) {
      ASSERT_EQ(i, arg->array[j]);
    }
    // A second wait keeps the threads in lockstep before the next iteration's
    // writes can overwrite the values being checked.
    result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
  }
}
2503
// Spawns several threads running BarrierOrderingTestHelper over a shared array.
TEST(pthread, pthread_barrier_check_ordering) {
  const size_t THREAD_COUNT = 4;
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT));
  size_t array[THREAD_COUNT];
  std::vector<pthread_t> threads(THREAD_COUNT);
  std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT);
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    args[i].barrier = &barrier;
    args[i].array = array;
    args[i].array_length = THREAD_COUNT;
    args[i].id = i;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper),
                                &args[i]));
  }
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
}

// A barrier with a zero thread count is invalid.
TEST(pthread, pthread_barrier_init_zero_count) {
  pthread_barrier_t barrier;
  ASSERT_EQ(EINVAL, pthread_barrier_init(&barrier, nullptr, 0));
}
2529
Yabin Cuife3a83a2015-11-17 16:03:18 -08002530TEST(pthread, pthread_spinlock_smoke) {
2531 pthread_spinlock_t lock;
2532 ASSERT_EQ(0, pthread_spin_init(&lock, 0));
2533 ASSERT_EQ(0, pthread_spin_trylock(&lock));
2534 ASSERT_EQ(0, pthread_spin_unlock(&lock));
2535 ASSERT_EQ(0, pthread_spin_lock(&lock));
2536 ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock));
2537 ASSERT_EQ(0, pthread_spin_unlock(&lock));
2538 ASSERT_EQ(0, pthread_spin_destroy(&lock));
2539}
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002540
Elliott Hughes8aecba72017-10-17 15:34:41 -07002541TEST(pthread, pthread_attr_getdetachstate__pthread_attr_setdetachstate) {
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002542 pthread_attr_t attr;
2543 ASSERT_EQ(0, pthread_attr_init(&attr));
2544
Elliott Hughes8aecba72017-10-17 15:34:41 -07002545 int state;
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002546 ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
Elliott Hughes8aecba72017-10-17 15:34:41 -07002547 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
2548 ASSERT_EQ(PTHREAD_CREATE_DETACHED, state);
2549
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002550 ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));
Elliott Hughes8aecba72017-10-17 15:34:41 -07002551 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
2552 ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);
2553
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002554 ASSERT_EQ(EINVAL, pthread_attr_setdetachstate(&attr, 123));
Elliott Hughes8aecba72017-10-17 15:34:41 -07002555 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
2556 ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002557}
2558
// Checks that pthread_create() fails cleanly with EAGAIN (rather than
// crashing) when the process has run out of VMAs for the thread's mappings.
TEST(pthread, pthread_create__mmap_failures) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

  const auto kPageSize = sysconf(_SC_PAGE_SIZE);

  // Use up all the VMAs. By default this is 64Ki (though some will already be in use).
  std::vector<void*> pages;
  pages.reserve(64 * 1024);
  // Alternate the protection, presumably so adjacent single-page mappings
  // aren't merged into one VMA.
  int prot = PROT_NONE;
  while (true) {
    void* page = mmap(nullptr, kPageSize, prot, MAP_ANON|MAP_PRIVATE, -1, 0);
    if (page == MAP_FAILED) break;
    pages.push_back(page);
    prot = (prot == PROT_NONE) ? PROT_READ : PROT_NONE;
  }

  // Try creating threads, freeing up a page each time we fail.
  size_t EAGAIN_count = 0;
  size_t i = 0;
  for (; i < pages.size(); ++i) {
    pthread_t t;
    int status = pthread_create(&t, &attr, IdFn, nullptr);
    if (status != EAGAIN) break;
    ++EAGAIN_count;
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }

  // Creating a thread uses at least three VMAs: the combined stack and TLS, and a guard on each
  // side. So we should have seen at least three failures.
  ASSERT_GE(EAGAIN_count, 3U);

  // Release whatever is still mapped.
  for (; i < pages.size(); ++i) {
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }
}
Elliott Hughesdff08ce2017-10-16 09:58:45 -07002596
// An invalid scheduling policy must be rejected with EINVAL.
TEST(pthread, pthread_setschedparam) {
  sched_param p = { .sched_priority = INT_MIN };
  ASSERT_EQ(EINVAL, pthread_setschedparam(pthread_self(), INT_MIN, &p));
}

// An invalid priority must be rejected with EINVAL.
TEST(pthread, pthread_setschedprio) {
  ASSERT_EQ(EINVAL, pthread_setschedprio(pthread_self(), INT_MIN));
}
Elliott Hughes8aecba72017-10-17 15:34:41 -07002605
// Round-trips each valid inherit-sched value through the setter/getter pair
// and checks that an invalid value is rejected without clobbering the old one.
TEST(pthread, pthread_attr_getinheritsched__pthread_attr_setinheritsched) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_INHERIT_SCHED, state);

  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);

  ASSERT_EQ(EINVAL, pthread_attr_setinheritsched(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);
}

// With PTHREAD_INHERIT_SCHED, invalid attr scheduling parameters are ignored;
// with PTHREAD_EXPLICIT_SCHED they cause pthread_create() to fail (on LP64).
TEST(pthread, pthread_attr_setinheritsched__PTHREAD_INHERIT_SCHED__PTHREAD_EXPLICIT_SCHED) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  // If we set invalid scheduling attributes but choose to inherit, everything's fine...
  sched_param param = { .sched_priority = sched_get_priority_max(SCHED_FIFO) + 1 };
  ASSERT_EQ(0, pthread_attr_setschedparam(&attr, &param));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));

#if defined(__LP64__)
  // If we ask to use them, though, we'll see a failure...
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(EINVAL, pthread_create(&t, &attr, IdFn, nullptr));
#else
  // For backwards compatibility with broken apps, we just ignore failures
  // to set scheduler attributes on LP32.
#endif
}
2647
// Checks that PTHREAD_INHERIT_SCHED makes a new thread inherit the creating
// thread's SCHED_FIFO policy (the attr itself specifies no policy).
TEST(pthread, pthread_attr_setinheritsched_PTHREAD_INHERIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) {
    // Switching to SCHED_FIFO usually needs privileges we may not have.
    GTEST_LOG_(INFO) << "pthread_setschedparam failed with EPERM, skipping test\n";
    return;
  }
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

// Checks that PTHREAD_EXPLICIT_SCHED makes a new thread use the attr's policy
// (SCHED_OTHER) even though the creating thread is running SCHED_FIFO.
TEST(pthread, pthread_attr_setinheritsched_PTHREAD_EXPLICIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) {
    // Switching to SCHED_FIFO usually needs privileges we may not have.
    GTEST_LOG_(INFO) << "pthread_setschedparam failed with EPERM, skipping test\n";
    return;
  }
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_OTHER));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_OTHER, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}
2696
2697TEST(pthread, pthread_attr_setinheritsched__takes_effect_despite_SCHED_RESET_ON_FORK) {
2698 sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
2699 int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO | SCHED_RESET_ON_FORK, &param);
2700 if (rc == EPERM) {
2701 GTEST_LOG_(INFO) << "pthread_setschedparam failed with EPERM, skipping test\n";
2702 return;
2703 }
2704 ASSERT_EQ(0, rc);
2705
2706 pthread_attr_t attr;
2707 ASSERT_EQ(0, pthread_attr_init(&attr));
2708 ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2709
Elliott Hughes0bd9d132017-11-02 13:11:13 -07002710 SpinFunctionHelper spin_helper;
Elliott Hughes8aecba72017-10-17 15:34:41 -07002711 pthread_t t;
Elliott Hughes0bd9d132017-11-02 13:11:13 -07002712 ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
Elliott Hughes8aecba72017-10-17 15:34:41 -07002713 int actual_policy;
2714 sched_param actual_param;
2715 ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
2716 ASSERT_EQ(SCHED_FIFO | SCHED_RESET_ON_FORK, actual_policy);
Elliott Hughes0bd9d132017-11-02 13:11:13 -07002717 spin_helper.UnSpin();
Elliott Hughes8aecba72017-10-17 15:34:41 -07002718 ASSERT_EQ(0, pthread_join(t, nullptr));
2719}