blob: 9c6b975fd66e8ed63338523080625861695ef048 [file] [log] [blame]
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include <gtest/gtest.h>
18
19#include <errno.h>
Elliott Hughes5b9310e2013-10-02 16:59:05 -070020#include <inttypes.h>
Elliott Hughesb95cf0d2013-07-15 14:51:07 -070021#include <limits.h>
Elliott Hughes04620a32014-03-07 17:59:05 -080022#include <malloc.h>
Elliott Hughesbfeab1b2012-09-05 17:47:37 -070023#include <pthread.h>
Christopher Ferrisf04935c2013-12-20 18:43:21 -080024#include <signal.h>
Yabin Cui140f3672015-02-03 10:32:00 -080025#include <stdio.h>
Elliott Hughes70b24b12013-11-15 11:51:07 -080026#include <sys/mman.h>
Elliott Hughes4d098ca2016-04-11 12:43:05 -070027#include <sys/prctl.h>
George Burgess IV08fd0722019-01-15 19:00:11 -080028#include <sys/resource.h>
Elliott Hughes57b7a612014-08-25 17:26:50 -070029#include <sys/syscall.h>
Narayan Kamath51e6cb32014-03-03 15:38:51 +000030#include <time.h>
Elliott Hughes4d014e12012-09-07 16:47:54 -070031#include <unistd.h>
Yabin Cui33ac04a2015-09-22 11:16:15 -070032#include <unwind.h>
Elliott Hughesbfeab1b2012-09-05 17:47:37 -070033
Yabin Cui08ee8d22015-02-11 17:04:36 -080034#include <atomic>
Josh Gaoddf757e2018-10-17 15:23:03 -070035#include <future>
Yabin Cuib5845722015-03-16 22:46:42 -070036#include <vector>
Yabin Cui08ee8d22015-02-11 17:04:36 -080037
Elliott Hughes5e62b342018-10-25 11:00:00 -070038#include <android-base/macros.h>
Yabin Cui6b9c85b2018-01-23 12:56:18 -080039#include <android-base/parseint.h>
Tom Cherryb8ab6182017-04-05 16:20:29 -070040#include <android-base/scopeguard.h>
Yabin Cui6b9c85b2018-01-23 12:56:18 -080041#include <android-base/strings.h>
Tom Cherryb8ab6182017-04-05 16:20:29 -070042
Yabin Cuic9a659c2015-11-05 15:36:08 -080043#include "private/bionic_constants.h"
Yabin Cui17393b02015-03-21 15:08:25 -070044#include "BionicDeathTest.h"
Elliott Hughes71ba5892018-02-07 12:44:45 -080045#include "SignalUtils.h"
Elliott Hughes15dfd632015-09-22 16:40:14 -070046#include "utils.h"
47
// A TLS key can be created and deleted once; deleting it a second time fails.
TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}
Elliott Hughes4d014e12012-09-07 16:47:54 -070055
TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}
Elliott Hughes718a5b52014-01-28 17:02:03 -080060
// The runtime sysconf() value must agree with the compile-time constant.
TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}
65
// Creates many keys, gives each a distinct value, and verifies each key still
// maps to its own value (i.e. keys are genuinely distinct TLS slots).
TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  // Ensure keys created before an ASSERT failure are still cleaned up.
  auto scope_guard = android::base::make_scope_guard([&keys] {
    for (const auto& key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, nullptr)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  // Pop keys off in reverse order, checking each still holds the value set above.
  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}
93
// Allocating keys past the limit must eventually fail with EAGAIN,
// never by crashing or returning some other error.
TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, nullptr);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (const auto& key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}
120
// A deleted key behaves as if it never existed: reads return nullptr,
// writes fail with EINVAL.
TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns nullptr.
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}
133
// TLS values survive fork(): the child's (single) thread sees the parent's
// value, and the parent's value is untouched after the child exits.
TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  AssertChildExited(pid, 99);

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}
155
// Thread start routine: arg is a pthread_key_t*; returns that key's value
// as seen by the new thread (should be nullptr for a fresh key).
static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}
159
160TEST(pthread, pthread_key_dirty) {
161 pthread_key_t key;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700162 ASSERT_EQ(0, pthread_key_create(&key, nullptr));
Elliott Hughes40a52172014-07-30 14:48:10 -0700163
Yabin Cuia36158a2015-11-16 21:06:16 -0800164 size_t stack_size = 640 * 1024;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700165 void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
Elliott Hughes40a52172014-07-30 14:48:10 -0700166 ASSERT_NE(MAP_FAILED, stack);
167 memset(stack, 0xff, stack_size);
168
169 pthread_attr_t attr;
170 ASSERT_EQ(0, pthread_attr_init(&attr));
171 ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));
172
173 pthread_t t;
174 ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));
175
176 void* result;
177 ASSERT_EQ(0, pthread_join(t, &result));
178 ASSERT_EQ(nullptr, result); // Not ~0!
179
180 ASSERT_EQ(0, munmap(stack, stack_size));
Dan Albert1d53ae22014-09-02 15:24:26 -0700181 ASSERT_EQ(0, pthread_key_delete(key));
Elliott Hughes40a52172014-07-30 14:48:10 -0700182}
183
// Bionic-only: a zero-initialized (never-created) key must be rejected.
TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So here tests if the static/global default value 0 can be detected as invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_LOG_(INFO) << "This test tests bionic pthread key implementation detail.\n";
#endif
}
196
// Identity thread function: returns its argument unchanged.
static void* IdFn(void* arg) {
  return arg;
}
200
// RAII helper providing a thread function that busy-waits on a shared flag.
// Construction raises the flag; destruction (or UnSpin()) lowers it so any
// spawned spinner threads terminate.
class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }

  ~SpinFunctionHelper() {
    UnSpin();
  }

  // Returns the spinning thread entry point, suitable for pthread_create.
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  // Releases all threads currently spinning on the flag.
  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return nullptr;
  }
  static std::atomic<bool> spin_flag_;
};

// It doesn't matter if spin_flag_ is shared by several tests, because it is
// always set to false after each test; every thread looping on spin_flag_
// will therefore see it become false at some point.
std::atomic<bool> SpinFunctionHelper::spin_flag_;
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400231
// Thread function: joins the thread passed (cast into) arg and returns
// pthread_join's result code, cast back to void*.
static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), nullptr));
}
235
// Asserts that thread t's detach state (as reported by pthread_getattr_np)
// matches is_detached.
static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}
244
// Creates a thread and joins it, leaving t as a handle to a thread that no
// longer exists (for the "no_such_thread" death tests below).
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
}
249
TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}
260
Elliott Hughes3e898472013-02-12 16:40:24 +0000261TEST(pthread, pthread_create_EAGAIN) {
262 pthread_attr_t attributes;
263 ASSERT_EQ(0, pthread_attr_init(&attributes));
264 ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));
265
266 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700267 ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, nullptr));
Elliott Hughes3e898472013-02-12 16:40:24 +0000268}
269
// Joining a detached thread must fail with EINVAL.
TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));
}
283
// Detaching a thread that someone is already joining must not steal the join.
// (bionic rejects the detach with EINVAL; glibc accepts it but treats it as a
// no-op, so the expected return code is platform-dependent.)
TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}
Elliott Hughes14f19592012-10-29 10:19:44 -0700310
// Joining yourself is a guaranteed deadlock, reported as EDEADLK.
TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), nullptr));
}
Elliott Hughes4f251be2012-11-01 16:33:29 -0700314
// Regression scaffolding for http://b/37410: the main thread calling
// pthread_exit while another thread joins it. The mutex is used purely as a
// handshake so main doesn't exit before the helper thread is running.
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, nullptr));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, nullptr, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(nullptr);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, nullptr);

    return nullptr;
  }
};
Elliott Hughes4f251be2012-11-01 16:33:29 -0700349
// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};
354
// Runs TestBug37410::main in its own process and expects a clean exit(0).
TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}
Elliott Hughesc5d028f2013-01-10 14:42:14 -0800359
// Thread function: blocks in sigwait64 for any signal, stores the received
// signal number through arg (an int*), and returns sigwait64's result code.
static void* SignalHandlerFn(void* arg) {
  sigset64_t wait_set;
  sigfillset64(&wait_set);
  return reinterpret_cast<void*>(sigwait64(&wait_set, reinterpret_cast<int*>(arg)));
}
365
// Exercises pthread_sigmask: blocking SIGUSR1, agreement with sigprocmask,
// and delivery of a blocked signal to a sigwait-ing thread.
TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, nullptr));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, nullptr));
}
Elliott Hughes5e3fc432013-02-11 16:36:48 -0800406
// 64-bit-sigset variant of the test above, using SIGRTMIN to exercise the
// real-time signal range that doesn't fit in a 32-bit sigset_t.
// (NOTE(review): the test name spells it "SIGTRMIN"; renaming would change
// the test's identity in result trackers, so the typo is left as-is.)
TEST(pthread, pthread_sigmask64_SIGTRMIN) {
  // Check that SIGRTMIN isn't blocked.
  sigset64_t original_set;
  sigemptyset64(&original_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember64(&original_set, SIGRTMIN));

  // Block SIGRTMIN.
  sigset64_t set;
  sigemptyset64(&set);
  sigaddset64(&set, SIGRTMIN);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, &set, nullptr));

  // Check that SIGRTMIN is blocked.
  sigset64_t final_set;
  sigemptyset64(&final_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));
  // ...and that sigprocmask64 agrees with pthread_sigmask64.
  sigemptyset64(&final_set);
  ASSERT_EQ(0, sigprocmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));

  // Spawn a thread that calls sigwait64 and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGRTMIN.
  pthread_kill(signal_thread, SIGRTMIN);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGRTMIN, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask64(SIG_SETMASK, &original_set, nullptr));
}
447
// Shared body for the setname/getname tests: sets and reads back names on
// thread t, checking the 15-character limit in both directions.
static void test_pthread_setname_np__pthread_getname_np(pthread_t t) {
  ASSERT_EQ(0, pthread_setname_np(t, "short"));
  char name[32];
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("short", name);

  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(t, "123456789012345"));
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("123456789012345", name);

  ASSERT_EQ(ERANGE, pthread_setname_np(t, "1234567890123456"));

  // The passed-in buffer should be at least 16 bytes.
  ASSERT_EQ(0, pthread_getname_np(t, name, 16));
  ASSERT_EQ(ERANGE, pthread_getname_np(t, name, 15));
}
465
// Naming the calling thread itself.
TEST(pthread, pthread_setname_np__pthread_getname_np__self) {
  test_pthread_setname_np__pthread_getname_np(pthread_self());
}
469
// Naming another (still-running) thread.
TEST(pthread, pthread_setname_np__pthread_getname_np__other) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}
479
// http://b/28051133: a kernel misfeature means that you can't change the
// name of another thread if you've set PR_SET_DUMPABLE to 0.
TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
  ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0)) << strerror(errno);

  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}
493
// Using a stale pthread_t aborts on bionic (fortified against invalid handles).
TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"), "invalid pthread_t");
}
500
// A zero pthread_t is reported as an error, not an abort.
TEST_F(pthread_DeathTest, pthread_setname_np__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ENOENT, pthread_setname_np(null_thread, "short 3"));
}
505
// Using a stale pthread_t aborts on bionic.
TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  char name[64];
  EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)), "invalid pthread_t");
}
513
// A zero pthread_t is reported as an error, not an abort.
TEST_F(pthread_DeathTest, pthread_getname_np__null_thread) {
  pthread_t null_thread = 0;

  char name[64];
  EXPECT_EQ(ENOENT, pthread_getname_np(null_thread, name, sizeof(name)));
}
520
TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}
525
// Out-of-range signal numbers are rejected with EINVAL.
TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}
529
// SIGALRM handler that re-raises SIGALRM (once) from inside the handler;
// the static count stops the recursion after one nested delivery.
static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}
538
// pthread_kill must be usable from inside a signal handler.
TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}
543
// pthread_kill on a thread that has exited (but not been joined) must return
// ESRCH rather than signalling a recycled tid.
TEST(pthread, pthread_kill__exited_thread) {
  static std::promise<pid_t> tid_promise;
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              [](void*) -> void* {
                                tid_promise.set_value(gettid());
                                return nullptr;
                              },
                              nullptr));

  pid_t tid = tid_promise.get_future().get();
  // Spin until the kernel no longer knows the tid, i.e. the thread is fully gone.
  while (TEMP_FAILURE_RETRY(syscall(__NR_tgkill, getpid(), tid, 0)) != -1) {
    continue;
  }
  ASSERT_EQ(ESRCH, errno);

  ASSERT_EQ(ESRCH, pthread_kill(thread, 0));
}
562
// Using a stale pthread_t aborts on bionic.
TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_detach(dead_thread), "invalid pthread_t");
}
569
// A zero pthread_t is reported as ESRCH, not an abort.
TEST_F(pthread_DeathTest, pthread_detach__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_detach(null_thread));
}
574
// A live thread's CPU-time clock id can be queried and read with clock_gettime.
TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}
588
// Using a stale pthread_t aborts on bionic.
TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c), "invalid pthread_t");
}
596
// A zero pthread_t is reported as ESRCH, not an abort.
TEST_F(pthread_DeathTest, pthread_getcpuclockid__null_thread) {
  pthread_t null_thread = 0;
  clockid_t c;
  EXPECT_EQ(ESRCH, pthread_getcpuclockid(null_thread, &c));
}
602
// Using a stale pthread_t aborts on bionic.
TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, &param), "invalid pthread_t");
}
611
// A zero pthread_t is reported as ESRCH, not an abort.
TEST_F(pthread_DeathTest, pthread_getschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_getschedparam(null_thread, &policy, &param));
}
618
// Using a stale pthread_t aborts on bionic.
TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, &param), "invalid pthread_t");
}
627
// A zero pthread_t is reported as ESRCH, not an abort.
TEST_F(pthread_DeathTest, pthread_setschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy = 0;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_setschedparam(null_thread, policy, &param));
}
634
// Using a stale pthread_t aborts on bionic.
TEST_F(pthread_DeathTest, pthread_setschedprio__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setschedprio(dead_thread, 123), "invalid pthread_t");
}
641
// A zero pthread_t is reported as ESRCH, not an abort.
TEST_F(pthread_DeathTest, pthread_setschedprio__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_setschedprio(null_thread, 123));
}
646
// Using a stale pthread_t aborts on bionic.
TEST_F(pthread_DeathTest, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_join(dead_thread, nullptr), "invalid pthread_t");
}
653
654TEST_F(pthread_DeathTest, pthread_join__null_thread) {
655 pthread_t null_thread = 0;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700656 EXPECT_EQ(ESRCH, pthread_join(null_thread, nullptr));
Elliott Hughes7484c212017-02-02 02:41:38 +0000657}
658
Elliott Hughes11859d42017-02-13 17:59:29 -0800659TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) {
Elliott Hughes7484c212017-02-02 02:41:38 +0000660 pthread_t dead_thread;
661 MakeDeadThread(dead_thread);
662
Elliott Hughes6ce686c2017-02-21 13:15:20 -0800663 EXPECT_DEATH(pthread_kill(dead_thread, 0), "invalid pthread_t");
664}
665
666TEST_F(pthread_DeathTest, pthread_kill__null_thread) {
667 pthread_t null_thread = 0;
668 EXPECT_EQ(ESRCH, pthread_kill(null_thread, 0));
Elliott Hughes7484c212017-02-02 02:41:38 +0000669}
670
// A second simultaneous join on the same thread should fail with EINVAL,
// without disturbing the join that was already in progress.
TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spin_helper;

  // t1 runs the spin helper's function (presumably spinning until UnSpin()
  // below -- see SpinFunctionHelper).
  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // t2 joins t1, and blocks because t1 is still spinning.
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700692
TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  // Iterate many times with a caller-provided stack to give the race a
  // chance to fire; unmapping immediately after the join is the trigger.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 640*1024;
    void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, nullptr));
    ASSERT_EQ(0, pthread_join(t, nullptr));
    // If the join returned while the thread was still running on this
    // stack, this munmap would pull the stack out from under it.
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}
710
// Thread start routine: reports the calling thread's guard size (as seen
// via pthread_getattr_np) through the size_t* passed as the argument.
static void* GetActualGuardSizeFn(void* result_out) {
  pthread_attr_t self_attributes;
  pthread_getattr_np(pthread_self(), &self_attributes);
  pthread_attr_getguardsize(&self_attributes, reinterpret_cast<size_t*>(result_out));
  return nullptr;
}
717
718static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
719 size_t result;
720 pthread_t t;
721 pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700722 pthread_join(t, nullptr);
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700723 return result;
724}
725
// Thread start routine: reports the calling thread's actual stack size (as
// seen via pthread_getattr_np) through the size_t* passed as the argument.
static void* GetActualStackSizeFn(void* result_out) {
  pthread_attr_t self_attributes;
  pthread_getattr_np(pthread_self(), &self_attributes);
  pthread_attr_getstacksize(&self_attributes, reinterpret_cast<size_t*>(result_out));
  return nullptr;
}
732
733static size_t GetActualStackSize(const pthread_attr_t& attributes) {
734 size_t result;
735 pthread_t t;
736 pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
Yi Kong32bc0fc2018-08-02 17:31:13 -0700737 pthread_join(t, nullptr);
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700738 return result;
739}
740
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700741TEST(pthread, pthread_attr_setguardsize_tiny) {
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700742 pthread_attr_t attributes;
743 ASSERT_EQ(0, pthread_attr_init(&attributes));
744
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700745 // No such thing as too small: will be rounded up to one page by pthread_create.
746 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
747 size_t guard_size;
748 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
749 ASSERT_EQ(128U, guard_size);
750 ASSERT_EQ(4096U, GetActualGuardSize(attributes));
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700751}
752
753TEST(pthread, pthread_attr_setguardsize_reasonable) {
754 pthread_attr_t attributes;
755 ASSERT_EQ(0, pthread_attr_init(&attributes));
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700756
757 // Large enough and a multiple of the page size.
758 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700759 size_t guard_size;
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700760 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
761 ASSERT_EQ(32*1024U, guard_size);
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700762 ASSERT_EQ(32*1024U, GetActualGuardSize(attributes));
763}
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700764
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700765TEST(pthread, pthread_attr_setguardsize_needs_rounding) {
766 pthread_attr_t attributes;
767 ASSERT_EQ(0, pthread_attr_init(&attributes));
768
769 // Large enough but not a multiple of the page size.
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700770 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700771 size_t guard_size;
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700772 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
773 ASSERT_EQ(32*1024U + 1, guard_size);
Elliott Hughesd6c678c2017-06-27 17:01:57 -0700774 ASSERT_EQ(36*1024U, GetActualGuardSize(attributes));
775}
776
777TEST(pthread, pthread_attr_setguardsize_enormous) {
778 pthread_attr_t attributes;
779 ASSERT_EQ(0, pthread_attr_init(&attributes));
780
781 // Larger than the stack itself. (Historically we mistakenly carved
782 // the guard out of the stack itself, rather than adding it after the
783 // end.)
784 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024*1024));
785 size_t guard_size;
786 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
787 ASSERT_EQ(32*1024*1024U, guard_size);
788 ASSERT_EQ(32*1024*1024U, GetActualGuardSize(attributes));
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700789}
790
// Checks that pthread_attr_setstacksize() rejects too-small sizes, stores
// accepted sizes verbatim, and that pthread_create rounds the actual stack
// up (bionic) where glibc documents that it rounds down.
TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  // A rejected size must leave the previously stored (default) size intact.
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}
Elliott Hughesc3f11402013-10-30 14:40:09 -0700823
Yabin Cui76615da2015-03-17 14:22:09 -0700824TEST(pthread, pthread_rwlockattr_smoke) {
825 pthread_rwlockattr_t attr;
826 ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
827
828 int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
829 for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
830 ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
831 int pshared;
832 ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
833 ASSERT_EQ(pshared_value_array[i], pshared);
834 }
835
836 int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
837 PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
838 for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
839 ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
840 int kind;
841 ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
842 ASSERT_EQ(kind_array[i], kind);
843 }
844
845 ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
846}
847
848TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
849 pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
850 pthread_rwlock_t lock2;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700851 ASSERT_EQ(0, pthread_rwlock_init(&lock2, nullptr));
Yabin Cui76615da2015-03-17 14:22:09 -0700852 ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
853}
854
// Single-threaded smoke test of the rwlock lock/trylock/unlock state machine,
// including the EBUSY results for contended trylocks and the EDEADLK results
// for relocking a lock this thread already holds for writing.
TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, nullptr));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock: while write-locked, both trywrlock and tryrdlock fail.
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock: more readers may join, but a writer can't.
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}
902
// Shared state between the main (test) thread and an rwlock helper thread.
struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  // Progress markers used to sequence the two threads; see the helper
  // functions below for the exact hand-off protocol.
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED,
    LOCK_TIMEDOUT,
  };
  std::atomic<Progress> progress;
  // Helper thread's tid, so the main thread can WaitUntilThreadSleep() on it.
  std::atomic<pid_t> tid;
  // Lock operations under test, supplied by each individual test case.
  std::function<int (pthread_rwlock_t*)> trylock_function;
  std::function<int (pthread_rwlock_t*)> lock_function;
  std::function<int (pthread_rwlock_t*, const timespec*)> timed_lock_function;
  // Clock the timed_lock_function's deadlines are measured against.
  clockid_t clock;
};
919
// Runs in a helper thread while the main thread holds arg->lock: verifies the
// lock is busy (trylock fails), then blocks in lock_function until the main
// thread releases the lock, recording progress at each step.
static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) {
  // Publish our tid so the main thread can wait for us via WaitUntilThreadSleep.
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));
  ASSERT_EQ(0, arg->lock_function(&arg->lock));
  // The main thread sets LOCK_RELEASED just before unlocking, so by the time
  // the blocking lock succeeds that marker must be visible.
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}
932
// Takes a read lock on the main thread, starts a helper that blocks trying to
// acquire the write lock via lock_function, then drops the read lock and
// checks the helper was woken and completed.
static void test_pthread_rwlock_reader_wakeup_writer(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  // Wait until the helper is actually blocked in the kernel before releasing.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
955
Yabin Cuic9a659c2015-11-05 15:36:08 -0800956TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
957 test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock);
Yabin Cui08ee8d22015-02-11 17:04:36 -0800958}
959
Yabin Cuic9a659c2015-11-05 15:36:08 -0800960TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) {
961 timespec ts;
962 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
963 ts.tv_sec += 1;
964 test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
965 return pthread_rwlock_timedwrlock(lock, &ts);
966 });
967}
968
Tom Cherryc6b5bcd2018-03-05 14:14:44 -0800969TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait_monotonic_np) {
970#if defined(__BIONIC__)
971 timespec ts;
972 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
973 ts.tv_sec += 1;
974 test_pthread_rwlock_reader_wakeup_writer(
975 [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedwrlock_monotonic_np(lock, &ts); });
976#else // __BIONIC__
977 GTEST_LOG_(INFO) << "This test does nothing since pthread_rwlock_timedwrlock_monotonic_np is "
978 "only supported on bionic";
979#endif // __BIONIC__
980}
981
// Takes the write lock on the main thread, starts a helper that blocks trying
// to acquire the read lock via lock_function, then drops the write lock and
// checks the helper was woken and completed.
static void test_pthread_rwlock_writer_wakeup_reader(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  // Wait until the helper is actually blocked in the kernel before releasing.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
1004
Yabin Cuic9a659c2015-11-05 15:36:08 -08001005TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
1006 test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock);
1007}
1008
1009TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) {
1010 timespec ts;
1011 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1012 ts.tv_sec += 1;
1013 test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
1014 return pthread_rwlock_timedrdlock(lock, &ts);
1015 });
1016}
1017
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001018TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait_monotonic_np) {
1019#if defined(__BIONIC__)
1020 timespec ts;
1021 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
1022 ts.tv_sec += 1;
1023 test_pthread_rwlock_writer_wakeup_reader(
1024 [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedrdlock_monotonic_np(lock, &ts); });
1025#else // __BIONIC__
1026 GTEST_LOG_(INFO) << "This test does nothing since pthread_rwlock_timedrdlock_monotonic_np is "
1027 "only supported on bionic";
1028#endif // __BIONIC__
1029}
1030
// Runs in a helper thread while the main thread holds arg->lock: exercises
// the timed-lock function's failure paths — an already-expired deadline,
// invalid tv_nsec values (EINVAL), a negative tv_sec, and a deadline one
// second out that must time out because the lock is never released.
static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));

  timespec ts;
  // "Now" is already in the past by the time the call runs: immediate timeout.
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  // Out-of-range nanoseconds must be rejected, not treated as a timeout.
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  // Valid nanoseconds but a negative (pre-epoch) deadline: times out.
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  // A real one-second wait that must still time out (lock is never freed).
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT;
}
1054
// Holds the write lock on the main thread and runs
// pthread_rwlock_wakeup_timeout_helper in a second thread, so the given
// timed read-lock function is exercised against a lock that never frees up.
static void pthread_rwlock_timedrdlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  // Wait for the helper to be blocked before checking its progress marker.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
1077
1078TEST(pthread, pthread_rwlock_timedrdlock_timeout) {
1079 pthread_rwlock_timedrdlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedrdlock);
1080}
1081
1082TEST(pthread, pthread_rwlock_timedrdlock_monotonic_np_timeout) {
1083#if defined(__BIONIC__)
1084 pthread_rwlock_timedrdlock_timeout_helper(CLOCK_MONOTONIC,
1085 pthread_rwlock_timedrdlock_monotonic_np);
1086#else // __BIONIC__
1087 GTEST_LOG_(INFO) << "This test does nothing since pthread_rwlock_timedrdlock_monotonic_np is "
1088 "only supported on bionic";
1089#endif // __BIONIC__
1090}
1091
// Holds a read lock on the main thread and runs
// pthread_rwlock_wakeup_timeout_helper in a second thread, so the given
// timed write-lock function is exercised against a lock that never frees up.
static void pthread_rwlock_timedwrlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  // Wait for the helper to be blocked before checking its progress marker.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
1114
1115TEST(pthread, pthread_rwlock_timedwrlock_timeout) {
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001116 pthread_rwlock_timedwrlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedwrlock);
1117}
Yabin Cuic9a659c2015-11-05 15:36:08 -08001118
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001119TEST(pthread, pthread_rwlock_timedwrlock_monotonic_np_timeout) {
1120#if defined(__BIONIC__)
1121 pthread_rwlock_timedwrlock_timeout_helper(CLOCK_MONOTONIC,
1122 pthread_rwlock_timedwrlock_monotonic_np);
1123#else // __BIONIC__
1124 GTEST_LOG_(INFO) << "This test does nothing since pthread_rwlock_timedwrlock_monotonic_np is "
1125 "only supported on bionic";
1126#endif // __BIONIC__
Yabin Cuic9a659c2015-11-05 15:36:08 -08001127}
1128
// Creates an rwlock with an explicit PTHREAD_RWLOCK_PREFER_* kind and spawns
// reader/writer threads against it, so tests can observe how the kind affects
// which waiters make progress.
class RwlockKindTestHelper {
 private:
  // Per-thread argument, heap-allocated by Create*Thread and deleted by the
  // thread function itself once it has finished with it.
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  explicit RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  // Starts a thread that write-locks then unlocks `lock`; publishes the
  // thread's tid through `tid` so callers can WaitUntilThreadSleep() on it.
  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  // Same as CreateWriterThread, but the thread takes a read lock.
  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};
1196
// With PTHREAD_RWLOCK_PREFER_READER_NP, a new reader can acquire the lock
// even while a writer is queued: the reader join below completes while the
// main thread still holds its read lock and the writer is still waiting.
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  // The writer blocks behind our read lock.
  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // The reader gets in ahead of the queued writer.
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));

  // Releasing our read lock finally lets the writer run.
  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
}
1214
// With PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, a queued writer blocks
// later readers: both helper threads stay asleep until the main thread drops
// its read lock.
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  // The writer blocks behind our read lock.
  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // The reader also blocks, because a writer is already waiting.
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));
}
1233
// Counts how many times OnceFn has run; checked by pthread_once_smoke below.
static int g_once_fn_call_count = 0;
static void OnceFn() {
  g_once_fn_call_count += 1;
}
1238
1239TEST(pthread, pthread_once_smoke) {
1240 pthread_once_t once_control = PTHREAD_ONCE_INIT;
1241 ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
1242 ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
Elliott Hughes1728b232014-05-14 10:02:03 -07001243 ASSERT_EQ(1, g_once_fn_call_count);
Elliott Hughesc3f11402013-10-30 14:40:09 -07001244}
1245
// State for the pthread_once_1934122 test below: each routine appends its
// digit so the test can verify both ran, and in the expected order.
static std::string pthread_once_1934122_result = "";

// Init routine for the inner (nested) pthread_once.
static void Routine2() {
  pthread_once_1934122_result += "2";
}

// Init routine for the outer pthread_once; calls pthread_once again from
// inside an init routine, which is the case regression-tested below.
static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}
1257
TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  // "12" proves Routine1 ran first and successfully nested into Routine2.
  ASSERT_EQ("12", pthread_once_1934122_result);
}
1265
// Each handler shifts the counter left one decimal digit and appends its own
// number, so the final value encodes both which handlers ran and their order
// (e.g. 12 means handler 1 ran before handler 2, 21 the reverse).
static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }
Elliott Hughesc3f11402013-10-30 14:40:09 -07001275
// Checks pthread_atfork handler ordering across fork(): parent and child
// handlers run in registration order (12), prepare handlers in reverse (21).
TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(12, g_atfork_child_calls);
    // _exit (not exit) so the child doesn't run the parent's atexit handlers.
    _exit(0);
  }
  ASSERT_EQ(12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(21, g_atfork_prepare_calls);
  AssertChildExited(pid, 0);
}
1294
Elliott Hughesc3f11402013-10-30 14:40:09 -07001295TEST(pthread, pthread_attr_getscope) {
1296 pthread_attr_t attr;
1297 ASSERT_EQ(0, pthread_attr_init(&attr));
1298
1299 int scope;
1300 ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
1301 ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
1302}
Narayan Kamath51e6cb32014-03-03 15:38:51 +00001303
1304TEST(pthread, pthread_condattr_init) {
1305 pthread_condattr_t attr;
1306 pthread_condattr_init(&attr);
1307
1308 clockid_t clock;
1309 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1310 ASSERT_EQ(CLOCK_REALTIME, clock);
1311
1312 int pshared;
1313 ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
1314 ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
1315}
1316
1317TEST(pthread, pthread_condattr_setclock) {
1318 pthread_condattr_t attr;
1319 pthread_condattr_init(&attr);
1320
1321 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
1322 clockid_t clock;
1323 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1324 ASSERT_EQ(CLOCK_REALTIME, clock);
1325
1326 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1327 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1328 ASSERT_EQ(CLOCK_MONOTONIC, clock);
1329
1330 ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
1331}
1332
// Signal/broadcast mutate a bionic cond var's internal state word; this test
// checks that the condattr-derived flag bits stored there survive those calls.
TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  // Use non-default settings so preservation is actually observable.
  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  // Bionic implementation detail: the first word of __private holds the flag
  // bits in the same layout as pthread_condattr_t, so it can be read back as
  // an attribute object.
  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif // !defined(__BIONIC__)
}
1358
// Fixture for condition-variable wakeup tests. A worker thread blocks in a
// caller-supplied wait function; the test body then sets `progress` to
// SIGNALED and wakes it with signal/broadcast. TearDown joins the worker and
// checks the full INITIALIZED -> WAITING -> SIGNALED -> FINISHED handshake.
class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  // Handshake states shared between the test body and the worker thread.
  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;
  // The wait primitive under test (pthread_cond_wait or a timedwait variant).
  std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function;

 protected:
  void SetUp() override {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  }

  // Initializes `cond` with a condattr using the given clock.
  void InitCond(clockid_t clock=CLOCK_REALTIME) {
    pthread_condattr_t attr;
    ASSERT_EQ(0, pthread_condattr_init(&attr));
    ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock));
    ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
    ASSERT_EQ(0, pthread_condattr_destroy(&attr));
  }

  // Spawns the worker and polls until it reports WAITING; the trailing usleep
  // gives it extra time to actually block inside wait_function.
  void StartWaitingThread(std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function) {
    progress = INITIALIZED;
    this->wait_function = wait_function;
    ASSERT_EQ(0, pthread_create(&thread, nullptr, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
    while (progress != WAITING) {
      usleep(5000);
    }
    usleep(5000);
  }

  void TearDown() override {
    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

 private:
  // Worker body: re-waits on spurious wakeups (progress still WAITING), then
  // records that it observed the signal and ran to completion.
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};
1416
Yabin Cuic9a659c2015-11-05 15:36:08 -08001417TEST_F(pthread_CondWakeupTest, signal_wait) {
1418 InitCond();
1419 StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1420 return pthread_cond_wait(cond, mutex);
1421 });
Yabin Cui32651b82015-03-13 20:30:00 -07001422 progress = SIGNALED;
Yabin Cuic9a659c2015-11-05 15:36:08 -08001423 ASSERT_EQ(0, pthread_cond_signal(&cond));
Yabin Cui32651b82015-03-13 20:30:00 -07001424}
1425
Yabin Cuic9a659c2015-11-05 15:36:08 -08001426TEST_F(pthread_CondWakeupTest, broadcast_wait) {
1427 InitCond();
1428 StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1429 return pthread_cond_wait(cond, mutex);
1430 });
Yabin Cui32651b82015-03-13 20:30:00 -07001431 progress = SIGNALED;
Yabin Cuic9a659c2015-11-05 15:36:08 -08001432 ASSERT_EQ(0, pthread_cond_broadcast(&cond));
Narayan Kamath51e6cb32014-03-03 15:38:51 +00001433}
Elliott Hughes0e714a52014-03-03 16:42:47 -08001434
Yabin Cuic9a659c2015-11-05 15:36:08 -08001435TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) {
1436 InitCond(CLOCK_REALTIME);
Elliott Hughes0e714a52014-03-03 16:42:47 -08001437 timespec ts;
1438 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001439 ts.tv_sec += 1;
1440 StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1441 return pthread_cond_timedwait(cond, mutex, &ts);
1442 });
1443 progress = SIGNALED;
1444 ASSERT_EQ(0, pthread_cond_signal(&cond));
1445}
Elliott Hughes0e714a52014-03-03 16:42:47 -08001446
Yabin Cuic9a659c2015-11-05 15:36:08 -08001447TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) {
1448 InitCond(CLOCK_MONOTONIC);
1449 timespec ts;
1450 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
1451 ts.tv_sec += 1;
1452 StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1453 return pthread_cond_timedwait(cond, mutex, &ts);
1454 });
1455 progress = SIGNALED;
1456 ASSERT_EQ(0, pthread_cond_signal(&cond));
1457}
Elliott Hughes0e714a52014-03-03 16:42:47 -08001458
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001459TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC_np) {
1460#if defined(__BIONIC__)
1461 InitCond(CLOCK_REALTIME);
1462 timespec ts;
1463 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
1464 ts.tv_sec += 1;
1465 StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1466 return pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
1467 });
1468 progress = SIGNALED;
1469 ASSERT_EQ(0, pthread_cond_signal(&cond));
1470#else // __BIONIC__
1471 GTEST_LOG_(INFO) << "This test does nothing since pthread_cond_timedwait_monotonic_np is only "
1472 "supported on bionic";
1473#endif // __BIONIC__
1474}
1475
1476static void pthread_cond_timedwait_timeout_helper(clockid_t clock,
1477 int (*wait_function)(pthread_cond_t* __cond,
1478 pthread_mutex_t* __mutex,
1479 const timespec* __timeout)) {
Yabin Cuic9a659c2015-11-05 15:36:08 -08001480 pthread_mutex_t mutex;
1481 ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
1482 pthread_cond_t cond;
1483 ASSERT_EQ(0, pthread_cond_init(&cond, nullptr));
1484 ASSERT_EQ(0, pthread_mutex_lock(&mutex));
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001485
Yabin Cuic9a659c2015-11-05 15:36:08 -08001486 timespec ts;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001487 ASSERT_EQ(0, clock_gettime(clock, &ts));
1488 ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001489 ts.tv_nsec = -1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001490 ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001491 ts.tv_nsec = NS_PER_S;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001492 ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001493 ts.tv_nsec = NS_PER_S - 1;
1494 ts.tv_sec = -1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001495 ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08001496 ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
Elliott Hughes0e714a52014-03-03 16:42:47 -08001497}
Elliott Hughes57b7a612014-08-25 17:26:50 -07001498
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001499TEST(pthread, pthread_cond_timedwait_timeout) {
1500 pthread_cond_timedwait_timeout_helper(CLOCK_REALTIME, pthread_cond_timedwait);
1501}
1502
1503TEST(pthread, pthread_cond_timedwait_monotonic_np_timeout) {
1504#if defined(__BIONIC__)
1505 pthread_cond_timedwait_timeout_helper(CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
1506#else // __BIONIC__
1507 GTEST_LOG_(INFO) << "This test does nothing since pthread_cond_timedwait_monotonic_np is only "
1508 "supported on bionic";
1509#endif // __BIONIC__
1510}
1511
Elliott Hughes57b7a612014-08-25 17:26:50 -07001512TEST(pthread, pthread_attr_getstack__main_thread) {
1513 // This test is only meaningful for the main thread, so make sure we're running on it!
1514 ASSERT_EQ(getpid(), syscall(__NR_gettid));
1515
1516 // Get the main thread's attributes.
1517 pthread_attr_t attributes;
1518 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
1519
1520 // Check that we correctly report that the main thread has no guard page.
1521 size_t guard_size;
1522 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
1523 ASSERT_EQ(0U, guard_size); // The main thread has no guard page.
1524
1525 // Get the stack base and the stack size (both ways).
1526 void* stack_base;
1527 size_t stack_size;
1528 ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
1529 size_t stack_size2;
1530 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));
1531
1532 // The two methods of asking for the stack size should agree.
1533 EXPECT_EQ(stack_size, stack_size2);
1534
Yabin Cuib0c6f2db2015-05-19 15:09:23 -07001535#if defined(__BIONIC__)
dimitry6dfa5b52018-01-30 13:24:28 +01001536 // Find stack in /proc/self/maps using a pointer to the stack.
1537 //
1538 // We do not use "[stack]" label because in native-bridge environment it is not
1539 // guaranteed to point to the right stack. A native bridge implementation may
1540 // keep separate stack for the guest code.
Yi Kong32bc0fc2018-08-02 17:31:13 -07001541 void* maps_stack_hi = nullptr;
Elliott Hughes15dfd632015-09-22 16:40:14 -07001542 std::vector<map_record> maps;
1543 ASSERT_TRUE(Maps::parse_maps(&maps));
dimitry6dfa5b52018-01-30 13:24:28 +01001544 uintptr_t stack_address = reinterpret_cast<uintptr_t>(&maps_stack_hi);
Elliott Hughes0b2acdf2015-10-02 18:25:19 -07001545 for (const auto& map : maps) {
dimitry6dfa5b52018-01-30 13:24:28 +01001546 if (map.addr_start <= stack_address && map.addr_end > stack_address){
Elliott Hughes15dfd632015-09-22 16:40:14 -07001547 maps_stack_hi = reinterpret_cast<void*>(map.addr_end);
Elliott Hughes57b7a612014-08-25 17:26:50 -07001548 break;
1549 }
1550 }
Elliott Hughes57b7a612014-08-25 17:26:50 -07001551
dimitry6dfa5b52018-01-30 13:24:28 +01001552 // The high address of the /proc/self/maps stack region should equal stack_base + stack_size.
Yabin Cuib0c6f2db2015-05-19 15:09:23 -07001553 // Remember that the stack grows down (and is mapped in on demand), so the low address of the
1554 // region isn't very interesting.
1555 EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);
1556
Elliott Hughes9e4ffa72014-08-27 15:32:01 -07001557 // The stack size should correspond to RLIMIT_STACK.
Elliott Hughes57b7a612014-08-25 17:26:50 -07001558 rlimit rl;
Elliott Hughes9e4ffa72014-08-27 15:32:01 -07001559 ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
Elliott Hughes27a9aed2014-09-04 16:09:25 -07001560 uint64_t original_rlim_cur = rl.rlim_cur;
Elliott Hughes27a9aed2014-09-04 16:09:25 -07001561 if (rl.rlim_cur == RLIM_INFINITY) {
1562 rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
1563 }
Elliott Hughes9e4ffa72014-08-27 15:32:01 -07001564 EXPECT_EQ(rl.rlim_cur, stack_size);
1565
Tom Cherryb8ab6182017-04-05 16:20:29 -07001566 auto guard = android::base::make_scope_guard([&rl, original_rlim_cur]() {
Elliott Hughes27a9aed2014-09-04 16:09:25 -07001567 rl.rlim_cur = original_rlim_cur;
1568 ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
1569 });
1570
Elliott Hughes9e4ffa72014-08-27 15:32:01 -07001571 //
1572 // What if RLIMIT_STACK is smaller than the stack's current extent?
1573 //
Elliott Hughes57b7a612014-08-25 17:26:50 -07001574 rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
1575 rl.rlim_max = RLIM_INFINITY;
1576 ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
1577
1578 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
1579 ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
1580 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));
1581
1582 EXPECT_EQ(stack_size, stack_size2);
1583 ASSERT_EQ(1024U, stack_size);
1584
1585 //
Elliott Hughes9e4ffa72014-08-27 15:32:01 -07001586 // What if RLIMIT_STACK isn't a whole number of pages?
Elliott Hughes57b7a612014-08-25 17:26:50 -07001587 //
1588 rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
1589 rl.rlim_max = RLIM_INFINITY;
1590 ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
1591
1592 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
1593 ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
1594 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));
1595
1596 EXPECT_EQ(stack_size, stack_size2);
1597 ASSERT_EQ(6666U, stack_size);
Yabin Cuib0c6f2db2015-05-19 15:09:23 -07001598#endif
Elliott Hughes57b7a612014-08-25 17:26:50 -07001599}
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001600
Mor-sarid, Nitzan56933322015-09-11 05:31:36 +00001601struct GetStackSignalHandlerArg {
1602 volatile bool done;
Chih-Hung Hsieh9af13d22016-06-02 14:40:09 -07001603 void* signal_stack_base;
1604 size_t signal_stack_size;
Mor-sarid, Nitzan56933322015-09-11 05:31:36 +00001605 void* main_stack_base;
1606 size_t main_stack_size;
1607};
1608
1609static GetStackSignalHandlerArg getstack_signal_handler_arg;
1610
// SIGUSR1 handler: runs on the alternate signal stack (SA_ONSTACK) and checks
// that pthread_attr_getstack still reports the main thread's real stack, not
// the alternate one.
static void getstack_signal_handler(int sig) {
  ASSERT_EQ(SIGUSR1, sig);
  // Use sleep() to make current thread be switched out by the kernel to provoke the error.
  sleep(1);
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));

  // Verify if the stack used by the signal handler is the alternate stack just registered.
  // (&attr is a local, so its address shows which stack we're executing on.)
  ASSERT_LE(getstack_signal_handler_arg.signal_stack_base, &attr);
  ASSERT_LT(static_cast<void*>(&attr),
            static_cast<char*>(getstack_signal_handler_arg.signal_stack_base) +
            getstack_signal_handler_arg.signal_stack_size);

  // Verify if the main thread's stack got in the signal handler is correct.
  ASSERT_EQ(getstack_signal_handler_arg.main_stack_base, stack_base);
  ASSERT_LE(getstack_signal_handler_arg.main_stack_size, stack_size);

  getstack_signal_handler_arg.done = true;
}
1633
// The previous code obtained the main thread's stack by reading the entry in
// /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64, the kernel
// relies on sp0 in task state segment(tss) to label the stack map with [stack]. If the kernel
// switches a process while the main thread is in an alternate stack, then the kernel will label
// the wrong map with [stack]. This test verifies that when the above situation happens, the main
// thread's stack is found correctly.
TEST(pthread, pthread_attr_getstack_in_signal_handler) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Map an alternate signal stack and register it with sigaltstack, keeping
  // the previous settings in `oss` so they can be restored afterwards.
  const size_t sig_stack_size = 16 * 1024;
  void* sig_stack = mmap(nullptr, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                         -1, 0);
  ASSERT_NE(MAP_FAILED, sig_stack);
  stack_t ss;
  ss.ss_sp = sig_stack;
  ss.ss_size = sig_stack_size;
  ss.ss_flags = 0;
  stack_t oss;
  ASSERT_EQ(0, sigaltstack(&ss, &oss));

  // Capture the main thread's stack before taking the signal, for comparison
  // inside the handler.
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* main_stack_base;
  size_t main_stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &main_stack_base, &main_stack_size));

  // ScopedSignalHandler (test utility) installs the handler for this scope;
  // SA_ONSTACK makes it run on the alternate stack registered above.
  ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
  getstack_signal_handler_arg.done = false;
  getstack_signal_handler_arg.signal_stack_base = sig_stack;
  getstack_signal_handler_arg.signal_stack_size = sig_stack_size;
  getstack_signal_handler_arg.main_stack_base = main_stack_base;
  getstack_signal_handler_arg.main_stack_size = main_stack_size;
  kill(getpid(), SIGUSR1);
  ASSERT_EQ(true, getstack_signal_handler_arg.done);

  // Restore the previous alternate-stack settings and release the mapping.
  ASSERT_EQ(0, sigaltstack(&oss, nullptr));
  ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
}
1673
Yabin Cui917d3902015-01-08 12:32:42 -08001674static void pthread_attr_getstack_18908062_helper(void*) {
1675 char local_variable;
1676 pthread_attr_t attributes;
1677 pthread_getattr_np(pthread_self(), &attributes);
1678 void* stack_base;
1679 size_t stack_size;
1680 pthread_attr_getstack(&attributes, &stack_base, &stack_size);
1681
1682 // Test whether &local_variable is in [stack_base, stack_base + stack_size).
1683 ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
1684 ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
1685}
1686
1687// Check whether something on stack is in the range of
1688// [stack_base, stack_base + stack_size). see b/18908062.
1689TEST(pthread, pthread_attr_getstack_18908062) {
1690 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001691 ASSERT_EQ(0, pthread_create(&t, nullptr,
Yabin Cui917d3902015-01-08 12:32:42 -08001692 reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
Yi Kong32bc0fc2018-08-02 17:31:13 -07001693 nullptr));
1694 ASSERT_EQ(0, pthread_join(t, nullptr));
Yabin Cui917d3902015-01-08 12:32:42 -08001695}
1696
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001697#if defined(__BIONIC__)
Elliott Hughesf2083612015-11-11 13:32:28 -08001698static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER;
1699
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001700static void* pthread_gettid_np_helper(void* arg) {
1701 *reinterpret_cast<pid_t*>(arg) = gettid();
Elliott Hughesf2083612015-11-11 13:32:28 -08001702
1703 // Wait for our parent to call pthread_gettid_np on us before exiting.
1704 pthread_mutex_lock(&pthread_gettid_np_mutex);
1705 pthread_mutex_unlock(&pthread_gettid_np_mutex);
Yi Kong32bc0fc2018-08-02 17:31:13 -07001706 return nullptr;
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001707}
1708#endif
1709
1710TEST(pthread, pthread_gettid_np) {
1711#if defined(__BIONIC__)
1712 ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));
1713
Elliott Hughesf2083612015-11-11 13:32:28 -08001714 // Ensure the other thread doesn't exit until after we've called
1715 // pthread_gettid_np on it.
1716 pthread_mutex_lock(&pthread_gettid_np_mutex);
1717
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001718 pid_t t_gettid_result;
1719 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001720 pthread_create(&t, nullptr, pthread_gettid_np_helper, &t_gettid_result);
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001721
1722 pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);
1723
Elliott Hughesf2083612015-11-11 13:32:28 -08001724 // Release the other thread and wait for it to exit.
1725 pthread_mutex_unlock(&pthread_gettid_np_mutex);
Yi Kong32bc0fc2018-08-02 17:31:13 -07001726 ASSERT_EQ(0, pthread_join(t, nullptr));
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001727
1728 ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
1729#else
1730 GTEST_LOG_(INFO) << "This test does nothing.\n";
1731#endif
1732}
Elliott Hughes34c987a2014-09-22 16:01:26 -07001733
// Number of times CountCleanupRoutine has run.
static size_t cleanup_counter = 0;

// Cleanup handler that must never execute; aborts the process if it does.
static void AbortCleanupRoutine(void*) {
  abort();
}

// Cleanup handler that records each execution in cleanup_counter.
static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}
1743
Derek Xue41996952014-09-25 11:05:32 +01001744static void PthreadCleanupTester() {
Yi Kong32bc0fc2018-08-02 17:31:13 -07001745 pthread_cleanup_push(CountCleanupRoutine, nullptr);
1746 pthread_cleanup_push(CountCleanupRoutine, nullptr);
1747 pthread_cleanup_push(AbortCleanupRoutine, nullptr);
Elliott Hughes34c987a2014-09-22 16:01:26 -07001748
1749 pthread_cleanup_pop(0); // Pop the abort without executing it.
1750 pthread_cleanup_pop(1); // Pop one count while executing it.
1751 ASSERT_EQ(1U, cleanup_counter);
1752 // Exit while the other count is still on the cleanup stack.
Yi Kong32bc0fc2018-08-02 17:31:13 -07001753 pthread_exit(nullptr);
Elliott Hughes34c987a2014-09-22 16:01:26 -07001754
1755 // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
1756 pthread_cleanup_pop(0);
1757}
1758
Derek Xue41996952014-09-25 11:05:32 +01001759static void* PthreadCleanupStartRoutine(void*) {
Elliott Hughes34c987a2014-09-22 16:01:26 -07001760 PthreadCleanupTester();
Yi Kong32bc0fc2018-08-02 17:31:13 -07001761 return nullptr;
Elliott Hughes34c987a2014-09-22 16:01:26 -07001762}
1763
1764TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
1765 pthread_t t;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001766 ASSERT_EQ(0, pthread_create(&t, nullptr, PthreadCleanupStartRoutine, nullptr));
1767 ASSERT_EQ(0, pthread_join(t, nullptr));
Elliott Hughes34c987a2014-09-22 16:01:26 -07001768 ASSERT_EQ(2U, cleanup_counter);
1769}
Derek Xue41996952014-09-25 11:05:32 +01001770
// The default mutex type must be NORMAL (the tests below rely on this).
TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}
1774
1775TEST(pthread, pthread_mutexattr_gettype) {
1776 pthread_mutexattr_t attr;
1777 ASSERT_EQ(0, pthread_mutexattr_init(&attr));
1778
1779 int attr_type;
1780
1781 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
1782 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1783 ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);
1784
1785 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
1786 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1787 ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);
1788
1789 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
1790 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1791 ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001792
1793 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
1794}
1795
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001796TEST(pthread, pthread_mutexattr_protocol) {
1797 pthread_mutexattr_t attr;
1798 ASSERT_EQ(0, pthread_mutexattr_init(&attr));
1799
1800 int protocol;
1801 ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
1802 ASSERT_EQ(PTHREAD_PRIO_NONE, protocol);
1803 for (size_t repeat = 0; repeat < 2; ++repeat) {
1804 for (int set_protocol : {PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT}) {
1805 ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, set_protocol));
1806 ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
1807 ASSERT_EQ(protocol, set_protocol);
1808 }
1809 }
1810}
1811
Yabin Cui17393b02015-03-21 15:08:25 -07001812struct PthreadMutex {
1813 pthread_mutex_t lock;
1814
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001815 explicit PthreadMutex(int mutex_type, int protocol = PTHREAD_PRIO_NONE) {
1816 init(mutex_type, protocol);
Yabin Cui17393b02015-03-21 15:08:25 -07001817 }
1818
1819 ~PthreadMutex() {
1820 destroy();
1821 }
1822
1823 private:
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001824 void init(int mutex_type, int protocol) {
Yabin Cui17393b02015-03-21 15:08:25 -07001825 pthread_mutexattr_t attr;
1826 ASSERT_EQ(0, pthread_mutexattr_init(&attr));
1827 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001828 ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, protocol));
Yabin Cui17393b02015-03-21 15:08:25 -07001829 ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
1830 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
1831 }
1832
1833 void destroy() {
1834 ASSERT_EQ(0, pthread_mutex_destroy(&lock));
1835 }
1836
1837 DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
1838};
Derek Xue41996952014-09-25 11:05:32 +01001839
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001840static void TestPthreadMutexLockNormal(int protocol) {
1841 PthreadMutex m(PTHREAD_MUTEX_NORMAL, protocol);
Derek Xue41996952014-09-25 11:05:32 +01001842
Yabin Cui17393b02015-03-21 15:08:25 -07001843 ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
1844 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
Elliott Hughesd31d4c12015-12-14 17:35:10 -08001845 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
1846 ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
1847 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
Derek Xue41996952014-09-25 11:05:32 +01001848}
1849
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001850static void TestPthreadMutexLockErrorCheck(int protocol) {
1851 PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK, protocol);
Derek Xue41996952014-09-25 11:05:32 +01001852
Yabin Cui17393b02015-03-21 15:08:25 -07001853 ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
1854 ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
1855 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
1856 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001857 if (protocol == PTHREAD_PRIO_NONE) {
1858 ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
1859 } else {
1860 ASSERT_EQ(EDEADLK, pthread_mutex_trylock(&m.lock));
1861 }
Yabin Cui17393b02015-03-21 15:08:25 -07001862 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
1863 ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
Derek Xue41996952014-09-25 11:05:32 +01001864}
1865
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001866static void TestPthreadMutexLockRecursive(int protocol) {
1867 PthreadMutex m(PTHREAD_MUTEX_RECURSIVE, protocol);
Derek Xue41996952014-09-25 11:05:32 +01001868
Yabin Cui17393b02015-03-21 15:08:25 -07001869 ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
1870 ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
1871 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
1872 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
1873 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
Elliott Hughesd31d4c12015-12-14 17:35:10 -08001874 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
1875 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
Yabin Cui17393b02015-03-21 15:08:25 -07001876 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
1877 ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
1878}
1879
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001880TEST(pthread, pthread_mutex_lock_NORMAL) {
1881 TestPthreadMutexLockNormal(PTHREAD_PRIO_NONE);
1882}
1883
1884TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
1885 TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_NONE);
1886}
1887
1888TEST(pthread, pthread_mutex_lock_RECURSIVE) {
1889 TestPthreadMutexLockRecursive(PTHREAD_PRIO_NONE);
1890}
1891
1892TEST(pthread, pthread_mutex_lock_pi) {
Yabin Cui6b9c85b2018-01-23 12:56:18 -08001893 TestPthreadMutexLockNormal(PTHREAD_PRIO_INHERIT);
1894 TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_INHERIT);
1895 TestPthreadMutexLockRecursive(PTHREAD_PRIO_INHERIT);
1896}
1897
Yabin Cui5a00ba72018-01-26 17:32:31 -08001898TEST(pthread, pthread_mutex_pi_count_limit) {
1899#if defined(__BIONIC__) && !defined(__LP64__)
1900 // Bionic only supports 65536 pi mutexes in 32-bit programs.
1901 pthread_mutexattr_t attr;
1902 ASSERT_EQ(0, pthread_mutexattr_init(&attr));
1903 ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT));
1904 std::vector<pthread_mutex_t> mutexes(65536);
1905 // Test if we can use 65536 pi mutexes at the same time.
1906 // Run 2 times to check if freed pi mutexes can be recycled.
1907 for (int repeat = 0; repeat < 2; ++repeat) {
1908 for (auto& m : mutexes) {
1909 ASSERT_EQ(0, pthread_mutex_init(&m, &attr));
1910 }
1911 pthread_mutex_t m;
1912 ASSERT_EQ(ENOMEM, pthread_mutex_init(&m, &attr));
1913 for (auto& m : mutexes) {
1914 ASSERT_EQ(0, pthread_mutex_lock(&m));
1915 }
1916 for (auto& m : mutexes) {
1917 ASSERT_EQ(0, pthread_mutex_unlock(&m));
1918 }
1919 for (auto& m : mutexes) {
1920 ASSERT_EQ(0, pthread_mutex_destroy(&m));
1921 }
1922 }
1923 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
1924#else
1925 GTEST_LOG_(INFO) << "This test does nothing as pi mutex count isn't limited.\n";
1926#endif
1927}
1928
Yabin Cui17393b02015-03-21 15:08:25 -07001929TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
1930 pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
1931 PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
1932 ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
1933 pthread_mutex_destroy(&lock_normal);
1934
1935 pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
1936 PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
1937 ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
1938 pthread_mutex_destroy(&lock_errorcheck);
1939
1940 pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
1941 PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
1942 ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
1943 ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
Derek Xue41996952014-09-25 11:05:32 +01001944}
Yabin Cui5a00ba72018-01-26 17:32:31 -08001945
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001946class MutexWakeupHelper {
1947 private:
Yabin Cui17393b02015-03-21 15:08:25 -07001948 PthreadMutex m;
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001949 enum Progress {
1950 LOCK_INITIALIZED,
1951 LOCK_WAITING,
1952 LOCK_RELEASED,
1953 LOCK_ACCESSED
1954 };
1955 std::atomic<Progress> progress;
Yabin Cuif7969852015-04-02 17:47:48 -07001956 std::atomic<pid_t> tid;
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001957
1958 static void thread_fn(MutexWakeupHelper* helper) {
Yabin Cuif7969852015-04-02 17:47:48 -07001959 helper->tid = gettid();
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001960 ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
1961 helper->progress = LOCK_WAITING;
1962
Yabin Cui17393b02015-03-21 15:08:25 -07001963 ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001964 ASSERT_EQ(LOCK_RELEASED, helper->progress);
Yabin Cui17393b02015-03-21 15:08:25 -07001965 ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001966
1967 helper->progress = LOCK_ACCESSED;
1968 }
1969
1970 public:
Chih-Hung Hsieh62e3a072016-05-03 12:08:05 -07001971 explicit MutexWakeupHelper(int mutex_type) : m(mutex_type) {
Yabin Cui17393b02015-03-21 15:08:25 -07001972 }
1973
1974 void test() {
1975 ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001976 progress = LOCK_INITIALIZED;
Yabin Cuif7969852015-04-02 17:47:48 -07001977 tid = 0;
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001978
1979 pthread_t thread;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001980 ASSERT_EQ(0, pthread_create(&thread, nullptr,
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001981 reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));
1982
Yabin Cuif7969852015-04-02 17:47:48 -07001983 WaitUntilThreadSleep(tid);
1984 ASSERT_EQ(LOCK_WAITING, progress);
1985
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001986 progress = LOCK_RELEASED;
Yabin Cui17393b02015-03-21 15:08:25 -07001987 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001988
Yi Kong32bc0fc2018-08-02 17:31:13 -07001989 ASSERT_EQ(0, pthread_join(thread, nullptr));
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001990 ASSERT_EQ(LOCK_ACCESSED, progress);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001991 }
1992};
1993
1994TEST(pthread, pthread_mutex_NORMAL_wakeup) {
Yabin Cui17393b02015-03-21 15:08:25 -07001995 MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
1996 helper.test();
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001997}
1998
1999TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
Yabin Cui17393b02015-03-21 15:08:25 -07002000 MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
2001 helper.test();
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08002002}
2003
2004TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
Yabin Cui17393b02015-03-21 15:08:25 -07002005 MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
2006 helper.test();
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08002007}
2008
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002009static int GetThreadPriority(pid_t tid) {
2010 // sched_getparam() returns the static priority of a thread, which can't reflect a thread's
2011 // priority after priority inheritance. So read /proc/<pid>/stat to get the dynamic priority.
2012 std::string filename = android::base::StringPrintf("/proc/%d/stat", tid);
2013 std::string content;
2014 int result = INT_MAX;
2015 if (!android::base::ReadFileToString(filename, &content)) {
2016 return result;
2017 }
2018 std::vector<std::string> strs = android::base::Split(content, " ");
2019 if (strs.size() < 18) {
2020 return result;
2021 }
2022 if (!android::base::ParseInt(strs[17], &result)) {
2023 return INT_MAX;
2024 }
2025 return result;
2026}
2027
// Checks that while a higher-priority thread is blocked on a
// PTHREAD_PRIO_INHERIT mutex, the lower-priority owner's dynamic priority is
// boosted to the waiter's priority.
class PIMutexWakeupHelper {
private:
  PthreadMutex m;
  int protocol;
  // Handshake states between the main thread and the child thread.
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_CHILD_READY,
    LOCK_WAITING,
    LOCK_RELEASED,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> main_tid;
  std::atomic<pid_t> child_tid;
  // Held by the main thread to gate the child until the main thread is ready.
  PthreadMutex start_thread_m;

  static void thread_fn(PIMutexWakeupHelper* helper) {
    helper->child_tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    // Lower this thread's priority: nice 1 shows up as 21 in /proc/<tid>/stat.
    ASSERT_EQ(0, setpriority(PRIO_PROCESS, gettid(), 1));
    ASSERT_EQ(21, GetThreadPriority(gettid()));
    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    helper->progress = LOCK_CHILD_READY;
    // Blocks until the main thread releases start_thread_m.
    ASSERT_EQ(0, pthread_mutex_lock(&helper->start_thread_m.lock));

    ASSERT_EQ(0, pthread_mutex_unlock(&helper->start_thread_m.lock));
    // Wait for the main thread to block on m, then check whether our dynamic
    // priority was boosted to the main thread's (20) by priority inheritance.
    WaitUntilThreadSleep(helper->main_tid);
    ASSERT_EQ(LOCK_WAITING, helper->progress);

    if (helper->protocol == PTHREAD_PRIO_INHERIT) {
      ASSERT_EQ(20, GetThreadPriority(gettid()));
    } else {
      ASSERT_EQ(21, GetThreadPriority(gettid()));
    }
    helper->progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
  }

public:
  explicit PIMutexWakeupHelper(int mutex_type, int protocol)
      : m(mutex_type, protocol), protocol(protocol), start_thread_m(PTHREAD_MUTEX_NORMAL) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&start_thread_m.lock));
    main_tid = gettid();
    ASSERT_EQ(20, GetThreadPriority(main_tid));
    progress = LOCK_INITIALIZED;
    child_tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
              reinterpret_cast<void* (*)(void*)>(PIMutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(child_tid);
    ASSERT_EQ(LOCK_CHILD_READY, progress);
    // Let the child continue, then block on the PI mutex it now owns.
    ASSERT_EQ(0, pthread_mutex_unlock(&start_thread_m.lock));
    progress = LOCK_WAITING;
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));

    ASSERT_EQ(LOCK_RELEASED, progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
    ASSERT_EQ(0, pthread_join(thread, nullptr));
  }
};
2092
2093TEST(pthread, pthread_mutex_pi_wakeup) {
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002094 for (int type : {PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK}) {
2095 for (int protocol : {PTHREAD_PRIO_INHERIT}) {
2096 PIMutexWakeupHelper helper(type, protocol);
2097 helper.test();
2098 }
2099 }
2100}
2101
Yabin Cui140f3672015-02-03 10:32:00 -08002102TEST(pthread, pthread_mutex_owner_tid_limit) {
Yabin Cuie69c2452015-02-13 16:21:25 -08002103#if defined(__BIONIC__) && !defined(__LP64__)
Yabin Cui140f3672015-02-03 10:32:00 -08002104 FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
Yi Kong32bc0fc2018-08-02 17:31:13 -07002105 ASSERT_TRUE(fp != nullptr);
Yabin Cui140f3672015-02-03 10:32:00 -08002106 long pid_max;
2107 ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
2108 fclose(fp);
Yabin Cuie69c2452015-02-13 16:21:25 -08002109 // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
Yabin Cui140f3672015-02-03 10:32:00 -08002110 ASSERT_LE(pid_max, 65536);
Yabin Cuie69c2452015-02-13 16:21:25 -08002111#else
2112 GTEST_LOG_(INFO) << "This test does nothing as 32-bit tid is supported by pthread_mutex.\n";
2113#endif
Yabin Cui140f3672015-02-03 10:32:00 -08002114}
Yabin Cuib5845722015-03-16 22:46:42 -07002115
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002116static void pthread_mutex_timedlock_helper(clockid_t clock,
2117 int (*lock_function)(pthread_mutex_t* __mutex,
2118 const timespec* __timeout)) {
Yabin Cuic9a659c2015-11-05 15:36:08 -08002119 pthread_mutex_t m;
2120 ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
2121
2122 // If the mutex is already locked, pthread_mutex_timedlock should time out.
2123 ASSERT_EQ(0, pthread_mutex_lock(&m));
2124
2125 timespec ts;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002126 ASSERT_EQ(0, clock_gettime(clock, &ts));
2127 ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002128 ts.tv_nsec = -1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002129 ASSERT_EQ(EINVAL, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002130 ts.tv_nsec = NS_PER_S;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002131 ASSERT_EQ(EINVAL, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002132 ts.tv_nsec = NS_PER_S - 1;
2133 ts.tv_sec = -1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002134 ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002135
2136 // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
2137 ASSERT_EQ(0, pthread_mutex_unlock(&m));
2138
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002139 ASSERT_EQ(0, clock_gettime(clock, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002140 ts.tv_sec += 1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002141 ASSERT_EQ(0, lock_function(&m, &ts));
Yabin Cuic9a659c2015-11-05 15:36:08 -08002142
2143 ASSERT_EQ(0, pthread_mutex_unlock(&m));
2144 ASSERT_EQ(0, pthread_mutex_destroy(&m));
2145}
2146
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002147TEST(pthread, pthread_mutex_timedlock) {
2148 pthread_mutex_timedlock_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
2149}
2150
// The bionic-only _monotonic_np variant measures its timeout against CLOCK_MONOTONIC.
TEST(pthread, pthread_mutex_timedlock_monotonic_np) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else   // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_mutex_timedlock_monotonic_np is only "
                      "supported on bionic";
#endif  // __BIONIC__
}
2159
2160static void pthread_mutex_timedlock_pi_helper(clockid_t clock,
2161 int (*lock_function)(pthread_mutex_t* __mutex,
2162 const timespec* __timeout)) {
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002163 PthreadMutex m(PTHREAD_MUTEX_NORMAL, PTHREAD_PRIO_INHERIT);
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002164
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002165 timespec ts;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002166 clock_gettime(clock, &ts);
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002167 ts.tv_sec += 1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002168 ASSERT_EQ(0, lock_function(&m.lock, &ts));
2169
2170 struct ThreadArgs {
2171 clockid_t clock;
2172 int (*lock_function)(pthread_mutex_t* __mutex, const timespec* __timeout);
2173 PthreadMutex& m;
2174 };
2175
2176 ThreadArgs thread_args = {
2177 .clock = clock,
2178 .lock_function = lock_function,
2179 .m = m,
2180 };
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002181
2182 auto ThreadFn = [](void* arg) -> void* {
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002183 auto args = static_cast<ThreadArgs*>(arg);
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002184 timespec ts;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002185 clock_gettime(args->clock, &ts);
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002186 ts.tv_sec += 1;
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002187 intptr_t result = args->lock_function(&args->m.lock, &ts);
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002188 return reinterpret_cast<void*>(result);
2189 };
2190
2191 pthread_t thread;
Yi Kong32bc0fc2018-08-02 17:31:13 -07002192 ASSERT_EQ(0, pthread_create(&thread, nullptr, ThreadFn, &thread_args));
Yabin Cui6b9c85b2018-01-23 12:56:18 -08002193 void* result;
2194 ASSERT_EQ(0, pthread_join(thread, &result));
2195 ASSERT_EQ(ETIMEDOUT, reinterpret_cast<intptr_t>(result));
2196 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2197}
2198
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002199TEST(pthread, pthread_mutex_timedlock_pi) {
2200 pthread_mutex_timedlock_pi_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
2201}
2202
// Timed lock on a priority-inheritance mutex, CLOCK_MONOTONIC deadline (bionic only).
TEST(pthread, pthread_mutex_timedlock_monotonic_np_pi) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_pi_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else   // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing since pthread_mutex_timedlock_monotonic_np is only "
                      "supported on bionic";
#endif  // __BIONIC__
}
2211
Yabin Cui9651fdf2018-03-14 12:02:21 -07002212TEST(pthread, pthread_mutex_using_destroyed_mutex) {
2213#if defined(__BIONIC__)
2214 pthread_mutex_t m;
2215 ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
2216 ASSERT_EQ(0, pthread_mutex_destroy(&m));
2217 ASSERT_EXIT(pthread_mutex_lock(&m), ::testing::KilledBySignal(SIGABRT),
2218 "pthread_mutex_lock called on a destroyed mutex");
2219 ASSERT_EXIT(pthread_mutex_unlock(&m), ::testing::KilledBySignal(SIGABRT),
2220 "pthread_mutex_unlock called on a destroyed mutex");
2221 ASSERT_EXIT(pthread_mutex_trylock(&m), ::testing::KilledBySignal(SIGABRT),
2222 "pthread_mutex_trylock called on a destroyed mutex");
2223 timespec ts;
2224 ASSERT_EXIT(pthread_mutex_timedlock(&m, &ts), ::testing::KilledBySignal(SIGABRT),
2225 "pthread_mutex_timedlock called on a destroyed mutex");
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08002226 ASSERT_EXIT(pthread_mutex_timedlock_monotonic_np(&m, &ts), ::testing::KilledBySignal(SIGABRT),
2227 "pthread_mutex_timedlock_monotonic_np called on a destroyed mutex");
Yabin Cui9651fdf2018-03-14 12:02:21 -07002228 ASSERT_EXIT(pthread_mutex_destroy(&m), ::testing::KilledBySignal(SIGABRT),
2229 "pthread_mutex_destroy called on a destroyed mutex");
2230#else
2231 GTEST_LOG_(INFO) << "This test tests bionic pthread mutex implementation details.";
2232#endif
2233}
2234
Yabin Cuib5845722015-03-16 22:46:42 -07002235class StrictAlignmentAllocator {
2236 public:
2237 void* allocate(size_t size, size_t alignment) {
2238 char* p = new char[size + alignment * 2];
2239 allocated_array.push_back(p);
2240 while (!is_strict_aligned(p, alignment)) {
2241 ++p;
2242 }
2243 return p;
2244 }
2245
2246 ~StrictAlignmentAllocator() {
Elliott Hughes0b2acdf2015-10-02 18:25:19 -07002247 for (const auto& p : allocated_array) {
2248 delete[] p;
Yabin Cuib5845722015-03-16 22:46:42 -07002249 }
2250 }
2251
2252 private:
2253 bool is_strict_aligned(char* p, size_t alignment) {
2254 return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
2255 }
2256
2257 std::vector<char*> allocated_array;
2258};
2259
// Bionic must keep accepting pthread objects that are only 4-byte aligned,
// for binary compatibility with code built against older ABIs.
TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old version, we need to allow 4-byte aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
                               allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, nullptr));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
                             allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, nullptr));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
                                 allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}
Christopher Ferris60907c72015-06-09 18:46:15 -07002291
TEST(pthread, pthread_mutex_lock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_lock(null_value));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}
2306
TEST(pthread, pthread_mutex_unlock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_unlock(null_value));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}
2321
// On LP64 there is no compatibility exemption: locking a NULL mutex crashes.
TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}
2330
// On LP64 there is no compatibility exemption: unlocking a NULL mutex crashes.
TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}
Yabin Cui33ac04a2015-09-22 11:16:15 -07002339
// Defined elsewhere; counts stack frames when passed to _Unwind_Backtrace.
extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);

// Set by the SIGUSR1 handler so the test can verify that the handler ran to completion.
static volatile bool signal_handler_on_altstack_done;
2343
Josh Gao61db9ac2017-03-15 19:42:05 -07002344__attribute__((__noinline__))
2345static void signal_handler_backtrace() {
2346 // Check if we have enough stack space for unwinding.
2347 int count = 0;
2348 _Unwind_Backtrace(FrameCounter, &count);
2349 ASSERT_GT(count, 0);
2350}
2351
// noinline so this consumes its own stack frame rather than being folded into the caller.
__attribute__((__noinline__))
static void signal_handler_logging() {
  // Check if we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}
2359
// noinline so this consumes its own stack frame rather than being folded into the caller.
__attribute__((__noinline__))
static void signal_handler_snprintf() {
  // Check if we have enough stack space for snprintf to a PATH_MAX buffer, plus some extra.
  char buf[PATH_MAX + 2048];
  ASSERT_GT(snprintf(buf, sizeof(buf), "/proc/%d/status", getpid()), 0);
}
2366
Yabin Cui33ac04a2015-09-22 11:16:15 -07002367static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
2368 ASSERT_EQ(SIGUSR1, signo);
Josh Gao61db9ac2017-03-15 19:42:05 -07002369 signal_handler_backtrace();
2370 signal_handler_logging();
2371 signal_handler_snprintf();
Yabin Cui33ac04a2015-09-22 11:16:15 -07002372}
2373
Josh Gao415daa82017-03-06 17:45:33 -08002374TEST(pthread, big_enough_signal_stack) {
Yabin Cui33ac04a2015-09-22 11:16:15 -07002375 signal_handler_on_altstack_done = false;
2376 ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
2377 kill(getpid(), SIGUSR1);
2378 ASSERT_TRUE(signal_handler_on_altstack_done);
2379}
Yabin Cuie7c2fff2015-11-05 22:06:09 -08002380
2381TEST(pthread, pthread_barrierattr_smoke) {
2382 pthread_barrierattr_t attr;
2383 ASSERT_EQ(0, pthread_barrierattr_init(&attr));
2384 int pshared;
2385 ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
2386 ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
2387 ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
2388 ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
2389 ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
2390 ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
2391}
2392
Yabin Cui81d27972016-03-22 13:45:55 -07002393struct BarrierTestHelperData {
2394 size_t thread_count;
2395 pthread_barrier_t barrier;
2396 std::atomic<int> finished_mask;
2397 std::atomic<int> serial_thread_count;
Yabin Cuie7c2fff2015-11-05 22:06:09 -08002398 size_t iteration_count;
Yabin Cui81d27972016-03-22 13:45:55 -07002399 std::atomic<size_t> finished_iteration_count;
2400
2401 BarrierTestHelperData(size_t thread_count, size_t iteration_count)
2402 : thread_count(thread_count), finished_mask(0), serial_thread_count(0),
2403 iteration_count(iteration_count), finished_iteration_count(0) {
2404 }
2405};
2406
// Per-thread argument for BarrierTestHelper: the thread's id plus shared state.
struct BarrierTestHelperArg {
  int id;
  BarrierTestHelperData* data;
};
2411
// Worker body for pthread_barrier_smoke: waits on the barrier repeatedly,
// checking that exactly one waiter per iteration is the "serial thread".
static void BarrierTestHelper(BarrierTestHelperArg* arg) {
  for (size_t i = 0; i < arg->data->iteration_count; ++i) {
    int result = pthread_barrier_wait(&arg->data->barrier);
    if (result == PTHREAD_BARRIER_SERIAL_THREAD) {
      arg->data->serial_thread_count++;
    } else {
      ASSERT_EQ(0, result);
    }
    // fetch_or returns the mask *before* our bit was set, so OR our own bit
    // back in to get the post-update value.
    int mask = arg->data->finished_mask.fetch_or(1 << arg->id);
    mask |= 1 << arg->id;
    // The last thread to set its bit does the per-iteration bookkeeping and
    // resets the shared counters for the next iteration.
    if (mask == ((1 << arg->data->thread_count) - 1)) {
      ASSERT_EQ(1, arg->data->serial_thread_count);
      arg->data->finished_iteration_count++;
      arg->data->finished_mask = 0;
      arg->data->serial_thread_count = 0;
    }
  }
}
2430
2431TEST(pthread, pthread_barrier_smoke) {
2432 const size_t BARRIER_ITERATION_COUNT = 10;
2433 const size_t BARRIER_THREAD_COUNT = 10;
Yabin Cui81d27972016-03-22 13:45:55 -07002434 BarrierTestHelperData data(BARRIER_THREAD_COUNT, BARRIER_ITERATION_COUNT);
2435 ASSERT_EQ(0, pthread_barrier_init(&data.barrier, nullptr, data.thread_count));
2436 std::vector<pthread_t> threads(data.thread_count);
Yabin Cuie7c2fff2015-11-05 22:06:09 -08002437 std::vector<BarrierTestHelperArg> args(threads.size());
2438 for (size_t i = 0; i < threads.size(); ++i) {
Yabin Cui81d27972016-03-22 13:45:55 -07002439 args[i].id = i;
2440 args[i].data = &data;
Yabin Cuie7c2fff2015-11-05 22:06:09 -08002441 ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
2442 reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
2443 }
Yabin Cuie7c2fff2015-11-05 22:06:09 -08002444 for (size_t i = 0; i < threads.size(); ++i) {
2445 ASSERT_EQ(0, pthread_join(threads[i], nullptr));
2446 }
Yabin Cui81d27972016-03-22 13:45:55 -07002447 ASSERT_EQ(data.iteration_count, data.finished_iteration_count);
2448 ASSERT_EQ(0, pthread_barrier_destroy(&data.barrier));
2449}
2450
// Argument for BarrierDestroyTestHelper: the helper's tid (for
// WaitUntilThreadSleep) and the barrier to wait on.
struct BarrierDestroyTestArg {
  std::atomic<int> tid;
  pthread_barrier_t* barrier;
};
2455
// Publishes its tid, then blocks in pthread_barrier_wait() until the main
// thread also waits on the barrier.
static void BarrierDestroyTestHelper(BarrierDestroyTestArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
}
2460
// Destroying a barrier that still has waiters must fail with EBUSY; once the
// barrier cycle completes it must be destroyable immediately.
TEST(pthread, pthread_barrier_destroy) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  pthread_t thread;
  BarrierDestroyTestArg arg;
  arg.tid = 0;
  arg.barrier = &barrier;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              reinterpret_cast<void* (*)(void*)>(BarrierDestroyTestHelper), &arg));
  // Wait until the helper is blocked inside pthread_barrier_wait().
  WaitUntilThreadSleep(arg.tid);
  ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  // Verify if the barrier can be destroyed directly after pthread_barrier_wait().
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(0, pthread_join(thread, nullptr));
#if defined(__BIONIC__)
  // Bionic reports EINVAL for a second destroy of the same barrier.
  ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier));
#endif
}
2480
// Per-thread argument for BarrierOrderingTestHelper: a shared array slot per
// thread plus the barrier used to synchronize writes and reads.
struct BarrierOrderingTestHelperArg {
  pthread_barrier_t* barrier;
  size_t* array;
  size_t array_length;
  size_t id;
};
2487
// Each thread writes its own slot, crosses the barrier, then checks that every
// thread's write is visible — verifying the barrier's memory-ordering
// guarantee. The second wait keeps threads from racing ahead into the next
// iteration's writes while others are still reading.
void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) {
  const size_t ITERATION_COUNT = 10000;
  for (size_t i = 1; i <= ITERATION_COUNT; ++i) {
    arg->array[arg->id] = i;
    int result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
    for (size_t j = 0; j < arg->array_length; ++j) {
      ASSERT_EQ(i, arg->array[j]);
    }
    result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
  }
}
2501
2502TEST(pthread, pthread_barrier_check_ordering) {
2503 const size_t THREAD_COUNT = 4;
2504 pthread_barrier_t barrier;
2505 ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT));
2506 size_t array[THREAD_COUNT];
2507 std::vector<pthread_t> threads(THREAD_COUNT);
2508 std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT);
2509 for (size_t i = 0; i < THREAD_COUNT; ++i) {
2510 args[i].barrier = &barrier;
2511 args[i].array = array;
2512 args[i].array_length = THREAD_COUNT;
2513 args[i].id = i;
2514 ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
2515 reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper),
2516 &args[i]));
2517 }
2518 for (size_t i = 0; i < THREAD_COUNT; ++i) {
2519 ASSERT_EQ(0, pthread_join(threads[i], nullptr));
2520 }
2521}
Yabin Cuife3a83a2015-11-17 16:03:18 -08002522
Elliott Hughes463faad2018-07-06 14:34:49 -07002523TEST(pthread, pthread_barrier_init_zero_count) {
2524 pthread_barrier_t barrier;
2525 ASSERT_EQ(EINVAL, pthread_barrier_init(&barrier, nullptr, 0));
2526}
2527
Yabin Cuife3a83a2015-11-17 16:03:18 -08002528TEST(pthread, pthread_spinlock_smoke) {
2529 pthread_spinlock_t lock;
2530 ASSERT_EQ(0, pthread_spin_init(&lock, 0));
2531 ASSERT_EQ(0, pthread_spin_trylock(&lock));
2532 ASSERT_EQ(0, pthread_spin_unlock(&lock));
2533 ASSERT_EQ(0, pthread_spin_lock(&lock));
2534 ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock));
2535 ASSERT_EQ(0, pthread_spin_unlock(&lock));
2536 ASSERT_EQ(0, pthread_spin_destroy(&lock));
2537}
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002538
Elliott Hughes8aecba72017-10-17 15:34:41 -07002539TEST(pthread, pthread_attr_getdetachstate__pthread_attr_setdetachstate) {
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002540 pthread_attr_t attr;
2541 ASSERT_EQ(0, pthread_attr_init(&attr));
2542
Elliott Hughes8aecba72017-10-17 15:34:41 -07002543 int state;
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002544 ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
Elliott Hughes8aecba72017-10-17 15:34:41 -07002545 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
2546 ASSERT_EQ(PTHREAD_CREATE_DETACHED, state);
2547
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002548 ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));
Elliott Hughes8aecba72017-10-17 15:34:41 -07002549 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
2550 ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);
2551
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002552 ASSERT_EQ(EINVAL, pthread_attr_setdetachstate(&attr, 123));
Elliott Hughes8aecba72017-10-17 15:34:41 -07002553 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
2554 ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);
Elliott Hughes53dc9dd2017-09-19 14:02:50 -07002555}
2556
// Checks that pthread_create() fails cleanly with EAGAIN (rather than
// crashing) when mmap() can't allocate a stack, by first exhausting the
// process's VMA limit.
TEST(pthread, pthread_create__mmap_failures) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

  const auto kPageSize = sysconf(_SC_PAGE_SIZE);

  // Use up all the VMAs. By default this is 64Ki (though some will already be in use).
  std::vector<void*> pages;
  pages.reserve(64 * 1024);
  int prot = PROT_NONE;
  while (true) {
    void* page = mmap(nullptr, kPageSize, prot, MAP_ANON|MAP_PRIVATE, -1, 0);
    if (page == MAP_FAILED) break;
    pages.push_back(page);
    // Alternate the protection — presumably so adjacent mappings can't be
    // merged into a single VMA; TODO confirm.
    prot = (prot == PROT_NONE) ? PROT_READ : PROT_NONE;
  }

  // Try creating threads, freeing up a page each time we fail.
  size_t EAGAIN_count = 0;
  size_t i = 0;
  for (; i < pages.size(); ++i) {
    pthread_t t;
    int status = pthread_create(&t, &attr, IdFn, nullptr);
    if (status != EAGAIN) break;
    ++EAGAIN_count;
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }

  // Creating a thread uses at least three VMAs: the combined stack and TLS, and a guard on each
  // side. So we should have seen at least three failures.
  ASSERT_GE(EAGAIN_count, 3U);

  // Release whatever mappings remain.
  for (; i < pages.size(); ++i) {
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }
}
Elliott Hughesdff08ce2017-10-16 09:58:45 -07002594
// An invalid policy (INT_MIN) must be rejected with EINVAL.
TEST(pthread, pthread_setschedparam) {
  sched_param p = { .sched_priority = INT_MIN };
  ASSERT_EQ(EINVAL, pthread_setschedparam(pthread_self(), INT_MIN, &p));
}
2599
// An invalid priority (INT_MIN) must be rejected with EINVAL.
TEST(pthread, pthread_setschedprio) {
  ASSERT_EQ(EINVAL, pthread_setschedprio(pthread_self(), INT_MIN));
}
Elliott Hughes8aecba72017-10-17 15:34:41 -07002603
2604TEST(pthread, pthread_attr_getinheritsched__pthread_attr_setinheritsched) {
2605 pthread_attr_t attr;
2606 ASSERT_EQ(0, pthread_attr_init(&attr));
2607
2608 int state;
2609 ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2610 ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
2611 ASSERT_EQ(PTHREAD_INHERIT_SCHED, state);
2612
2613 ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
2614 ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
2615 ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);
2616
2617 ASSERT_EQ(EINVAL, pthread_attr_setinheritsched(&attr, 123));
2618 ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
2619 ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);
2620}
2621
// Invalid scheduler attributes are ignored with PTHREAD_INHERIT_SCHED but
// (on LP64) rejected with PTHREAD_EXPLICIT_SCHED.
TEST(pthread, pthread_attr_setinheritsched__PTHREAD_INHERIT_SCHED__PTHREAD_EXPLICIT_SCHED) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  // If we set invalid scheduling attributes but choose to inherit, everything's fine...
  sched_param param = { .sched_priority = sched_get_priority_max(SCHED_FIFO) + 1 };
  ASSERT_EQ(0, pthread_attr_setschedparam(&attr, &param));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));

#if defined(__LP64__)
  // If we ask to use them, though, we'll see a failure...
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(EINVAL, pthread_create(&t, &attr, IdFn, nullptr));
#else
  // For backwards compatibility with broken apps, we just ignore failures
  // to set scheduler attributes on LP32.
#endif
}
2645
// With PTHREAD_INHERIT_SCHED, a new thread inherits the creator's SCHED_FIFO
// policy rather than the attribute object's (unset) policy.
TEST(pthread, pthread_attr_setinheritsched_PTHREAD_INHERIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) {
    // Real-time scheduling may require privileges we don't have.
    GTEST_LOG_(INFO) << "pthread_setschedparam failed with EPERM, skipping test\n";
    return;
  }
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  // Keep the new thread alive until we've queried its scheduling parameters.
  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}
2669
2670TEST(pthread, pthread_attr_setinheritsched_PTHREAD_EXPLICIT_SCHED_takes_effect) {
2671 sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
2672 int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
2673 if (rc == EPERM) {
2674 GTEST_LOG_(INFO) << "pthread_setschedparam failed with EPERM, skipping test\n";
2675 return;
2676 }
2677 ASSERT_EQ(0, rc);
2678
2679 pthread_attr_t attr;
2680 ASSERT_EQ(0, pthread_attr_init(&attr));
2681 ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
2682 ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_OTHER));
2683
Elliott Hughes0bd9d132017-11-02 13:11:13 -07002684 SpinFunctionHelper spin_helper;
Elliott Hughes8aecba72017-10-17 15:34:41 -07002685 pthread_t t;
Elliott Hughes0bd9d132017-11-02 13:11:13 -07002686 ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
Elliott Hughes8aecba72017-10-17 15:34:41 -07002687 int actual_policy;
2688 sched_param actual_param;
2689 ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
2690 ASSERT_EQ(SCHED_OTHER, actual_policy);
Elliott Hughes0bd9d132017-11-02 13:11:13 -07002691 spin_helper.UnSpin();
Elliott Hughes8aecba72017-10-17 15:34:41 -07002692 ASSERT_EQ(0, pthread_join(t, nullptr));
2693}
2694
2695TEST(pthread, pthread_attr_setinheritsched__takes_effect_despite_SCHED_RESET_ON_FORK) {
2696 sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
2697 int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO | SCHED_RESET_ON_FORK, &param);
2698 if (rc == EPERM) {
2699 GTEST_LOG_(INFO) << "pthread_setschedparam failed with EPERM, skipping test\n";
2700 return;
2701 }
2702 ASSERT_EQ(0, rc);
2703
2704 pthread_attr_t attr;
2705 ASSERT_EQ(0, pthread_attr_init(&attr));
2706 ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2707
Elliott Hughes0bd9d132017-11-02 13:11:13 -07002708 SpinFunctionHelper spin_helper;
Elliott Hughes8aecba72017-10-17 15:34:41 -07002709 pthread_t t;
Elliott Hughes0bd9d132017-11-02 13:11:13 -07002710 ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
Elliott Hughes8aecba72017-10-17 15:34:41 -07002711 int actual_policy;
2712 sched_param actual_param;
2713 ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
2714 ASSERT_EQ(SCHED_FIFO | SCHED_RESET_ON_FORK, actual_policy);
Elliott Hughes0bd9d132017-11-02 13:11:13 -07002715 spin_helper.UnSpin();
Elliott Hughes8aecba72017-10-17 15:34:41 -07002716 ASSERT_EQ(0, pthread_join(t, nullptr));
2717}