blob: e5f2a31bf8c89bc85b9b46852c0cd14ca876fd07 [file] [log] [blame]
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
Pierre Peifferd0c884d2012-02-22 16:40:15 +010028
Elliott Hughes6f94de32013-02-12 06:06:22 +000029#include <pthread.h>
Elliott Hughes3e898472013-02-12 16:40:24 +000030
31#include <errno.h>
32#include <limits.h>
Yabin Cui86fc96f2015-01-29 21:50:48 -080033#include <stdatomic.h>
Yabin Cui5a00ba72018-01-26 17:32:31 -080034#include <stdlib.h>
Yabin Cui17393b02015-03-21 15:08:25 -070035#include <string.h>
Yabin Cui86fc96f2015-01-29 21:50:48 -080036#include <sys/cdefs.h>
Elliott Hughes84114c82013-07-17 13:33:19 -070037#include <sys/mman.h>
Pierre Peifferd0c884d2012-02-22 16:40:15 +010038#include <unistd.h>
39
Pierre Peifferd0c884d2012-02-22 16:40:15 +010040#include "pthread_internal.h"
Elliott Hugheseb847bc2013-10-09 15:50:50 -070041
Elliott Hughes04303f52014-09-18 16:11:59 -070042#include "private/bionic_constants.h"
Elliott Hugheseb847bc2013-10-09 15:50:50 -070043#include "private/bionic_futex.h"
Yabin Cui86fc96f2015-01-29 21:50:48 -080044#include "private/bionic_systrace.h"
Elliott Hughes04303f52014-09-18 16:11:59 -070045#include "private/bionic_time_conversions.h"
Elliott Hugheseb847bc2013-10-09 15:50:50 -070046#include "private/bionic_tls.h"
The Android Open Source Project1dc9e472009-03-03 19:28:35 -080047
Yabin Cuie69c2452015-02-13 16:21:25 -080048/* a mutex attribute holds the following fields
49 *
50 * bits: name description
51 * 0-3 type type of mutex
52 * 4 shared process-shared flag
Yabin Cui6b9c85b2018-01-23 12:56:18 -080053 * 5 protocol whether it is a priority inherit mutex.
Yabin Cuie69c2452015-02-13 16:21:25 -080054 */
55#define MUTEXATTR_TYPE_MASK 0x000f
56#define MUTEXATTR_SHARED_MASK 0x0010
Yabin Cui6b9c85b2018-01-23 12:56:18 -080057#define MUTEXATTR_PROTOCOL_MASK 0x0020
58
59#define MUTEXATTR_PROTOCOL_SHIFT 5
Yabin Cuie69c2452015-02-13 16:21:25 -080060
61int pthread_mutexattr_init(pthread_mutexattr_t *attr)
62{
63 *attr = PTHREAD_MUTEX_DEFAULT;
64 return 0;
65}
66
67int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
68{
69 *attr = -1;
70 return 0;
71}
72
73int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type_p)
74{
75 int type = (*attr & MUTEXATTR_TYPE_MASK);
76
77 if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
78 return EINVAL;
79 }
80
81 *type_p = type;
82 return 0;
83}
84
85int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
86{
87 if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK ) {
88 return EINVAL;
89 }
90
91 *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
92 return 0;
93}
94
95/* process-shared mutexes are not supported at the moment */
96
97int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
98{
99 switch (pshared) {
100 case PTHREAD_PROCESS_PRIVATE:
101 *attr &= ~MUTEXATTR_SHARED_MASK;
102 return 0;
103
104 case PTHREAD_PROCESS_SHARED:
105 /* our current implementation of pthread actually supports shared
106 * mutexes but won't cleanup if a process dies with the mutex held.
107 * Nevertheless, it's better than nothing. Shared mutexes are used
108 * by surfaceflinger and audioflinger.
109 */
110 *attr |= MUTEXATTR_SHARED_MASK;
111 return 0;
112 }
113 return EINVAL;
114}
115
116int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
117 *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
118 return 0;
119}
120
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800121int pthread_mutexattr_setprotocol(pthread_mutexattr_t* attr, int protocol) {
122 if (protocol != PTHREAD_PRIO_NONE && protocol != PTHREAD_PRIO_INHERIT) {
123 return EINVAL;
124 }
125 *attr = (*attr & ~MUTEXATTR_PROTOCOL_MASK) | (protocol << MUTEXATTR_PROTOCOL_SHIFT);
126 return 0;
127}
128
129int pthread_mutexattr_getprotocol(const pthread_mutexattr_t* attr, int* protocol) {
130 *protocol = (*attr & MUTEXATTR_PROTOCOL_MASK) >> MUTEXATTR_PROTOCOL_SHIFT;
131 return 0;
132}
133
// Priority Inheritance mutex implementation.
// Layout matters: owner_tid is the word handed to the kernel's PI futex
// operations (see PIMutexTimedLock/PIMutexUnlock below).
struct PIMutex {
    // mutex type, can be 0 (normal), 1 (recursive), 2 (errorcheck), constant during lifetime
    uint8_t type;
    // process-shared flag, constant during lifetime
    bool shared;
    // <number of times a thread holding a recursive PI mutex> - 1
    uint16_t counter;
    // owner_tid is read/written by both userspace code and kernel code. It includes three fields:
    // FUTEX_WAITERS, FUTEX_OWNER_DIED and FUTEX_TID_MASK.
    atomic_int owner_tid;
};
146
// Try to acquire a PI mutex without blocking.
// Returns 0 on success; EBUSY if another thread holds it (or on normal-type
// self-relock); EDEADLK on errorcheck self-relock; EAGAIN on recursive
// counter overflow.
static inline __always_inline int PIMutexTryLock(PIMutex& mutex) {
    pid_t tid = __get_thread()->tid;
    // Handle common case first: an unlocked mutex (owner_tid == 0) is claimed
    // by CAS-ing our tid in. Acquire ordering on success pairs with the
    // release in PIMutexUnlock.
    int old_owner = 0;
    if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex.owner_tid,
                                                               &old_owner, tid,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
        return 0;
    }
    // Mask off FUTEX_WAITERS/FUTEX_OWNER_DIED before comparing tids.
    if (tid == (old_owner & FUTEX_TID_MASK)) {
        // We already own this mutex.
        if (mutex.type == PTHREAD_MUTEX_NORMAL) {
            // Normal type: report busy; the caller's lock path will then
            // block on itself (classic self-deadlock semantics).
            return EBUSY;
        }
        if (mutex.type == PTHREAD_MUTEX_ERRORCHECK) {
            return EDEADLK;
        }
        // Recursive type: counter stores (lock depth - 1) in 16 bits.
        if (mutex.counter == 0xffff) {
            return EAGAIN;
        }
        mutex.counter++;
        return 0;
    }
    return EBUSY;
}
173
// Acquire a PI mutex, blocking in the kernel (FUTEX_LOCK_PI) if contended.
// abs_timeout may be null for an untimed lock.
// Inlining this function in pthread_mutex_lock() add the cost of stack frame instructions on
// ARM/ARM64, which increases at most 20 percent overhead. So make it noinline.
static int __attribute__((noinline)) PIMutexTimedLock(PIMutex& mutex,
                                                      const timespec* abs_timeout) {
    int ret = PIMutexTryLock(mutex);
    if (__predict_true(ret == 0)) {
        return 0;
    }
    if (ret == EBUSY) {
        // Contended: let the kernel park us and handle priority inheritance.
        // __futex_pi_lock_ex returns a negative errno, hence the negation.
        ScopedTrace trace("Contending for pthread mutex");
        ret = -__futex_pi_lock_ex(&mutex.owner_tid, mutex.shared, true, abs_timeout);
    }
    return ret;
}
188
// Release a PI mutex. Returns 0 on success, EPERM if the calling thread is
// not the owner. Falls through to the kernel (FUTEX_UNLOCK_PI) only when
// waiters may exist (FUTEX_WAITERS set in owner_tid).
static int PIMutexUnlock(PIMutex& mutex) {
    pid_t tid = __get_thread()->tid;
    int old_owner = tid;
    // Handle common case first: a normal-type mutex with no waiter bits set
    // is released with a single CAS (tid -> 0). Release ordering publishes
    // the critical section's writes to the next owner.
    if (__predict_true(mutex.type == PTHREAD_MUTEX_NORMAL)) {
        if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex.owner_tid,
                                                                   &old_owner, 0,
                                                                   memory_order_release,
                                                                   memory_order_relaxed))) {
            return 0;
        }
    }
    // old_owner now holds the observed owner_tid word (CAS failure wrote it back).

    if (tid != (old_owner & FUTEX_TID_MASK)) {
        // The mutex can only be unlocked by the thread who owns it.
        return EPERM;
    }
    if (mutex.type == PTHREAD_MUTEX_RECURSIVE) {
        // Nested unlock: just decrement the depth counter; only the owner
        // thread touches counter, so no atomics are needed.
        if (mutex.counter != 0u) {
            --mutex.counter;
            return 0;
        }
    }
    if (old_owner == tid) {
        // No thread is waiting (no FUTEX_WAITERS/FUTEX_OWNER_DIED bits):
        // try the userspace fast release before involving the kernel.
        if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex.owner_tid,
                                                                   &old_owner, 0,
                                                                   memory_order_release,
                                                                   memory_order_relaxed))) {
            return 0;
        }
    }
    // Waiters exist (or appeared concurrently): the kernel must hand the
    // mutex off and re-adjust priorities.
    return -__futex_pi_unlock(&mutex.owner_tid, mutex.shared);
}
223
// Destroy a PI mutex. Destruction is only legal while unlocked
// (owner_tid == 0); in that case 0xffffffff is swapped in so any later use
// of the destroyed mutex misbehaves detectably. Returns EBUSY if the mutex
// is still locked.
static int PIMutexDestroy(PIMutex& mutex) {
    int expected_unlocked = 0;
    bool was_unlocked = atomic_compare_exchange_strong_explicit(&mutex.owner_tid,
                                                                &expected_unlocked, 0xffffffff,
                                                                memory_order_relaxed,
                                                                memory_order_relaxed);
    return was_unlocked ? 0 : EBUSY;
}
Yabin Cui5a00ba72018-01-26 17:32:31 -0800234
#if !defined(__LP64__)

namespace PIMutexAllocator {
// pthread_mutex_t has only 4 bytes in 32-bit programs, which are not enough to hold PIMutex.
// So we use malloc to allocate PIMutexes and use 16-bit of pthread_mutex_t as indexes to find
// the allocated PIMutexes. This allows at most 65536 PI mutexes.
// When calling operations like pthread_mutex_lock/unlock, the 16-bit index is mapped to the
// corresponding PIMutex. To make the map operation fast, we use a lockless mapping method:
//   Once a PIMutex is allocated, all the data used to map index to the PIMutex isn't changed until
//   it is destroyed.
// Below are the data structures:
//   // struct Node contains a PIMutex.
//   typedef Node NodeArray[256];
//   typedef NodeArray* NodeArrayP;
//   NodeArrayP nodes[256];
//
// A 16-bit index is mapped to Node as below:
//   (*nodes[index >> 8])[index & 0xff]
//
// Also use a free list to allow O(1) finding recycled PIMutexes.

union Node {
    PIMutex mutex;
    int next_free_id;  // If not -1, refer to the next node in the free PIMutex list.
};
typedef Node NodeArray[256];
typedef NodeArray* NodeArrayP;

// lock protects below items (allocation/free bookkeeping only; lookups via
// IdToNode are lockless, relying on the immutability noted above).
static Lock lock;
static NodeArrayP* nodes;
static int next_to_alloc_id;
static int first_free_id = -1;  // If not -1, refer to the first node in the free PIMutex list.

// Map a 16-bit id to its Node. Lockless: safe only for live ids.
static inline __always_inline Node& IdToNode(int id) {
    return (*nodes[id >> 8])[id & 0xff];
}

// Map a 16-bit id to the PIMutex stored in its Node.
static inline __always_inline PIMutex& IdToPIMutex(int id) {
    return IdToNode(id).mutex;
}

// Allocate an id: reuse the free list first, otherwise extend the arrays.
// Caller must hold 'lock'. Returns -1 on exhaustion (>= 0x10000 ids) or OOM.
static int AllocIdLocked() {
    if (first_free_id != -1) {
        int result = first_free_id;
        first_free_id = IdToNode(result).next_free_id;
        return result;
    }
    if (next_to_alloc_id >= 0x10000) {
        return -1;
    }
    int array_pos = next_to_alloc_id >> 8;
    int node_pos = next_to_alloc_id & 0xff;
    // Crossing into a new 256-node chunk: allocate it (and, the first time,
    // the top-level pointer array). These allocations are intentionally
    // never freed: freed ids go on the free list instead.
    if (node_pos == 0) {
        if (array_pos == 0) {
            nodes = static_cast<NodeArray**>(calloc(256, sizeof(NodeArray*)));
            if (nodes == nullptr) {
                return -1;
            }
        }
        nodes[array_pos] = static_cast<NodeArray*>(malloc(sizeof(NodeArray)));
        if (nodes[array_pos] == nullptr) {
            return -1;
        }
    }
    return next_to_alloc_id++;
}

// If succeed, return an id referring to a zero-initialized PIMutex,
// otherwise return -1. A valid id is in range [0, 0xffff].
static int AllocId() {
    lock.lock();
    int result = AllocIdLocked();
    lock.unlock();
    if (result != -1) {
        memset(&IdToPIMutex(result), 0, sizeof(PIMutex));
    }
    return result;
}

// Return an id to the free list for reuse by a later AllocId().
static void FreeId(int id) {
    lock.lock();
    IdToNode(id).next_free_id = first_free_id;
    first_free_id = id;
    lock.unlock();
}

}  // namespace PIMutexAllocator

#endif  // !defined(__LP64__)
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800325
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800326
/* Convenience macro, creates a mask of 'bits' bits that starts from
 * the 'shift'-th least significant bit in a 32-bit word.
 *
 * Examples: FIELD_MASK(0,4)  -> 0xf
 *           FIELD_MASK(16,9) -> 0x1ff0000
 */
#define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))

/* This one is used to create a bit pattern from a given field value */
#define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))

/* And this one does the opposite, i.e. extract a field's value from a bit pattern */
#define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))

/* Convenience macros.
 *
 * These are used to form or modify the bit pattern of a given mutex value
 */

/* Mutex state:
 *
 * 0 for unlocked
 * 1 for locked, no waiters
 * 2 for locked, maybe waiters
 */
#define  MUTEX_STATE_SHIFT      0
#define  MUTEX_STATE_LEN        2

#define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)

#define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match PTHREAD_MUTEX_INITIALIZER */
#define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
#define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */

#define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)

// Return true iff the mutex is unlocked.
#define MUTEX_STATE_BITS_IS_UNLOCKED(v) (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_UNLOCKED)

// Return true iff the mutex is locked with no waiters.
#define MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)

// return true iff the mutex is locked with maybe waiters.
#define MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)   (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)

/* used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
#define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)      ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))

/* Mutex counter:
 *
 * We need to check for overflow before incrementing, and we also need to
 * detect when the counter is 0
 */
#define  MUTEX_COUNTER_SHIFT         2
#define  MUTEX_COUNTER_LEN           11
#define  MUTEX_COUNTER_MASK          FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

#define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)    (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
#define  MUTEX_COUNTER_BITS_IS_ZERO(v)          (((v) & MUTEX_COUNTER_MASK) == 0)

/* Used to increment the counter directly after overflow has been checked */
#define  MUTEX_COUNTER_BITS_ONE      FIELD_TO_BITS(1, MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)

/* Mutex shared bit flag
 *
 * This flag is set to indicate that the mutex is shared among processes.
 * This changes the futex opcode we use for futex wait/wake operations
 * (non-shared operations are much faster).
 */
#define  MUTEX_SHARED_SHIFT    13
#define  MUTEX_SHARED_MASK     FIELD_MASK(MUTEX_SHARED_SHIFT,1)

/* Mutex type:
 * We support normal, recursive and errorcheck mutexes.
 */
#define  MUTEX_TYPE_SHIFT      14
#define  MUTEX_TYPE_LEN        2
#define  MUTEX_TYPE_MASK       FIELD_MASK(MUTEX_TYPE_SHIFT,MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_TO_BITS(t)       FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(PTHREAD_MUTEX_NORMAL)
#define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(PTHREAD_MUTEX_RECURSIVE)
#define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(PTHREAD_MUTEX_ERRORCHECK)
// Use a special mutex type to mark priority inheritance mutexes.
#define  MUTEX_TYPE_BITS_WITH_PI     MUTEX_TYPE_TO_BITS(3)

// For a PI mutex, it includes below fields:
//   Atomic(uint16_t) state;
//   PIMutex pi_mutex;  // uint16_t pi_mutex_id in 32-bit programs
//
//   state holds the following fields:
//
//   bits:   name    description
//   15-14   type    mutex type, should be 3
//
//   pi_mutex holds the state of a PI mutex.
//   pi_mutex_id holds an integer to find the state of a PI mutex.
//
// For a Non-PI mutex, it includes below fields:
//   Atomic(uint16_t) state;
//   atomic_int owner_tid;  // Atomic(uint16_t) in 32-bit programs
//
//   state holds the following fields:
//
//   bits:     name     description
//   15-14     type     mutex type, can be 0 (normal), 1 (recursive), 2 (errorcheck)
//   13        shared   process-shared flag
//   12-2      counter  <number of times a thread holding a recursive Non-PI mutex> - 1
//   1-0       state    lock state (0, 1 or 2)
//
//   bits 15-13 are constant during the lifetime of the mutex.
//
//   owner_tid is used only in recursive and errorcheck Non-PI mutexes to hold the mutex owner
//   thread id.
//
// PI mutexes and Non-PI mutexes are distinguished by checking type field in state.
#if defined(__LP64__)
// 64-bit layout: the PIMutex lives inline inside the 40-byte mutex object.
struct pthread_mutex_internal_t {
    _Atomic(uint16_t) state;   // bit layout described in the comment above
    uint16_t __pad;
    union {
        atomic_int owner_tid;  // Non-PI recursive/errorcheck owner thread id
        PIMutex pi_mutex;      // PI mutex state (type field in 'state' selects)
    };
    char __reserved[28];

    PIMutex& ToPIMutex() {
        return pi_mutex;
    }

    // Nothing to free: the PI state is stored inline.
    void FreePIMutex() {
    }
} __attribute__((aligned(4)));

#else
// 32-bit layout: only 4 bytes total, so PI state is kept out-of-line in
// PIMutexAllocator and referenced by a 16-bit id.
struct pthread_mutex_internal_t {
    _Atomic(uint16_t) state;       // bit layout described in the comment above
    union {
        _Atomic(uint16_t) owner_tid;  // Non-PI recursive/errorcheck owner thread id
        uint16_t pi_mutex_id;         // index into PIMutexAllocator's table
    };

    PIMutex& ToPIMutex() {
        return PIMutexAllocator::IdToPIMutex(pi_mutex_id);
    }

    void FreePIMutex() {
        PIMutexAllocator::FreeId(pi_mutex_id);
    }
} __attribute__((aligned(4)));
#endif

static_assert(sizeof(pthread_mutex_t) == sizeof(pthread_mutex_internal_t),
              "pthread_mutex_t should actually be pthread_mutex_internal_t in implementation.");

// For binary compatibility with old version of pthread_mutex_t, we can't use more strict alignment
// than 4-byte alignment.
static_assert(alignof(pthread_mutex_t) == 4,
              "pthread_mutex_t should fulfill the alignment of pthread_mutex_internal_t.");
491
// Reinterpret the public ABI type as the implementation type; legal because
// the static_asserts above pin their size and alignment to be identical.
static inline pthread_mutex_internal_t* __get_internal_mutex(pthread_mutex_t* mutex_interface) {
  return reinterpret_cast<pthread_mutex_internal_t*>(mutex_interface);
}
495
// Initialize a mutex according to 'attr' (NULL means the default: a normal,
// process-private, non-PI mutex). Returns 0, EINVAL for a bad type or a
// shared PI mutex on 32-bit, or ENOMEM if 32-bit PI id allocation fails.
int pthread_mutex_init(pthread_mutex_t* mutex_interface, const pthread_mutexattr_t* attr) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    // Zero everything first: state, owner_tid and the reserved/pad bytes.
    memset(mutex, 0, sizeof(pthread_mutex_internal_t));

    if (__predict_true(attr == NULL)) {
        atomic_init(&mutex->state, MUTEX_TYPE_BITS_NORMAL);
        return 0;
    }

    // Build the 16-bit state word from the attribute's shared and type fields.
    uint16_t state = 0;
    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
        state |= MUTEX_SHARED_MASK;
    }

    switch (*attr & MUTEXATTR_TYPE_MASK) {
      case PTHREAD_MUTEX_NORMAL:
        state |= MUTEX_TYPE_BITS_NORMAL;
        break;
      case PTHREAD_MUTEX_RECURSIVE:
        state |= MUTEX_TYPE_BITS_RECURSIVE;
        break;
      case PTHREAD_MUTEX_ERRORCHECK:
        state |= MUTEX_TYPE_BITS_ERRORCHECK;
        break;
      default:
        return EINVAL;
    }

    if (((*attr & MUTEXATTR_PROTOCOL_MASK) >> MUTEXATTR_PROTOCOL_SHIFT) == PTHREAD_PRIO_INHERIT) {
#if !defined(__LP64__)
        // 32-bit: PI state lives in PIMutexAllocator; process-shared PI
        // mutexes can't work there (the id table is per-process).
        if (state & MUTEX_SHARED_MASK) {
            return EINVAL;
        }
        int id = PIMutexAllocator::AllocId();
        if (id == -1) {
            return ENOMEM;
        }
        mutex->pi_mutex_id = id;
#endif
        // PI mutexes carry only the special type in 'state'; the real type
        // and shared flag move into the PIMutex itself.
        atomic_init(&mutex->state, MUTEX_TYPE_BITS_WITH_PI);
        PIMutex& pi_mutex = mutex->ToPIMutex();
        pi_mutex.type = *attr & MUTEXATTR_TYPE_MASK;
        pi_mutex.shared = (*attr & MUTEXATTR_SHARED_MASK) != 0;
    } else {
        atomic_init(&mutex->state, state);
        atomic_init(&mutex->owner_tid, 0);
    }
    return 0;
}
546
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800547// namespace for Non-PI mutex routines.
548namespace NonPI {
549
550static inline __always_inline int NormalMutexTryLock(pthread_mutex_internal_t* mutex,
551 uint16_t shared) {
Yabin Cuie69c2452015-02-13 16:21:25 -0800552 const uint16_t unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
553 const uint16_t locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800554
Yabin Cuie69c2452015-02-13 16:21:25 -0800555 uint16_t old_state = unlocked;
Yabin Cui17393b02015-03-21 15:08:25 -0700556 if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
557 locked_uncontended, memory_order_acquire, memory_order_relaxed))) {
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800558 return 0;
559 }
560 return EBUSY;
561}
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800562
/*
 * Lock a normal Non-PI mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 *
 * Returns 0, ETIMEDOUT if abs_timeout_or_null expires, or the error from
 * an invalid timespec.
 */
static inline __always_inline int NormalMutexLock(pthread_mutex_internal_t* mutex,
                                                  uint16_t shared,
                                                  bool use_realtime_clock,
                                                  const timespec* abs_timeout_or_null) {
    // Uncontended fast path: a single CAS.
    if (__predict_true(NormalMutexTryLock(mutex, shared) == 0)) {
        return 0;
    }
    // Validate the timeout (null is allowed) before committing to sleep.
    int result = check_timespec(abs_timeout_or_null, true);
    if (result != 0) {
        return result;
    }

    ScopedTrace trace("Contending for pthread mutex");

    const uint16_t unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We want to go to sleep until the mutex is available, which requires
    // promoting it to locked_contended. We need to swap in the new state
    // and then wait until somebody wakes us up.
    // An atomic_exchange is used to compete with other threads for the lock.
    // If it returns unlocked, we have acquired the lock, otherwise another
    // thread still holds the lock and we should wait again.
    // If lock is acquired, an acquire fence is needed to make all memory accesses
    // made by other threads visible to the current CPU.
    while (atomic_exchange_explicit(&mutex->state, locked_contended,
                                    memory_order_acquire) != unlocked) {
        if (__futex_wait_ex(&mutex->state, shared, locked_contended, use_realtime_clock,
                            abs_timeout_or_null) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
    }
    return 0;
}
609
/*
 * Release a normal Non-PI mutex. The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static inline __always_inline void NormalMutexUnlock(pthread_mutex_internal_t* mutex,
                                                     uint16_t shared) {
    const uint16_t unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We use an atomic_exchange to release the lock. If locked_contended state
    // is returned, some threads is waiting for the lock and we need to wake up
    // one of them.
    // A release fence is required to make previous stores visible to next
    // lock owner threads.
    if (atomic_exchange_explicit(&mutex->state, unlocked,
                                 memory_order_release) == locked_contended) {
        // Wake up one waiting thread. We don't know which thread will be
        // woken or when it'll start executing -- futexes make no guarantees
        // here. There may not even be a thread waiting.
        //
        // The newly-woken thread will replace the unlocked state we just set above
        // with locked_contended state, which means that when it eventually releases
        // the mutex it will also call FUTEX_WAKE. This results in one extra wake
        // call whenever a lock is contended, but let us avoid forgetting anyone
        // without requiring us to track the number of sleepers.
        //
        // It's possible for another thread to sneak in and grab the lock between
        // the exchange above and the wake call below. If the new thread is "slow"
        // and holds the lock for a while, we'll wake up a sleeper, which will swap
        // in locked_uncontended state and then go back to sleep since the lock is
        // still held. If the new thread is "fast", running to completion before
        // we call wake, the thread we eventually wake will find an unlocked mutex
        // and will execute. Either way we have correct behavior and nobody is
        // orphaned on the wait queue.
        __futex_wake_ex(&mutex->state, shared, 1);
    }
}
647
/* This common inlined function is used to increment the counter of a recursive Non-PI mutex.
 *
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0.
 *
 * Precondition: the calling thread already owns the mutex (so old_state's
 * counter field cannot change under us).
 */
static inline __always_inline int RecursiveIncrement(pthread_mutex_internal_t* mutex,
                                                     uint16_t old_state) {
    // Detect recursive lock overflow and return EAGAIN.
    // This is safe because only the owner thread can modify the
    // counter bits in the mutex value.
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(old_state)) {
        return EAGAIN;
    }

    // Other threads are able to change the lower bits (e.g. promoting it to "contended"),
    // but the mutex counter will not overflow. So we use atomic_fetch_add operation here.
    // The mutex is already locked by current thread, so we don't need an acquire fence.
    atomic_fetch_add_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
    return 0;
}
669
// Wait on a recursive or errorcheck Non-PI mutex.
// Returns the (negative-errno style) result of __futex_wait_ex.
static inline __always_inline int RecursiveOrErrorcheckMutexWait(pthread_mutex_internal_t* mutex,
                                                                 uint16_t shared,
                                                                 uint16_t old_state,
                                                                 bool use_realtime_clock,
                                                                 const timespec* abs_timeout) {
// __futex_wait always waits on a 32-bit value. But state is 16-bit. For a normal mutex, the owner_tid
// field in mutex is not used. On 64-bit devices, the __pad field in mutex is not used.
// But when a recursive or errorcheck mutex is used on 32-bit devices, we need to add the
// owner_tid value in the value argument for __futex_wait, otherwise we may always get EAGAIN error.

#if defined(__LP64__)
    // __pad is zeroed at init, so the 16-bit state is the whole futex word.
    return __futex_wait_ex(&mutex->state, shared, old_state, use_realtime_clock, abs_timeout);

#else
    // This implementation works only when the layout of pthread_mutex_internal_t matches below expectation.
    // And it is based on the assumption that Android is always in little-endian devices.
    static_assert(offsetof(pthread_mutex_internal_t, state) == 0, "");
    static_assert(offsetof(pthread_mutex_internal_t, owner_tid) == 2, "");

    // Reconstruct the full 32-bit futex word: owner_tid in the high half,
    // state in the low half (little-endian layout).
    uint32_t owner_tid = atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed);
    return __futex_wait_ex(&mutex->state, shared, (owner_tid << 16) | old_state,
                           use_realtime_clock, abs_timeout);
#endif
}
695
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800696// Lock a Non-PI mutex.
697static int MutexLockWithTimeout(pthread_mutex_internal_t* mutex, bool use_realtime_clock,
698 const timespec* abs_timeout_or_null) {
Yabin Cuie69c2452015-02-13 16:21:25 -0800699 uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
700 uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
701 uint16_t shared = (old_state & MUTEX_SHARED_MASK);
Fabrice Di Meglio86418332010-03-11 14:47:47 -0800702
Yabin Cui86fc96f2015-01-29 21:50:48 -0800703 // Handle common case first.
Elliott Hughesd4e753f2013-07-16 12:45:46 -0700704 if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800705 return NormalMutexLock(mutex, shared, use_realtime_clock, abs_timeout_or_null);
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800706 }
David 'Digit' Turner40e6b822010-03-17 11:25:46 -0700707
Yabin Cui86fc96f2015-01-29 21:50:48 -0800708 // Do we already own this recursive or error-check mutex?
Yabin Cuie69c2452015-02-13 16:21:25 -0800709 pid_t tid = __get_thread()->tid;
710 if (tid == atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800711 if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
712 return EDEADLK;
713 }
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800714 return RecursiveIncrement(mutex, old_state);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800715 }
David 'Digit' Turner40e6b822010-03-17 11:25:46 -0700716
Yabin Cuie69c2452015-02-13 16:21:25 -0800717 const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
718 const uint16_t locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
719 const uint16_t locked_contended = mtype | shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
David 'Digit' Turner88f06cd2010-03-18 17:13:41 -0700720
Yabin Cui86fc96f2015-01-29 21:50:48 -0800721 // First, if the mutex is unlocked, try to quickly acquire it.
722 // In the optimistic case where this works, set the state to locked_uncontended.
Yabin Cui17393b02015-03-21 15:08:25 -0700723 if (old_state == unlocked) {
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800724 // If exchanged successfully, an acquire fence is required to make
725 // all memory accesses made by other threads visible to the current CPU.
Yabin Cui17393b02015-03-21 15:08:25 -0700726 if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
Yabin Cuie69c2452015-02-13 16:21:25 -0800727 locked_uncontended, memory_order_acquire, memory_order_relaxed))) {
728 atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
David 'Digit' Turnere1414aa2012-01-24 15:26:54 +0100729 return 0;
David 'Digit' Turner40e6b822010-03-17 11:25:46 -0700730 }
David 'Digit' Turner40e6b822010-03-17 11:25:46 -0700731 }
David 'Digit' Turnere1414aa2012-01-24 15:26:54 +0100732
Brigid Smitha406ee62014-07-21 15:38:06 -0700733 ScopedTrace trace("Contending for pthread mutex");
734
Yabin Cui86fc96f2015-01-29 21:50:48 -0800735 while (true) {
Yabin Cui17393b02015-03-21 15:08:25 -0700736 if (old_state == unlocked) {
Yabin Cui86fc96f2015-01-29 21:50:48 -0800737 // NOTE: We put the state to locked_contended since we _know_ there
738 // is contention when we are in this loop. This ensures all waiters
739 // will be unlocked.
David 'Digit' Turnere1414aa2012-01-24 15:26:54 +0100740
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800741 // If exchanged successfully, an acquire fence is required to make
742 // all memory accesses made by other threads visible to the current CPU.
Yabin Cui17393b02015-03-21 15:08:25 -0700743 if (__predict_true(atomic_compare_exchange_weak_explicit(&mutex->state,
Yabin Cuie69c2452015-02-13 16:21:25 -0800744 &old_state, locked_contended,
Yabin Cui86fc96f2015-01-29 21:50:48 -0800745 memory_order_acquire,
746 memory_order_relaxed))) {
Yabin Cuie69c2452015-02-13 16:21:25 -0800747 atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
Yabin Cui86fc96f2015-01-29 21:50:48 -0800748 return 0;
749 }
750 continue;
Yabin Cui17393b02015-03-21 15:08:25 -0700751 } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(old_state)) {
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800752 // We should set it to locked_contended beforing going to sleep. This can make
Yabin Cui86fc96f2015-01-29 21:50:48 -0800753 // sure waiters will be woken up eventually.
754
Yabin Cui17393b02015-03-21 15:08:25 -0700755 int new_state = MUTEX_STATE_BITS_FLIP_CONTENTION(old_state);
756 if (__predict_false(!atomic_compare_exchange_weak_explicit(&mutex->state,
757 &old_state, new_state,
Yabin Cui86fc96f2015-01-29 21:50:48 -0800758 memory_order_relaxed,
759 memory_order_relaxed))) {
David 'Digit' Turnere1414aa2012-01-24 15:26:54 +0100760 continue;
761 }
Yabin Cui17393b02015-03-21 15:08:25 -0700762 old_state = new_state;
David 'Digit' Turnere1414aa2012-01-24 15:26:54 +0100763 }
764
Elliott Hughesdd586f22015-12-16 15:15:58 -0800765 int result = check_timespec(abs_timeout_or_null, true);
Yabin Cuic9a659c2015-11-05 15:36:08 -0800766 if (result != 0) {
767 return result;
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800768 }
Yabin Cuic9a659c2015-11-05 15:36:08 -0800769 // We are in locked_contended state, sleep until someone wakes us up.
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800770 if (RecursiveOrErrorcheckMutexWait(mutex, shared, old_state, use_realtime_clock,
771 abs_timeout_or_null) == -ETIMEDOUT) {
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800772 return ETIMEDOUT;
773 }
Yabin Cui17393b02015-03-21 15:08:25 -0700774 old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
David 'Digit' Turnere1414aa2012-01-24 15:26:54 +0100775 }
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800776}
777
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800778} // namespace NonPI
779
Yabin Cui17393b02015-03-21 15:08:25 -0700780int pthread_mutex_lock(pthread_mutex_t* mutex_interface) {
Christopher Ferris511cfd92015-06-09 18:46:15 -0700781#if !defined(__LP64__)
Dan Albertbaa2a972015-08-13 16:58:50 -0700782 // Some apps depend on being able to pass NULL as a mutex and get EINVAL
783 // back. Don't need to worry about it for LP64 since the ABI is brand new,
784 // but keep compatibility for LP32. http://b/19995172.
Christopher Ferris511cfd92015-06-09 18:46:15 -0700785 if (mutex_interface == NULL) {
786 return EINVAL;
787 }
788#endif
789
Yabin Cui17393b02015-03-21 15:08:25 -0700790 pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800791
Yabin Cuie69c2452015-02-13 16:21:25 -0800792 uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
793 uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800794 // Avoid slowing down fast path of normal mutex lock operation.
795 if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
Yabin Cui5a00ba72018-01-26 17:32:31 -0800796 uint16_t shared = (old_state & MUTEX_SHARED_MASK);
797 if (__predict_true(NonPI::NormalMutexTryLock(mutex, shared) == 0)) {
798 return 0;
799 }
800 } else if (mtype == MUTEX_TYPE_BITS_WITH_PI) {
801 PIMutex& m = mutex->ToPIMutex();
802 // Handle common case first.
803 if (__predict_true(PIMutexTryLock(m) == 0)) {
804 return 0;
805 }
806 return PIMutexTimedLock(mutex->ToPIMutex(), nullptr);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800807 }
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800808 return NonPI::MutexLockWithTimeout(mutex, false, nullptr);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800809}
810
// Release a mutex held by the calling thread.
// Returns 0 on success, EPERM if a recursive/errorcheck mutex is not owned by
// the caller, or EINVAL for a NULL mutex on LP32.
int pthread_mutex_unlock(pthread_mutex_t* mutex_interface) {
#if !defined(__LP64__)
    // Some apps depend on being able to pass NULL as a mutex and get EINVAL
    // back. Don't need to worry about it for LP64 since the ABI is brand new,
    // but keep compatibility for LP32. http://b/19995172.
    if (mutex_interface == NULL) {
        return EINVAL;
    }
#endif

    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        NonPI::NormalMutexUnlock(mutex, shared);
        return 0;
    }
    if (mtype == MUTEX_TYPE_BITS_WITH_PI) {
        return PIMutexUnlock(mutex->ToPIMutex());
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if ( tid != atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed) ) {
        return EPERM;
    }

    // If the counter is > 0, we can simply decrement it atomically.
    // Since other threads can mutate the lower state bits (and only the
    // lower state bits), use a compare_exchange loop to do it.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(old_state)) {
        // We still own the mutex, so a release fence is not needed.
        atomic_fetch_sub_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
        return 0;
    }

    // The counter is 0, so we're going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to read
    // the current state, which will be locked_contended if there may have waiters
    // to awake.
    // A release fence is required to make previous stores visible to next
    // lock owner threads.
    atomic_store_explicit(&mutex->owner_tid, 0, memory_order_relaxed);
    const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    old_state = atomic_exchange_explicit(&mutex->state, unlocked, memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(old_state)) {
        // Someone may be sleeping in the futex; wake exactly one waiter.
        __futex_wake_ex(&mutex->state, shared, 1);
    }

    return 0;
}
866
Yabin Cui17393b02015-03-21 15:08:25 -0700867int pthread_mutex_trylock(pthread_mutex_t* mutex_interface) {
868 pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
Yabin Cui86fc96f2015-01-29 21:50:48 -0800869
Yabin Cuie69c2452015-02-13 16:21:25 -0800870 uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
871 uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800872
Elliott Hughes5b1111a2014-10-24 19:33:11 -0700873 // Handle common case first.
874 if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800875 uint16_t shared = (old_state & MUTEX_SHARED_MASK);
876 return NonPI::NormalMutexTryLock(mutex, shared);
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800877 }
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800878 if (mtype == MUTEX_TYPE_BITS_WITH_PI) {
Yabin Cui5a00ba72018-01-26 17:32:31 -0800879 return PIMutexTryLock(mutex->ToPIMutex());
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800880 }
David 'Digit' Turner40e6b822010-03-17 11:25:46 -0700881
Elliott Hughes5b1111a2014-10-24 19:33:11 -0700882 // Do we already own this recursive or error-check mutex?
883 pid_t tid = __get_thread()->tid;
Yabin Cuie69c2452015-02-13 16:21:25 -0800884 if (tid == atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
Elliott Hughes5b1111a2014-10-24 19:33:11 -0700885 if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
886 return EBUSY;
887 }
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800888 return NonPI::RecursiveIncrement(mutex, old_state);
Elliott Hughes5b1111a2014-10-24 19:33:11 -0700889 }
David 'Digit' Turner40e6b822010-03-17 11:25:46 -0700890
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800891 uint16_t shared = (old_state & MUTEX_SHARED_MASK);
892 const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
893 const uint16_t locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
894
Yabin Cui86fc96f2015-01-29 21:50:48 -0800895 // Same as pthread_mutex_lock, except that we don't want to wait, and
896 // the only operation that can succeed is a single compare_exchange to acquire the
897 // lock if it is released / not owned by anyone. No need for a complex loop.
Yabin Cui5b8e7cd2015-03-04 17:36:59 -0800898 // If exchanged successfully, an acquire fence is required to make
899 // all memory accesses made by other threads visible to the current CPU.
Yabin Cui17393b02015-03-21 15:08:25 -0700900 old_state = unlocked;
Yabin Cuie69c2452015-02-13 16:21:25 -0800901 if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
902 locked_uncontended,
Yabin Cui86fc96f2015-01-29 21:50:48 -0800903 memory_order_acquire,
904 memory_order_relaxed))) {
Yabin Cuie69c2452015-02-13 16:21:25 -0800905 atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
David 'Digit' Turnere1414aa2012-01-24 15:26:54 +0100906 return 0;
907 }
David 'Digit' Turnere1414aa2012-01-24 15:26:54 +0100908 return EBUSY;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800909}
910
#if !defined(__LP64__)
// Legacy LP32-only entry point: lock with a relative timeout in milliseconds.
// Reports a timeout as EBUSY (not ETIMEDOUT) for historical compatibility.
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex_interface, unsigned ms) {
    // Convert the relative millisecond timeout into an absolute
    // CLOCK_MONOTONIC deadline for the slow-path lock.
    timespec relative;
    timespec_from_ms(relative, ms);
    timespec deadline;
    absolute_timespec_from_timespec(deadline, relative, CLOCK_MONOTONIC);
    int error = NonPI::MutexLockWithTimeout(__get_internal_mutex(mutex_interface), false,
                                            &deadline);
    return (error == ETIMEDOUT) ? EBUSY : error;
}
#endif
925
Yabin Cui17393b02015-03-21 15:08:25 -0700926int pthread_mutex_timedlock(pthread_mutex_t* mutex_interface, const timespec* abs_timeout) {
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800927 pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
928 uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
929 uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
930 // Handle common case first.
931 if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
932 uint16_t shared = (old_state & MUTEX_SHARED_MASK);
933 if (__predict_true(NonPI::NormalMutexTryLock(mutex, shared) == 0)) {
934 return 0;
935 }
936 }
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800937 if (mtype == MUTEX_TYPE_BITS_WITH_PI) {
Yabin Cui5a00ba72018-01-26 17:32:31 -0800938 return PIMutexTimedLock(mutex->ToPIMutex(), abs_timeout);
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800939 }
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800940 return NonPI::MutexLockWithTimeout(mutex, true, abs_timeout);
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700941}
942
Yabin Cui17393b02015-03-21 15:08:25 -0700943int pthread_mutex_destroy(pthread_mutex_t* mutex_interface) {
Yabin Cui0307eee2015-11-16 20:19:31 -0800944 pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
945 uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800946 uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
947 if (mtype == MUTEX_TYPE_BITS_WITH_PI) {
Yabin Cui5a00ba72018-01-26 17:32:31 -0800948 int result = PIMutexDestroy(mutex->ToPIMutex());
949 if (result == 0) {
950 mutex->FreePIMutex();
951 }
952 return result;
Yabin Cui6b9c85b2018-01-23 12:56:18 -0800953 }
Yabin Cui0307eee2015-11-16 20:19:31 -0800954 // Store 0xffff to make the mutex unusable. Although POSIX standard says it is undefined
955 // behavior to destroy a locked mutex, we prefer not to change mutex->state in that situation.
956 if (MUTEX_STATE_BITS_IS_UNLOCKED(old_state) &&
957 atomic_compare_exchange_strong_explicit(&mutex->state, &old_state, 0xffff,
958 memory_order_relaxed, memory_order_relaxed)) {
959 return 0;
Yabin Cui86fc96f2015-01-29 21:50:48 -0800960 }
Yabin Cui0307eee2015-11-16 20:19:31 -0800961 return EBUSY;
Mathias Agopian7c0c3792011-09-05 23:54:55 -0700962}