/*
 * Copyright (C) 2010 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <stdatomic.h>
#include <string.h>

#include "pthread_internal.h"
#include "private/bionic_futex.h"
#include "private/bionic_lock.h"
#include "private/bionic_time_conversions.h"

/* Technical note:
 *
 * Possible states of a read/write lock:
 *
 *  - no readers and no writer (unlocked)
 *  - one or more readers sharing the lock at the same time (read-locked)
 *  - one writer holding the lock (write-locked)
 *
 * Additionally:
 *  - trying to get the write-lock while there are any readers or a writer blocks
 *  - trying to get the read-lock while there is a writer blocks
 *  - a single thread can acquire the lock multiple times in read mode
 *
 *  - POSIX states that behavior is undefined (it may deadlock) if a thread tries
 *    to acquire the lock
 *      - in write mode while already holding the lock (whether in read or write mode)
 *      - in read mode while already holding the lock in write mode.
 *  - This implementation returns EDEADLK in the "write after write" and "read after
 *    write" cases, and deadlocks in the "write after read" case.
 */

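// Illustrative usage sketch (not part of the implementation): the blocking
// behavior described above, as seen by a caller of the standard pthread API.
//
//   static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;
//   static int g_value;
//
//   int read_value() {
//     pthread_rwlock_rdlock(&g_lock);  // Shared with other readers; blocks while a writer holds it.
//     int result = g_value;
//     pthread_rwlock_unlock(&g_lock);
//     return result;
//   }
//
//   void write_value(int new_value) {
//     pthread_rwlock_wrlock(&g_lock);  // Exclusive; blocks while any reader or writer holds it.
//     g_value = new_value;
//     pthread_rwlock_unlock(&g_lock);
//   }
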
// A rwlockattr is implemented as a 32-bit integer which has the following fields:
//  bits    name              description
//   1      rwlock_kind       rwlock preference, like PTHREAD_RWLOCK_PREFER_READER_NP.
//   0      process_shared    set to 1 if the rwlock is shared between processes.

#define RWLOCKATTR_PSHARED_SHIFT 0
#define RWLOCKATTR_KIND_SHIFT    1

#define RWLOCKATTR_PSHARED_MASK  1
#define RWLOCKATTR_KIND_MASK     2
#define RWLOCKATTR_RESERVED_MASK (~3)

static inline __always_inline bool __rwlockattr_getpshared(const pthread_rwlockattr_t* attr) {
  return (*attr & RWLOCKATTR_PSHARED_MASK) >> RWLOCKATTR_PSHARED_SHIFT;
}

static inline __always_inline void __rwlockattr_setpshared(pthread_rwlockattr_t* attr, int pshared) {
  *attr = (*attr & ~RWLOCKATTR_PSHARED_MASK) | (pshared << RWLOCKATTR_PSHARED_SHIFT);
}

static inline __always_inline int __rwlockattr_getkind(const pthread_rwlockattr_t* attr) {
  return (*attr & RWLOCKATTR_KIND_MASK) >> RWLOCKATTR_KIND_SHIFT;
}

static inline __always_inline void __rwlockattr_setkind(pthread_rwlockattr_t* attr, int kind) {
  *attr = (*attr & ~RWLOCKATTR_KIND_MASK) | (kind << RWLOCKATTR_KIND_SHIFT);
}

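// Worked example (illustrative): the single-bit kind field implies the two
// supported kinds have values 0 and 1, so a process-shared, writer-preferring
// attribute packs as
//   *attr = (1 << RWLOCKATTR_KIND_SHIFT) | (1 << RWLOCKATTR_PSHARED_SHIFT) == 3,
// with all bits in RWLOCKATTR_RESERVED_MASK left zero.
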
int pthread_rwlockattr_init(pthread_rwlockattr_t* attr) {
  *attr = 0;
  return 0;
}

int pthread_rwlockattr_destroy(pthread_rwlockattr_t* attr) {
  // Set every bit, including the reserved ones, so that reuse of a destroyed
  // attribute is caught by the reserved-bits check in pthread_rwlock_init.
  *attr = -1;
  return 0;
}

int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
  if (__rwlockattr_getpshared(attr)) {
    *pshared = PTHREAD_PROCESS_SHARED;
  } else {
    *pshared = PTHREAD_PROCESS_PRIVATE;
  }
  return 0;
}

int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* attr, int pshared) {
  switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
      __rwlockattr_setpshared(attr, 0);
      return 0;
    case PTHREAD_PROCESS_SHARED:
      __rwlockattr_setpshared(attr, 1);
      return 0;
    default:
      return EINVAL;
  }
}

int pthread_rwlockattr_getkind_np(const pthread_rwlockattr_t* attr, int* pref) {
  *pref = __rwlockattr_getkind(attr);
  return 0;
}

int pthread_rwlockattr_setkind_np(pthread_rwlockattr_t* attr, int pref) {
  switch (pref) {
    case PTHREAD_RWLOCK_PREFER_READER_NP:   // Fall through.
    case PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP:
      __rwlockattr_setkind(attr, pref);
      return 0;
    default:
      return EINVAL;
  }
}

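// Illustrative setup sketch (not part of the implementation): configuring a
// writer-preferring rwlock through the attribute API above.
//
//   pthread_rwlockattr_t attr;
//   pthread_rwlock_t lock;
//   pthread_rwlockattr_init(&attr);
//   pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
//   pthread_rwlock_init(&lock, &attr);
//   pthread_rwlockattr_destroy(&attr);
//   // ... use the lock; pending writers now block new readers ...
//   pthread_rwlock_destroy(&lock);
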
// A rwlock state is implemented as a 32-bit integer with the following fields:
//  bits     name                     description
//   31      owned_by_writer_flag     set to 1 if the lock is currently owned by a writer.
//  30-2     reader_count             the count of readers holding the lock.
//   1       have_pending_writers     set to 1 if there are pending writers.
//   0       have_pending_readers     set to 1 if there are pending readers.

#define STATE_HAVE_PENDING_READERS_SHIFT    0
#define STATE_HAVE_PENDING_WRITERS_SHIFT    1
#define STATE_READER_COUNT_SHIFT            2
#define STATE_OWNED_BY_WRITER_SHIFT        31

#define STATE_HAVE_PENDING_READERS_FLAG     (1 << STATE_HAVE_PENDING_READERS_SHIFT)
#define STATE_HAVE_PENDING_WRITERS_FLAG     (1 << STATE_HAVE_PENDING_WRITERS_SHIFT)
#define STATE_READER_COUNT_CHANGE_STEP      (1 << STATE_READER_COUNT_SHIFT)
#define STATE_OWNED_BY_WRITER_FLAG          (1 << STATE_OWNED_BY_WRITER_SHIFT)

#define STATE_HAVE_PENDING_READERS_OR_WRITERS_FLAG \
    (STATE_HAVE_PENDING_READERS_FLAG | STATE_HAVE_PENDING_WRITERS_FLAG)

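// Worked example (illustrative): two readers holding the lock while one writer
// waits encodes as
//   state = 2 * STATE_READER_COUNT_CHANGE_STEP + STATE_HAVE_PENDING_WRITERS_FLAG
//         = (2 << 2) | (1 << 1) = 10.
// Each reader unlock subtracts STATE_READER_COUNT_CHANGE_STEP; once the value
// drops to STATE_HAVE_PENDING_WRITERS_FLAG the last reader is gone and the
// pending writer can be woken.
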
struct pthread_rwlock_internal_t {
  atomic_int state;
  atomic_int writer_tid;

  bool pshared;
  bool writer_nonrecursive_preferred;
  uint16_t __pad;

  // When a reader thread plans to suspend on the rwlock, it will add STATE_HAVE_PENDING_READERS_FLAG
  // to state, increase pending_reader_count, and wait on pending_reader_wakeup_serial. After being
  // woken up, the reader thread decreases pending_reader_count, and the last pending reader thread
  // should remove STATE_HAVE_PENDING_READERS_FLAG from state. A pending writer thread works in a
  // similar way, except that it uses the flag and members for writer threads.

  Lock pending_lock;  // All pending members below are protected by pending_lock.
  uint32_t pending_reader_count;  // Count of pending reader threads.
  uint32_t pending_writer_count;  // Count of pending writer threads.
  uint32_t pending_reader_wakeup_serial;  // Pending reader threads wait on this address by futex_wait.
  uint32_t pending_writer_wakeup_serial;  // Pending writer threads wait on this address by futex_wait.

#if defined(__LP64__)
  char __reserved[20];
#else
  char __reserved[4];
#endif
};

static inline __always_inline bool __state_owned_by_writer(int state) {
  // The owned_by_writer_flag is the sign bit, so a negative state means a writer owns the lock.
  return state < 0;
}

static inline __always_inline bool __state_owned_by_readers(int state) {
  // If state >= 0, the owned_by_writer_flag is not set.
  // And if state >= STATE_READER_COUNT_CHANGE_STEP, the reader_count field is not empty.
  return state >= STATE_READER_COUNT_CHANGE_STEP;
}

static inline __always_inline bool __state_owned_by_readers_or_writer(int state) {
  return state < 0 || state >= STATE_READER_COUNT_CHANGE_STEP;
}

static inline __always_inline int __state_add_writer_flag(int state) {
  return state | STATE_OWNED_BY_WRITER_FLAG;
}

static inline __always_inline bool __state_is_last_reader(int state) {
  // Callers pass the state fetched *before* decrementing, so a reader count of
  // exactly one means this unlock releases the last reader.
  return (state >> STATE_READER_COUNT_SHIFT) == 1;
}

static inline __always_inline bool __state_have_pending_writers(int state) {
  return state & STATE_HAVE_PENDING_WRITERS_FLAG;
}

static inline __always_inline bool __state_have_pending_readers_or_writers(int state) {
  return state & STATE_HAVE_PENDING_READERS_OR_WRITERS_FLAG;
}

static_assert(sizeof(pthread_rwlock_t) == sizeof(pthread_rwlock_internal_t),
              "pthread_rwlock_t should actually be pthread_rwlock_internal_t in implementation.");

// For binary compatibility with old versions of pthread_rwlock_t, we can't use stricter
// alignment than 4-byte alignment.
static_assert(alignof(pthread_rwlock_t) == 4,
              "pthread_rwlock_t should fulfill the alignment requirement of pthread_rwlock_internal_t.");

static inline __always_inline pthread_rwlock_internal_t* __get_internal_rwlock(pthread_rwlock_t* rwlock_interface) {
  return reinterpret_cast<pthread_rwlock_internal_t*>(rwlock_interface);
}

int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  memset(rwlock, 0, sizeof(pthread_rwlock_internal_t));

  if (__predict_false(attr != nullptr)) {
    rwlock->pshared = __rwlockattr_getpshared(attr);
    int kind = __rwlockattr_getkind(attr);
    switch (kind) {
      case PTHREAD_RWLOCK_PREFER_READER_NP:
        rwlock->writer_nonrecursive_preferred = false;
        break;
      case PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP:
        rwlock->writer_nonrecursive_preferred = true;
        break;
      default:
        return EINVAL;
    }
    if ((*attr & RWLOCKATTR_RESERVED_MASK) != 0) {
      return EINVAL;
    }
  }

  atomic_init(&rwlock->state, 0);
  rwlock->pending_lock.init(rwlock->pshared);
  return 0;
}

int pthread_rwlock_destroy(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (atomic_load_explicit(&rwlock->state, memory_order_relaxed) != 0) {
    return EBUSY;
  }
  return 0;
}

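// Illustrative: destroy fails while the lock is still in use, because any
// holder or pending waiter leaves state non-zero.
//
//   pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
//   pthread_rwlock_rdlock(&lock);
//   int rc = pthread_rwlock_destroy(&lock);  // rc == EBUSY
//   pthread_rwlock_unlock(&lock);
//   rc = pthread_rwlock_destroy(&lock);      // rc == 0
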
static inline __always_inline bool __can_acquire_read_lock(int old_state,
                                                           bool writer_nonrecursive_preferred) {
  // If writers are preferred and readers are nonrecursive, prevent further readers from
  // acquiring the lock while there are writers waiting for it.
  bool cannot_apply = __state_owned_by_writer(old_state) ||
                      (writer_nonrecursive_preferred && __state_have_pending_writers(old_state));
  return !cannot_apply;
}

static inline __always_inline int __pthread_rwlock_tryrdlock(pthread_rwlock_internal_t* rwlock) {
  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (__predict_true(__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred))) {
    int new_state = old_state + STATE_READER_COUNT_CHANGE_STEP;
    if (__predict_false(!__state_owned_by_readers(new_state))) {  // Happens when the reader count overflows.
      return EAGAIN;
    }
    if (__predict_true(atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, new_state,
                                                             memory_order_acquire, memory_order_relaxed))) {
      return 0;
    }
  }
  return EBUSY;
}

static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock, bool use_realtime_clock,
                                        const timespec* abs_timeout_or_null) {
  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }

  while (true) {
    int result = __pthread_rwlock_tryrdlock(rwlock);
    if (result == 0 || result == EAGAIN) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null, true);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count++;

    // We rely on the fact that all atomic exchange operations on the same object (here it is
    // rwlock->state) always appear to occur in a single total order. If the pending flag is added
    // before unlocking, the unlocking thread will wake up the waiter. Otherwise, we will see that
    // the state is unlocked and will not wait anymore.
    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_READERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_reader_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      futex_result = __futex_wait_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared,
                                     old_serial, use_realtime_clock, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count--;
    if (rwlock->pending_reader_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_READERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}

static inline __always_inline bool __can_acquire_write_lock(int old_state) {
  return !__state_owned_by_readers_or_writer(old_state);
}

static inline __always_inline int __pthread_rwlock_trywrlock(pthread_rwlock_internal_t* rwlock) {
  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (__predict_true(__can_acquire_write_lock(old_state))) {
    if (__predict_true(atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
                         __state_add_writer_flag(old_state), memory_order_acquire,
                         memory_order_relaxed))) {
      atomic_store_explicit(&rwlock->writer_tid, __get_thread()->tid, memory_order_relaxed);
      return 0;
    }
  }
  return EBUSY;
}

static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock, bool use_realtime_clock,
                                        const timespec* abs_timeout_or_null) {
  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }
  while (true) {
    int result = __pthread_rwlock_trywrlock(rwlock);
    if (result == 0) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null, true);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_write_lock(old_state)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count++;

    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_WRITERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_writer_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_write_lock(old_state)) {
      futex_result = __futex_wait_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared,
                                     old_serial, use_realtime_clock, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count--;
    if (rwlock->pending_writer_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_WRITERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}

int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
  // Avoid slowing down the fast path of rdlock.
  if (__predict_true(__pthread_rwlock_tryrdlock(rwlock) == 0)) {
    return 0;
  }
  return __pthread_rwlock_timedrdlock(rwlock, false, nullptr);
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, true, abs_timeout);
}

int pthread_rwlock_timedrdlock_monotonic_np(pthread_rwlock_t* rwlock_interface,
                                            const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, false, abs_timeout);
}

int pthread_rwlock_clockrdlock(pthread_rwlock_t* rwlock_interface, clockid_t clock,
                               const struct timespec* abs_timeout) {
  switch (clock) {
    case CLOCK_MONOTONIC:
      return pthread_rwlock_timedrdlock_monotonic_np(rwlock_interface, abs_timeout);
    case CLOCK_REALTIME:
      return pthread_rwlock_timedrdlock(rwlock_interface, abs_timeout);
    default:
      return EINVAL;
  }
}

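// Illustrative timeout sketch (not part of the implementation): the deadline is
// an absolute time on the chosen clock, so a relative timeout must be added to
// the current clock value first.
//
//   timespec deadline;
//   clock_gettime(CLOCK_MONOTONIC, &deadline);
//   deadline.tv_sec += 1;  // Wait at most one second.
//   int rc = pthread_rwlock_clockrdlock(&lock, CLOCK_MONOTONIC, &deadline);
//   // rc is 0 on success, ETIMEDOUT if the second elapsed first.
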
int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock_interface) {
  return __pthread_rwlock_tryrdlock(__get_internal_rwlock(rwlock_interface));
}

int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
  // Avoid slowing down the fast path of wrlock.
  if (__predict_true(__pthread_rwlock_trywrlock(rwlock) == 0)) {
    return 0;
  }
  return __pthread_rwlock_timedwrlock(rwlock, false, nullptr);
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, true, abs_timeout);
}

int pthread_rwlock_timedwrlock_monotonic_np(pthread_rwlock_t* rwlock_interface,
                                            const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, false, abs_timeout);
}

int pthread_rwlock_clockwrlock(pthread_rwlock_t* rwlock_interface, clockid_t clock,
                               const struct timespec* abs_timeout) {
  switch (clock) {
    case CLOCK_MONOTONIC:
      return pthread_rwlock_timedwrlock_monotonic_np(rwlock_interface, abs_timeout);
    case CLOCK_REALTIME:
      return pthread_rwlock_timedwrlock(rwlock_interface, abs_timeout);
    default:
      return EINVAL;
  }
}

int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock_interface) {
  return __pthread_rwlock_trywrlock(__get_internal_rwlock(rwlock_interface));
}

int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
  if (__state_owned_by_writer(old_state)) {
    if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) != __get_thread()->tid) {
      return EPERM;
    }
    atomic_store_explicit(&rwlock->writer_tid, 0, memory_order_relaxed);
    old_state = atomic_fetch_and_explicit(&rwlock->state, ~STATE_OWNED_BY_WRITER_FLAG,
                                          memory_order_release);
    if (!__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }

  } else if (__state_owned_by_readers(old_state)) {
    old_state = atomic_fetch_sub_explicit(&rwlock->state, STATE_READER_COUNT_CHANGE_STEP,
                                          memory_order_release);
    if (!__state_is_last_reader(old_state) || !__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }

  } else {
    return EPERM;
  }

  // Wake up pending readers or writers.
  rwlock->pending_lock.lock();
  if (rwlock->pending_writer_count != 0) {
    rwlock->pending_writer_wakeup_serial++;
    rwlock->pending_lock.unlock();

    __futex_wake_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared, 1);

  } else if (rwlock->pending_reader_count != 0) {
    rwlock->pending_reader_wakeup_serial++;
    rwlock->pending_lock.unlock();

    __futex_wake_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared, INT_MAX);

  } else {
    // This can happen when the waiters have already been woken by a timeout.
    rwlock->pending_lock.unlock();
  }
  return 0;
}