/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "pthread_internal.h"

#include <errno.h>
#include <inttypes.h>  // SCNuPTR/SCNxPTR, used when parsing /proc below.
#include <semaphore.h>
#include <stdio.h>     // fopen/fgets/sscanf, used when parsing /proc below.
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#include <async_safe/log.h>
#include <bionic/reserved_signals.h>

#include "private/ErrnoRestorer.h"
#include "private/ScopedRWLock.h"
#include "private/bionic_futex.h"
#include "private/bionic_globals.h"
#include "private/bionic_tls.h"

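// The global thread list: a doubly-linked list of every pthread_internal_t,
// guarded by g_thread_list_lock. Mutations take the write lock; traversals
// take the read lock.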
static pthread_internal_t* g_thread_list = nullptr;
static pthread_rwlock_t g_thread_list_lock = PTHREAD_RWLOCK_INITIALIZER;

pthread_t __pthread_internal_add(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  // We insert at the head.
  thread->next = g_thread_list;
  thread->prev = nullptr;
  if (thread->next != nullptr) {
    thread->next->prev = thread;
  }
  g_thread_list = thread;
  return reinterpret_cast<pthread_t>(thread);
}

void __pthread_internal_remove(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  if (thread->next != nullptr) {
    thread->next->prev = thread->prev;
  }
  if (thread->prev != nullptr) {
    thread->prev->next = thread->next;
  } else {
    g_thread_list = thread->next;
  }
}

static void __pthread_internal_free(pthread_internal_t* thread) {
  if (thread->mmap_size != 0) {
    // Free mapped space, including thread stack and pthread_internal_t.
    munmap(thread->mmap_base, thread->mmap_size);
  }
}

void __pthread_internal_remove_and_free(pthread_internal_t* thread) {
  __pthread_internal_remove(thread);
  __pthread_internal_free(thread);
}

pid_t __pthread_internal_gettid(pthread_t thread_id, const char* caller) {
  pthread_internal_t* thread = __pthread_internal_find(thread_id, caller);
  return thread ? thread->tid : -1;
}

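// Maps a pthread_t to its pthread_internal_t, or returns null if the thread
// isn't on the global list (it may have exited and been freed). For apps
// targeting API level 26 or later, a stale pthread_t aborts instead, and a
// null pthread_t just logs a warning (there's never a valid thread at 0).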
pthread_internal_t* __pthread_internal_find(pthread_t thread_id, const char* caller) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(thread_id);

  // Check if we're looking for ourselves before acquiring the lock.
  if (thread == __get_thread()) return thread;

  {
    // Make sure to release the lock before the abort below. Otherwise,
    // some apps might deadlock in their own crash handlers (see b/6565627).
    ScopedReadLock locker(&g_thread_list_lock);
    for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
      if (t == thread) return thread;
    }
  }

  // Historically we'd return null, but from API level 26 we catch this error.
  if (android_get_application_target_sdk_version() >= 26) {
    if (thread == nullptr) {
      // This seems to be a common mistake, and it's relatively harmless because
      // there will never be a valid thread at address 0, whereas other invalid
      // addresses might sometimes contain threads or things that look enough like
      // threads for us to do some real damage by continuing.
      // TODO: try getting rid of this when Treble lets us keep vendor blobs on an old API level.
      async_safe_format_log(ANDROID_LOG_WARN, "libc", "invalid pthread_t (0) passed to %s", caller);
    } else {
      async_safe_fatal("invalid pthread_t %p passed to %s", thread, caller);
    }
  }
  return nullptr;
}

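// Returns the "startstack" field of /proc/self/stat: the address of the
// bottom of the main thread's stack.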
static uintptr_t __get_main_stack_startstack() {
  FILE* fp = fopen("/proc/self/stat", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/stat: %m");
  }

  char line[BUFSIZ];
  if (fgets(line, sizeof(line), fp) == nullptr) {
    async_safe_fatal("couldn't read /proc/self/stat: %m");
  }

  fclose(fp);

  // See man 5 proc. There's no reason comm can't contain ' ' or ')',
  // so we search backwards for the end of it. We're looking for this field:
  //
  //   startstack %lu (28) The address of the start (i.e., bottom) of the stack.
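  //
  // For example, a process whose comm is "a) b" starts its stat line with
  // something like "1234 (a) b) S 1 ...", so scanning forwards to the first
  // ')' would cut the comm field short; that's why the code below searches
  // backwards with strrchr().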
  uintptr_t startstack = 0;
  const char* end_of_comm = strrchr(line, ')');
  if (sscanf(end_of_comm + 1,
             " %*c "
             "%*d %*d %*d %*d %*d "
             "%*u %*u %*u %*u %*u %*u %*u "
             "%*d %*d %*d %*d %*d %*d "
             "%*u %*u %*d %*u %*u %*u %" SCNuPTR,
             &startstack) != 1) {
    async_safe_fatal("couldn't parse /proc/self/stat");
  }

  return startstack;
}

void __find_main_stack_limits(uintptr_t* low, uintptr_t* high) {
  // Ask the kernel where our main thread's stack started.
  uintptr_t startstack = __get_main_stack_startstack();

  // Hunt for the region that contains that address.
  FILE* fp = fopen("/proc/self/maps", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/maps: %m");
  }
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != nullptr) {
    uintptr_t lo, hi;
    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &lo, &hi) == 2) {
      if (lo <= startstack && startstack <= hi) {
        *low = lo;
        *high = hi;
        fclose(fp);
        return;
      }
    }
  }
  async_safe_fatal("stack not found in /proc/self/maps");
}

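// Remaps the main thread's stack and every existing pthread stack with
// PROT_MTE the first time stack tagging is enabled; the atomic_exchange
// below makes later calls no-ops. This can happen after threads are already
// running, for example (the likely caller, though not spelled out in this
// file) when a library that wants stack MTE is dlopen()ed. Only meaningful
// on aarch64; elsewhere it's a no-op.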
void __pthread_internal_remap_stack_with_mte() {
#if defined(__aarch64__)
  // If the process doesn't have MTE enabled, we don't need to do anything.
  if (!__libc_globals->memtag) return;
  bool prev = true;
  __libc_globals.mutate(
      [&prev](libc_globals* globals) { prev = atomic_exchange(&globals->memtag_stack, true); });
  // If memtag_stack was already set, the stacks have already been remapped.
  if (prev) return;
  uintptr_t lo, hi;
  __find_main_stack_limits(&lo, &hi);

  if (mprotect(reinterpret_cast<void*>(lo), hi - lo,
               PROT_READ | PROT_WRITE | PROT_MTE | PROT_GROWSDOWN)) {
    async_safe_fatal("error: failed to set PROT_MTE on main thread");
  }
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);
  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    if (t->terminating || t->is_main()) continue;
    if (mprotect(t->mmap_base_unguarded, t->mmap_size_unguarded,
                 PROT_READ | PROT_WRITE | PROT_MTE)) {
      async_safe_fatal("error: failed to set PROT_MTE on thread: %d", t->tid);
    }
  }
#endif
}

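// Runs func on every thread in the process: directly on the calling thread,
// and via a BIONIC_SIGNAL_RUN_ON_ALL_THREADS signal handler on every other
// thread, blocking until all of them have run. Returns true only if func
// returned true everywhere. On other threads func runs in signal-handler
// context, so it must be async-signal-safe. A minimal usage sketch (the
// callback is hypothetical):
//
//   bool ok = android_run_on_all_threads(
//       [](void*) {
//         // Do something async-signal-safe on each thread...
//         return true;
//       },
//       nullptr);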
bool android_run_on_all_threads(bool (*func)(void*), void* arg) {
  // Take the locks in this order to avoid lock inversion with pthread_create,
  // which takes g_thread_creation_lock and then, via __pthread_internal_add,
  // g_thread_list_lock.
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);

  // Call the function directly for the current thread so that we don't need to worry about
  // the consequences of synchronizing with ourselves.
  if (!func(arg)) {
    return false;
  }

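  // The signal handler below can't capture anything, so it communicates with
  // us through these statics; that's safe because the write lock on
  // g_thread_creation_lock (held above) keeps concurrent callers serialized.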
  static sem_t g_sem;
  if (sem_init(&g_sem, 0, 0) != 0) {
    return false;
  }

  static bool (*g_func)(void*);
  static void* g_arg;
  g_func = func;
  g_arg = arg;

  static _Atomic(bool) g_retval;
  atomic_init(&g_retval, true);

  auto handler = [](int, siginfo_t*, void*) {
    ErrnoRestorer restorer;
    if (!g_func(g_arg)) {
      atomic_store(&g_retval, false);
    }
    sem_post(&g_sem);
  };

  struct sigaction act = {}, oldact;
  act.sa_flags = SA_SIGINFO;
  act.sa_sigaction = handler;
  sigfillset(&act.sa_mask);
  if (sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &act, &oldact) != 0) {
    sem_destroy(&g_sem);
    return false;
  }

  pid_t my_pid = getpid();
  size_t num_tids = 0;
  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    // The function was called directly for the current thread above, so there's
    // no need to send a signal to ourselves to call it here.
    if (t == __get_thread()) continue;

    // If a thread is terminating (has blocked signals) or has already terminated,
    // our signal will never be received, so check for that condition and skip
    // the thread if so.
    if (atomic_load(&t->terminating)) continue;

    if (tgkill(my_pid, t->tid, BIONIC_SIGNAL_RUN_ON_ALL_THREADS) == 0) {
      ++num_tids;
    } else {
      atomic_store(&g_retval, false);
    }
  }

  for (size_t i = 0; i != num_tids; ++i) {
    if (TEMP_FAILURE_RETRY(sem_wait(&g_sem)) != 0) {
      atomic_store(&g_retval, false);
      break;
    }
  }

  sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &oldact, nullptr);
  sem_destroy(&g_sem);
  return atomic_load(&g_retval);
}