/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "pthread_internal.h"

#include <errno.h>
#include <semaphore.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <async_safe/log.h>
#include <bionic/reserved_signals.h>

#include "bionic/tls_defines.h"
#include "private/ErrnoRestorer.h"
#include "private/ScopedRWLock.h"
#include "private/bionic_futex.h"
#include "private/bionic_globals.h"
#include "private/bionic_tls.h"

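// Global doubly-linked list of all live pthread_internal_t structures. Writers
// (adding or removing a thread) take the lock exclusively; lookups and
// iteration take it shared.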
static pthread_internal_t* g_thread_list = nullptr;
static pthread_rwlock_t g_thread_list_lock = PTHREAD_RWLOCK_INITIALIZER;

pthread_t __pthread_internal_add(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  // We insert at the head.
  thread->next = g_thread_list;
  thread->prev = nullptr;
  if (thread->next != nullptr) {
    thread->next->prev = thread;
  }
  g_thread_list = thread;
  return reinterpret_cast<pthread_t>(thread);
}

void __pthread_internal_remove(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  if (thread->next != nullptr) {
    thread->next->prev = thread->prev;
  }
  if (thread->prev != nullptr) {
    thread->prev->next = thread->next;
  } else {
    g_thread_list = thread->next;
  }
}

// N.B. that this is NOT the pagesize, but 4096. This is hardcoded in the codegen.
// See
// https://github.com/search?q=repo%3Allvm/llvm-project%20AArch64StackTagging%3A%3AinsertBaseTaggedPointer&type=code
constexpr size_t kStackMteRingbufferSizeMultiplier = 4096;

static void __pthread_internal_free(pthread_internal_t* thread) {
#ifdef __aarch64__
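  // The TLS slot holds a tagged pointer: the top byte is the ring buffer size
  // in units of kStackMteRingbufferSizeMultiplier, and the low 56 bits are the
  // base address of the mapping (see __allocate_stack_mte_ringbuffer below).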
  if (void* stack_mte_tls = thread->bionic_tcb->tls_slot(TLS_SLOT_STACK_MTE)) {
    size_t size =
        kStackMteRingbufferSizeMultiplier * (reinterpret_cast<uintptr_t>(stack_mte_tls) >> 56ULL);
    void* ptr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(stack_mte_tls) &
                                        ((1ULL << 56ULL) - 1ULL));
    munmap(ptr, size);
  }
#endif
  if (thread->mmap_size != 0) {
    // Free mapped space, including thread stack and pthread_internal_t.
    munmap(thread->mmap_base, thread->mmap_size);
  }
}

void __pthread_internal_remove_and_free(pthread_internal_t* thread) {
  __pthread_internal_remove(thread);
  __pthread_internal_free(thread);
}

pid_t __pthread_internal_gettid(pthread_t thread_id, const char* caller) {
  pthread_internal_t* thread = __pthread_internal_find(thread_id, caller);
  return thread ? thread->tid : -1;
}

pthread_internal_t* __pthread_internal_find(pthread_t thread_id, const char* caller) {
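  // In bionic, a pthread_t is simply the address of the thread's pthread_internal_t,
  // so "finding" a thread means checking that the pointer is still on the list.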
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(thread_id);

  // Check if we're looking for ourselves before acquiring the lock.
  if (thread == __get_thread()) return thread;

  {
    // Make sure to release the lock before the abort below. Otherwise,
    // some apps might deadlock in their own crash handlers (see b/6565627).
    ScopedReadLock locker(&g_thread_list_lock);
    for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
      if (t == thread) return thread;
    }
  }

  // Historically we'd return null, but from API level 26 we catch this error.
  if (android_get_application_target_sdk_version() >= 26) {
    if (thread == nullptr) {
      // This seems to be a common mistake, and it's relatively harmless because
      // there will never be a valid thread at address 0, whereas other invalid
      // addresses might sometimes contain threads or things that look enough like
      // threads for us to do some real damage by continuing.
      // TODO: try getting rid of this when Treble lets us keep vendor blobs on an old API level.
      async_safe_format_log(ANDROID_LOG_WARN, "libc", "invalid pthread_t (0) passed to %s", caller);
    } else {
      async_safe_fatal("invalid pthread_t %p passed to %s", thread, caller);
    }
  }
  return nullptr;
}

static uintptr_t __get_main_stack_startstack() {
  FILE* fp = fopen("/proc/self/stat", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/stat: %m");
  }

  char line[BUFSIZ];
  if (fgets(line, sizeof(line), fp) == nullptr) {
    async_safe_fatal("couldn't read /proc/self/stat: %m");
  }

  fclose(fp);

  // See man 5 proc. There's no reason comm can't contain ' ' or ')',
  // so we search backwards for the end of it. We're looking for this field:
  //
  //   startstack %lu (28) The address of the start (i.e., bottom) of the stack.
  uintptr_t startstack = 0;
  const char* end_of_comm = strrchr(line, ')');
  if (sscanf(end_of_comm + 1,
             " %*c "
             "%*d %*d %*d %*d %*d "
             "%*u %*u %*u %*u %*u %*u %*u "
             "%*d %*d %*d %*d %*d %*d "
             "%*u %*u %*d %*u %*u %*u %" SCNuPTR,
             &startstack) != 1) {
    async_safe_fatal("couldn't parse /proc/self/stat");
  }

  return startstack;
}

void __find_main_stack_limits(uintptr_t* low, uintptr_t* high) {
  // Ask the kernel where our main thread's stack started.
  uintptr_t startstack = __get_main_stack_startstack();

  // Hunt for the region that contains that address.
  FILE* fp = fopen("/proc/self/maps", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/maps: %m");
  }
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != nullptr) {
    uintptr_t lo, hi;
    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &lo, &hi) == 2) {
      if (lo <= startstack && startstack <= hi) {
        *low = lo;
        *high = hi;
        fclose(fp);
        return;
      }
    }
  }
  async_safe_fatal("stack not found in /proc/self/maps");
}

__LIBC_HIDDEN__ void* __allocate_stack_mte_ringbuffer(size_t n, pthread_internal_t* thread) {
  if (n > 7) async_safe_fatal("error: invalid mte stack ring buffer size");
  // Allocation needs to be aligned to 2*size to make the fancy code-gen work.
  // So we allocate 3*size - pagesz bytes, which will always contain size bytes
  // aligned to 2*size, and unmap the unneeded part.
  // See
  // https://github.com/search?q=repo%3Allvm/llvm-project%20AArch64StackTagging%3A%3AinsertBaseTaggedPointer&type=code
  //
  // In the worst case, we get an allocation that is one page past the properly
  // aligned address, in which case we have to unmap the previous
  // 2*size - pagesz bytes. In that case, we still have size properly aligned
  // bytes left.
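  //
  // For example, with n = 1 and (assuming) a 4096-byte page size: size is 8192,
  // so we map 20480 bytes and need 8192 bytes aligned to 16384. If the mapping
  // starts one page past a 16384-byte boundary, we unmap the leading 12288
  // bytes, and the 8192 bytes that remain are exactly the aligned buffer we need.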
  size_t size = (1 << n) * kStackMteRingbufferSizeMultiplier;
  size_t pgsize = page_size();

  size_t alloc_size = __BIONIC_ALIGN(3 * size - pgsize, pgsize);
  void* allocation_ptr =
      mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (allocation_ptr == MAP_FAILED)
    async_safe_fatal("error: failed to allocate stack mte ring buffer");
  uintptr_t allocation = reinterpret_cast<uintptr_t>(allocation_ptr);

  size_t alignment = 2 * size;
  uintptr_t aligned_allocation = __BIONIC_ALIGN(allocation, alignment);
  if (allocation != aligned_allocation) {
    munmap(reinterpret_cast<void*>(allocation), aligned_allocation - allocation);
  }
  if (aligned_allocation + size != allocation + alloc_size) {
    munmap(reinterpret_cast<void*>(aligned_allocation + size),
           (allocation + alloc_size) - (aligned_allocation + size));
  }

  const char* name;
  if (thread == nullptr) {
    name = "stack_mte_ring:main";
  } else {
    // The kernel doesn't copy the name string, but this variable will last at least as long as the
    // mapped area. We unmap the ring buffer before unmapping the rest of the thread storage.
    auto& name_buffer = thread->stack_mte_ringbuffer_vma_name_buffer;
    static_assert(arraysize(name_buffer) >= arraysize("stack_mte_ring:") + 11 + 1);
    async_safe_format_buffer(name_buffer, arraysize(name_buffer), "stack_mte_ring:%d", thread->tid);
    name = name_buffer;
  }
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<void*>(aligned_allocation), size, name);

  // We store the size, in units of kStackMteRingbufferSizeMultiplier, in the top
  // byte of the pointer, which is otherwise unused.
  return reinterpret_cast<void*>(aligned_allocation | ((1ULL << n) << 56ULL));
}

void __pthread_internal_remap_stack_with_mte() {
#if defined(__aarch64__)
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);
  // If the process already uses the memtag-stack ABI, we don't need to do anything.
  if (__libc_memtag_stack_abi) return;
  __libc_memtag_stack_abi = true;

  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    if (t->terminating) continue;
    t->bionic_tcb->tls_slot(TLS_SLOT_STACK_MTE) =
        __allocate_stack_mte_ringbuffer(0, t->is_main() ? nullptr : t);
  }
  if (!atomic_load(&__libc_globals->memtag)) return;
  if (atomic_exchange(&__libc_memtag_stack, true)) return;
  uintptr_t lo, hi;
  __find_main_stack_limits(&lo, &hi);

  if (mprotect(reinterpret_cast<void*>(lo), hi - lo,
               PROT_READ | PROT_WRITE | PROT_MTE | PROT_GROWSDOWN)) {
    async_safe_fatal("error: failed to set PROT_MTE on main thread");
  }
  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    if (t->terminating || t->is_main()) continue;
    if (mprotect(t->mmap_base_unguarded, t->mmap_size_unguarded,
                 PROT_READ | PROT_WRITE | PROT_MTE)) {
      async_safe_fatal("error: failed to set PROT_MTE on thread: %d", t->tid);
    }
  }
#endif
}

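// Runs 'func(arg)' once on every thread in the process: directly on the calling
// thread, and from a signal handler on every other thread. Returns true only if
// every invocation returned true and every thread was reached.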
bool android_run_on_all_threads(bool (*func)(void*), void* arg) {
  // Take the locks in this order to avoid inversion (pthread_create ->
  // __pthread_internal_add).
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);

  // Call the function directly for the current thread so that we don't need to worry about
  // the consequences of synchronizing with ourselves.
  if (!func(arg)) {
    return false;
  }

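  // State shared with the signal handler below. It has to live in statics
  // because the handler is a captureless lambda (so it can be used as
  // sa_sigaction): a semaphore to count completions, the function and argument
  // to run, and the combined return value.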
  static sem_t g_sem;
  if (sem_init(&g_sem, 0, 0) != 0) {
    return false;
  }

  static bool (*g_func)(void*);
  static void* g_arg;
  g_func = func;
  g_arg = arg;

  static _Atomic(bool) g_retval;
  atomic_init(&g_retval, true);

  auto handler = [](int, siginfo_t*, void*) {
    ErrnoRestorer restorer;
    if (!g_func(g_arg)) {
      atomic_store(&g_retval, false);
    }
    sem_post(&g_sem);
  };

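  // Install the handler for bionic's reserved "run on all threads" signal, with
  // all other signals blocked while it runs.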
  struct sigaction act = {}, oldact;
  act.sa_flags = SA_SIGINFO;
  act.sa_sigaction = handler;
  sigfillset(&act.sa_mask);
  if (sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &act, &oldact) != 0) {
    sem_destroy(&g_sem);
    return false;
  }

  pid_t my_pid = getpid();
  size_t num_tids = 0;
  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    // The function is called directly for the current thread above, so no need to send a signal to
    // ourselves to call it here.
    if (t == __get_thread()) continue;

    // If a thread is terminating (has blocked signals) or has already terminated, our signal will
    // never be received, so we check for that and skip such threads.
    if (atomic_load(&t->terminating)) continue;

    if (tgkill(my_pid, t->tid, BIONIC_SIGNAL_RUN_ON_ALL_THREADS) == 0) {
      ++num_tids;
    } else {
      atomic_store(&g_retval, false);
    }
  }

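  // Wait for each signalled thread to run the handler and post the semaphore.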
  for (size_t i = 0; i != num_tids; ++i) {
    if (TEMP_FAILURE_RETRY(sem_wait(&g_sem)) != 0) {
      atomic_store(&g_retval, false);
      break;
    }
  }

  sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &oldact, 0);
  sem_destroy(&g_sem);
  return atomic_load(&g_retval);
}