/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#pragma once

#include <pthread.h>
#include <stdatomic.h>

#if __has_feature(hwaddress_sanitizer)
#include <sanitizer/hwasan_interface.h>
#else
#define __hwasan_thread_enter()
#define __hwasan_thread_exit()
#endif

#include "private/bionic_elf_tls.h"
#include "private/bionic_lock.h"
#include "private/bionic_tls.h"

// Has the thread been detached by a pthread_join or pthread_detach call?
#define PTHREAD_ATTR_FLAG_DETACHED 0x00000001

// Has the thread been joined by another thread?
#define PTHREAD_ATTR_FLAG_JOINED 0x00000002

// Used for pthread_attr_setinheritsched. We need two flags for this apparent
// boolean because our historical behavior matches neither of the POSIX choices.
#define PTHREAD_ATTR_FLAG_INHERIT 0x00000004
#define PTHREAD_ATTR_FLAG_EXPLICIT 0x00000008

enum ThreadJoinState {
  THREAD_NOT_JOINED,
  THREAD_EXITED_NOT_JOINED,
  THREAD_JOINED,
  THREAD_DETACHED
};

class thread_local_dtor;

class pthread_internal_t {
 public:
  class pthread_internal_t* next;
  class pthread_internal_t* prev;

  pid_t tid;

 private:
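  // The result of getpid(), cached here to avoid a syscall; 0 means "no cached value"
  // (see get_cached_pid() below). vforked_ is set while this thread is running in a
  // vfork() child.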
  uint32_t cached_pid_ : 31;
  uint32_t vforked_ : 1;

 public:
  bool is_vforked() { return vforked_; }

  pid_t invalidate_cached_pid() {
    pid_t old_value;
    get_cached_pid(&old_value);
    set_cached_pid(0);
    return old_value;
  }

  void set_cached_pid(pid_t value) {
    cached_pid_ = value;
  }

  bool get_cached_pid(pid_t* cached_pid) {
    *cached_pid = cached_pid_;
    return (*cached_pid != 0);
  }

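  // The thread's attributes; attr.flags also carries the PTHREAD_ATTR_FLAG_* bits defined above.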
  pthread_attr_t attr;

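  // Whether the thread has exited, been joined, or been detached (see ThreadJoinState above).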
  _Atomic(ThreadJoinState) join_state;

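  // Stack of handlers registered by pthread_cleanup_push().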
  __pthread_cleanup_t* cleanup_stack;

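  // The routine and argument passed to pthread_create(), the value reported to pthread_join(),
  // and the signal mask to restore once the new thread has finished starting up.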
  void* (*start_routine)(void*);
  void* start_routine_arg;
  void* return_value;
  sigset64_t start_mask;

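  // The alternate signal stack allocated for this thread (see SIGNAL_STACK_SIZE below).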
  void* alternate_signal_stack;

  // The start address of the shadow call stack's guard region (arm64/riscv64).
  // This region is SCS_GUARD_REGION_SIZE bytes large, but only SCS_SIZE bytes
  // are actually used.
  //
  // This address is only used to deallocate the shadow call stack on thread
  // exit; the address of the stack itself is stored only in the x18 register.
  //
  // Because the protection offered by SCS relies on the secrecy of the stack
  // address, storing the address here weakens the protection, but only
  // slightly, because it is relatively easy for an attacker to discover the
  // address of the guard region anyway (e.g. it can be discovered by reference
  // to other allocations), but not the stack itself, which is <0.1% of the size
  // of the guard region.
  //
  // longjmp()/setjmp() don't store all the bits of x18, only the bottom bits
  // covered by SCS_MASK. Since longjmp()/setjmp() between different threads is
  // undefined behavior (and unsupported on Android), we can retrieve the high
  // bits of x18 from the current value in x18 --- all the jmp_buf needs to store
  // is where exactly the shadow stack pointer is in the thread's shadow stack:
  // the bottom bits of x18.
  //
  // There are at least two other options for discovering the start address of
  // the guard region on thread exit, but they are not as simple as storing in
  // TLS.
  //
  // 1) Derive it from the value of the x18 register. This is only possible in
  //    processes that do not contain legacy code that might clobber x18,
  //    therefore each process must declare early during process startup whether
  //    it might load legacy code.
  //    TODO: riscv64 has no legacy code, so we can actually go this route there!
  // 2) Mark the guard region as such using prctl(PR_SET_VMA_ANON_NAME) and
  //    discover its address by reading /proc/self/maps. One issue with this is
  //    that reading /proc/self/maps can race with allocations, so we may need
  //    code to handle retries.
  void* shadow_call_stack_guard_region;

  // A pointer to the top of the stack. This lets android_unsafe_frame_pointer_chase determine the
  // top of the stack quickly, which would otherwise require special logic for the main thread.
  uintptr_t stack_top;

  // Whether the thread is in the process of terminating (has blocked signals), or has already
  // terminated. This is used by android_run_on_all_threads() to avoid sending a signal to a thread
  // that will never receive it.
  _Atomic(bool) terminating;

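  // Used to hand off control from pthread_create() to the new thread: the new thread waits on
  // this lock until its creator has finished initializing this structure.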
  Lock startup_handshake_lock;

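  // The mapping containing the thread's stack and static TLS, including guard pages.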
  void* mmap_base;
  size_t mmap_size;

  // The location of the VMA to label as the thread's stack_and_tls.
  void* mmap_base_unguarded;
  size_t mmap_size_unguarded;
  char vma_name_buffer[32];

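  // Destructors registered for this thread's thread_local variables.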
  thread_local_dtor* thread_local_dtors;

  /*
   * The dynamic linker implements dlerror(3), which makes it hard for us to implement this
   * per-thread buffer by simply using malloc(3) and free(3).
   */
  char* current_dlerror;
#define __BIONIC_DLERROR_BUFFER_SIZE 512
  char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE];

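  // This thread's bionic_tls allocation, also reachable via TLS_SLOT_BIONIC_TLS
  // (see __get_bionic_tls() below).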
  bionic_tls* bionic_tls;

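  // Storage for this thread's errno.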
  int errno_value;

  // The last observed value of SP in a vfork child process.
  // The part of the stack between this address and the value of SP when the vfork parent process
  // regains control may have stale MTE tags and needs cleanup. This field is only meaningful while
  // the parent is waiting for the vfork child to return control by calling either exec*() or
  // exit().
  void* vfork_child_stack_bottom;
};

struct ThreadMapping {
  char* mmap_base;
  size_t mmap_size;
  char* mmap_base_unguarded;
  size_t mmap_size_unguarded;

  char* static_tls;
  char* stack_base;
  char* stack_top;
};

__LIBC_HIDDEN__ void __init_tcb(bionic_tcb* tcb, pthread_internal_t* thread);
__LIBC_HIDDEN__ void __init_tcb_stack_guard(bionic_tcb* tcb);
__LIBC_HIDDEN__ void __init_tcb_dtv(bionic_tcb* tcb);
__LIBC_HIDDEN__ void __init_bionic_tls_ptrs(bionic_tcb* tcb, bionic_tls* tls);
__LIBC_HIDDEN__ bionic_tls* __allocate_temp_bionic_tls();
__LIBC_HIDDEN__ void __free_temp_bionic_tls(bionic_tls* tls);
__LIBC_HIDDEN__ void __init_additional_stacks(pthread_internal_t*);
__LIBC_HIDDEN__ int __init_thread(pthread_internal_t* thread);
__LIBC_HIDDEN__ ThreadMapping __allocate_thread_mapping(size_t stack_size, size_t stack_guard_size);
__LIBC_HIDDEN__ void __set_stack_and_tls_vma_name(bool is_main_thread);

__LIBC_HIDDEN__ pthread_t __pthread_internal_add(pthread_internal_t* thread);
__LIBC_HIDDEN__ pthread_internal_t* __pthread_internal_find(pthread_t pthread_id, const char* caller);
__LIBC_HIDDEN__ pid_t __pthread_internal_gettid(pthread_t pthread_id, const char* caller);
__LIBC_HIDDEN__ void __pthread_internal_remove(pthread_internal_t* thread);
__LIBC_HIDDEN__ void __pthread_internal_remove_and_free(pthread_internal_t* thread);

static inline __always_inline bionic_tcb* __get_bionic_tcb() {
  return reinterpret_cast<bionic_tcb*>(&__get_tls()[MIN_TLS_SLOT]);
}

// Make __get_thread() inlined for performance reasons. See http://b/19825434.
static inline __always_inline pthread_internal_t* __get_thread() {
  return static_cast<pthread_internal_t*>(__get_tls()[TLS_SLOT_THREAD_ID]);
}

static inline __always_inline bionic_tls& __get_bionic_tls() {
  return *static_cast<bionic_tls*>(__get_tls()[TLS_SLOT_BIONIC_TLS]);
}

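// The TLS_SLOT_DTV slot stores a pointer to the TlsDtv's generation field rather than to the
// TlsDtv itself, so these helpers convert between the two representations.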
static inline __always_inline TlsDtv* __get_tcb_dtv(bionic_tcb* tcb) {
  uintptr_t dtv_slot = reinterpret_cast<uintptr_t>(tcb->tls_slot(TLS_SLOT_DTV));
  return reinterpret_cast<TlsDtv*>(dtv_slot - offsetof(TlsDtv, generation));
}

static inline void __set_tcb_dtv(bionic_tcb* tcb, TlsDtv* val) {
  tcb->tls_slot(TLS_SLOT_DTV) = &val->generation;
}

extern "C" __LIBC_HIDDEN__ int __set_tls(void* ptr);

__LIBC_HIDDEN__ void pthread_key_clean_all(void);

// Address space is precious on LP32, so use the minimum unit: one page.
// On LP64, we could use more but there's no obvious advantage to doing
// so, and the various media processes use RLIMIT_AS as a way to limit
// the amount of allocation they'll do.
#define PTHREAD_GUARD_SIZE PAGE_SIZE

// SIGSTKSZ (8KiB) is not big enough.
// An snprintf to a stack buffer of size PATH_MAX consumes ~7KiB of stack.
// On 64-bit, logging uses more than 8KiB by itself, ucontext is comically
// large on aarch64, and we have effectively infinite address space, so double
// the signal stack size.
#if defined(__LP64__)
#define SIGNAL_STACK_SIZE_WITHOUT_GUARD (32 * 1024)
#else
#define SIGNAL_STACK_SIZE_WITHOUT_GUARD (16 * 1024)
#endif

// Traditionally we gave threads a 1MiB stack. When we started
// allocating per-thread alternate signal stacks to ease debugging of
// stack overflows, we subtracted the same amount we were using there
// from the default thread stack size. This should keep memory usage
// roughly constant.
#define PTHREAD_STACK_SIZE_DEFAULT ((1 * 1024 * 1024) - SIGNAL_STACK_SIZE_WITHOUT_GUARD)

// Leave room for a guard page in the internally created signal stacks.
#define SIGNAL_STACK_SIZE (SIGNAL_STACK_SIZE_WITHOUT_GUARD + PTHREAD_GUARD_SIZE)

// Needed by fork.
__LIBC_HIDDEN__ extern void __bionic_atfork_run_prepare();
__LIBC_HIDDEN__ extern void __bionic_atfork_run_child();
__LIBC_HIDDEN__ extern void __bionic_atfork_run_parent();

extern "C" bool android_run_on_all_threads(bool (*func)(void*), void* arg);

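// Guards thread creation so that callers such as android_run_on_all_threads() can work with a
// stable set of threads.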
extern pthread_rwlock_t g_thread_creation_lock;