/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#pragma once

#include <pthread.h>
#include <stdatomic.h>

#if __has_feature(hwaddress_sanitizer)
#include <sanitizer/hwasan_interface.h>
#else
#define __hwasan_thread_enter()
#define __hwasan_thread_exit()
#endif

#include "private/bionic_elf_tls.h"
#include "private/bionic_lock.h"
#include "private/bionic_tls.h"

// Has the thread been detached by a pthread_join or pthread_detach call?
#define PTHREAD_ATTR_FLAG_DETACHED 0x00000001

// Has the thread been joined by another thread?
#define PTHREAD_ATTR_FLAG_JOINED 0x00000002

// Used for pthread_attr_setinheritsched. We need two flags for this apparent
// boolean because our historical behavior matches neither of the POSIX choices.
#define PTHREAD_ATTR_FLAG_INHERIT 0x00000004
#define PTHREAD_ATTR_FLAG_EXPLICIT 0x00000008

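// A minimal sketch (illustrative only, not the actual implementation) of how
// the two inheritsched flags above can encode three states: neither flag set
// means "unspecified, use the historical default"; otherwise exactly one flag
// records the caller's explicit choice.
//
//   int pthread_attr_setinheritsched(pthread_attr_t* attr, int flag) {
//     if (flag == PTHREAD_INHERIT_SCHED) {
//       attr->flags = (attr->flags & ~PTHREAD_ATTR_FLAG_EXPLICIT) | PTHREAD_ATTR_FLAG_INHERIT;
//     } else if (flag == PTHREAD_EXPLICIT_SCHED) {
//       attr->flags = (attr->flags & ~PTHREAD_ATTR_FLAG_INHERIT) | PTHREAD_ATTR_FLAG_EXPLICIT;
//     } else {
//       return EINVAL;
//     }
//     return 0;
//   }
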
enum ThreadJoinState {
  THREAD_NOT_JOINED,
  THREAD_EXITED_NOT_JOINED,
  THREAD_JOINED,
  THREAD_DETACHED
};

class thread_local_dtor;

class pthread_internal_t {
 public:
  class pthread_internal_t* next;
  class pthread_internal_t* prev;

  pid_t tid;

 private:
  uint32_t cached_pid_ : 31;
  uint32_t vforked_ : 1;

 public:
  bool is_vforked() { return vforked_; }

  pid_t invalidate_cached_pid() {
    pid_t old_value;
    get_cached_pid(&old_value);
    set_cached_pid(0);
    return old_value;
  }

  void set_cached_pid(pid_t value) {
    cached_pid_ = value;
  }

  bool get_cached_pid(pid_t* cached_pid) {
    *cached_pid = cached_pid_;
    return (*cached_pid != 0);
  }

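  // A minimal sketch (illustrative, not the exact bionic implementation, and
  // assuming a __getpid() syscall wrapper) of how the cached-pid accessors
  // above are meant to be used by a caller such as getpid(): return the cached
  // value when it is valid, otherwise fall back to the syscall.
  //
  //   pid_t getpid() {
  //     pthread_internal_t* self = __get_thread();
  //     pid_t cached_pid;
  //     if (self != nullptr && self->get_cached_pid(&cached_pid)) return cached_pid;
  //     return __getpid();
  //   }
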
  pthread_attr_t attr;

  _Atomic(ThreadJoinState) join_state;

  __pthread_cleanup_t* cleanup_stack;

  void* (*start_routine)(void*);
  void* start_routine_arg;
  void* return_value;
  sigset64_t start_mask;

  void* alternate_signal_stack;

  // The start address of the shadow call stack's guard region (arm64/riscv64).
  // This region is SCS_GUARD_REGION_SIZE bytes large, but only SCS_SIZE bytes
  // are actually used.
  //
  // This address is only used to deallocate the shadow call stack on thread
  // exit; the address of the stack itself is stored only in the register used
  // as the shadow stack pointer (x18 on arm64, gp on riscv64).
  //
  // Because the protection offered by SCS relies on the secrecy of the stack
  // address, storing the address here weakens the protection, but only
  // slightly, because it is relatively easy for an attacker to discover the
  // address of the guard region anyway (e.g. it can be discovered by reference
  // to other allocations), but not the stack itself, which is <0.1% of the size
  // of the guard region.
  //
  // longjmp()/setjmp() don't store all the bits of the shadow stack pointer,
  // only the bottom bits covered by SCS_MASK. Since longjmp()/setjmp() between
  // different threads is undefined behavior (and unsupported on Android), we
  // can retrieve the high bits of the shadow stack pointer from the current
  // value in the register --- all the jmp_buf needs to store is where exactly
  // the shadow stack pointer is *within* the thread's shadow stack: the bottom
  // bits of the register.
  //
  // There are at least two other options for discovering the start address of
  // the guard region on thread exit, but they are not as simple as storing it
  // in TLS:
  //
  // 1) Derive it from the current value of the shadow stack pointer. This is
  //    only possible in processes that do not contain legacy code that might
  //    clobber x18 on arm64, therefore each process must declare early during
  //    process startup whether it might load legacy code.
  //    TODO: riscv64 has no legacy code, so we can actually go this route
  //    there, but hopefully we'll actually get the Zsslpcfi extension instead.
  // 2) Mark the guard region as such using prctl(PR_SET_VMA_ANON_NAME) and
  //    discover its address by reading /proc/self/maps. One issue with this is
  //    that reading /proc/self/maps can race with allocations, so we may need
  //    code to handle retries.
  void* shadow_call_stack_guard_region;
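
  // A minimal sketch (illustrative pseudo-code, not the actual setjmp/longjmp
  // implementation) of the SCS_MASK trick described above: setjmp() saves only
  // the low bits of the shadow stack pointer, and longjmp() splices them back
  // onto the high bits of the live register value (x18 on arm64, gp on riscv64).
  //
  //   // In setjmp(): ssp is the current shadow stack pointer register.
  //   saved_scs_bits = ssp & SCS_MASK;
  //
  //   // In longjmp(): reconstruct the full pointer from the live register.
  //   ssp = (ssp & ~SCS_MASK) | saved_scs_bits;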

  // A pointer to the top of the stack. This lets android_unsafe_frame_pointer_chase determine the
  // top of the stack quickly, which would otherwise require special logic for the main thread.
  uintptr_t stack_top;

  // Whether the thread is in the process of terminating (has blocked signals), or has already
  // terminated. This is used by android_run_on_all_threads() to avoid sending a signal to a thread
  // that will never receive it.
  _Atomic(bool) terminating;

  Lock startup_handshake_lock;

  void* mmap_base;
  size_t mmap_size;

  // The location of the VMA to label as the thread's stack_and_tls.
  void* mmap_base_unguarded;
  size_t mmap_size_unguarded;
  char vma_name_buffer[32];

  thread_local_dtor* thread_local_dtors;

  /*
   * The dynamic linker implements dlerror(3), which makes it hard for us to implement this
   * per-thread buffer by simply using malloc(3) and free(3).
   */
  char* current_dlerror;
#define __BIONIC_DLERROR_BUFFER_SIZE 512
  char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE];
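
  // A minimal sketch (illustrative only; the real logic lives in the dynamic
  // linker and libc's dlerror() wrapper) of how these two fields are intended
  // to work together: dlerror_buffer holds the most recent message text, and
  // current_dlerror either points into that buffer or is null when no error
  // is pending.
  //
  //   char* dlerror() {
  //     // Returns the pending message (which points into dlerror_buffer),
  //     // or null if nothing has failed.
  //     return __get_thread()->current_dlerror;
  //   }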

  bionic_tls* bionic_tls;

  int errno_value;

  // The last observed value of SP in a vfork child process.
  // The part of the stack between this address and the value of SP when the vfork parent process
  // regains control may have stale MTE tags and needs cleanup. This field is only meaningful while
  // the parent is waiting for the vfork child to return control by calling either exec*() or
  // exit().
  void* vfork_child_stack_bottom;
};

struct ThreadMapping {
  char* mmap_base;
  size_t mmap_size;
  char* mmap_base_unguarded;
  size_t mmap_size_unguarded;

  char* static_tls;
  char* stack_base;
  char* stack_top;
};

__LIBC_HIDDEN__ void __init_tcb(bionic_tcb* tcb, pthread_internal_t* thread);
__LIBC_HIDDEN__ void __init_tcb_stack_guard(bionic_tcb* tcb);
__LIBC_HIDDEN__ void __init_tcb_dtv(bionic_tcb* tcb);
__LIBC_HIDDEN__ void __init_bionic_tls_ptrs(bionic_tcb* tcb, bionic_tls* tls);
__LIBC_HIDDEN__ bionic_tls* __allocate_temp_bionic_tls();
__LIBC_HIDDEN__ void __free_temp_bionic_tls(bionic_tls* tls);
__LIBC_HIDDEN__ void __init_additional_stacks(pthread_internal_t*);
__LIBC_HIDDEN__ int __init_thread(pthread_internal_t* thread);
__LIBC_HIDDEN__ ThreadMapping __allocate_thread_mapping(size_t stack_size, size_t stack_guard_size);
__LIBC_HIDDEN__ void __set_stack_and_tls_vma_name(bool is_main_thread);

__LIBC_HIDDEN__ pthread_t __pthread_internal_add(pthread_internal_t* thread);
__LIBC_HIDDEN__ pthread_internal_t* __pthread_internal_find(pthread_t pthread_id, const char* caller);
__LIBC_HIDDEN__ pid_t __pthread_internal_gettid(pthread_t pthread_id, const char* caller);
__LIBC_HIDDEN__ void __pthread_internal_remove(pthread_internal_t* thread);
__LIBC_HIDDEN__ void __pthread_internal_remove_and_free(pthread_internal_t* thread);

static inline __always_inline bionic_tcb* __get_bionic_tcb() {
  return reinterpret_cast<bionic_tcb*>(&__get_tls()[MIN_TLS_SLOT]);
}

// __get_thread() is inlined for performance. See http://b/19825434.
static inline __always_inline pthread_internal_t* __get_thread() {
  return static_cast<pthread_internal_t*>(__get_tls()[TLS_SLOT_THREAD_ID]);
}
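
// A minimal sketch (illustrative; assuming this is how the errno_value field
// above is consumed) of how a per-thread field can be reached through
// __get_thread(), using errno's per-thread storage as the example:
//
//   int* __errno() {
//     return &__get_thread()->errno_value;
//   }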

static inline __always_inline bionic_tls& __get_bionic_tls() {
  return *static_cast<bionic_tls*>(__get_tls()[TLS_SLOT_BIONIC_TLS]);
}

// TLS_SLOT_DTV points at the generation field inside the TlsDtv, not at the
// start of the struct, so recover the struct address by subtracting that
// field's offset.
static inline __always_inline TlsDtv* __get_tcb_dtv(bionic_tcb* tcb) {
  uintptr_t dtv_slot = reinterpret_cast<uintptr_t>(tcb->tls_slot(TLS_SLOT_DTV));
  return reinterpret_cast<TlsDtv*>(dtv_slot - offsetof(TlsDtv, generation));
}

static inline void __set_tcb_dtv(bionic_tcb* tcb, TlsDtv* val) {
  tcb->tls_slot(TLS_SLOT_DTV) = &val->generation;
}

extern "C" __LIBC_HIDDEN__ int __set_tls(void* ptr);

__LIBC_HIDDEN__ void pthread_key_clean_all(void);

// Address space is precious on LP32, so use the minimum unit: one page.
// On LP64, we could use more but there's no obvious advantage to doing
// so, and the various media processes use RLIMIT_AS as a way to limit
// the amount of allocation they'll do.
#define PTHREAD_GUARD_SIZE PAGE_SIZE

// SIGSTKSZ (8KiB) is not big enough.
// An snprintf to a stack buffer of size PATH_MAX consumes ~7KiB of stack.
// On 64-bit, logging uses more than 8KiB by itself, ucontext is comically
// large on aarch64, and we have effectively infinite address space, so double
// the signal stack size.
#if defined(__LP64__)
#define SIGNAL_STACK_SIZE_WITHOUT_GUARD (32 * 1024)
#else
#define SIGNAL_STACK_SIZE_WITHOUT_GUARD (16 * 1024)
#endif

// Traditionally we gave threads a 1MiB stack. When we started
// allocating per-thread alternate signal stacks to ease debugging of
// stack overflows, we subtracted the same amount we were using there
// from the default thread stack size. This should keep memory usage
// roughly constant.
#define PTHREAD_STACK_SIZE_DEFAULT ((1 * 1024 * 1024) - SIGNAL_STACK_SIZE_WITHOUT_GUARD)
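// For example, with these definitions the default stack size works out to
// 1MiB - 32KiB = 992KiB on LP64 and 1MiB - 16KiB = 1008KiB on LP32.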

// Leave room for a guard page in the internally created signal stacks.
#define SIGNAL_STACK_SIZE (SIGNAL_STACK_SIZE_WITHOUT_GUARD + PTHREAD_GUARD_SIZE)

// Needed by fork.
__LIBC_HIDDEN__ extern void __bionic_atfork_run_prepare();
__LIBC_HIDDEN__ extern void __bionic_atfork_run_child();
__LIBC_HIDDEN__ extern void __bionic_atfork_run_parent();

extern "C" bool android_run_on_all_threads(bool (*func)(void*), void* arg);

extern pthread_rwlock_t g_thread_creation_lock;