/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/random.h>
#include <unistd.h>

#include "pthread_internal.h"

#include <async_safe/log.h>

#include "platform/bionic/macros.h"
#include "platform/bionic/mte.h"
#include "platform/bionic/page.h"
#include "private/ErrnoRestorer.h"
#include "private/ScopedRWLock.h"
#include "private/bionic_constants.h"
#include "private/bionic_defs.h"
#include "private/bionic_globals.h"
#include "private/bionic_ssp.h"
#include "private/bionic_systrace.h"
#include "private/bionic_tls.h"

// x86 uses segment descriptors rather than a direct pointer to TLS.
#if defined(__i386__)
#include <asm/ldt.h>
void __init_user_desc(struct user_desc*, bool, void*);
#endif

__attribute__((no_stack_protector))
void __init_tcb_stack_guard(bionic_tcb* tcb) {
  // GCC looks in the TLS for the stack guard on x86, so copy it there from our global.
  tcb->tls_slot(TLS_SLOT_STACK_GUARD) = reinterpret_cast<void*>(__stack_chk_guard);
}

void __init_bionic_tls_ptrs(bionic_tcb* tcb, bionic_tls* tls) {
  tcb->thread()->bionic_tls = tls;
  tcb->tls_slot(TLS_SLOT_BIONIC_TLS) = tls;
}

// Allocate a temporary bionic_tls that the dynamic linker's main thread can
// use while it's loading the initial set of ELF modules.
bionic_tls* __allocate_temp_bionic_tls() {
  size_t allocation_size = __BIONIC_ALIGN(sizeof(bionic_tls), page_size());
  void* allocation = mmap(nullptr, allocation_size,
                          PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS,
                          -1, 0);
  if (allocation == MAP_FAILED) {
    // Avoid strerror because it might need bionic_tls.
    async_safe_fatal("failed to allocate bionic_tls: error %d", errno);
  }
  return static_cast<bionic_tls*>(allocation);
}

void __free_temp_bionic_tls(bionic_tls* tls) {
  munmap(tls, __BIONIC_ALIGN(sizeof(bionic_tls), page_size()));
}

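// Note: a signal handler only runs on the alternate stack set up below if it
// was installed with the SA_ONSTACK flag, e.g.:
//
//   struct sigaction sa = {};
//   sa.sa_sigaction = crash_handler;  // illustrative handler name
//   sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
//   sigaction(SIGSEGV, &sa, nullptr);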
static void __init_alternate_signal_stack(pthread_internal_t* thread) {
  // Create and set an alternate signal stack.
  int prot = PROT_READ | PROT_WRITE;
#ifdef __aarch64__
  if (atomic_load(&__libc_globals->memtag_stack)) {
    prot |= PROT_MTE;
  }
#endif
  void* stack_base = mmap(nullptr, SIGNAL_STACK_SIZE, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (stack_base != MAP_FAILED) {
    // Create a guard to catch stack overflows in signal handlers.
    if (mprotect(stack_base, PTHREAD_GUARD_SIZE, PROT_NONE) == -1) {
      munmap(stack_base, SIGNAL_STACK_SIZE);
      return;
    }
    stack_t ss;
    ss.ss_sp = reinterpret_cast<uint8_t*>(stack_base) + PTHREAD_GUARD_SIZE;
    ss.ss_size = SIGNAL_STACK_SIZE - PTHREAD_GUARD_SIZE;
    ss.ss_flags = 0;
    sigaltstack(&ss, nullptr);
    thread->alternate_signal_stack = stack_base;

    // We can only use a string with static storage duration for the mapped region name, as the
    // Android kernel uses the string pointer directly when dumping /proc/pid/maps.
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ss.ss_sp, ss.ss_size, "thread signal stack");
  }
}

static void __init_shadow_call_stack(pthread_internal_t* thread __unused) {
#if defined(__aarch64__) || defined(__riscv)
  // Allocate the stack and the guard region.
  char* scs_guard_region = reinterpret_cast<char*>(
      mmap(nullptr, SCS_GUARD_REGION_SIZE, 0, MAP_PRIVATE | MAP_ANON, -1, 0));
  thread->shadow_call_stack_guard_region = scs_guard_region;

  // The address is aligned to SCS_SIZE so that we only need to store the lower log2(SCS_SIZE) bits
  // in jmp_buf. See the SCS commentary in pthread_internal.h for more detail.
  char* scs_aligned_guard_region =
      reinterpret_cast<char*>(align_up(reinterpret_cast<uintptr_t>(scs_guard_region), SCS_SIZE));

  // We need to ensure that [scs_offset,scs_offset+SCS_SIZE) is in the guard region and that there
  // is at least one unmapped page after the shadow call stack (to catch stack overflows). We can't
  // use arc4random_uniform in init because /dev/urandom might not have been created yet.
  size_t scs_offset =
      (getpid() == 1) ? 0 : (arc4random_uniform(SCS_GUARD_REGION_SIZE / SCS_SIZE - 1) * SCS_SIZE);
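  // To spell out the arithmetic: the guard region holds N = SCS_GUARD_REGION_SIZE / SCS_SIZE
  // SCS_SIZE-aligned slots, and arc4random_uniform(N - 1) picks one of the first N - 1, so at
  // least one PROT_NONE page always remains above the chosen stack and an overflow faults
  // instead of silently corrupting a neighboring mapping.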

  // Make the stack read-write, and store its address in the register we're using as the shadow
  // stack pointer. This is deliberately the only place where the address is stored.
  char* scs = scs_aligned_guard_region + scs_offset;
  mprotect(scs, SCS_SIZE, PROT_READ | PROT_WRITE);
#if defined(__aarch64__)
  __asm__ __volatile__("mov x18, %0" ::"r"(scs));
#elif defined(__riscv)
  __asm__ __volatile__("mv x3, %0" ::"r"(scs));
#endif
#endif
}

void __init_additional_stacks(pthread_internal_t* thread) {
  __init_alternate_signal_stack(thread);
  __init_shadow_call_stack(thread);
}

int __init_thread(pthread_internal_t* thread) {
  thread->cleanup_stack = nullptr;

  if (__predict_true((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) == 0)) {
    atomic_init(&thread->join_state, THREAD_NOT_JOINED);
  } else {
    atomic_init(&thread->join_state, THREAD_DETACHED);
  }

  // Set the scheduling policy/priority of the thread if necessary.
  bool need_set = true;
  int policy;
  sched_param param;
  if ((thread->attr.flags & PTHREAD_ATTR_FLAG_INHERIT) != 0) {
    // Unless the parent has SCHED_RESET_ON_FORK set, we've already inherited from the parent.
    policy = sched_getscheduler(0);
    need_set = ((policy & SCHED_RESET_ON_FORK) != 0);
    if (need_set) {
      if (policy == -1) {
        async_safe_format_log(ANDROID_LOG_WARN, "libc",
                              "pthread_create sched_getscheduler failed: %s", strerror(errno));
        return errno;
      }
      if (sched_getparam(0, &param) == -1) {
        async_safe_format_log(ANDROID_LOG_WARN, "libc",
                              "pthread_create sched_getparam failed: %s", strerror(errno));
        return errno;
      }
    }
  } else {
    policy = thread->attr.sched_policy;
    param.sched_priority = thread->attr.sched_priority;
  }
  // Backwards compatibility: before P, Android didn't have pthread_attr_setinheritsched,
  // and our behavior was neither of the POSIX behaviors.
  if ((thread->attr.flags & (PTHREAD_ATTR_FLAG_INHERIT|PTHREAD_ATTR_FLAG_EXPLICIT)) == 0) {
    need_set = (thread->attr.sched_policy != SCHED_NORMAL);
  }
  if (need_set) {
    if (sched_setscheduler(thread->tid, policy, &param) == -1) {
      async_safe_format_log(ANDROID_LOG_WARN, "libc",
                            "pthread_create sched_setscheduler(%d, {%d}) call failed: %s", policy,
                            param.sched_priority, strerror(errno));
#if defined(__LP64__)
      // For backwards compatibility reasons, we only report failures on 64-bit devices.
      return errno;
#endif
    }
  }

  return 0;
}

// Allocate a thread's primary mapping. This mapping includes static TLS and
// optionally a stack. Static TLS includes ELF TLS segments and the bionic_tls
// struct.
//
// The stack_guard_size must be a multiple of the page_size().
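//
// A sketch of the resulting layout, low to high addresses (derived from the
// allocation order below):
//
//   mmap_base ->   +-------------------+
//                  |    stack guard    |  PROT_NONE, stack_guard_size bytes
//   stack_base ->  +-------------------+
//                  |       stack       |  grows down from stack_top
//   stack_top ->   +-------------------+
//                  |    static TLS     |  layout.size() bytes; static_tls points here
//                  +-------------------+
//                  |    guard page     |  PROT_NONE, PTHREAD_GUARD_SIZE bytes
//                  +-------------------+  <- mmap_base + mmap_size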
ThreadMapping __allocate_thread_mapping(size_t stack_size, size_t stack_guard_size) {
  const StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;

  // Allocate in order: stack guard, stack, static TLS, guard page.
  size_t mmap_size;
  if (__builtin_add_overflow(stack_size, stack_guard_size, &mmap_size)) return {};
  if (__builtin_add_overflow(mmap_size, layout.size(), &mmap_size)) return {};
  if (__builtin_add_overflow(mmap_size, PTHREAD_GUARD_SIZE, &mmap_size)) return {};

  // Align the result to a page size.
  const size_t unaligned_size = mmap_size;
  mmap_size = __BIONIC_ALIGN(mmap_size, page_size());
  if (mmap_size < unaligned_size) return {};

  // Create a new private anonymous map. Make the entire mapping PROT_NONE, then carve out a
  // read+write area in the middle.
  const int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
  char* const space = static_cast<char*>(mmap(nullptr, mmap_size, PROT_NONE, flags, -1, 0));
  if (space == MAP_FAILED) {
    async_safe_format_log(ANDROID_LOG_WARN,
                          "libc",
                          "pthread_create failed: couldn't allocate %zu-byte mapped space: %s",
                          mmap_size, strerror(errno));
    return {};
  }
  const size_t writable_size = mmap_size - stack_guard_size - PTHREAD_GUARD_SIZE;
  int prot = PROT_READ | PROT_WRITE;
  const char* prot_str = "R+W";
#ifdef __aarch64__
  if (atomic_load(&__libc_globals->memtag_stack)) {
    prot |= PROT_MTE;
    prot_str = "R+W+MTE";
  }
#endif
  if (mprotect(space + stack_guard_size, writable_size, prot) != 0) {
    async_safe_format_log(
        ANDROID_LOG_WARN, "libc",
        "pthread_create failed: couldn't mprotect %s %zu-byte thread mapping region: %s", prot_str,
        writable_size, strerror(errno));
    munmap(space, mmap_size);
    return {};
  }

  ThreadMapping result = {};
  result.mmap_base = space;
  result.mmap_size = mmap_size;
  result.mmap_base_unguarded = space + stack_guard_size;
  result.mmap_size_unguarded = mmap_size - stack_guard_size - PTHREAD_GUARD_SIZE;
  result.static_tls = space + mmap_size - PTHREAD_GUARD_SIZE - layout.size();
  result.stack_base = space;
  result.stack_top = result.static_tls;
  return result;
}

static int __allocate_thread(pthread_attr_t* attr, bionic_tcb** tcbp, void** child_stack) {
  ThreadMapping mapping;
  char* stack_top;
  bool stack_clean = false;

  if (attr->stack_base == nullptr) {
    // The caller didn't provide a stack, so allocate one.

    // Make sure the guard size is a multiple of page_size().
    const size_t unaligned_guard_size = attr->guard_size;
    attr->guard_size = __BIONIC_ALIGN(attr->guard_size, page_size());
    if (attr->guard_size < unaligned_guard_size) return EAGAIN;

    mapping = __allocate_thread_mapping(attr->stack_size, attr->guard_size);
    if (mapping.mmap_base == nullptr) return EAGAIN;

    stack_top = mapping.stack_top;
    attr->stack_base = mapping.stack_base;
    stack_clean = true;
  } else {
    mapping = __allocate_thread_mapping(0, PTHREAD_GUARD_SIZE);
    if (mapping.mmap_base == nullptr) return EAGAIN;

    stack_top = static_cast<char*>(attr->stack_base) + attr->stack_size;
  }

  // Carve out space from the stack for the thread's pthread_internal_t. This
  // memory isn't counted in pthread_attr_getstacksize.

  // To safely access the pthread_internal_t and thread stack, we need to find a 16-byte aligned boundary.
  stack_top = align_down(stack_top - sizeof(pthread_internal_t), 16);

  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);
  if (!stack_clean) {
    // If the thread's memory wasn't allocated by mmap(), it may not have been cleared to zero.
    // So assume the worst and zero it.
    memset(thread, 0, sizeof(pthread_internal_t));
  }

  // Locate static TLS structures within the mapped region.
  const StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
  auto tcb = reinterpret_cast<bionic_tcb*>(mapping.static_tls + layout.offset_bionic_tcb());
  auto tls = reinterpret_cast<bionic_tls*>(mapping.static_tls + layout.offset_bionic_tls());

  // Initialize TLS memory.
  __init_static_tls(mapping.static_tls);
  __init_tcb(tcb, thread);
  __init_tcb_dtv(tcb);
  __init_tcb_stack_guard(tcb);
  __init_bionic_tls_ptrs(tcb, tls);

  attr->stack_size = stack_top - static_cast<char*>(attr->stack_base);
  thread->attr = *attr;
  thread->mmap_base = mapping.mmap_base;
  thread->mmap_size = mapping.mmap_size;
  thread->mmap_base_unguarded = mapping.mmap_base_unguarded;
  thread->mmap_size_unguarded = mapping.mmap_size_unguarded;
  thread->stack_top = reinterpret_cast<uintptr_t>(stack_top);

  *tcbp = tcb;
  *child_stack = stack_top;
  return 0;
}

void __set_stack_and_tls_vma_name(bool is_main_thread) {
  // Name the thread's stack-and-tls area to help with debugging. This mapped area also includes
  // static TLS data, which is typically a few pages (e.g. bionic_tls).
  pthread_internal_t* thread = __get_thread();
  const char* name;
  if (is_main_thread) {
    name = "stack_and_tls:main";
  } else {
    // The kernel doesn't copy the name string, but this variable will last at least as long as the
    // mapped area. The mapped area's VMAs are unmapped with a single call to munmap.
    auto& name_buffer = thread->vma_name_buffer;
    static_assert(arraysize(name_buffer) >= arraysize("stack_and_tls:") + 11 + 1);
    async_safe_format_buffer(name_buffer, arraysize(name_buffer), "stack_and_tls:%d", thread->tid);
    name = name_buffer;
  }
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, thread->mmap_base_unguarded, thread->mmap_size_unguarded,
        name);
}

extern "C" int __rt_sigprocmask(int, const sigset64_t*, sigset64_t*, size_t);

__attribute__((no_sanitize("hwaddress")))
#ifdef __aarch64__
// This function doesn't return, but it does appear in stack traces. Avoid using return PAC in this
// function because we may end up resetting IA, which may confuse unwinders due to mismatching keys.
__attribute__((target("branch-protection=bti")))
#endif
static int __pthread_start(void* arg) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(arg);

  __hwasan_thread_enter();

  // Wait for our creating thread to release us. This lets it have time to
  // notify gdb about this thread before we start doing anything.
  // This also provides the memory barrier needed to ensure that all memory
  // accesses previously made by the creating thread are visible to us.
  thread->startup_handshake_lock.lock();

  __set_stack_and_tls_vma_name(false);
  __init_additional_stacks(thread);
  __rt_sigprocmask(SIG_SETMASK, &thread->start_mask, nullptr, sizeof(thread->start_mask));
#ifdef __aarch64__
  // Chrome's sandbox prevents this prctl, so only reset IA if the target SDK level is high enough.
  // Furthermore, processes loaded from vendor partitions may have their own sandboxes that would
  // reject the prctl. Because no devices launched with PAC enabled before S, we can avoid issues on
  // upgrading devices by checking for PAC support before issuing the prctl.
  static const bool pac_supported = getauxval(AT_HWCAP) & HWCAP_PACA;
  if (pac_supported && android_get_application_target_sdk_version() >= __ANDROID_API_S__) {
    prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0);
  }
#endif

  void* result = thread->start_routine(thread->start_routine_arg);
  pthread_exit(result);

  return 0;
}

// A no-op start routine for pthread_create failures where we've created a thread but aren't
// going to run user code on it. We swap out the user's start routine for this and take advantage
// of the regular thread teardown to free up resources.
static void* __do_nothing(void*) {
  return nullptr;
}

pthread_rwlock_t g_thread_creation_lock = PTHREAD_RWLOCK_INITIALIZER;

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
                   void* (*start_routine)(void*), void* arg) {
  ErrnoRestorer errno_restorer;

  pthread_attr_t thread_attr;
  ScopedTrace trace("pthread_create");
  if (attr == nullptr) {
    pthread_attr_init(&thread_attr);
  } else {
    thread_attr = *attr;
    attr = nullptr; // Prevent misuse below.
  }

  bionic_tcb* tcb = nullptr;
  void* child_stack = nullptr;
  int result = __allocate_thread(&thread_attr, &tcb, &child_stack);
  if (result != 0) {
    return result;
  }

  pthread_internal_t* thread = tcb->thread();

  // Create a lock for the thread to wait on once it starts so we can keep
  // it from doing anything until after we notify the debugger about it
  //
  // This also provides the memory barrier we need to ensure that all
  // memory accesses previously performed by this thread are visible to
  // the new thread.
  thread->startup_handshake_lock.init(false);
  thread->startup_handshake_lock.lock();

  thread->start_routine = start_routine;
  thread->start_routine_arg = arg;

  thread->set_cached_pid(getpid());

  int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM |
              CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
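  // CLONE_PARENT_SETTID and CLONE_CHILD_CLEARTID both point at thread->tid below: the kernel
  // stores the new tid there before the parent resumes, then clears it and wakes any futex
  // waiters when the thread exits, which is what lets pthread_join() observe thread exit.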
  void* tls = &tcb->tls_slot(0);
#if defined(__i386__)
  // On x86 (but not x86-64), CLONE_SETTLS takes a pointer to a struct user_desc rather than
  // a pointer to the TLS itself.
  user_desc tls_descriptor;
  __init_user_desc(&tls_descriptor, false, tls);
  tls = &tls_descriptor;
#endif

  ScopedReadLock locker(&g_thread_creation_lock);

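  // Block all signals until the child has finished setting up: the child inherits the signal
  // mask across clone(), and __pthread_start restores the original mask (saved into
  // thread->start_mask here) once its setup is complete.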
  sigset64_t block_all_mask;
  sigfillset64(&block_all_mask);
  __rt_sigprocmask(SIG_SETMASK, &block_all_mask, &thread->start_mask, sizeof(thread->start_mask));
  int rc = clone(__pthread_start, child_stack, flags, thread, &(thread->tid), tls, &(thread->tid));
  __rt_sigprocmask(SIG_SETMASK, &thread->start_mask, nullptr, sizeof(thread->start_mask));
  if (rc == -1) {
    int clone_errno = errno;
    // We don't have to unlock the mutex at all because clone(2) failed so there's no child waiting to
    // be unblocked, but we're about to unmap the memory the mutex is stored in, so this serves as a
    // reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
    thread->startup_handshake_lock.unlock();
    if (thread->mmap_size != 0) {
      munmap(thread->mmap_base, thread->mmap_size);
    }
    async_safe_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: clone failed: %s",
                          strerror(clone_errno));
    return clone_errno;
  }

  int init_errno = __init_thread(thread);
  if (init_errno != 0) {
    // Mark the thread detached and replace its start_routine with a no-op.
    // Letting the thread run is the easiest way to clean up its resources.
    atomic_store(&thread->join_state, THREAD_DETACHED);
    __pthread_internal_add(thread);
    thread->start_routine = __do_nothing;
    thread->startup_handshake_lock.unlock();
    return init_errno;
  }

  // Publish the pthread_t and unlock the mutex to let the new thread start running.
  *thread_out = __pthread_internal_add(thread);
  thread->startup_handshake_lock.unlock();

  return 0;
}