/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "private/bionic_elf_tls.h"

#include <async_safe/log.h>
#include <string.h>
#include <sys/param.h>
#include <unistd.h>

#include "private/ScopedRWLock.h"
#include "private/bionic_globals.h"
#include "private/bionic_macros.h"
#include "private/bionic_tls.h"

// Search for a TLS segment in the given phdr table. Returns true if it has a
// TLS segment and false otherwise.
bool __bionic_get_tls_segment(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                              ElfW(Addr) load_bias, TlsSegment* out) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_TLS) {
      *out = TlsSegment {
        phdr.p_memsz,
        phdr.p_align,
        reinterpret_cast<void*>(load_bias + phdr.p_vaddr),
        phdr.p_filesz,
      };
      return true;
    }
  }
  return false;
}
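
// Illustrative caller sketch (not from bionic itself): a loader that already
// has a module's mapped program headers and load bias could probe for PT_TLS
// roughly like this:
//
//   TlsSegment seg;
//   if (__bionic_get_tls_segment(phdr_table, phdr_count, load_bias, &seg)) {
//     // seg.size, seg.alignment, seg.init_ptr, and seg.init_size now describe
//     // the module's TLS template (p_memsz, p_align, image address, p_filesz).
//   }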

// Return true if the alignment of a TLS segment is a valid power-of-two. Also
// cap the alignment if it's too high.
bool __bionic_check_tls_alignment(size_t* alignment) {
  // N.B. The size does not need to be a multiple of the alignment. With
  // ld.bfd (or after using binutils' strip), the TLS segment's size isn't
  // rounded up.
  if (*alignment == 0 || !powerof2(*alignment)) {
    return false;
  }
  // Bionic only respects TLS alignment up to one page.
  *alignment = MIN(*alignment, PAGE_SIZE);
  return true;
}
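
// Illustrative behavior, assuming a 4096-byte PAGE_SIZE:
//
//   size_t a = 0;     __bionic_check_tls_alignment(&a);  // false (zero)
//   size_t b = 48;    __bionic_check_tls_alignment(&b);  // false (not a power of two)
//   size_t c = 64;    __bionic_check_tls_alignment(&c);  // true, c stays 64
//   size_t d = 16384; __bionic_check_tls_alignment(&d);  // true, d is capped to 4096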

size_t StaticTlsLayout::offset_thread_pointer() const {
  return offset_bionic_tcb_ + (-MIN_TLS_SLOT * sizeof(void*));
}
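
// For example, if MIN_TLS_SLOT were -1 on a 64-bit target, the thread pointer
// would sit sizeof(void*) == 8 bytes past offset_bionic_tcb_; on x86, where
// MIN_TLS_SLOT is 0 (see the static_assert below), the two offsets coincide.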

// Reserves space for the Bionic TCB and the executable's TLS segment. Returns
// the offset of the executable's TLS segment.
size_t StaticTlsLayout::reserve_exe_segment_and_tcb(const TlsSegment* exe_segment,
                                                    const char* progname __attribute__((unused))) {
  // Special case: if the executable has no TLS segment, then just allocate a
  // TCB and skip the minimum alignment check on ARM.
  if (exe_segment == nullptr) {
    offset_bionic_tcb_ = reserve_type<bionic_tcb>();
    return 0;
  }

#if defined(__arm__) || defined(__aarch64__)

  // First reserve enough space for the TCB before the executable segment.
  reserve(sizeof(bionic_tcb), 1);

  // Then reserve the segment itself.
  const size_t result = reserve(exe_segment->size, exe_segment->alignment);

  // The variant 1 ABI that ARM linkers follow specifies a 2-word TCB between
  // the thread pointer and the start of the executable's TLS segment, but both
  // the thread pointer and the TLS segment are aligned appropriately for the
  // TLS segment. Calculate the distance between the thread pointer and the
  // EXE's segment.
  const size_t exe_tpoff = __BIONIC_ALIGN(sizeof(void*) * 2, exe_segment->alignment);

  const size_t min_bionic_alignment = BIONIC_ROUND_UP_POWER_OF_2(MAX_TLS_SLOT) * sizeof(void*);
  if (exe_tpoff < min_bionic_alignment) {
    async_safe_fatal("error: \"%s\": executable's TLS segment is underaligned: "
                     "alignment is %zu, needs to be at least %zu for %s Bionic",
                     progname, exe_segment->alignment, min_bionic_alignment,
                     (sizeof(void*) == 4 ? "ARM" : "ARM64"));
  }

  offset_bionic_tcb_ = result - exe_tpoff - (-MIN_TLS_SLOT * sizeof(void*));
  return result;

#elif defined(__i386__) || defined(__x86_64__)

  // x86 uses variant 2 TLS layout. The executable's segment is located just
  // before the TCB.
  static_assert(MIN_TLS_SLOT == 0, "First slot of bionic_tcb must be slot #0 on x86");
  const size_t exe_size = round_up_with_overflow_check(exe_segment->size, exe_segment->alignment);
  reserve(exe_size, 1);
  const size_t max_align = MAX(alignof(bionic_tcb), exe_segment->alignment);
  offset_bionic_tcb_ = reserve(sizeof(bionic_tcb), max_align);
  return offset_bionic_tcb_ - exe_size;

#else
#error "Unrecognized architecture"
#endif
}
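
// Worked example for the x86 (variant 2) path, with illustrative numbers only:
// given an executable TLS segment of size 20 and alignment 16, and assuming an
// empty layout (offset_ == 0) with alignof(bionic_tcb) <= 16:
//   exe_size = round_up_with_overflow_check(20, 16)       -> 32
//   reserve(32, 1)                                         -> returns 0, offset_ = 32
//   offset_bionic_tcb_ = reserve(sizeof(bionic_tcb), 16)   -> 32
//   return 32 - 32 = 0, i.e. the executable's segment is placed exe_size bytes
//   before the TCB, matching the variant 2 layout described above.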

void StaticTlsLayout::reserve_bionic_tls() {
  offset_bionic_tls_ = reserve_type<bionic_tls>();
}

void StaticTlsLayout::finish_layout() {
  // Round the offset up to the alignment.
  offset_ = round_up_with_overflow_check(offset_, alignment_);

  if (overflowed_) {
    async_safe_fatal("error: TLS segments in static TLS overflowed");
  }
}

// The size is not required to be a multiple of the alignment. The alignment
// must be a positive power-of-two.
size_t StaticTlsLayout::reserve(size_t size, size_t alignment) {
  offset_ = round_up_with_overflow_check(offset_, alignment);
  const size_t result = offset_;
  if (__builtin_add_overflow(offset_, size, &offset_)) overflowed_ = true;
  alignment_ = MAX(alignment_, alignment);
  return result;
}
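
// Illustrative trace: if offset_ is currently 12, reserve(8, 16) rounds
// offset_ up to 16, returns 16 as the new region's offset, advances offset_
// to 24, and raises alignment_ to at least 16.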

size_t StaticTlsLayout::round_up_with_overflow_check(size_t value, size_t alignment) {
  const size_t old_value = value;
  value = __BIONIC_ALIGN(value, alignment);
  if (value < old_value) overflowed_ = true;
  return value;
}
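
// For example, rounding a value near SIZE_MAX (say SIZE_MAX - 3) up to a
// 16-byte alignment wraps past zero; the wrapped result is smaller than the
// original value, so overflowed_ is set (and finish_layout() later aborts).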

// Copy each TLS module's initialization image into a newly-allocated block of
// static TLS memory. To reduce dirty pages, this function only writes to pages
// within the static TLS that need initialization. The memory should already be
// zero-initialized on entry.
void __init_static_tls(void* static_tls) {
  // The part of the table we care about (i.e. static TLS modules) never changes
  // after startup, but we still need the mutex because the table could grow,
  // moving the initial part. If this locking is too slow, we can duplicate the
  // static part of the table.
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  ScopedReadLock locker(&modules.rwlock);

  for (size_t i = 0; i < modules.module_count; ++i) {
    TlsModule& module = modules.module_table[i];
    if (module.static_offset == SIZE_MAX) {
      // All of the static modules come before all of the dynamic modules, so
      // once we see the first dynamic module, we're done.
      break;
    }
    if (module.segment.init_size == 0) {
      // Skip the memcpy call for TLS segments with no initializer, which is
      // common.
      continue;
    }
    memcpy(static_cast<char*>(static_tls) + module.static_offset,
           module.segment.init_ptr,
           module.segment.init_size);
  }
}
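
// Illustrative example of the loop above: with a module table of
//   [0] static_offset = 192,      segment.init_size = 16  -> image copied
//   [1] static_offset = 256,      segment.init_size = 0   -> skipped, nothing to copy
//   [2] static_offset = SIZE_MAX  (a dynamically-loaded module) -> loop stops
// only module 0's initialization image is memcpy'd into the new static TLS
// block; everything else stays zero-initialized.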