blob: 19efff291c8e8e128f837cee8beae6b26c651098 [file] [log] [blame]
Ryan Prichard45d13492019-01-03 02:51:30 -08001/*
2 * Copyright (C) 2019 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include "private/bionic_elf_tls.h"
30
Ryan Prichard48097552019-01-06 18:24:10 -080031#include <async_safe/log.h>
Ryan Prichard361c1b42019-01-15 13:45:27 -080032#include <string.h>
Ryan Prichard45d13492019-01-03 02:51:30 -080033#include <sys/param.h>
Ryan Prichard48097552019-01-06 18:24:10 -080034#include <unistd.h>
Ryan Prichard45d13492019-01-03 02:51:30 -080035
Ryan Prichard361c1b42019-01-15 13:45:27 -080036#include "private/ScopedRWLock.h"
37#include "private/bionic_globals.h"
Ryan Prichard45d13492019-01-03 02:51:30 -080038#include "private/bionic_macros.h"
39#include "private/bionic_tls.h"
40
Ryan Prichard48097552019-01-06 18:24:10 -080041// Search for a TLS segment in the given phdr table. Returns true if it has a
42// TLS segment and false otherwise.
43bool __bionic_get_tls_segment(const ElfW(Phdr)* phdr_table, size_t phdr_count,
44 ElfW(Addr) load_bias, const char* mod_name,
45 TlsSegment* out) {
46 for (size_t i = 0; i < phdr_count; ++i) {
47 const ElfW(Phdr)& phdr = phdr_table[i];
48 if (phdr.p_type == PT_TLS) {
49 // N.B. The size does not need to be a multiple of the alignment. With
50 // ld.bfd (or after using binutils' strip), the TLS segment's size isn't
51 // rounded up.
52 size_t alignment = phdr.p_align;
53 if (alignment == 0 || !powerof2(alignment)) {
54 async_safe_fatal("error: \"%s\": TLS segment alignment is not a power of 2: %zu",
55 mod_name, alignment);
56 }
57 // Bionic only respects TLS alignment up to one page.
58 alignment = MIN(alignment, PAGE_SIZE);
59 *out = TlsSegment {
60 phdr.p_memsz,
61 alignment,
62 reinterpret_cast<void*>(load_bias + phdr.p_vaddr),
63 phdr.p_filesz,
64 };
65 return true;
66 }
67 }
68 return false;
69}
70
// Reserves space for the Bionic TCB and the executable's TLS segment. Returns
// the offset of the executable's TLS segment within static TLS.
//
// Layout differs by architecture: on arm/arm64 (TLS variant 1) the TCB comes
// before the executable's segment; on x86/x86-64 (TLS variant 2) the
// executable's segment comes immediately before the TCB.
size_t StaticTlsLayout::reserve_exe_segment_and_tcb(const TlsSegment* exe_segment,
                                                    const char* progname __attribute__((unused))) {
  // Special case: if the executable has no TLS segment, then just allocate a
  // TCB and skip the minimum alignment check on ARM.
  if (exe_segment == nullptr) {
    offset_bionic_tcb_ = reserve_type<bionic_tcb>();
    return 0;
  }

#if defined(__arm__) || defined(__aarch64__)

  // First reserve enough space for the TCB before the executable segment.
  reserve(sizeof(bionic_tcb), 1);

  // Then reserve the segment itself. `result` is the segment's offset within
  // static TLS, which is also this function's return value.
  const size_t result = reserve(exe_segment->size, exe_segment->alignment);

  // The variant 1 ABI that ARM linkers follow specifies a 2-word TCB between
  // the thread pointer and the start of the executable's TLS segment, but both
  // the thread pointer and the TLS segment are aligned appropriately for the
  // TLS segment. Calculate the distance between the thread pointer and the
  // EXE's segment.
  const size_t exe_tpoff = __BIONIC_ALIGN(sizeof(void*) * 2, exe_segment->alignment);

  // Bionic stores its TLS slots in the words at and above the thread pointer,
  // so the executable's segment must sit at least MAX_TLS_SLOT (rounded up to
  // a power of two) words past the thread pointer, i.e. the segment must be
  // sufficiently aligned. An underaligned executable cannot be loaded.
  const size_t min_bionic_alignment = BIONIC_ROUND_UP_POWER_OF_2(MAX_TLS_SLOT) * sizeof(void*);
  if (exe_tpoff < min_bionic_alignment) {
    async_safe_fatal("error: \"%s\": executable's TLS segment is underaligned: "
                     "alignment is %zu, needs to be at least %zu for %s Bionic",
                     progname, exe_segment->alignment, min_bionic_alignment,
                     (sizeof(void*) == 4 ? "ARM" : "ARM64"));
  }

  // The thread pointer sits exe_tpoff bytes before the segment; the TCB starts
  // (-MIN_TLS_SLOT) words below the thread pointer (MIN_TLS_SLOT is
  // non-positive on these targets, so the negation gives a byte count of slots
  // below the thread pointer — see bionic_tls.h; TODO confirm against header).
  offset_bionic_tcb_ = result - exe_tpoff - (-MIN_TLS_SLOT * sizeof(void*));
  return result;

#elif defined(__i386__) || defined(__x86_64__)

  // x86 uses variant 2 TLS layout. The executable's segment is located just
  // before the TCB.
  static_assert(MIN_TLS_SLOT == 0, "First slot of bionic_tcb must be slot #0 on x86");
  // Round the segment's size up so the TCB that follows keeps the segment's
  // alignment, then place the TCB at the stronger of its own or the segment's
  // alignment.
  const size_t exe_size = round_up_with_overflow_check(exe_segment->size, exe_segment->alignment);
  reserve(exe_size, 1);
  const size_t max_align = MAX(alignof(bionic_tcb), exe_segment->alignment);
  offset_bionic_tcb_ = reserve(sizeof(bionic_tcb), max_align);
  // The segment occupies the exe_size bytes immediately before the TCB.
  return offset_bionic_tcb_ - exe_size;

#else
#error "Unrecognized architecture"
#endif
}
123
// Reserves space in the static TLS layout for the thread's bionic_tls struct
// (Bionic's internal per-thread data, distinct from any ELF TLS segment) and
// records its offset.
void StaticTlsLayout::reserve_bionic_tls() {
  offset_bionic_tls_ = reserve_type<bionic_tls>();
}
127
128void StaticTlsLayout::finish_layout() {
129 // Round the offset up to the alignment.
130 offset_ = round_up_with_overflow_check(offset_, alignment_);
Ryan Prichard977e47d2019-01-14 21:52:14 -0800131
132 if (overflowed_) {
133 async_safe_fatal("error: TLS segments in static TLS overflowed");
134 }
Ryan Prichard45d13492019-01-03 02:51:30 -0800135}
136
137// The size is not required to be a multiple of the alignment. The alignment
138// must be a positive power-of-two.
139size_t StaticTlsLayout::reserve(size_t size, size_t alignment) {
140 offset_ = round_up_with_overflow_check(offset_, alignment);
141 const size_t result = offset_;
142 if (__builtin_add_overflow(offset_, size, &offset_)) overflowed_ = true;
143 alignment_ = MAX(alignment_, alignment);
144 return result;
145}
146
147size_t StaticTlsLayout::round_up_with_overflow_check(size_t value, size_t alignment) {
148 const size_t old_value = value;
149 value = __BIONIC_ALIGN(value, alignment);
150 if (value < old_value) overflowed_ = true;
151 return value;
152}
Ryan Prichard361c1b42019-01-15 13:45:27 -0800153
154// Copy each TLS module's initialization image into a newly-allocated block of
155// static TLS memory. To reduce dirty pages, this function only writes to pages
156// within the static TLS that need initialization. The memory should already be
157// zero-initialized on entry.
158void __init_static_tls(void* static_tls) {
159 // The part of the table we care about (i.e. static TLS modules) never changes
160 // after startup, but we still need the mutex because the table could grow,
161 // moving the initial part. If this locking is too slow, we can duplicate the
162 // static part of the table.
163 TlsModules& modules = __libc_shared_globals()->tls_modules;
164 ScopedReadLock locker(&modules.rwlock);
165
166 for (size_t i = 0; i < modules.module_count; ++i) {
167 TlsModule& module = modules.module_table[i];
168 if (module.static_offset == SIZE_MAX) {
169 // All of the static modules come before all of the dynamic modules, so
170 // once we see the first dynamic module, we're done.
171 break;
172 }
173 if (module.segment.init_size == 0) {
174 // Skip the memcpy call for TLS segments with no initializer, which is
175 // common.
176 continue;
177 }
178 memcpy(static_cast<char*>(static_tls) + module.static_offset,
179 module.segment.init_ptr,
180 module.segment.init_size);
181 }
182}