/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "private/bionic_elf_tls.h"

#include <async_safe/CHECK.h>
#include <async_safe/log.h>
#include <string.h>
#include <sys/param.h>
#include <unistd.h>

#include "platform/bionic/macros.h"
#include "platform/bionic/page.h"
#include "private/ScopedRWLock.h"
#include "private/ScopedSignalBlocker.h"
#include "private/bionic_globals.h"
#include "private/bionic_tls.h"
#include "pthread_internal.h"

// Every call to __tls_get_addr needs to check the generation counter, so
// accesses to the counter need to be as fast as possible. Keep a copy of it in
// a hidden variable, which can be accessed without using the GOT. The linker
// will update this variable when it updates its counter.
//
// To allow the linker to update this variable, libc.so's constructor passes its
// address to the linker. To accommodate a possible __tls_get_addr call before
// libc.so's constructor, this local copy is initialized to SIZE_MAX, forcing
// __tls_get_addr to initially use the slow path.
__LIBC_HIDDEN__ _Atomic(size_t) __libc_tls_generation_copy = SIZE_MAX;

// Search for a TLS segment in the given phdr table. Returns true if it has a
// TLS segment and false otherwise.
bool __bionic_get_tls_segment(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                              ElfW(Addr) load_bias, TlsSegment* out) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_TLS) {
      *out = TlsSegment{
          .aligned_size =
              TlsAlignedSize{
                  .size = phdr.p_memsz,
                  .align =
                      TlsAlign{
                          .value = phdr.p_align ?: 1,  // 0 means "no alignment requirement"
                          .skew = phdr.p_vaddr % MAX(1, phdr.p_align),
                      },
              },
          .init_ptr = reinterpret_cast<void*>(load_bias + phdr.p_vaddr),
          .init_size = phdr.p_filesz,
      };
      return true;
    }
  }
  return false;
}

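// Illustrative example (hypothetical phdr values, not from any real binary): a
// PT_TLS entry with p_memsz = 0x40, p_filesz = 0x20, p_align = 0x10, and
// p_vaddr = 0x2008 would produce aligned_size = {.size = 0x40,
// .align = {.value = 0x10, .skew = 0x2008 % 0x10 = 8}}, init_size = 0x20, and
// init_ptr = load_bias + 0x2008.
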
// Return true if the alignment of a TLS segment is a valid power-of-two.
bool __bionic_check_tls_align(size_t align) {
  // Note: The size does not need to be a multiple of the alignment. With ld.bfd
  // (or after using binutils' strip), the TLS segment's size isn't rounded up.
  return powerof2(align);
}

static void static_tls_layout_overflow() {
  async_safe_fatal("error: TLS segments in static TLS overflowed");
}

static size_t align_checked(size_t value, TlsAlign tls_align) {
  const size_t align = tls_align.value;
  const size_t skew = tls_align.skew;
  CHECK(align != 0 && powerof2(align) && skew < align);
  const size_t result = ((value - skew + align - 1) & ~(align - 1)) + skew;
  if (result < value) static_tls_layout_overflow();
  return result;
}

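// Illustrative arithmetic (made-up inputs): align_checked(13, TlsAlign{.value = 8, .skew = 4})
// computes ((13 - 4 + 7) & ~7) + 4 == 16 + 4 == 20, the smallest value >= 13
// that is congruent to the skew modulo the alignment (20 % 8 == 4).
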
size_t StaticTlsLayout::offset_thread_pointer() const {
  return offset_bionic_tcb_ + (-MIN_TLS_SLOT * sizeof(void*));
}

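// For illustration only (hypothetical slot numbering): with MIN_TLS_SLOT == -2
// on a 64-bit target, the thread pointer would sit 16 bytes past the start of
// bionic_tcb, i.e. offset_thread_pointer() == offset_bionic_tcb_ + 16.
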
// Allocates the Bionic TCB and the executable's TLS segment in the static TLS
// layout, satisfying alignment requirements for both.
//
// For an executable's TLS accesses (using the LocalExec model), the static
// linker bakes TLS offsets directly into the .text section, so the loader must
// place the executable segment at the same offset relative to the TP.
// Similarly, the Bionic TLS slots (bionic_tcb) must also be allocated at the
// correct offset relative to the TP.
//
// Returns the offset of the executable's TLS segment.
//
// Note: This function has unit tests, but they are in bionic-unit-tests-static,
// not bionic-unit-tests.
size_t StaticTlsLayout::reserve_exe_segment_and_tcb(const TlsSegment* seg,
                                                    const char* progname __attribute__((unused))) {
  // Special case: if the executable has no TLS segment, then just allocate a
  // TCB and skip the minimum alignment check on ARM.
  if (seg == nullptr) {
    offset_bionic_tcb_ = reserve_type<bionic_tcb>();
    return 0;
  }

#if defined(__arm__) || defined(__aarch64__)
  // ARM uses a "variant 1" TLS layout. The ABI specifies that the TP points at
  // a 2-word TCB, followed by the executable's segment. In practice, libc
  // implementations actually allocate a larger TCB at negative offsets from the
  // TP.
  //
  // Historically, Bionic allocated an 8-word TCB starting at TP+0, so to keep
  // the executable's TLS segment from overlapping the last 6 slots, Bionic
  // requires that executables have an 8-word PT_TLS alignment to ensure that
  // the TCB fits in the alignment padding, which it accomplishes using
  // crtbegin.c. Bionic uses negative offsets for new TLS slots to avoid this
  // problem.

  static_assert(MIN_TLS_SLOT <= 0 && MAX_TLS_SLOT >= 1);
  static_assert(sizeof(bionic_tcb) == (MAX_TLS_SLOT - MIN_TLS_SLOT + 1) * sizeof(void*));
  static_assert(alignof(bionic_tcb) == sizeof(void*));
  const size_t max_align = MAX(alignof(bionic_tcb), seg->aligned_size.align.value);

  // Allocate the TCB first. Split it into negative and non-negative slots and
  // ensure that TP (i.e. the first non-negative slot) is aligned to max_align.
  const size_t tcb_size_pre = -MIN_TLS_SLOT * sizeof(void*);
  const size_t tcb_size_post = (MAX_TLS_SLOT + 1) * sizeof(void*);
  const auto pair =
      reserve_tp_pair(TlsAlignedSize{.size = tcb_size_pre},
                      TlsAlignedSize{.size = tcb_size_post, .align = TlsAlign{.value = max_align}});
  offset_bionic_tcb_ = pair.before;
  const size_t offset_tp = pair.tp;

  // Allocate the segment.
  offset_exe_ = reserve(seg->aligned_size);

  // Verify that the ABI and Bionic tpoff values are equal, which is equivalent
  // to checking whether the segment is sufficiently aligned.
  const size_t abi_tpoff = align_checked(2 * sizeof(void*), seg->aligned_size.align);
  const size_t actual_tpoff = align_checked(tcb_size_post, seg->aligned_size.align);
  CHECK(actual_tpoff == offset_exe_ - offset_tp);

  if (abi_tpoff != actual_tpoff) {
    async_safe_fatal(
        "error: \"%s\": executable's TLS segment is underaligned: "
        "alignment is %zu (skew %zu), needs to be at least %zu for %s Bionic",
        progname, seg->aligned_size.align.value, seg->aligned_size.align.skew, tcb_size_post,
        (sizeof(void*) == 4 ? "ARM" : "ARM64"));
  }

#elif defined(__i386__) || defined(__x86_64__)

  auto pair = reserve_tp_pair(seg->aligned_size, TlsAlignedSize::of_type<bionic_tcb>());
  offset_exe_ = pair.before;
  offset_bionic_tcb_ = pair.after;

#elif defined(__riscv)
  static_assert(MAX_TLS_SLOT == -1, "Last slot of bionic_tcb must be slot #(-1) on riscv");

  auto pair = reserve_tp_pair(TlsAlignedSize::of_type<bionic_tcb>(), seg->aligned_size);
  offset_bionic_tcb_ = pair.before;
  offset_exe_ = pair.after;

#else
#error "Unrecognized architecture"
#endif

  return offset_exe_;
}

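// Illustrative numbers for the ARM/ARM64 underalignment check above
// (hypothetical, assuming MAX_TLS_SLOT == 7 on a 64-bit target, so
// tcb_size_post == 64): an executable whose PT_TLS has align == 64 and
// skew == 0 gives abi_tpoff == 64 and actual_tpoff == 64, so the layout matches
// the ABI. With align == 16 instead, abi_tpoff would be 16 while actual_tpoff
// stays 64, and the loader would abort with the "underaligned" error.
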
size_t StaticTlsLayout::reserve_bionic_tls() {
  offset_bionic_tls_ = reserve_type<bionic_tls>();
  return offset_bionic_tls_;
}

void StaticTlsLayout::finish_layout() {
  // Round the offset up to the alignment.
  cursor_ = align_checked(cursor_, TlsAlign{.value = align_});
}

size_t StaticTlsLayout::align_cursor(TlsAlign align) {
  cursor_ = align_checked(cursor_, align);
  align_ = MAX(align_, align.value);
  return cursor_;
}

size_t StaticTlsLayout::align_cursor_unskewed(size_t align) {
  return align_cursor(TlsAlign{.value = align});
}

// Reserve the requested number of bytes at the requested alignment. The
// requested size is not required to be a multiple of the alignment, nor is the
// cursor aligned after the allocation.
size_t StaticTlsLayout::reserve(TlsAlignedSize aligned_size) {
  align_cursor(aligned_size.align);
  const size_t result = cursor_;
  if (__builtin_add_overflow(cursor_, aligned_size.size, &cursor_)) static_tls_layout_overflow();
  return result;
}

// Calculate the TP offset and allocate something before it and something after
// it. The TP will be aligned to:
//
//     MAX(before.align.value, after.align.value)
//
// The `before` and `after` allocations are each allocated as closely as
// possible to the TP.
StaticTlsLayout::TpAllocations StaticTlsLayout::reserve_tp_pair(TlsAlignedSize before,
                                                                TlsAlignedSize after) {
  // Tentative `before` allocation.
  const size_t tentative_before = reserve(before);
  const size_t tentative_before_end = align_cursor_unskewed(before.align.value);

  const size_t offset_tp = align_cursor_unskewed(MAX(before.align.value, after.align.value));

  const size_t offset_after = reserve(after);

  // If the `after` allocation has higher alignment than `before`, then there
  // may be alignment padding to remove between `before` and the TP. Shift
  // `before` forward to remove this padding.
  CHECK(((offset_tp - tentative_before_end) & (before.align.value - 1)) == 0);
  const size_t offset_before = tentative_before + (offset_tp - tentative_before_end);

  return TpAllocations{offset_before, offset_tp, offset_after};
}

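// Illustrative walk-through of reserve_tp_pair() with made-up sizes, starting
// from cursor_ == 0: before = {.size = 24, .align = {.value = 8}} and
// after = {.size = 64, .align = {.value = 32}}.
//   - The tentative `before` allocation lands at offset 0 and ends at 24.
//   - The TP is aligned to MAX(8, 32) == 32, so offset_tp == 32.
//   - `after` is then reserved at offset 32.
//   - The 8 bytes of padding between 24 and 32 are a multiple of `before`'s
//     alignment, so `before` is shifted forward to offset 8, ending at the TP.
// Result: TpAllocations{.before = 8, .tp = 32, .after = 32}.
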
// Copy each TLS module's initialization image into a newly-allocated block of
// static TLS memory. To reduce dirty pages, this function only writes to pages
// within the static TLS that need initialization. The memory should already be
// zero-initialized on entry.
void __init_static_tls(void* static_tls) {
  // The part of the table we care about (i.e. static TLS modules) never changes
  // after startup, but we still need the mutex because the table could grow,
  // moving the initial part. If this locking is too slow, we can duplicate the
  // static part of the table.
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  ScopedSignalBlocker ssb;
  ScopedReadLock locker(&modules.rwlock);

  for (size_t i = 0; i < modules.module_count; ++i) {
    TlsModule& module = modules.module_table[i];
    if (module.static_offset == SIZE_MAX) {
      // All of the static modules come before all of the dynamic modules, so
      // once we see the first dynamic module, we're done.
      break;
    }
    if (module.segment.init_size == 0) {
      // Skip the memcpy call for TLS segments with no initializer, which is
      // common.
      continue;
    }
    memcpy(static_cast<char*>(static_tls) + module.static_offset,
           module.segment.init_ptr,
           module.segment.init_size);
  }
}

static inline size_t dtv_size_in_bytes(size_t module_count) {
  return sizeof(TlsDtv) + module_count * sizeof(void*);
}

// Calculates the number of module slots to allocate in a new DTV. For small
// objects (up to 1KiB), the TLS allocator allocates memory in power-of-2 sizes,
// so for better space usage, ensure that the DTV size (header + slots) is a
// power of 2.
//
// The lock on TlsModules must be held.
static size_t calculate_new_dtv_count() {
  size_t loaded_cnt = __libc_shared_globals()->tls_modules.module_count;
  size_t bytes = dtv_size_in_bytes(MAX(1, loaded_cnt));
  if (!powerof2(bytes)) {
    bytes = BIONIC_ROUND_UP_POWER_OF_2(bytes);
  }
  return (bytes - sizeof(TlsDtv)) / sizeof(void*);
}

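// Illustrative sizing (hypothetical: assume sizeof(TlsDtv) == 24 on a 64-bit
// target): with 6 loaded modules, dtv_size_in_bytes(6) == 24 + 48 == 72, which
// rounds up to the next power of two, 128, so the new DTV gets
// (128 - 24) / 8 == 13 module slots.
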
// This function must be called with signals blocked and a write lock on
// TlsModules held.
static void update_tls_dtv(bionic_tcb* tcb) {
  const TlsModules& modules = __libc_shared_globals()->tls_modules;
  BionicAllocator& allocator = __libc_shared_globals()->tls_allocator;

  // Use the generation counter from the shared globals instead of the local
  // copy, which won't be initialized yet if __tls_get_addr is called before
  // libc.so's constructor.
  if (__get_tcb_dtv(tcb)->generation == atomic_load(&modules.generation)) {
    return;
  }

  const size_t old_cnt = __get_tcb_dtv(tcb)->count;

  // If the DTV isn't large enough, allocate a larger one. Because a signal
  // handler could interrupt the fast path of __tls_get_addr, we don't free the
  // old DTV. Instead, we add the old DTV to a list, then free all of a thread's
  // DTVs at thread-exit. Each time the DTV is reallocated, its size at least
  // doubles.
  if (modules.module_count > old_cnt) {
    size_t new_cnt = calculate_new_dtv_count();
    TlsDtv* const old_dtv = __get_tcb_dtv(tcb);
    TlsDtv* const new_dtv = static_cast<TlsDtv*>(allocator.alloc(dtv_size_in_bytes(new_cnt)));
    memcpy(new_dtv, old_dtv, dtv_size_in_bytes(old_cnt));
    new_dtv->count = new_cnt;
    new_dtv->next = old_dtv;
    __set_tcb_dtv(tcb, new_dtv);
  }

  TlsDtv* const dtv = __get_tcb_dtv(tcb);

  const StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
  char* static_tls = reinterpret_cast<char*>(tcb) - layout.offset_bionic_tcb();

  // Initialize static TLS modules and free unloaded modules.
  for (size_t i = 0; i < dtv->count; ++i) {
    if (i < modules.module_count) {
      const TlsModule& mod = modules.module_table[i];
      if (mod.static_offset != SIZE_MAX) {
        dtv->modules[i] = static_tls + mod.static_offset;
        continue;
      }
      if (mod.first_generation != kTlsGenerationNone &&
          mod.first_generation <= dtv->generation) {
        continue;
      }
    }
    if (modules.on_destruction_cb != nullptr) {
      void* dtls_begin = dtv->modules[i];
      void* dtls_end =
          static_cast<void*>(static_cast<char*>(dtls_begin) + allocator.get_chunk_size(dtls_begin));
      modules.on_destruction_cb(dtls_begin, dtls_end);
    }
    allocator.free(dtv->modules[i]);
    dtv->modules[i] = nullptr;
  }

  dtv->generation = atomic_load(&modules.generation);
}

__attribute__((noinline)) static void* tls_get_addr_slow_path(const TlsIndex* ti) {
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  bionic_tcb* tcb = __get_bionic_tcb();

  // Block signals and lock TlsModules. We may need the allocator, so take
  // a write lock.
  ScopedSignalBlocker ssb;
  ScopedWriteLock locker(&modules.rwlock);

  update_tls_dtv(tcb);

  TlsDtv* dtv = __get_tcb_dtv(tcb);
  const size_t module_idx = __tls_module_id_to_idx(ti->module_id);
  void* mod_ptr = dtv->modules[module_idx];
  if (mod_ptr == nullptr) {
    const TlsSegment& segment = modules.module_table[module_idx].segment;
    // TODO: Currently the aligned_size.align.skew property is ignored.
    // That is, for a dynamic TLS block at addr A, (A % p_align) will be 0, not
    // (p_vaddr % p_align).
    mod_ptr = __libc_shared_globals()->tls_allocator.memalign(segment.aligned_size.align.value,
                                                              segment.aligned_size.size);
    if (segment.init_size > 0) {
      memcpy(mod_ptr, segment.init_ptr, segment.init_size);
    }
    dtv->modules[module_idx] = mod_ptr;

    // Reports the allocation to the listener, if any.
    if (modules.on_creation_cb != nullptr) {
      modules.on_creation_cb(
          mod_ptr, static_cast<void*>(static_cast<char*>(mod_ptr) + segment.aligned_size.size));
    }
  }

  return static_cast<char*>(mod_ptr) + ti->offset + TLS_DTV_OFFSET;
}

// Returns the address of a thread's TLS memory given a module ID and an offset
// into that module's TLS segment. This function is called on every access to a
// dynamic TLS variable on targets that don't use TLSDESC. arm64 uses TLSDESC,
// so it only calls this function on a thread's first access to a module's TLS
// segment.
//
// On most targets, this accessor function is __tls_get_addr and
// TLS_GET_ADDR_CCONV is unset. 32-bit x86 uses ___tls_get_addr instead and a
// regparm() calling convention.
extern "C" void* TLS_GET_ADDR(const TlsIndex* ti) TLS_GET_ADDR_CCONV {
  TlsDtv* dtv = __get_tcb_dtv(__get_bionic_tcb());

  // TODO: See if we can use a relaxed memory ordering here instead.
  size_t generation = atomic_load(&__libc_tls_generation_copy);
  if (__predict_true(generation == dtv->generation)) {
    void* mod_ptr = dtv->modules[__tls_module_id_to_idx(ti->module_id)];
    if (__predict_true(mod_ptr != nullptr)) {
      return static_cast<char*>(mod_ptr) + ti->offset + TLS_DTV_OFFSET;
    }
  }

  return tls_get_addr_slow_path(ti);
}

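// Illustrative call (hypothetical values): for a variable at byte 16 of the
// PT_TLS segment of the module with ID 3, the linker emits a GOT entry
// equivalent to TlsIndex{.module_id = 3, .offset = 16 - TLS_DTV_OFFSET}, and
// compiler-generated code passes that entry's address to the accessor above to
// obtain the variable's address in the calling thread.
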
// This function frees:
// - TLS modules referenced by the current DTV.
// - The list of DTV objects associated with the current thread.
//
// The caller must have already blocked signals.
void __free_dynamic_tls(bionic_tcb* tcb) {
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  BionicAllocator& allocator = __libc_shared_globals()->tls_allocator;

  // If we didn't allocate any dynamic memory, skip out early without taking
  // the lock.
  TlsDtv* dtv = __get_tcb_dtv(tcb);
  if (dtv->generation == kTlsGenerationNone) {
    return;
  }

  // We need the write lock to use the allocator.
  ScopedWriteLock locker(&modules.rwlock);

  // First free everything in the current DTV.
  for (size_t i = 0; i < dtv->count; ++i) {
    if (i < modules.module_count && modules.module_table[i].static_offset != SIZE_MAX) {
      // This module's TLS memory is allocated statically, so don't free it here.
      continue;
    }

    if (modules.on_destruction_cb != nullptr) {
      void* dtls_begin = dtv->modules[i];
      void* dtls_end =
          static_cast<void*>(static_cast<char*>(dtls_begin) + allocator.get_chunk_size(dtls_begin));
      modules.on_destruction_cb(dtls_begin, dtls_end);
    }

    allocator.free(dtv->modules[i]);
  }

  // Now free the thread's list of DTVs.
  while (dtv->generation != kTlsGenerationNone) {
    TlsDtv* next = dtv->next;
    allocator.free(dtv);
    dtv = next;
  }

  // Clear the DTV slot. The DTV must not be used again with this thread.
  tcb->tls_slot(TLS_SLOT_DTV) = nullptr;
}

// Invokes all the registered thread_exit callbacks, if any.
void __notify_thread_exit_callbacks() {
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  if (modules.first_thread_exit_callback == nullptr) {
    // If there is no first_thread_exit_callback, there shouldn't be a tail.
    CHECK(modules.thread_exit_callback_tail_node == nullptr);
    return;
  }

  // Callbacks are supposed to be invoked in the reverse order
  // in which they were registered.
  CallbackHolder* node = modules.thread_exit_callback_tail_node;
  while (node != nullptr) {
    node->cb();
    node = node->prev;
  }
  modules.first_thread_exit_callback();
}