/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/CFIShadow.h" // For kLibraryAlignment

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__riscv)
  return EM_RISCV;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)
    p_align   -> segment's in-memory and in-file alignment

  We will ignore the p_paddr field of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then will load all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap around UINT32_MAX for possible large p_vaddr values).

  Also, the phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

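// Worked example (illustrative only, reusing the numbers from the note above
// and assuming a 4 KiB page size):
//
//   phdr0->p_vaddr     == 0x30000     (already page-aligned)
//   phdr0_load_address == 0xa0030000
//   load_bias          == 0xa0030000 - PAGE_START(0x30000) == 0xa0000000
//
// A p_vaddr of 0x40010 read from the file therefore corresponds to the
// memory address 0xa0000000 + 0x40010 == 0xa0040010.
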
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

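// For example, a read-only executable segment carries p_flags == (PF_R | PF_X),
// so PFLAGS_TO_PROT(PF_R | PF_X) evaluates to (PROT_READ | PROT_EXEC), which is
// passed straight to mmap()/mprotect() below.
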
// Default PMD size for x86_64 and aarch64 (2MB).
static constexpr size_t kPmdSize = (1UL << 21);

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  if (ReserveAddressSpace(address_space) && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A, loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
                                               &note_gnu_property_) == 0);
    }
#endif
  }

  return did_load_;
}

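// Typical usage (a sketch only; the real call sites live elsewhere in the
// linker, and the exact arguments they pass differ):
//
//   ElfReader elf_reader;
//   if (elf_reader.Read(realpath, fd, file_offset, file_size) &&
//       elf_reader.Load(&address_space)) {
//     // The library is now mapped; load_start_/load_bias_ describe where.
//   }
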
const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_RISCV) return "EM_RISCV";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none
  // of which can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

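// Illustrative check (not part of the linker): with file_offset_ == 0 and
// file_size_ == 0x10000, an offset of 0xfff8 with size 0x100 passes both
// safe_add() calls but fails range_end <= file_size_, while an offset near
// the top of the 64-bit range is rejected by safe_add() itself rather than
// silently wrapping around.
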
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

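// Worked example (illustrative, reusing the two segments from the technical
// note above with 4 KiB pages): min_vaddr == PAGE_START(0x30000) == 0x30000
// and max_vaddr == PAGE_END(0x48000) == 0x48000, so this returns 0x18000
// bytes (96 KiB) of address space to reserve.
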
// Returns the maximum p_align associated with a loadable segment in the ELF
// program header table. Used to determine whether the file should be loaded at
// a specific virtual address alignment for use with huge pages.
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t maximum_alignment = PAGE_SIZE;

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    if (phdr->p_align > maximum_alignment) {
      maximum_alignment = phdr->p_align;
    }
  }

#if defined(__LP64__)
  return maximum_alignment;
#else
  return PAGE_SIZE;
#endif
}

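// For example (illustrative): a library whose PT_LOAD segments all have
// p_align == 0x1000 yields PAGE_SIZE here, while one linked with
// -Wl,-z,max-page-size=0x200000 carries p_align == 0x200000 (== kPmdSize)
// and, on LP64 targets, becomes a candidate for PMD-aligned loading below.
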
// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
                                         void** out_gap_start, size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Reserve enough space to properly align the library's start address.
  mapping_align = std::max(mapping_align, start_align);
  if (mapping_align == PAGE_SIZE) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match the second level
  // page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, mapping_align) + mapping_align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible huge pages before it
    // to improve address randomization and make it harder to locate this library code by probing.
    munmap(mmap_ptr, mmap_size);
    mapping_align = std::max(mapping_align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - PAGE_SIZE;
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, mapping_align);
  uint8_t* last = align_down(gap_start, mapping_align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
  uint8_t* start = first + n * start_align;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
  // to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    size_t start_alignment = PAGE_SIZE;
    if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
      size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
      // Limit alignment to PMD size as other alignments reduce the number of
      // bits available for ASLR for no benefit.
      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : PAGE_SIZE;
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                        &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }

      // Mark segments as huge page eligible if they meet the requirements
      // (executable and PMD aligned).
      if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
          get_transparent_hugepages_supported()) {
        madvise(seg_addr, file_length, MADV_HUGEPAGE);
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

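// Page math for one segment (illustrative, 4 KiB pages, using the second
// segment from the technical note with load_bias == 0xa0000000):
//
//   seg_start    == 0x40000 + load_bias            == 0xa0040000
//   seg_end      == seg_start + p_memsz  (0x8000)  == 0xa0048000
//   seg_file_end == seg_start + p_filesz (0x2000)  == 0xa0042000
//
// so 0xa0040000..0xa0042000 is mapped from the file and the remaining
// 0xa0042000..0xa0048000 comes from the anonymous ".bss" mapping.
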
/* Used internally. Used to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
    if ((prot & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }
#if defined(__aarch64__)
    if ((prot & PROT_EXEC) == 0) {
      // Though it is not specified, don't add PROT_BTI if the segment is not
      // executable.
      prot &= ~PROT_BTI;
    }
#endif

    int ret =
        mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * AArch64: also called from linker_main and ElfReader::Load to apply
 * PROT_BTI to the loaded main executable and other .so files.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   prop        -> GnuPropertySection or nullptr
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
  int prot = 0;
#if defined(__aarch64__)
  if ((prop != nullptr) && prop->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

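// Typical sequence around relocation (a sketch; the real call sites in the
// linker interleave additional bookkeeping):
//
//   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
//   // ... apply relocations ...
//   phdr_table_protect_segments(phdr_table, phdr_count, load_bias, prop);
//   phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);
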
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}

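// Producer/consumer pairing (a sketch): one process writes its relocated
// relro pages with phdr_table_serialize_gnu_relro(..., fd, &offset); a second
// process that loaded the same library at the same address can then call
// phdr_table_map_gnu_relro(..., fd, &offset) (below) to replace its dirty
// relro pages with clean, shareable, file-backed ones.
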
/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd,
                             size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}


Elliott Hughes4eeb1f12013-10-25 17:38:02 -07001080#if defined(__arm__)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001081
1082# ifndef PT_ARM_EXIDX
1083# define PT_ARM_EXIDX 0x70000001 /* .ARM.exidx segment */
1084# endif
1085
1086/* Return the address and size of the .ARM.exidx section in memory,
1087 * if present.
1088 *
1089 * Input:
1090 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001091 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001092 * load_bias -> load bias
1093 * Output:
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001094 * arm_exidx -> address of table in memory (null on failure).
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001095 * arm_exidx_count -> number of items in table (0 on failure).
1096 * Return:
1097 * 0 on error, -1 on failure (_no_ error code in errno)
1098 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001099int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1100 ElfW(Addr) load_bias,
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001101 ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001102 const ElfW(Phdr)* phdr = phdr_table;
1103 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001104
Elliott Hughes0266ae52014-02-10 17:46:57 -08001105 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1106 if (phdr->p_type != PT_ARM_EXIDX) {
1107 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001108 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001109
1110 *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001111 *arm_exidx_count = phdr->p_memsz / 8;
Elliott Hughes0266ae52014-02-10 17:46:57 -08001112 return 0;
1113 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001114 *arm_exidx = nullptr;
Elliott Hughes0266ae52014-02-10 17:46:57 -08001115 *arm_exidx_count = 0;
1116 return -1;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001117}
Elliott Hughes4eeb1f12013-10-25 17:38:02 -07001118#endif
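// Illustrative sketch: the ARM exception unwinder asks the dynamic linker for
// the exidx table covering a given PC (bionic exposes this through
// dl_unwind_find_exidx()). A lookup for one object via this helper would look
// roughly like the following; `si` is a hypothetical soinfo-like struct
// providing phdr/phnum/load_bias.
//
//   ElfW(Addr)* exidx = nullptr;
//   size_t exidx_count = 0;
//   if (phdr_table_get_arm_exidx(si->phdr, si->phnum, si->load_bias,
//                                &exidx, &exidx_count) == 0) {
//     // exidx[0]/exidx[1] form the first <function address, unwind data>
//     // pair; there are exidx_count such 8-byte entries.
//   }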
/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}
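// Illustrative sketch of a caller (hypothetical; soinfo setup in the linker
// does something similar when preparing a freshly loaded library):
//
//   ElfW(Dyn)* dynamic = nullptr;
//   ElfW(Word) dynamic_flags = 0;
//   phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
//                                  &dynamic, &dynamic_flags);
//   if (dynamic == nullptr) {
//     DL_ERR("missing PT_DYNAMIC in \"%s\"", name);
//   }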
/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}
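// Illustrative sketch (hypothetical caller): logging the interpreter
// requested by a main executable. On Android this is typically
// /system/bin/linker or /system/bin/linker64.
//
//   const char* interp =
//       phdr_table_get_interpreter_name(phdr_table, phdr_count, load_bias);
//   if (interp != nullptr) {
//     INFO("PT_INTERP: %s", interp);
//   }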
// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}
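// Illustrative call sequence (a sketch; the exact driver lives in linker.cpp
// and its interfaces may differ from what is assumed here):
//
//   ElfReader reader;
//   if (reader.Read(name, fd, file_offset, file_size) && reader.Load(...)) {
//     // FindPhdr() ran as part of Load(); loaded_phdr_ now points into the
//     // mapped segments and stays valid after phdr_table_ is discarded.
//   }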
// Tries to find the .note.gnu.property section.
// It is not considered an error if such a section is missing.
bool ElfReader::FindGnuPropertySection() {
#if defined(__aarch64__)
  note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
#endif
  return true;
}
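// Usage note (sketch): on aarch64 the parsed note feeds control-flow
// protection decisions. Assuming the GnuPropertySection API from the
// companion linker_note_gnu_property.h, a caller could do something like:
//
//   if (reader.note_gnu_property().IsBTICompatible()) {
//     // map executable segments with PROT_BTI
//   }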
// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    // Use p_filesz rather than p_memsz: the table must lie in the
    // file-backed portion of the segment, not in zero-filled .bss pages.
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}
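// Worked example (illustrative numbers only): for a 64-bit object with
// load_bias_ == 0x7f0000000000, a PT_LOAD with p_vaddr == 0 and
// p_filesz == 0x1000 spans [0x7f0000000000, 0x7f0000001000). A phdr table
// loaded at offset 0x40 with phdr_num_ == 8 occupies
// 8 * sizeof(ElfW(Phdr)) == 8 * 56 == 448 bytes, i.e.
// [0x7f0000000040, 0x7f0000000200), which lies inside the segment, so
// CheckPhdr() accepts it.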