/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/CFIShadow.h" // For kLibraryAlignment

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)
    p_align   -> segment's in-memory and in-file alignment

  We will ignore the p_paddr field of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not its p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap-around of UINT32_MAX for large p_vaddr values.)

  Note that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

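/* A minimal worked example of the bias computation above (illustration only,
 * not code the linker executes), assuming 4KiB pages and the two segments
 * listed in the note: if the loader places the first segment at 0xa0030000,
 * then
 *
 *     load_bias = 0xa0030000 - PAGE_START(0x30000) = 0xa0000000
 *
 * and the second segment's p_vaddr 0x40000 maps to
 * 0x40000 + load_bias = 0xa0040000, matching the ranges shown above.
 */
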
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

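// For instance (illustrative only): a read-only executable segment with
// p_flags == (PF_R | PF_X) yields PFLAGS_TO_PROT(PF_R | PF_X) ==
// (PROT_READ | PROT_EXEC), while a writable data segment with
// p_flags == (PF_R | PF_W) yields (PROT_READ | PROT_WRITE).
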
// Default PMD size for x86_64 and aarch64 (2MB).
static constexpr size_t kPmdSize = (1UL << 21);

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  if (ReserveAddressSpace(address_space) && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
                                               &note_gnu_property_) == 0);
    }
#endif
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called to
  // check the DYNSYM and DYNAMIC sections and phdr/shdr - none of them can be
  // at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

483
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800484/* Returns the size of the extent of all the possibly non-contiguous
485 * loadable segments in an ELF program header table. This corresponds
486 * to the page-aligned size in bytes that needs to be reserved in the
487 * process' address space. If there are no loadable segments, 0 is
488 * returned.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200489 *
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700490 * If out_min_vaddr or out_max_vaddr are not null, they will be
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800491 * set to the minimum and maximum addresses of pages to be reserved,
492 * or 0 if there is nothing to load.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200493 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800494size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
495 ElfW(Addr)* out_min_vaddr,
496 ElfW(Addr)* out_max_vaddr) {
497 ElfW(Addr) min_vaddr = UINTPTR_MAX;
498 ElfW(Addr) max_vaddr = 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200499
Elliott Hughes0266ae52014-02-10 17:46:57 -0800500 bool found_pt_load = false;
501 for (size_t i = 0; i < phdr_count; ++i) {
502 const ElfW(Phdr)* phdr = &phdr_table[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200503
Elliott Hughes0266ae52014-02-10 17:46:57 -0800504 if (phdr->p_type != PT_LOAD) {
505 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200506 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800507 found_pt_load = true;
508
509 if (phdr->p_vaddr < min_vaddr) {
510 min_vaddr = phdr->p_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200511 }
512
Elliott Hughes0266ae52014-02-10 17:46:57 -0800513 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
514 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
515 }
516 }
517 if (!found_pt_load) {
518 min_vaddr = 0;
519 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200520
Elliott Hughes0266ae52014-02-10 17:46:57 -0800521 min_vaddr = PAGE_START(min_vaddr);
522 max_vaddr = PAGE_END(max_vaddr);
523
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700524 if (out_min_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800525 *out_min_vaddr = min_vaddr;
526 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700527 if (out_max_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800528 *out_max_vaddr = max_vaddr;
529 }
530 return max_vaddr - min_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200531}
532
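/* Illustrative only: for the two segments from the technical note above
 * ([0x30000, 0x34000) and [0x40000, 0x48000)), and assuming 4KiB pages,
 * min_vaddr = 0x30000 and max_vaddr = 0x48000, so this function returns
 * 0x48000 - 0x30000 = 0x18000 bytes (96KiB) of address space to reserve.
 */
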
// Returns the maximum p_align associated with a loadable segment in the ELF
// program header table. Used to determine whether the file should be loaded at
// a specific virtual address alignment for use with huge pages.
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t maximum_alignment = PAGE_SIZE;

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    if (phdr->p_align > maximum_alignment) {
      maximum_alignment = phdr->p_align;
    }
  }

#if defined(__LP64__)
  return maximum_alignment;
#else
  return PAGE_SIZE;
#endif
}

// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
                                         void** out_gap_start, size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Reserve enough space to properly align the library's start address.
  mapping_align = std::max(mapping_align, start_align);
  if (mapping_align == PAGE_SIZE) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match the second level
  // page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, mapping_align) + mapping_align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible huge pages before it
    // to improve address randomization and make it harder to locate this library's code by probing.
    munmap(mmap_ptr, mmap_size);
    mapping_align = std::max(mapping_align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - PAGE_SIZE;
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, mapping_align);
  uint8_t* last = align_down(gap_start, mapping_align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
  uint8_t* start = first + n * start_align;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
  // to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    size_t start_alignment = PAGE_SIZE;
    if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
      size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
      // Limit alignment to PMD size as other alignments reduce the number of
      // bits available for ASLR for no benefit.
      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : PAGE_SIZE;
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                        &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }

      // Mark segments as huge page eligible if they meet the requirements
      // (executable and PMD aligned).
      if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
          get_transparent_hugepages_supported()) {
        madvise(seg_addr, file_length, MADV_HUGEPAGE);
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally. Used to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
    if ((prot & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }
#if defined(__aarch64__)
    if ((prot & PROT_EXEC) == 0) {
      // Though it is not specified, don't add PROT_BTI if the segment is not
      // executable.
      prot &= ~PROT_BTI;
    }
#endif

    int ret =
        mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * AArch64: also called from linker_main and ElfReader::Load to apply
 * PROT_BTI for the loaded main so and other so-s.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   prop        -> GnuPropertySection or nullptr
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
  int prot = 0;
#if defined(__aarch64__)
  if ((prop != nullptr) && prop->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

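/* Illustrative sketch (not part of the linker): the calling sequence implied
 * by the comments above, for an already-loaded library described by
 * phdr_table/phdr_count/load_bias:
 *
 *   if (phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias) < 0) {
 *     // handle the mprotect failure
 *   }
 *   // ... apply relocations ...
 *   phdr_table_protect_segments(phdr_table, phdr_count, load_bias, nullptr);
 *   phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);
 */
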
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd,
                             size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}

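/* Illustrative sketch (not part of the linker) of how the two functions above
 * cooperate, assuming both processes loaded the library at the same address
 * and share a file descriptor fd for the relro cache file:
 *
 *   // Writer process, after relocation:
 *   size_t offset = 0;
 *   phdr_table_serialize_gnu_relro(phdr_table, phdr_count, load_bias, fd, &offset);
 *
 *   // Reader process, after its own relocation:
 *   size_t offset = 0;
 *   phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias, fd, &offset);
 */
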
Elliott Hughes4eeb1f12013-10-25 17:38:02 -07001077#if defined(__arm__)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001078
1079# ifndef PT_ARM_EXIDX
1080# define PT_ARM_EXIDX 0x70000001 /* .ARM.exidx segment */
1081# endif
1082
1083/* Return the address and size of the .ARM.exidx section in memory,
1084 * if present.
1085 *
1086 * Input:
1087 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001088 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001089 * load_bias -> load bias
1090 * Output:
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001091 * arm_exidx -> address of table in memory (null on failure).
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001092 * arm_exidx_count -> number of items in table (0 on failure).
1093 * Return:
1094 * 0 on error, -1 on failure (_no_ error code in errno)
1095 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001096int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1097 ElfW(Addr) load_bias,
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001098 ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001099 const ElfW(Phdr)* phdr = phdr_table;
1100 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001101
Elliott Hughes0266ae52014-02-10 17:46:57 -08001102 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1103 if (phdr->p_type != PT_ARM_EXIDX) {
1104 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001105 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001106
1107 *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001108 *arm_exidx_count = phdr->p_memsz / 8;
Elliott Hughes0266ae52014-02-10 17:46:57 -08001109 return 0;
1110 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001111 *arm_exidx = nullptr;
Elliott Hughes0266ae52014-02-10 17:46:57 -08001112 *arm_exidx_count = 0;
1113 return -1;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001114}
Elliott Hughes4eeb1f12013-10-25 17:38:02 -07001115#endif
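
// A minimal sketch (hypothetical ARM-only caller, not part of the linker) of how
// an unwinder-style consumer would fetch the table: each .ARM.exidx entry is a
// pair of 32-bit words (hence the p_memsz / 8 above), and a zero count simply
// means no PT_ARM_EXIDX segment was present.
#if defined(__arm__)
static size_t count_exidx_entries(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                  ElfW(Addr) load_bias) {
  ElfW(Addr)* exidx = nullptr;
  size_t exidx_count = 0;
  phdr_table_get_arm_exidx(phdr_table, phdr_count, load_bias, &exidx, &exidx_count);
  return exidx_count;
}
#endif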

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}
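
// A minimal sketch (hypothetical helper, not used by the linker) of how a caller
// consumes the result: the .dynamic table is a DT_NULL-terminated array of
// ElfW(Dyn) entries, so a typical caller walks it until it reaches DT_NULL.
static size_t count_dynamic_entries(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias) {
  ElfW(Dyn)* dynamic = nullptr;
  phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias, &dynamic, nullptr);
  if (dynamic == nullptr) {
    return 0;  // No PT_DYNAMIC segment at all.
  }
  size_t count = 0;
  while (dynamic[count].d_tag != DT_NULL) {
    ++count;
  }
  return count;
}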

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}
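
// A minimal sketch (hypothetical, not part of the linker) of a dynamically linked
// executable looking up its own PT_INTERP string. It assumes <sys/auxv.h> is
// available: AT_PHDR gives the loaded address of the program header table, and
// the load bias is recovered from the PT_PHDR entry before calling the helper.
#include <sys/auxv.h>

static const char* own_interpreter_name() {
  const ElfW(Phdr)* phdr = reinterpret_cast<const ElfW(Phdr)*>(getauxval(AT_PHDR));
  size_t phdr_count = static_cast<size_t>(getauxval(AT_PHNUM));
  ElfW(Addr) load_bias = 0;
  for (size_t i = 0; i < phdr_count; ++i) {
    if (phdr[i].p_type == PT_PHDR) {
      load_bias = reinterpret_cast<ElfW(Addr)>(phdr) - phdr[i].p_vaddr;
      break;
    }
  }
  // Typically "/system/bin/linker64" (or "/system/bin/linker" for 32-bit).
  return phdr_table_get_interpreter_name(phdr, phdr_count, load_bias);
}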

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}
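
// A minimal sketch (hypothetical free function, not used by the linker) of the
// same two-step strategy as FindPhdr(), written against an arbitrary program
// header table instead of the ElfReader members. Returns 0 if neither a PT_PHDR
// entry nor a first PT_LOAD segment mapping the ELF header is present.
static ElfW(Addr) find_loaded_phdr_address(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                           ElfW(Addr) load_bias) {
  // Preferred: PT_PHDR names the loaded program header table directly.
  for (size_t i = 0; i < phdr_count; ++i) {
    if (phdr_table[i].p_type == PT_PHDR) {
      return load_bias + phdr_table[i].p_vaddr;
    }
  }
  // Fallback: if the first PT_LOAD maps file offset 0, it contains the ELF
  // header, whose e_phoff field locates the table within that mapping.
  for (size_t i = 0; i < phdr_count; ++i) {
    if (phdr_table[i].p_type == PT_LOAD) {
      if (phdr_table[i].p_offset == 0) {
        const ElfW(Ehdr)* ehdr =
            reinterpret_cast<const ElfW(Ehdr)*>(load_bias + phdr_table[i].p_vaddr);
        return reinterpret_cast<ElfW(Addr)>(ehdr) + ehdr->e_phoff;
      }
      break;
    }
  }
  return 0;
}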

// Tries to find the .note.gnu.property section.
// It is not considered an error if such a section is missing.
bool ElfReader::FindGnuPropertySection() {
#if defined(__aarch64__)
  note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
#endif
  return true;
}
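
// A minimal sketch (hypothetical helper): generically, these notes are carried in
// a PT_GNU_PROPERTY segment, so on any architecture the segment itself can be
// located like any other program header entry. The fallback #define is only for
// older headers that may not provide the constant.
#ifndef PT_GNU_PROPERTY
#define PT_GNU_PROPERTY 0x6474e553
#endif
static const ElfW(Phdr)* find_gnu_property_phdr(const ElfW(Phdr)* phdr_table,
                                                size_t phdr_count) {
  for (size_t i = 0; i < phdr_count; ++i) {
    if (phdr_table[i].p_type == PT_GNU_PROPERTY) {
      return &phdr_table[i];
    }
  }
  return nullptr;  // As above, a missing section is not an error.
}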

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}
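
// A minimal sketch (hypothetical helper, not used by the linker) generalizing the
// containment test above to any [addr, addr + size) range. Like CheckPhdr(), it
// only trusts the file-backed part of each PT_LOAD segment (p_filesz, not p_memsz).
static bool range_in_file_backed_load_segment(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                              ElfW(Addr) load_bias,
                                              ElfW(Addr) addr, size_t size) {
  ElfW(Addr) end = addr + size;
  for (size_t i = 0; i < phdr_count; ++i) {
    if (phdr_table[i].p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = load_bias + phdr_table[i].p_vaddr;
    ElfW(Addr) seg_end = seg_start + phdr_table[i].p_filesz;
    if (seg_start <= addr && end <= seg_end) {
      return true;
    }
  }
  return false;
}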