/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/CFIShadow.h" // For kLibraryAlignment

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap-around at UINT32_MAX for large p_vaddr values.)

  Note that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
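
/* A worked instance of the load_bias formula above, with hypothetical values
 * (0x30742 is not taken from a real binary): if the first loadable segment
 * has p_vaddr == 0x30742 and the loader places its first page at 0xa0030000,
 * then
 *
 *   load_bias = 0xa0030000 - PAGE_START(0x30742)
 *             = 0xa0030000 - 0x30000
 *             = 0xa0000000
 *
 * and the segment's first byte lives at
 * 0xa0030000 + PAGE_OFFSET(0x30742) == 0xa0030742.
 */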

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
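
// For illustration only (not used by the loader): a typical text segment
// with p_flags == (PF_R | PF_X) yields
//
//   PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC)
//
// while a data segment with (PF_R | PF_W) yields (PROT_READ | PROT_WRITE).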

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  if (ReserveAddressSpace(address_space) && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A, loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
                                               &note_gnu_property_) == 0);
    }
#endif
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above.
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above.
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none
  // of them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}
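
// Illustrative only, with assumed values: for file_size_ == 0x10000, a phdr
// table at offset 0x40 with size 0x200 passes every check (offset > 0, no
// overflow, the whole range lies inside the file, and 0x40 satisfies the
// Phdr alignment), while an offset whose offset + size would overflow is
// rejected because safe_add reports the overflow instead of wrapping around.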

// Loads the program header table from an ELF file into a read-only private
// mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks.
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers).
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size match the PT_DYNAMIC phdr.
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
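
// Illustrative only, using the two segments from the technical note above:
// min_vaddr == PAGE_START(0x30000) == 0x30000 and
// max_vaddr == PAGE_END(0x48000) == 0x48000, so the loader must reserve
// 0x18000 bytes of address space for that library, including the unused
// hole between the two segments.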

// Reserve a virtual address range such that if its limits were extended to
// the next 2**align boundary, it would not overlap with any existing
// mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t align, void** out_gap_start,
                                         size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (align == PAGE_SIZE) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match the second level
  // page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible
    // huge pages before it to improve address randomization and make it harder
    // to locate this library code by probing.
    munmap(mmap_ptr, mmap_size);
    align = std::max(align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, align) + align - PAGE_SIZE;
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(gap_start, align) - size;

  // arc4random* is not available in first stage init because /dev/urandom
  // hasn't yet been created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
  // to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}
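
// A sketch of the padding math above, with assumed values: reserving
// size == 0x5000 with align == 0x40000 maps
// mmap_size == align_up(0x5000, 0x40000) + 0x40000 - PAGE_SIZE bytes of
// PROT_NONE memory. Because the reservation exceeds the requested size by
// almost a full alignment unit, at least one 0x40000-aligned start address
// exists whose [start, start + size) range fits entirely inside it; the
// slack on either side of the chosen start is then unmapped.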

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, &gap_start_, &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}
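
// Worked example with hypothetical numbers: a writable PT_LOAD with
// p_vaddr == 0x30742, p_offset == 0x742, p_filesz == 0x1132 and
// p_memsz == 0x3132 gets a file-backed mapping starting at
// PAGE_START(0x30742) == 0x30000 (plus load_bias). Its file content ends at
// 0x31874, so the tail of that page (0x31874..0x32000) is zeroed by the
// memset above, and the remaining pages up to PAGE_END(0x33874) == 0x34000
// come from the anonymous ".bss" mapping.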

/* Used internally by phdr_table_protect_segments and
 * phdr_table_unprotect_segments to set the protection bits of all loaded
 * segments, with optional extra flags (i.e. really PROT_WRITE).
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
    if ((prot & PROT_WRITE) != 0) {
      // Make sure we're never simultaneously writable / executable.
      prot &= ~PROT_EXEC;
    }
#if defined(__aarch64__)
    if ((prot & PROT_EXEC) == 0) {
      // Though it is not specified, don't add PROT_BTI if the segment is not
      // executable.
      prot &= ~PROT_BTI;
    }
#endif

    int ret =
        mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * AArch64: also called from linker_main and ElfReader::Load to apply
 * PROT_BTI for the loaded main so and other so-s.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   prop        -> GnuPropertySection or nullptr
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
  int prot = 0;
#if defined(__aarch64__)
  if ((prop != nullptr) && prop->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
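
// Sketch of the intended call sequence during relocation (not a literal
// call site from this file): make the read-only segments temporarily
// writable, apply relocations, then restore the original protections:
//
//   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
//   // ... apply relocations to the now-writable segments ...
//   phdr_table_protect_segments(phdr_table, phdr_count, load_bias, nullptr);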

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd,
                             size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
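
// Sketch of how the two relro-sharing halves above fit together
// (hypothetical call sites): after relocating, one process writes its relro
// pages out with phdr_table_serialize_gnu_relro(..., fd, &offset); a later
// process that loaded the same library at the same address calls
// phdr_table_map_gnu_relro(..., fd, &offset) to replace its dirty,
// page-identical relro pages with shared, file-backed ones.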

#if defined(__arm__)

# ifndef PT_ARM_EXIDX
#  define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
# endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Tries to find the .note.gnu.property section.
// It is not considered an error if such a section is missing.
bool ElfReader::FindGnuPropertySection() {
#if defined(__aarch64__)
  note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
#endif
  return true;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}