/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_asm_note.h"
#include "private/CFIShadow.h" // For kLibraryAlignment
#include "private/elf_note.h"

#include <android-base/file.h>

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__riscv)
  return EM_RISCV;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)
    p_align   -> segment's in-memory and in-file alignment

  We will ignore the p_paddr field of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then will load all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address, and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap around UINT32_MAX for possible large p_vaddr values).

  And that the phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + page_offset(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
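// A small worked example of the formulas above (illustrative numbers,
// assuming a 4KiB page size): if phdr0->p_vaddr == 0x30200 and the loader
// picks 0xa0030000 as phdr0_load_address, then
//
//      load_bias = 0xa0030000 - page_start(0x30200)
//                = 0xa0030000 - 0x30000
//                = 0xa0000000
//
// and the segment's first real byte ends up at
// 0xa0030000 + page_offset(0x30200) == 0xa0030200.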

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
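// For example (illustrative): PFLAGS_TO_PROT(PF_R | PF_X) evaluates to
// PROT_EXEC | PROT_READ | 0, i.e. a readable, executable, non-writable
// mapping.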

static const size_t kPageSize = page_size();

/*
 * Generic PMD size calculation:
 *   - Each page table (PT) is of size 1 page.
 *   - Each page table entry (PTE) is of size 64 bits.
 *   - Each PTE locates one physical page frame (PFN) of size 1 page.
 *   - A PMD entry locates 1 page table (PT).
 *
 *   PMD size = Num entries in a PT * page_size
 */
static const size_t kPmdSize = (kPageSize / sizeof(uint64_t)) * kPageSize;
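// Illustrative values: with 4KiB pages, kPmdSize is (4096 / 8) * 4096 = 2MiB;
// with 16KiB pages it is (16384 / 8) * 16384 = 32MiB.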

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection() &&
      ReadPadSegmentNote()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  bool reserveSuccess = ReserveAddressSpace(address_space);
  if (reserveSuccess && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A, loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
                                               should_pad_segments_, &note_gnu_property_) == 0);
    }
#endif
  }
  if (reserveSuccess && !did_load_) {
    if (load_start_ != nullptr && load_size_ != 0) {
      if (!mapped_by_caller_) {
        munmap(load_start_, load_size_);
      }
    }
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  size_t map_size = file_size_ - file_offset_;
  if (map_size < sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           map_size);
    return false;
  }

#if !defined(__LP64__)
  // Map at most 1MiB, which should cover most cases.
  map_size = std::min(map_size, static_cast<size_t>(1 * 1024 * 1024));
#endif

  if (!file_fragment_.Map(fd_, file_offset_, 0, map_size)) {
    DL_ERR("\"%s\" header mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  header_ = *static_cast<ElfW(Ehdr)*>(file_fragment_.data());
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_RISCV) return "EM_RISCV";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables --
  // none of which can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

void* ElfReader::MapData(MappedFileFragment* fragment, off64_t offs, off64_t size) {
  off64_t end;
  CHECK(safe_add(&end, offs, size));

  // If the data is already mapped, just return it.
  if (static_cast<off64_t>(file_fragment_.size()) >= end) {
    return static_cast<char*>(file_fragment_.data()) + offs;
  }
  // Use the passed-in fragment if the area is not mapped. We can't remap the
  // original fragment because that invalidates all previous pointers if the
  // file is remapped to a different virtual address. A local variable can't
  // be used in place of the passed-in fragment because the area would be
  // unmapped as soon as the local object goes out of scope.
  if (fragment->Map(fd_, file_offset_, offs, size)) {
    return fragment->data();
  }
  return nullptr;
}

// Loads the program header table from an ELF file into a read-only private
// mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks.
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  void* phdr_data = MapData(&phdr_fragment_, header_.e_phoff, size);
  if (phdr_data == nullptr) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_data);
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  void* shdr_data = MapData(&shdr_fragment_, header_.e_shoff, size);
  if (shdr_data == nullptr) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_data);
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find the .dynamic section (in the section headers).
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure the dynamic_shdr offset and size match the PT_DYNAMIC phdr.
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  void* dynamic_data = MapData(&dynamic_fragment_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size);
  if (dynamic_data == nullptr) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_data);

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  void* strtab_data = MapData(&strtab_fragment_, strtab_shdr->sh_offset, strtab_shdr->sh_size);
  if (strtab_data == nullptr) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_data);
  strtab_size_ = strtab_shdr->sh_size;
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = page_start(min_vaddr);
  max_vaddr = page_end(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
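// Applied to the two-segment example in the technical note above (assuming
// 4KiB or 16KiB pages), phdr_table_get_load_size computes min_vaddr ==
// page_start(0x30000) == 0x30000 and max_vaddr == page_end(0x48000) ==
// 0x48000, so the returned reservation size is 0x18000 bytes.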

// Returns the maximum p_align associated with a loadable segment in the ELF
// program header table. Used to determine whether the file should be loaded at
// a specific virtual address alignment for use with huge pages.
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t maximum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    if (phdr->p_align > maximum_alignment) {
      maximum_alignment = phdr->p_align;
    }
  }

#if defined(__LP64__)
  return maximum_alignment;
#else
  return page_size();
#endif
}
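// For example (illustrative): given LOAD segments with p_align values of
// 0x1000 and 0x200000, phdr_table_get_maximum_alignment returns 0x200000 on
// LP64 targets but page_size() on 32-bit targets, where huge-page alignment
// is not used.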

// Reserve a virtual address range such that if its limits were extended to
// the next 2**align boundary, it would not overlap with any existing mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
                                         void** out_gap_start, size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Reserve enough space to properly align the library's start address.
  mapping_align = std::max(mapping_align, start_align);
  if (mapping_align == page_size()) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match the second level
  // page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MiB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MiB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible
    // huge pages before it to improve address randomization and make it harder
    // to locate this library code by probing.
    munmap(mmap_ptr, mmap_size);
    mapping_align = std::max(mapping_align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, mapping_align);
  uint8_t* last = align_down(gap_start, mapping_align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
  uint8_t* start = first + n * start_align;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
  // to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}
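// A sketch of the reservation arithmetic in ReserveWithAlignmentPadding
// (illustrative, assuming 4KiB pages): reserving
// align_up(size, mapping_align) + mapping_align - 4KiB bytes guarantees that
// a mapping_align-aligned start address with `size` bytes of room exists
// somewhere inside the reservation; one of the start_align-spaced candidate
// start addresses is then chosen at random and the slack around it unmapped.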

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    size_t start_alignment = page_size();
    if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
      size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
      // Limit alignment to PMD size as other alignments reduce the number of
      // bits available for ASLR for no benefit.
      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : page_size();
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                        &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

/*
 * Returns true if the kernel supports page size migration, else false.
 */
bool page_size_migration_supported() {
  static bool pgsize_migration_enabled = []() {
    std::string enabled;
    if (!android::base::ReadFileToString("/sys/kernel/mm/pgsize_migration/enabled", &enabled)) {
      return false;
    }
    return enabled.find("1") != std::string::npos;
  }();
  return pgsize_migration_enabled;
}

// Find the ELF note of type NT_ANDROID_TYPE_PAD_SEGMENT and check that the desc value is 1.
bool ElfReader::ReadPadSegmentNote() {
  if (!page_size_migration_supported()) {
    // Don't attempt to read the note, since segment extension isn't
    // supported; but return true so that loading can continue normally.
    return true;
  }

  // The ELF can have multiple PT_NOTE's; check them all.
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_NOTE) {
      continue;
    }

    // Some obfuscated ELFs may contain "empty" PT_NOTE program headers that don't
    // point to any part of the ELF (p_memsz == 0). Skip these since there is
    // nothing to decode. See: b/324468126
    if (phdr->p_memsz == 0) {
      continue;
    }

    // If the PT_NOTE extends beyond the file, the ELF is doing something
    // strange -- obfuscation, embedding hidden loaders, ...
    //
    // In that case it doesn't contain the pad_segment note. Skip it to avoid
    // a SIGBUS from accesses beyond the file.
    off64_t note_end_off = file_offset_ + phdr->p_offset + phdr->p_filesz;
    if (note_end_off > file_size_) {
      continue;
    }

    // note_fragment is scoped to within the loop so that there is
    // at most one PT_NOTE mapped at any time during this search.
    MappedFileFragment note_fragment;
    void* note_data = MapData(&note_fragment, phdr->p_offset, phdr->p_memsz);
    if (note_data == nullptr) {
      DL_ERR("\"%s\": PT_NOTE mmap(nullptr, %p, PROT_READ, MAP_PRIVATE, %d, %p) failed: %m",
             name_.c_str(), reinterpret_cast<void*>(phdr->p_memsz), fd_,
             reinterpret_cast<void*>(page_start(file_offset_ + phdr->p_offset)));
      return false;
    }

    const ElfW(Nhdr)* note_hdr = nullptr;
    const char* note_desc = nullptr;
    if (!__get_elf_note(NT_ANDROID_TYPE_PAD_SEGMENT, "Android",
                        reinterpret_cast<ElfW(Addr)>(note_data),
                        phdr, &note_hdr, &note_desc)) {
      continue;
    }

    if (note_hdr->n_descsz != sizeof(ElfW(Word))) {
      DL_ERR("\"%s\" NT_ANDROID_TYPE_PAD_SEGMENT note has unexpected n_descsz: %u",
             name_.c_str(), reinterpret_cast<unsigned int>(note_hdr->n_descsz));
      return false;
    }

    // 1 == enabled, 0 == disabled
    should_pad_segments_ = *reinterpret_cast<const ElfW(Word)*>(note_desc) == 1;
    return true;
  }

  return true;
}
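// For reference, the note matched above looks roughly like this on disk
// (illustrative layout): n_namesz == 8, n_descsz == 4,
// n_type == NT_ANDROID_TYPE_PAD_SEGMENT, the owner string "Android\0", and
// a single ElfW(Word) desc that is 1 (padding enabled) or 0 (disabled).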

static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            size_t phdr_idx, ElfW(Addr)* p_memsz,
                                            ElfW(Addr)* p_filesz, bool should_pad_segments) {
  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
  const ElfW(Phdr)* next = nullptr;
  size_t next_idx = phdr_idx + 1;

  // Don't do segment extension for p_align > 64KiB: such ELFs already exist
  // in the field (e.g. 2MiB p_align for THPs) and are relatively small in
  // number.
  //
  // The kernel can only represent padding for p_align up to 64KiB. This is
  // because the kernel uses 4 available bits in the vm_area_struct to
  // represent padding extent, and so cannot enable mitigations to avoid
  // breaking app compatibility for p_aligns > 64KiB.
  //
  // Don't perform segment extension on these to avoid app compatibility issues.
  if (phdr->p_align <= kPageSize || phdr->p_align > 64*1024 || !should_pad_segments) {
    return;
  }

  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
    next = &phdr_table[next_idx];
  }

  // If this is the last LOAD segment, no extension is needed.
  if (!next || *p_memsz != *p_filesz) {
    return;
  }

  ElfW(Addr) next_start = page_start(next->p_vaddr);
  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);

  // If adjacent segment mappings overlap, no extension is needed.
  if (curr_end >= next_start) {
    return;
  }

  // Extend the LOAD segment mapping to be contiguous with that of
  // the next LOAD segment.
  ElfW(Addr) extend = next_start - curr_end;
  *p_memsz += extend;
  *p_filesz += extend;
}
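// Worked example for _extend_load_segment_vma (illustrative, assuming 4KiB
// runtime pages, 16KiB p_align, and padding enabled): if this segment's
// mapping ends at page_end(p_vaddr + p_memsz) == 0x5000 and the next LOAD
// segment starts at page_start(next->p_vaddr) == 0x8000, then p_memsz and
// p_filesz are each extended by 0x3000 so that the two mappings become
// contiguous and no gap VMA is left between them.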

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + p_memsz;

    ElfW(Addr) seg_page_start = page_start(seg_start);
    ElfW(Addr) seg_page_end = page_end(seg_end);

    ElfW(Addr) seg_file_end = seg_start + p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + p_filesz;

    ElfW(Addr) file_page_start = page_start(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }

      // Mark segments as huge page eligible if they meet the requirements
      // (executable and PMD aligned).
      if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
          get_transparent_hugepages_supported()) {
        madvise(seg_addr, file_length, MADV_HUGEPAGE);
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    //
    // Do not attempt to zero the extended region past the first partial page,
    // since doing so may:
    //   1) Result in a SIGBUS, as the region is not backed by the underlying
    //      file.
    //   2) Break the COW backing, faulting in new anon pages for a region
    //      that will not be used.

    uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
    if ((phdr->p_flags & PF_W) != 0 && page_offset(unextended_seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(unextended_seg_file_end), 0,
             kPageSize - page_offset(unextended_seg_file_end));
    }

    // Pages may be brought in due to readahead.
    // Drop the padding (zero) pages, to avoid reclaim work later.
    //
    // NOTE: The madvise() here is special, as it also serves to hint to the
    // kernel the portion of the LOAD segment that is padding.
    //
    // See: [1] https://android-review.googlesource.com/c/kernel/common/+/3032411
    //      [2] https://android-review.googlesource.com/c/kernel/common/+/3048835
    uint64_t pad_start = page_end(unextended_seg_file_end);
    uint64_t pad_end = page_end(seg_file_end);
    CHECK(pad_start <= pad_end);
    uint64_t pad_len = pad_end - pad_start;
    if (page_size_migration_supported() && pad_len > 0 &&
        madvise(reinterpret_cast<void*>(pad_start), pad_len, MADV_DONTNEED)) {
      DL_WARN("\"%s\": madvise(0x%" PRIx64 ", 0x%" PRIx64 ", MADV_DONTNEED) failed: %m",
              name_.c_str(), pad_start, pad_len);
    }

    seg_file_end = page_end(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags,
                                     bool should_pad_segments) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);

    int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
    if ((prot & PROT_WRITE) != 0) {
      // Make sure we're never simultaneously writable / executable.
      prot &= ~PROT_EXEC;
    }
#if defined(__aarch64__)
    if ((prot & PROT_EXEC) == 0) {
      // Though it is not specified, don't add PROT_BTI if the segment is not
      // executable.
      prot &= ~PROT_BTI;
    }
#endif

    int ret =
        mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * AArch64: also called from linker_main and ElfReader::Load to apply
 * PROT_BTI to the loaded main executable and other shared objects.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
 *   prop        -> GnuPropertySection or nullptr
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, bool should_pad_segments,
                                const GnuPropertySection* prop __unused) {
  int prot = 0;
#if defined(__aarch64__)
  if ((prop != nullptr) && prop->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * read-only by calling phdr_table_protect_gnu_relro. That is not
 * performed here.
 *
 * Input:
 * phdr_table -> program header table
 * phdr_count -> number of entries in table
 * load_bias -> load bias
 * should_pad_segments -> whether segments were extended to avoid gaps in the memory map
 * Return:
 * 0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias,
                                  bool should_pad_segments) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
                                   should_pad_segments);
}
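
// A minimal usage sketch, not linker code and never called: how the two
// helpers above are intended to bracket relocation processing. The function
// name and the surrounding flow are illustrative assumptions; only the
// phdr_table_* calls are the real APIs defined in this file.
__attribute__((unused)) static bool example_relocation_round_trip(
    const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
    bool should_pad_segments) {
  // Make the loadable segments writable so relocations can be applied.
  if (phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias,
                                    should_pad_segments) < 0) {
    return false;
  }
  // ... apply relocations here ...
  // Restore the original p_flags-based protections. No GnuPropertySection is
  // available in this sketch, so pass nullptr (PROT_BTI will not be added).
  return phdr_table_protect_segments(phdr_table, phdr_count, load_bias,
                                     should_pad_segments, nullptr) == 0;
}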

static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
                                              bool should_pad_segments) {
  // Find the index and phdr of the LOAD containing the GNU_RELRO segment.
  for (size_t index = 0; index < phdr_count; ++index) {
    const ElfW(Phdr)* phdr = &phdr_table[index];

    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
      // LOAD segment mem size, we need to protect only a partial region of the
      // LOAD segment and therefore cannot avoid a VMA split.
      //
      // Note: Don't check the page-aligned mem sizes since the extended protection
      // may incorrectly write-protect non-relocation data.
      //
      // Example:
      //
      //         |---- 3K ----|-- 1K --|---- 3K ----|-- 1K --|
      //  ------------------------------------------------------------
      //         |            |        |            |        |
      //  SEG X  |     RO     |   RO   |     RW     |        |  SEG Y
      //         |            |        |            |        |
      //  ------------------------------------------------------------
      //         |                     |            |
      //         |                     |            |
      //         |                     |            |
      //    relro_vaddr           relro_vaddr  relro_vaddr
      //   (load_vaddr)                +            +
      //                          relro_memsz   load_memsz
      //
      //  ------------------------------------------------------------
      //         |         PAGE        |         PAGE        |
      //  ------------------------------------------------------------
      //                               |      Potential      |
      //                               |----- Extended RO ---|
      //                               |      Protection     |
      //
      // If the check below uses page-aligned mem sizes it will cause incorrect
      // write protection of the 3K RW part of the LOAD segment containing the
      // GNU_RELRO.
      if (relro_phdr->p_memsz < phdr->p_memsz) {
        return;
      }

      ElfW(Addr) p_memsz = phdr->p_memsz;
      ElfW(Addr) p_filesz = phdr->p_filesz;

      // Attempt extending the VMA (mprotect range). Without extending the range,
      // mprotect will only RO protect a part of the extended RW LOAD segment, which
      // will leave an extra split RW VMA (the gap).
      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
                               should_pad_segments);

      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
      return;
    }
  }
}

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags,
                                          bool should_pad_segments) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and mark every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
                               should_pad_segments);

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}
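
// Worked example for the rounding above (illustrative only, assuming 4KiB
// pages and a page-aligned load_bias): a PT_GNU_RELRO with p_vaddr == 0x2100
// and p_memsz == 0x1000 yields
//   seg_page_start = page_start(0x2100) + load_bias          = 0x2000 + load_bias
//   seg_page_end   = page_end(0x2100 + 0x1000) + load_bias   = 0x4000 + load_bias
// so the non-relro bytes in [0x2000, 0x2100) and [0x3100, 0x4000) become
// read-only too. This is exactly the over-protection described in the comment
// above, and why the static linker should emit page-aligned relro segments.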

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's point of view, but contain absolute
 * addresses that need to be relocated before use.
 *
 * Input:
 * phdr_table -> program header table
 * phdr_count -> number of entries in table
 * load_bias -> load bias
 * should_pad_segments -> whether segments were extended to avoid gaps in the memory map
 * Return:
 * 0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                 ElfW(Addr) load_bias, bool should_pad_segments) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
                                        should_pad_segments);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 * phdr_table -> program header table
 * phdr_count -> number of entries in table
 * load_bias -> load bias
 * fd -> writable file descriptor to use
 * file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 * 0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    // Write the relro pages out to the file...
    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    // ...then remap them from the file, read-only, so the pages are backed by
    // the page cache and can be shared with any other process that maps the
    // same file contents at the same relocated address.
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}
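
// A writer-side usage sketch, not linker code: dump every relro region of a
// loaded library into a file that a cooperating process can later map. The
// function name, path parameter, and error handling are assumptions, and
// open() additionally requires <fcntl.h>; phdr_table_serialize_gnu_relro is
// the real helper above. The fd must be readable and writable, since the
// helper both write()s to it and mmap()s from it.
__attribute__((unused)) static bool example_write_relro_file(
    const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
    const char* path) {
  int fd = TEMP_FAILURE_RETRY(open(path, O_RDWR | O_CREAT | O_TRUNC, 0600));
  if (fd == -1) {
    return false;
  }
  size_t file_offset = 0;
  bool ok = phdr_table_serialize_gnu_relro(phdr_table, phdr_count, load_bias,
                                           fd, &file_offset) == 0;
  close(fd);
  return ok;
}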

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 * phdr_table -> program header table
 * phdr_count -> number of entries in table
 * load_bias -> load bias
 * fd -> readable file descriptor to use
 * file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 * 0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd,
                             size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
        match_offset += page_size();
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
        mismatch_offset += page_size();
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
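
// The matching consumer-side sketch, not linker code: map a file produced by
// the writer sketch above over this process's relro pages. Only pages whose
// contents already match are remapped; differing pages are left untouched by
// phdr_table_map_gnu_relro. The function name and error handling are
// assumptions, and open() requires <fcntl.h>.
__attribute__((unused)) static bool example_map_relro_file(
    const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
    const char* path) {
  int fd = TEMP_FAILURE_RETRY(open(path, O_RDONLY));
  if (fd == -1) {
    return false;
  }
  size_t file_offset = 0;
  bool ok = phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias, fd,
                                     &file_offset) == 0;
  close(fd);
  return ok;
}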


#if defined(__arm__)
/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 * phdr_table -> program header table
 * phdr_count -> number of entries in table
 * load_bias -> load bias
 * Output:
 * arm_exidx -> address of table in memory (null on failure).
 * arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 * 0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    // Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes.
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 * phdr_table -> program header table
 * phdr_count -> number of entries in table
 * load_bias -> load bias
 * Output:
 * dynamic -> address of table in memory (null on failure).
 * dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 * void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}
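
// A small consumer sketch, not linker code: walking the table returned by the
// helper above. The .dynamic array is terminated by a DT_NULL entry per the
// ELF specification, which is what makes this loop well-defined. The function
// name is an illustrative assumption.
__attribute__((unused)) static size_t example_count_dynamic_entries(
    const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  ElfW(Dyn)* dynamic = nullptr;
  phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias, &dynamic,
                                 nullptr);
  if (dynamic == nullptr) {
    return 0;
  }
  size_t count = 0;
  for (const ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
    ++count;
  }
  return count;
}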

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 * phdr_table -> program header table
 * phdr_count -> number of entries in table
 * load_bias -> load bias
 * Return:
 * pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Tries to find the .note.gnu.property section.
// It is not considered an error if such a section is missing.
bool ElfReader::FindGnuPropertySection() {
#if defined(__aarch64__)
  note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
#endif
  return true;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}