/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_asm_note.h"
#include "private/CFIShadow.h" // For kLibraryAlignment
#include "private/elf_note.h"

#include <android-base/file.h>

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__riscv)
  return EM_RISCV;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

      p_offset  -> segment file offset
      p_filesz  -> segment file size
      p_memsz   -> segment memory size (always >= p_filesz)
      p_vaddr   -> segment's virtual address
      p_flags   -> segment flags (e.g. readable, writable, executable)
      p_align   -> segment's in-memory and in-file alignment

  We will ignore the p_paddr field of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then will load all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address, and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wraparound past UINT32_MAX for large p_vaddr values.)

  Note also that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + page_offset(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

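// Illustrative worked example of the bias computation above (not used by the
// loader; values are hypothetical and assume a 4KiB page size): suppose
// phdr0->p_vaddr == 0x30123 and the loader picks
// phdr0_load_address == 0xa0030000. Then:
//
//   load_bias = 0xa0030000 - page_start(0x30123)
//             = 0xa0030000 - 0x30000
//             = 0xa0000000
//
// and the segment's first content byte ends up at
// phdr0_load_address + page_offset(0x30123) == 0xa0030123, which is also
// load_bias + p_vaddr.
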
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

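// For example (illustrative only): a read-only executable segment with
// p_flags == (PF_R | PF_X) maps to PROT_READ | PROT_EXEC, while a read-write
// data segment with p_flags == (PF_R | PF_W) maps to PROT_READ | PROT_WRITE.
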
static const size_t kPageSize = page_size();

/*
 * Generic PMD size calculation:
 *  - Each page table (PT) is of size 1 page.
 *  - Each page table entry (PTE) is of size 64 bits.
 *  - Each PTE locates one physical page frame (PFN) of size 1 page.
 *  - A PMD entry locates 1 page table (PT)
 *
 *   PMD size = Num entries in a PT * page_size
 */
static const size_t kPmdSize = (kPageSize / sizeof(uint64_t)) * kPageSize;

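// Illustrative arithmetic for the formula above (hypothetical page sizes):
// with 4KiB pages a PT holds 4096 / 8 == 512 PTEs, so kPmdSize ==
// 512 * 4096 == 2MiB; with 16KiB pages it is 2048 * 16384 == 32MiB.
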
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection() &&
      ReadPadSegmentNote()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  bool reserveSuccess = ReserveAddressSpace(address_space);
  if (reserveSuccess && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
                                               should_pad_segments_, &note_gnu_property_) == 0);
    }
#endif
  }
  if (reserveSuccess && !did_load_) {
    if (load_start_ != nullptr && load_size_ != 0) {
      if (!mapped_by_caller_) {
        munmap(load_start_, load_size_);
      }
    }
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_RISCV) return "EM_RISCV";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none
  // of them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

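// Illustrative example (hypothetical values): an ELF claiming
// header_.e_phoff == UINT64_MAX - 8 with a computed table size of 64 bytes
// would wrap around in a naive `offset + size <= file_size_` check;
// safe_add() reports the overflow instead, so CheckFileRange() rejects the
// file rather than mapping an out-of-range region.
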
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %m", name_.c_str());
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %m", name_.c_str());
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %m", name_.c_str());
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %m", name_.c_str());
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = page_start(min_vaddr);
  max_vaddr = page_end(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

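// Illustrative worked example (assuming 4KiB pages), reusing the two
// segments from the technical note above -- [vaddr:0x30000, memsz:0x4000]
// and [vaddr:0x40000, memsz:0x8000]:
//
//   min_vaddr = page_start(0x30000) = 0x30000
//   max_vaddr = page_end(0x48000)   = 0x48000
//
// so the returned extent is 0x18000 bytes, which includes the unused gap
// between the two segments.
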
// Returns the maximum p_align associated with a loadable segment in the ELF
// program header table. Used to determine whether the file should be loaded at
// a specific virtual address alignment for use with huge pages.
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t maximum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    maximum_alignment = std::max(maximum_alignment, static_cast<size_t>(phdr->p_align));
  }

#if defined(__LP64__)
  return maximum_alignment;
#else
  return page_size();
#endif
}

// Returns the minimum p_align associated with a loadable segment in the ELF
// program header table. Used to determine if the program alignment is compatible
// with the page size of this system.
size_t phdr_table_get_minimum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t minimum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    if (phdr->p_align <= 1) {
      continue;
    }

    minimum_alignment = std::min(minimum_alignment, static_cast<size_t>(phdr->p_align));
  }

  return minimum_alignment;
}

// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
                                         void** out_gap_start, size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Reserve enough space to properly align the library's start address.
  mapping_align = std::max(mapping_align, start_align);
  if (mapping_align == page_size()) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match the second level
  // page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible huge pages before it
    // to improve address randomization and make it harder to locate this library code by probing.
    munmap(mmap_ptr, mmap_size);
    mapping_align = std::max(mapping_align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, mapping_align);
  uint8_t* last = align_down(gap_start, mapping_align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
  uint8_t* start = first + n * start_align;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
  // to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    size_t start_alignment = page_size();
    if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
      size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
      // Limit alignment to PMD size as other alignments reduce the number of
      // bits available for ASLR for no benefit.
      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : page_size();
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                        &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

/*
 * Returns true if the kernel supports page size migration, else false.
 */
bool page_size_migration_supported() {
  static bool pgsize_migration_enabled = []() {
    std::string enabled;
    if (!android::base::ReadFileToString("/sys/kernel/mm/pgsize_migration/enabled", &enabled)) {
      return false;
    }
    return enabled.find("1") != std::string::npos;
  }();
  return pgsize_migration_enabled;
}

// Find the ELF note of type NT_ANDROID_TYPE_PAD_SEGMENT and check that the desc value is 1.
bool ElfReader::ReadPadSegmentNote() {
  if (!page_size_migration_supported()) {
    // Don't attempt to read the note, since segment extension isn't
    // supported; but return true so that loading can continue normally.
    return true;
  }

  // The ELF can have multiple PT_NOTEs; check them all.
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_NOTE) {
      continue;
    }

    // Some obfuscated ELFs may contain "empty" PT_NOTE program headers that don't
    // point to any part of the ELF (p_memsz == 0). Skip these since there is
    // nothing to decode. See: b/324468126
    if (phdr->p_memsz == 0) {
      continue;
    }

    // If the PT_NOTE extends beyond the file, the ELF is doing something
    // strange -- obfuscation, embedding hidden loaders, ...
    //
    // It doesn't contain the pad_segment note. Skip it to avoid SIGBUS
    // from accesses beyond the file.
    off64_t note_end_off = file_offset_ + phdr->p_offset + phdr->p_filesz;
    if (note_end_off > file_size_) {
      continue;
    }

    // note_fragment is scoped to within the loop so that there is
    // at most 1 PT_NOTE mapped at any time during this search.
    MappedFileFragment note_fragment;
    if (!note_fragment.Map(fd_, file_offset_, phdr->p_offset, phdr->p_memsz)) {
      DL_ERR("\"%s\": PT_NOTE mmap(nullptr, %p, PROT_READ, MAP_PRIVATE, %d, %p) failed: %m",
             name_.c_str(), reinterpret_cast<void*>(phdr->p_memsz), fd_,
             reinterpret_cast<void*>(page_start(file_offset_ + phdr->p_offset)));
      return false;
    }

    const ElfW(Nhdr)* note_hdr = nullptr;
    const char* note_desc = nullptr;
    if (!__get_elf_note(NT_ANDROID_TYPE_PAD_SEGMENT, "Android",
                        reinterpret_cast<ElfW(Addr)>(note_fragment.data()),
                        phdr, &note_hdr, &note_desc)) {
      continue;
    }

    if (note_hdr->n_descsz != sizeof(ElfW(Word))) {
      DL_ERR("\"%s\" NT_ANDROID_TYPE_PAD_SEGMENT note has unexpected n_descsz: %u",
             name_.c_str(), reinterpret_cast<unsigned int>(note_hdr->n_descsz));
      return false;
    }

    // 1 == enabled, 0 == disabled
    should_pad_segments_ = *reinterpret_cast<const ElfW(Word)*>(note_desc) == 1;
    return true;
  }

  return true;
}

static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            size_t phdr_idx, ElfW(Addr)* p_memsz,
                                            ElfW(Addr)* p_filesz, bool should_pad_segments) {
  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
  const ElfW(Phdr)* next = nullptr;
  size_t next_idx = phdr_idx + 1;

  // Don't do segment extension for p_align > 64KiB, such ELFs already existed in the
  // field e.g. 2MiB p_align for THPs and are relatively small in number.
  //
  // The kernel can only represent padding for p_align up to 64KiB. This is because
  // the kernel uses 4 available bits in the vm_area_struct to represent padding
  // extent; and so cannot enable mitigations to avoid breaking app compatibility for
  // p_aligns > 64KiB.
  //
  // Don't perform segment extension on these to avoid app compatibility issues.
  if (phdr->p_align <= kPageSize || phdr->p_align > 64*1024 || !should_pad_segments) {
    return;
  }

  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
    next = &phdr_table[next_idx];
  }

  // If this is the last LOAD segment, no extension is needed
  if (!next || *p_memsz != *p_filesz) {
    return;
  }

  ElfW(Addr) next_start = page_start(next->p_vaddr);
  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);

  // If adjacent segment mappings overlap, no extension is needed.
  if (curr_end >= next_start) {
    return;
  }

  // Extend the LOAD segment mapping to be contiguous with that of
  // the next LOAD segment.
  ElfW(Addr) extend = next_start - curr_end;
  *p_memsz += extend;
  *p_filesz += extend;
}

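// Illustrative worked example (hypothetical values, 4KiB system pages,
// should_pad_segments == true): a LOAD segment with p_align == 0x4000
// (16KiB), p_vaddr == 0, and p_memsz == p_filesz == 0x5123, followed by a
// LOAD segment at p_vaddr == 0x10000. Then curr_end == page_end(0x5123) ==
// 0x6000 and next_start == page_start(0x10000) == 0x10000, so p_memsz and
// p_filesz are both extended by 0xa000 bytes and the two mappings become
// contiguous, avoiding a gap VMA between them.
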
bool ElfReader::MapSegment(size_t seg_idx, size_t len) {
  const ElfW(Phdr)* phdr = &phdr_table_[seg_idx];

  void* start = reinterpret_cast<void*>(page_start(phdr->p_vaddr + load_bias_));

  // The ELF could be loaded directly from a zipped APK;
  // the zip offset must be added to find the segment offset.
  const ElfW(Addr) offset = file_offset_ + page_start(phdr->p_offset);

  int prot = PFLAGS_TO_PROT(phdr->p_flags);

  void* seg_addr = mmap64(start, len, prot, MAP_FIXED | MAP_PRIVATE, fd_, offset);

  if (seg_addr == MAP_FAILED) {
    DL_ERR("couldn't map \"%s\" segment %zd: %m", name_.c_str(), seg_idx);
    return false;
  }

  // Mark segments as huge page eligible if they meet the requirements
  if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
      get_transparent_hugepages_supported()) {
    madvise(seg_addr, len, MADV_HUGEPAGE);
  }

  return true;
}

void ElfReader::ZeroFillSegment(const ElfW(Phdr)* phdr) {
  ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
  uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;

  // If the segment is writable, and does not end on a page boundary,
  // zero-fill it until the page limit.
  //
  // Do not attempt to zero the extended region past the first partial page,
  // since doing so may:
  // 1) Result in a SIGBUS, as the region is not backed by the underlying
  //    file.
  // 2) Break the COW backing, faulting in new anon pages for a region
  //    that will not be used.
  if ((phdr->p_flags & PF_W) != 0 && page_offset(unextended_seg_file_end) > 0) {
    memset(reinterpret_cast<void*>(unextended_seg_file_end), 0,
           kPageSize - page_offset(unextended_seg_file_end));
  }
}

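// Illustrative example (hypothetical values, 4KiB pages): for a writable
// segment whose file content ends at 0xa0035123, ZeroFillSegment() zeroes
// the 0xedd bytes from 0xa0035123 up to the page boundary at 0xa0036000,
// matching the ELF rule that the [p_filesz, p_memsz) bytes must read as zero.
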
bool ElfReader::LoadSegments() {
  size_t min_palign = phdr_table_get_minimum_alignment(phdr_table_, phdr_num_);
  // Only enforce this on 16 KB systems. Apps may rely on undefined behavior
  // here on 4 KB systems, which is the norm before this change is introduced.
  if (kPageSize >= 16384 && min_palign < kPageSize) {
    DL_ERR("\"%s\" program alignment (%zu) cannot be smaller than system page size (%zu)",
           name_.c_str(), min_palign, kPageSize);
    return false;
  }

  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + p_memsz;

    ElfW(Addr) seg_page_start = page_start(seg_start);
    ElfW(Addr) seg_page_end = page_end(seg_end);

    ElfW(Addr) seg_file_end = seg_start + p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + p_filesz;

    ElfW(Addr) file_page_start = page_start(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      // Pass the file_length, since it may have been extended by _extend_load_segment_vma().
      if (!MapSegment(i, file_length)) {
        return false;
      }
    }

    ZeroFillSegment(phdr);

    uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
    // Pages may be brought in due to readahead.
    // Drop the padding (zero) pages, to avoid reclaim work later.
    //
    // NOTE: The madvise() here is special, as it also serves to hint to the
    // kernel the portion of the LOAD segment that is padding.
    //
    // See: [1] https://android-review.googlesource.com/c/kernel/common/+/3032411
    //      [2] https://android-review.googlesource.com/c/kernel/common/+/3048835
    uint64_t pad_start = page_end(unextended_seg_file_end);
    uint64_t pad_end = page_end(seg_file_end);
    CHECK(pad_start <= pad_end);
    uint64_t pad_len = pad_end - pad_start;
    if (page_size_migration_supported() && pad_len > 0 &&
        madvise(reinterpret_cast<void*>(pad_start), pad_len, MADV_DONTNEED)) {
      DL_WARN("\"%s\": madvise(0x%" PRIx64 ", 0x%" PRIx64 ", MADV_DONTNEED) failed: %m",
              name_.c_str(), pad_start, pad_len);
    }

    seg_file_end = page_end(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %m", name_.c_str());
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally. Used to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags,
                                     bool should_pad_segments) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);

    int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
    if ((prot & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }
#if defined(__aarch64__)
    if ((prot & PROT_EXEC) == 0) {
      // Though it is not specified don't add PROT_BTI if segment is not
      // executable.
      prot &= ~PROT_BTI;
    }
#endif

    int ret =
        mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * AArch64: also called from linker_main and ElfReader::Load to apply
 * PROT_BTI for the loaded main executable and other shared objects.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
 *   prop        -> GnuPropertySection or nullptr
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, bool should_pad_segments,
                                const GnuPropertySection* prop __unused) {
  int prot = 0;
#if defined(__aarch64__)
  if ((prop != nullptr) && prop->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
}

1089/* Change the protection of all loaded segments in memory to writable.
1090 * This is useful before performing relocations. Once completed, you
1091 * will have to call phdr_table_protect_segments to restore the original
1092 * protection flags on all segments.
1093 *
1094 * Note that some writable segments can also have their content turned
1095 * to read-only by calling phdr_table_protect_gnu_relro. This is no
1096 * performed here.
1097 *
1098 * Input:
1099 * phdr_table -> program header table
1100 * phdr_count -> number of entries in tables
1101 * load_bias -> load bias
Kalesh Singh4084b552024-03-13 13:35:49 -07001102 * should_pad_segments -> Are segments extended to avoid gaps in the memory map
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001103 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001104 * 0 on success, -1 on failure (error code in errno).
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001105 */
1106int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
Kalesh Singh4084b552024-03-13 13:35:49 -07001107 size_t phdr_count, ElfW(Addr) load_bias,
1108 bool should_pad_segments) {
1109 return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
1110 should_pad_segments);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001111}
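
// Illustrative call sequence (a sketch of a typical call site, not a
// verbatim quote of the linker's relocation path): text relocations need
// the read-only segments to be briefly writable.
//
//   if (phdr_table_unprotect_segments(phdr, count, load_bias, pad) < 0) {
//     // errno holds the mprotect() error
//   }
//   // ... apply relocations that patch normally read-only pages ...
//   if (phdr_table_protect_segments(phdr, count, load_bias, pad,
//                                   /*prop=*/nullptr) < 0) {
//     // nullptr simply skips the optional PROT_BTI re-application
//   }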

static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
                                              bool should_pad_segments) {
  // Find the index and phdr of the LOAD containing the GNU_RELRO segment.
  for (size_t index = 0; index < phdr_count; ++index) {
    const ElfW(Phdr)* phdr = &phdr_table[index];

    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
      // LOAD segment mem size, we need to protect only a partial region of the
      // LOAD segment and therefore cannot avoid a VMA split.
      //
      // Note: Don't check the page-aligned mem sizes since the extended protection
      // may incorrectly write protect non-relocation data.
      //
      // Example:
      //
      //           |---- 3K ----|-- 1K --|---- 3K ----|-- 1K --|
      //  ----------------------------------------------------------------
      //           |            |        |            |        |
      //    SEG X  |     RO     |   RO   |     RW     |        |  SEG Y
      //           |            |        |            |        |
      //  ----------------------------------------------------------------
      //           |                     |            |
      //           |                     |            |
      //           |                     |            |
      //      relro_vaddr           relro_vaddr  relro_vaddr
      //      (load_vaddr)               +            +
      //                            relro_memsz   load_memsz
      //
      //  ----------------------------------------------------------------
      //           |         PAGE        |         PAGE        |
      //  ----------------------------------------------------------------
      //                                 |      Potential      |
      //                                 |---- Extended RO ----|
      //                                 |      Protection     |
      //
      // If the check below uses page-aligned mem sizes it will cause incorrect
      // write protection of the 3K RW part of the LOAD segment containing the
      // GNU_RELRO.
      if (relro_phdr->p_memsz < phdr->p_memsz) {
        return;
      }

      ElfW(Addr) p_memsz = phdr->p_memsz;
      ElfW(Addr) p_filesz = phdr->p_filesz;

      // Attempt extending the VMA (mprotect range). Without extending the range,
      // mprotect will only RO protect a part of the extended RW LOAD segment, which
      // will leave an extra split RW VMA (the gap).
      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
                               should_pad_segments);

      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
      return;
    }
  }
}
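
// Worked example for the "don't use page-aligned sizes" note above
// (illustrative numbers, 4 KiB pages): suppose relro_phdr->p_memsz is 6.5K
// and phdr->p_memsz is 7K. The exact check 6.5K < 7K returns early and the
// RW tail stays writable. A page-aligned check would compare
// page_end(6.5K) == 8K against page_end(7K) == 8K, wrongly conclude the
// RELRO covers the whole LOAD segment, and extend read-only protection over
// the last 0.5K of data that must remain writable.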
1171
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001172/* Used internally by phdr_table_protect_gnu_relro and
1173 * phdr_table_unprotect_gnu_relro.
1174 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001175static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Kalesh Singh702d9b02024-03-13 13:38:04 -07001176 ElfW(Addr) load_bias, int prot_flags,
1177 bool should_pad_segments) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001178 const ElfW(Phdr)* phdr = phdr_table;
1179 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001180
Elliott Hughes0266ae52014-02-10 17:46:57 -08001181 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1182 if (phdr->p_type != PT_GNU_RELRO) {
1183 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001184 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001185
1186 // Tricky: what happens when the relro segment does not start
1187 // or end at page boundaries? We're going to be over-protective
1188 // here and put every page touched by the segment as read-only.
1189
1190 // This seems to match Ian Lance Taylor's description of the
1191 // feature at http://www.airs.com/blog/archives/189.
1192
1193 // Extract:
1194 // Note that the current dynamic linker code will only work
1195 // correctly if the PT_GNU_RELRO segment starts on a page
1196 // boundary. This is because the dynamic linker rounds the
1197 // p_vaddr field down to the previous page boundary. If
1198 // there is anything on the page which should not be read-only,
1199 // the program is likely to fail at runtime. So in effect the
1200 // linker must only emit a PT_GNU_RELRO segment if it ensures
1201 // that it starts on a page boundary.
Zheng Pan9535c322024-02-14 00:04:10 +00001202 ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1203 ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
Kalesh Singh702d9b02024-03-13 13:38:04 -07001204 _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
1205 should_pad_segments);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001206
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001207 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
Elliott Hughes0266ae52014-02-10 17:46:57 -08001208 seg_page_end - seg_page_start,
1209 prot_flags);
1210 if (ret < 0) {
1211 return -1;
1212 }
1213 }
1214 return 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001215}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table          -> program header table
 *   phdr_count          -> number of entries in table
 *   load_bias           -> load bias
 *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                 ElfW(Addr) load_bias, bool should_pad_segments) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
                                        should_pad_segments);
}
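
// Where this fits in the load sequence (an illustrative ordering that
// follows the comments above, not a verbatim call site):
//
//   phdr_table_unprotect_segments(phdr, count, load_bias, pad);     // make RW
//   // ... perform relocations, including those into .got/.data.rel.ro ...
//   phdr_table_protect_segments(phdr, count, load_bias, pad, prop); // restore
//   phdr_table_protect_gnu_relro(phdr, count, load_bias, pad);      // lock RELRO
//
// After the last call the relro pages stay read-only for the lifetime of
// the process.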

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}
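
// Producer-side usage sketch (illustrative only; assumes <fcntl.h> for
// open(), and a caller-chosen path - this is not the actual WebView/zygote
// implementation). After relocating, the RELRO pages are dumped to a file
// that a second process loading the library at the same address can share.
static bool write_relro_file(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias, const char* path) {
  int fd = TEMP_FAILURE_RETRY(open(path, O_RDWR | O_CREAT | O_TRUNC, 0600));
  if (fd == -1) {
    return false;
  }
  size_t file_offset = 0;
  bool ok = phdr_table_serialize_gnu_relro(phdr_table, phdr_count, load_bias,
                                           fd, &file_offset) == 0;
  // Closing the fd is safe: the segments were remapped MAP_PRIVATE from the
  // file above, and such mappings survive close().
  close(fd);
  return ok;
}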

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd,
                             size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
        match_offset += page_size();
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
        mismatch_offset += page_size();
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
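
// Consumer-side usage sketch (illustrative counterpart to the producer
// sketch after phdr_table_serialize_gnu_relro): a process that mapped the
// same library at the same load bias replaces its dirty RELRO pages with
// clean file-backed ones. The page-by-page memcmp above guarantees only
// identical pages are remapped, so behavior is unchanged even when some
// pages differ.
static bool use_shared_relro_file(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                  ElfW(Addr) load_bias, int relro_fd) {
  size_t file_offset = 0;
  return phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias,
                                  relro_fd, &file_offset) == 0;
}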

#if defined(__arm__)
/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in table
 *   load_bias  -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;  // each exidx entry is two 32-bit words (8 bytes)
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in table
 *   load_bias  -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}
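
// Usage sketch (illustrative): callers walk the returned table until its
// DT_NULL terminator. For example, looking up DT_SONAME, given the string
// table found via DT_STRTAB (the strtab parameter is an assumption of this
// sketch, not something this file provides):
static const char* find_soname(const ElfW(Dyn)* dynamic, const char* strtab) {
  for (const ElfW(Dyn)* d = dynamic; d != nullptr && d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_SONAME) {
      return strtab + d->d_un.d_val;  // d_val is an offset into strtab
    }
  }
  return nullptr;  // no DT_SONAME entry present
}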

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in table
 *   load_bias  -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}
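
// Worked example (illustrative): for a typical ELF64 library whose first
// PT_LOAD has p_offset == 0 and p_vaddr == 0, the ELF header is mapped at
// load_bias_, and with ehdr->e_phoff == sizeof(ElfW(Ehdr)) == 64 the loaded
// program header table lives at load_bias_ + 64. CheckPhdr() then verifies
// that [loaded, loaded + phdr_num_ * sizeof(ElfW(Phdr))) falls entirely
// inside a PT_LOAD's file-backed extent.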

// Tries to find the .note.gnu.property section.
// It is not considered an error if such a section is missing.
bool ElfReader::FindGnuPropertySection() {
#if defined(__aarch64__)
  note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
#endif
  return true;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}