/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_asm_note.h"
#include "private/CFIShadow.h" // For kLibraryAlignment
#include "private/elf_note.h"

#include <android-base/file.h>

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__riscv)
  return EM_RISCV;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)
    p_align   -> segment's in-memory and in-file alignment

  We will ignore the p_paddr field of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap around UINT32_MAX for possible large p_vaddr values).

  Note also that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + page_offset(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

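// Applying the formula to the example above (an illustration, assuming 4KiB
// pages): loading the first segment so that its content starts at 0xa0030000
// gives
//
//     load_bias = 0xa0030000 - page_start(0x30000) = 0xa0000000
//
// and the second segment's content then lands at 0x40000 + load_bias ==
// 0xa0040000, preserving the original layout.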
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
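// For instance, a typical text segment with p_flags == (PF_R | PF_X) maps to
// PROT_READ | PROT_EXEC.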

static const size_t kPageSize = page_size();

/*
 * Generic PMD size calculation:
 *  - Each page table (PT) is of size 1 page.
 *  - Each page table entry (PTE) is of size 64 bits.
 *  - Each PTE locates one physical page frame (PFN) of size 1 page.
 *  - A PMD entry locates 1 page table (PT)
 *
 *   PMD size = Num entries in a PT * page_size
 */
static const size_t kPmdSize = (kPageSize / sizeof(uint64_t)) * kPageSize;
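// e.g. with 4KiB pages: (4096 / 8) * 4096 == 2MiB, the common PMD (huge page)
// size on arm64 and x86-64.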

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection() &&
      ReadPadSegmentNote()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  bool reserveSuccess = ReserveAddressSpace(address_space);
  if (reserveSuccess && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
                                               should_pad_segments_, &note_gnu_property_) == 0);
    }
#endif
  }
  if (reserveSuccess && !did_load_) {
    if (load_start_ != nullptr && load_size_ != 0) {
      if (!mapped_by_caller_) {
        munmap(load_start_, load_size_);
      }
    }
  }

  return did_load_;
}
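// A sketch of the intended call sequence (not part of this file; accessor
// names as declared in linker_phdr.h):
//
//   ElfReader reader;
//   if (reader.Read(realpath, fd, file_offset, file_size) &&
//       reader.Load(&address_space)) {
//     // reader.load_start(), reader.load_size() and reader.load_bias()
//     // now describe the reserved mapping.
//   }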

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_RISCV) return "EM_RISCV";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none
  // of them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}
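// The safe_add() calls matter: e.g. a crafted ELF with e_phoff close to
// UINT64_MAX would otherwise wrap file_offset_ + offset around zero and slip
// past the plain range comparisons.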

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = page_start(min_vaddr);
  max_vaddr = page_end(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
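// For the two example segments in the technical note above this returns
// page_end(0x48000) - page_start(0x30000) == 0x18000 bytes (assuming 4KiB
// pages): the size of the single contiguous reservation to carve out.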

// Returns the maximum p_align associated with a loadable segment in the ELF
// program header table. Used to determine whether the file should be loaded at
// a specific virtual address alignment for use with huge pages.
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t maximum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    if (phdr->p_align > maximum_alignment) {
      maximum_alignment = phdr->p_align;
    }
  }

#if defined(__LP64__)
  return maximum_alignment;
#else
  return page_size();
#endif
}
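// For example, a library linked with -Wl,-z,max-page-size=0x200000 carries
// 2MiB-aligned PT_LOADs, so on LP64 this returns 2MiB (== kPmdSize with 4KiB
// pages) and the library becomes a candidate for hugepage-aligned loading.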

// Reserve a virtual address range such that if its limits were extended to the
// next 2**align boundary, it would not overlap with any existing mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
                                         void** out_gap_start, size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Reserve enough space to properly align the library's start address.
  mapping_align = std::max(mapping_align, start_align);
  if (mapping_align == page_size()) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match
  // the second level page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is
  // still inside the mapping.
  size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible
    // huge pages before it to improve address randomization and make it harder
    // to locate this library code by probing.
    munmap(mmap_ptr, mmap_size);
    mapping_align = std::max(mapping_align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, mapping_align);
  uint8_t* last = align_down(gap_start, mapping_align) - size;

  // arc4random* is not available in first stage init because /dev/urandom
  // hasn't yet been created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
  uint8_t* start = first + n * start_align;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful
  // to make it harder to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}
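// Worked example (a sketch, assuming 4KiB pages and kLibraryAlignment ==
// 256KiB): for size == 0x30000 the initial reservation is
// align_up(0x30000, 0x40000) + 0x40000 - 0x1000 == 0x7f000 bytes, which is
// exactly enough to guarantee a 256KiB-aligned start address from which
// `size` bytes still fit inside the mapping.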

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    size_t start_alignment = page_size();
    if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
      size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
      // Limit alignment to PMD size as other alignments reduce the number of
      // bits available for ASLR for no benefit.
      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : page_size();
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                        &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

/*
 * Returns true if the kernel supports page size migration, else false.
 */
bool page_size_migration_supported() {
  static bool pgsize_migration_enabled = []() {
    std::string enabled;
    if (!android::base::ReadFileToString("/sys/kernel/mm/pgsize_migration/enabled", &enabled)) {
      return false;
    }
    return enabled.find("1") != std::string::npos;
  }();
  return pgsize_migration_enabled;
}
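// The same check can be made by hand on a device: the feature is present when
// /sys/kernel/mm/pgsize_migration/enabled exists and reads as a string
// containing '1'.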

// Find the ELF note of type NT_ANDROID_TYPE_PAD_SEGMENT and check that the desc value is 1.
bool ElfReader::ReadPadSegmentNote() {
  if (!page_size_migration_supported()) {
    // Don't attempt to read the note, since segment extension isn't
    // supported; but return true so that loading can continue normally.
    return true;
  }

  // The ELF can have multiple PT_NOTE's, check them all
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_NOTE) {
      continue;
    }

    // Some obfuscated ELFs may contain "empty" PT_NOTE program headers that don't
    // point to any part of the ELF (p_memsz == 0). Skip these since there is
    // nothing to decode. See: b/324468126
    if (phdr->p_memsz == 0) {
      continue;
    }

    // If the PT_NOTE extends beyond the file, the ELF is doing something
    // strange -- obfuscation, embedding hidden loaders, ...
    //
    // It can't contain the pad_segment note. Skip it to avoid a SIGBUS
    // from accesses beyond the file.
    off64_t note_end_off = file_offset_ + phdr->p_offset + phdr->p_filesz;
    if (note_end_off > file_size_) {
      continue;
    }

    // note_fragment is scoped to within the loop so that there is
    // at most 1 PT_NOTE mapped at any time during this search.
    MappedFileFragment note_fragment;
    if (!note_fragment.Map(fd_, file_offset_, phdr->p_offset, phdr->p_memsz)) {
      DL_ERR("\"%s\": PT_NOTE mmap(nullptr, %p, PROT_READ, MAP_PRIVATE, %d, %p) failed: %m",
             name_.c_str(), reinterpret_cast<void*>(phdr->p_memsz), fd_,
             reinterpret_cast<void*>(page_start(file_offset_ + phdr->p_offset)));
      return false;
    }

    const ElfW(Nhdr)* note_hdr = nullptr;
    const char* note_desc = nullptr;
    if (!__get_elf_note(NT_ANDROID_TYPE_PAD_SEGMENT, "Android",
                        reinterpret_cast<ElfW(Addr)>(note_fragment.data()),
                        phdr, &note_hdr, &note_desc)) {
      continue;
    }

    if (note_hdr->n_descsz != sizeof(ElfW(Word))) {
      DL_ERR("\"%s\" NT_ANDROID_TYPE_PAD_SEGMENT note has unexpected n_descsz: %u",
             name_.c_str(), reinterpret_cast<unsigned int>(note_hdr->n_descsz));
      return false;
    }

    // 1 == enabled, 0 == disabled
    should_pad_segments_ = *reinterpret_cast<const ElfW(Word)*>(note_desc) == 1;
    return true;
  }

  return true;
}
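// For reference, the note this matches has the standard ELF note layout (a
// sketch; the field values are implied by the checks above and elf_note.h):
//
//   ElfW(Nhdr) { n_namesz = 8,   // "Android" + NUL
//                n_descsz = 4,   // sizeof(ElfW(Word))
//                n_type   = NT_ANDROID_TYPE_PAD_SEGMENT }
//   "Android\0"
//   ElfW(Word) desc;             // 1 == enabled, 0 == disabled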

static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            size_t phdr_idx, ElfW(Addr)* p_memsz,
                                            ElfW(Addr)* p_filesz, bool should_pad_segments) {
  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
  const ElfW(Phdr)* next = nullptr;
  size_t next_idx = phdr_idx + 1;

  // Don't do segment extension for p_align > 64KiB; such ELFs already exist
  // in the field, e.g. 2MiB p_align for THPs, and are relatively small in
  // number.
  //
  // The kernel can only represent padding for p_align up to 64KiB. This is
  // because the kernel uses 4 available bits in the vm_area_struct to
  // represent padding extent; and so cannot enable mitigations to avoid
  // breaking app compatibility for p_aligns > 64KiB.
  //
  // Don't perform segment extension on these to avoid app compatibility issues.
  if (phdr->p_align <= kPageSize || phdr->p_align > 64*1024 || !should_pad_segments) {
    return;
  }

  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
    next = &phdr_table[next_idx];
  }

  // If this is the last LOAD segment, no extension is needed
  if (!next || *p_memsz != *p_filesz) {
    return;
  }

  ElfW(Addr) next_start = page_start(next->p_vaddr);
  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);

  // If adjacent segment mappings overlap, no extension is needed.
  if (curr_end >= next_start) {
    return;
  }

  // Extend the LOAD segment mapping to be contiguous with that of
  // the next LOAD segment.
  ElfW(Addr) extend = next_start - curr_end;
  *p_memsz += extend;
  *p_filesz += extend;
}
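// Worked example (an illustration, assuming a 4KiB-page kernel and p_align ==
// 0x4000): a LOAD with p_vaddr == 0 and p_memsz == p_filesz == 0x5400,
// followed by a LOAD at p_vaddr == 0x8000, gives curr_end ==
// page_end(0x5400) == 0x6000 and next_start == 0x8000. Both sizes are then
// extended by 0x2000 so the two mappings become contiguous.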

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + p_memsz;

    ElfW(Addr) seg_page_start = page_start(seg_start);
    ElfW(Addr) seg_page_end = page_end(seg_end);

    ElfW(Addr) seg_file_end = seg_start + p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + p_filesz;

    ElfW(Addr) file_page_start = page_start(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }

      // Mark segments as huge page eligible if they meet the requirements
      // (executable and PMD aligned).
      if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
          get_transparent_hugepages_supported()) {
        madvise(seg_addr, file_length, MADV_HUGEPAGE);
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    //
    // Do not attempt to zero the extended region past the first partial page,
    // since doing so may:
    //   1) Result in a SIGBUS, as the region is not backed by the underlying
    //      file.
    //   2) Break the COW backing, faulting in new anon pages for a region
    //      that will not be used.

    uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
    if ((phdr->p_flags & PF_W) != 0 && page_offset(unextended_seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(unextended_seg_file_end), 0,
             kPageSize - page_offset(unextended_seg_file_end));
    }

    // Pages may be brought in due to readahead.
    // Drop the padding (zero) pages, to avoid reclaim work later.
    //
    // NOTE: The madvise() here is special, as it also serves to hint to the
    // kernel the portion of the LOAD segment that is padding.
    //
    // See: [1] https://android-review.googlesource.com/c/kernel/common/+/3032411
    //      [2] https://android-review.googlesource.com/c/kernel/common/+/3048835
    uint64_t pad_start = page_end(unextended_seg_file_end);
    uint64_t pad_end = page_end(seg_file_end);
    CHECK(pad_start <= pad_end);
    uint64_t pad_len = pad_end - pad_start;
    if (page_size_migration_supported() && pad_len > 0 &&
        madvise(reinterpret_cast<void*>(pad_start), pad_len, MADV_DONTNEED)) {
      DL_WARN("\"%s\": madvise(0x%" PRIx64 ", 0x%" PRIx64 ", MADV_DONTNEED) failed: %m",
              name_.c_str(), pad_start, pad_len);
    }

    seg_file_end = page_end(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally. Used to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags,
                                     bool should_pad_segments) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);

    int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
    if ((prot & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }
#if defined(__aarch64__)
    if ((prot & PROT_EXEC) == 0) {
      // Though it is not specified, don't add PROT_BTI if the segment is not
      // executable.
      prot &= ~PROT_BTI;
    }
#endif

    int ret =
        mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * AArch64: also called from linker_main and ElfReader::Load to apply
 * PROT_BTI for loaded main so and other so-s.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
 *   prop        -> GnuPropertySection or nullptr
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, bool should_pad_segments,
                                const GnuPropertySection* prop __unused) {
  int prot = 0;
#if defined(__aarch64__)
  if ((prop != nullptr) && prop->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias,
                                  bool should_pad_segments) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
                                   should_pad_segments);
}
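// Typical relocation-time sequence (a sketch of how the pair is expected to
// be used together):
//
//   phdr_table_unprotect_segments(phdr, count, load_bias, should_pad);
//   // ... apply relocations ...
//   phdr_table_protect_segments(phdr, count, load_bias, should_pad, prop);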
1064
Kalesh Singh702d9b02024-03-13 13:38:04 -07001065static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
1066 const ElfW(Phdr)* phdr_table, size_t phdr_count,
1067 ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
1068 bool should_pad_segments) {
1069 // Find the index and phdr of the LOAD containing the GNU_RELRO segment
1070 for (size_t index = 0; index < phdr_count; ++index) {
1071 const ElfW(Phdr)* phdr = &phdr_table[index];
1072
1073 if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
1074 // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
1075 // LOAD segment mem size, we need to protect only a partial region of the
1076 // LOAD segment and therefore cannot avoid a VMA split.
1077 //
1078 // Note: Don't check the page-aligned mem sizes since the extended protection
1079 // may incorrectly write protect non-relocation data.
1080 //
1081 // Example:
1082 //
1083 // |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
1084 // ----------------------------------------------------------------
1085 // | | | | |
1086 // SEG X | RO | RO | RW | | SEG Y
1087 // | | | | |
1088 // ----------------------------------------------------------------
1089 // | | |
1090 // | | |
1091 // | | |
1092 // relro_vaddr relro_vaddr relro_vaddr
1093 // (load_vaddr) + +
1094 // relro_memsz load_memsz
1095 //
1096 // ----------------------------------------------------------------
1097 // | PAGE | PAGE |
1098 // ----------------------------------------------------------------
1099 // | Potential |
1100 // |----- Extended RO ----|
1101 // | Protection |
1102 //
1103 // If the check below uses page aligned mem sizes it will cause incorrect write
1104 // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
1105 if (relro_phdr->p_memsz < phdr->p_memsz) {
1106 return;
1107 }
1108
1109 ElfW(Addr) p_memsz = phdr->p_memsz;
1110 ElfW(Addr) p_filesz = phdr->p_filesz;
1111
1112 // Attempt extending the VMA (mprotect range). Without extending the range,
1113 // mprotect will only RO protect a part of the extended RW LOAD segment, which
1114 // will leave an extra split RW VMA (the gap).
1115 _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
1116 should_pad_segments);
1117
1118 *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
1119 return;
1120 }
1121 }
1122}
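
// Worked example of the early return above (illustrative; 4 KiB pages and
// invented sizes): a GNU_RELRO that covers only part of its LOAD segment must
// not have its protection range extended, or the RW tail would be
// write-protected too.
#if 0
constexpr ElfW(Addr) kLoadMemsz = 0x2000;   // 8K LOAD segment
constexpr ElfW(Addr) kRelroMemsz = 0x1c00;  // 7K GNU_RELRO inside it
static_assert(kRelroMemsz < kLoadMemsz,
              "partial RELRO: extending to the padded segment end would "
              "wrongly write-protect the trailing RW 1K");
#endif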
1123
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001124/* Used internally by phdr_table_protect_gnu_relro and
1125 * phdr_table_unprotect_gnu_relro.
1126 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001127static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Kalesh Singh702d9b02024-03-13 13:38:04 -07001128 ElfW(Addr) load_bias, int prot_flags,
1129 bool should_pad_segments) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001130 const ElfW(Phdr)* phdr = phdr_table;
1131 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001132
Elliott Hughes0266ae52014-02-10 17:46:57 -08001133 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1134 if (phdr->p_type != PT_GNU_RELRO) {
1135 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001136 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001137
1138 // Tricky: what happens when the relro segment does not start
1139 // or end at page boundaries? We're going to be over-protective
1140 * here and mark every page touched by the segment as read-only.
1141
1142 // This seems to match Ian Lance Taylor's description of the
1143 // feature at http://www.airs.com/blog/archives/189.
1144
1145 // Extract:
1146 // Note that the current dynamic linker code will only work
1147 // correctly if the PT_GNU_RELRO segment starts on a page
1148 // boundary. This is because the dynamic linker rounds the
1149 // p_vaddr field down to the previous page boundary. If
1150 // there is anything on the page which should not be read-only,
1151 // the program is likely to fail at runtime. So in effect the
1152 // linker must only emit a PT_GNU_RELRO segment if it ensures
1153 // that it starts on a page boundary.
Zheng Pan9535c322024-02-14 00:04:10 +00001154 ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1155 ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
Kalesh Singh702d9b02024-03-13 13:38:04 -07001156 _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
1157 should_pad_segments);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001158
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001159 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
Elliott Hughes0266ae52014-02-10 17:46:57 -08001160 seg_page_end - seg_page_start,
1161 prot_flags);
1162 if (ret < 0) {
1163 return -1;
1164 }
1165 }
1166 return 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001167}
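
// The page_start()/page_end() helpers above come from the linker's utility
// headers and honor the runtime page size. A minimal equivalent for a fixed
// 4096-byte page, for illustration only:
#if 0
constexpr uintptr_t kExamplePageSize = 4096;
constexpr uintptr_t example_page_start(uintptr_t addr) {
  return addr & ~(kExamplePageSize - 1);  // round down to a page boundary
}
constexpr uintptr_t example_page_end(uintptr_t addr) {
  return example_page_start(addr + kExamplePageSize - 1);  // round up
}
static_assert(example_page_start(0x1234) == 0x1000);
static_assert(example_page_end(0x1234) == 0x2000);
#endif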
1168
1169/* Apply GNU relro protection if specified by the program header. This will
1170 * turn some of the pages of a writable PT_LOAD segment to read-only, as
1171 * specified by one or more PT_GNU_RELRO segments. This must always be
1172 * performed after relocations.
1173 *
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001174 * The areas typically covered are .got and .data.rel.ro; these are
1175 * read-only from the program's POV, but contain absolute addresses
1176 * that need to be relocated before use.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001177 *
1178 * Input:
1179 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001180 * phdr_count -> number of entries in the table
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001181 * load_bias -> load bias
Kalesh Singh702d9b02024-03-13 13:38:04 -07001182 * should_pad_segments -> Were segments extended to avoid gaps in the memory map
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001183 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001184 * 0 on success, -1 on failure (error code in errno).
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001185 */
Kalesh Singh702d9b02024-03-13 13:38:04 -07001186int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1187 ElfW(Addr) load_bias, bool should_pad_segments) {
1188 return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
1189 should_pad_segments);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001190}
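
// Illustrative call site (the soinfo fields used here are assumptions of
// this sketch): RELRO pages are sealed only once every relocation has been
// applied.
#if 0
static void example_seal_relro(soinfo* si) {
  // ... all relocations applied ...
  if (phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias,
                                   si->should_pad_segments()) < 0) {
    DL_ERR("can't protect RELRO segments: %s", strerror(errno));
  }
}
#endif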
1191
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001192/* Serialize the GNU relro segments to the given file descriptor. This can be
1193 * performed after relocations to allow another process to later share the
1194 * relocated segment, if it was loaded at the same address.
1195 *
1196 * Input:
1197 * phdr_table -> program header table
1198 * phdr_count -> number of entries in the table
1199 * load_bias -> load bias
1200 * fd -> writable file descriptor to use
Torne (Richard Coles)fa9f7f22019-04-02 17:04:42 -04001201 * file_offset -> pointer to offset into file descriptor to use/update
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001202 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001203 * 0 on success, -1 on failure (error code in errno).
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001204 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001205int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
1206 size_t phdr_count,
1207 ElfW(Addr) load_bias,
Torne (Richard Coles)fa9f7f22019-04-02 17:04:42 -04001208 int fd,
1209 size_t* file_offset) {
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001210 const ElfW(Phdr)* phdr = phdr_table;
1211 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001212
1213 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1214 if (phdr->p_type != PT_GNU_RELRO) {
1215 continue;
1216 }
1217
Peter Collingbournebb11ee62022-05-02 12:26:16 -07001218 ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1219 ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001220 ssize_t size = seg_page_end - seg_page_start;
1221
1222 ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
1223 if (written != size) {
1224 return -1;
1225 }
1226 void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
Torne (Richard Coles)fa9f7f22019-04-02 17:04:42 -04001227 MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001228 if (map == MAP_FAILED) {
1229 return -1;
1230 }
Torne (Richard Coles)fa9f7f22019-04-02 17:04:42 -04001231 *file_offset += size;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001232 }
1233 return 0;
1234}
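
// Illustrative writer side of RELRO sharing (file name and error handling are
// invented; <fcntl.h> is assumed): one process dumps its relocated RELRO
// pages so that later processes loading the library at the same address can
// reuse them via phdr_table_map_gnu_relro().
#if 0
static bool example_write_relro_file(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, const char* path) {
  int fd = TEMP_FAILURE_RETRY(open(path, O_RDWR | O_CREAT | O_TRUNC, 0600));
  if (fd == -1) return false;
  size_t file_offset = 0;
  int rc = phdr_table_serialize_gnu_relro(phdr_table, phdr_count, load_bias,
                                          fd, &file_offset);
  close(fd);
  return rc == 0;
}
#endif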
1235
1236/* Where possible, replace the GNU relro segments with mappings of the given
1237 * file descriptor. This can be performed after relocations to allow a file
1238 * previously created by phdr_table_serialize_gnu_relro in another process to
1239 * replace the dirty relocated pages, saving memory, if it was loaded at the
1240 * same address. We have to compare the data before we map over it, since some
1241 * parts of the relro segment may not be identical due to other libraries in
1242 * the process being loaded at different addresses.
1243 *
1244 * Input:
1245 * phdr_table -> program header table
1246 * phdr_count -> number of entries in the table
1247 * load_bias -> load bias
1248 * fd -> readable file descriptor to use
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001249 * file_offset -> pointer to offset into file descriptor to use/update
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001250 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001251 * 0 on success, -1 on failure (error code in errno).
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001252 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001253int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
1254 size_t phdr_count,
1255 ElfW(Addr) load_bias,
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001256 int fd,
1257 size_t* file_offset) {
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001258 // Map the file at a temporary location so we can compare its contents.
1259 struct stat file_stat;
1260 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
1261 return -1;
1262 }
1263 off_t file_size = file_stat.st_size;
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001264 void* temp_mapping = nullptr;
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +01001265 if (file_size > 0) {
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001266 temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +01001267 if (temp_mapping == MAP_FAILED) {
1268 return -1;
1269 }
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001270 }
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001271
1272 // Iterate over the relro segments and compare/remap the pages.
1273 const ElfW(Phdr)* phdr = phdr_table;
1274 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
1275
1276 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1277 if (phdr->p_type != PT_GNU_RELRO) {
1278 continue;
1279 }
1280
Peter Collingbournebb11ee62022-05-02 12:26:16 -07001281 ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1282 ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001283
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001284 char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001285 char* mem_base = reinterpret_cast<char*>(seg_page_start);
1286 size_t match_offset = 0;
1287 size_t size = seg_page_end - seg_page_start;
1288
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001289 if (file_size - *file_offset < size) {
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +01001290 // File is too short to compare to this segment. The contents are likely
1291 // different as well (it's probably for a different library version) so
1292 // just don't bother checking.
1293 break;
1294 }
1295
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001296 while (match_offset < size) {
1297 // Skip over dissimilar pages.
1298 while (match_offset < size &&
Peter Collingbournebb11ee62022-05-02 12:26:16 -07001299 memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
1300 match_offset += page_size();
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001301 }
1302
1303 // Count similar pages.
1304 size_t mismatch_offset = match_offset;
1305 while (mismatch_offset < size &&
Peter Collingbournebb11ee62022-05-02 12:26:16 -07001306 memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
1307 mismatch_offset += page_size();
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001308 }
1309
1310 // Map over similar pages.
1311 if (mismatch_offset > match_offset) {
1312 void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001313 PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001314 if (map == MAP_FAILED) {
1315 munmap(temp_mapping, file_size);
1316 return -1;
1317 }
1318 }
1319
1320 match_offset = mismatch_offset;
1321 }
1322
1323 // Add to the base file offset in case there are multiple relro segments.
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001324 *file_offset += size;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001325 }
1326 munmap(temp_mapping, file_size);
1327 return 0;
1328}
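
// Illustrative reader side (path handling invented): map a previously
// serialized RELRO file over this process's RELRO pages; identical pages
// become clean file-backed pages, dissimilar ones are left untouched.
#if 0
static bool example_use_relro_file(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                   ElfW(Addr) load_bias, const char* path) {
  int fd = TEMP_FAILURE_RETRY(open(path, O_RDONLY));
  if (fd == -1) return false;
  size_t file_offset = 0;
  int rc = phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias,
                                    fd, &file_offset);
  close(fd);
  return rc == 0;
}
#endif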
1329
1330
Elliott Hughes4eeb1f12013-10-25 17:38:02 -07001331#if defined(__arm__)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001332/* Return the address and size of the .ARM.exidx section in memory,
1333 * if present.
1334 *
1335 * Input:
1336 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001337 * phdr_count -> number of entries in the table
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001338 * load_bias -> load bias
1339 * Output:
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001340 * arm_exidx -> address of table in memory (null on failure).
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001341 * arm_exidx_count -> number of items in table (0 on failure).
1342 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001343 * 0 on success, -1 on failure (_no_ error code in errno)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001344 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001345int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1346 ElfW(Addr) load_bias,
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001347 ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001348 const ElfW(Phdr)* phdr = phdr_table;
1349 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001350
Elliott Hughes0266ae52014-02-10 17:46:57 -08001351 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1352 if (phdr->p_type != PT_ARM_EXIDX) {
1353 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001354 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001355
1356 *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001357 *arm_exidx_count = phdr->p_memsz / 8;
Elliott Hughes0266ae52014-02-10 17:46:57 -08001358 return 0;
1359 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001360 *arm_exidx = nullptr;
Elliott Hughes0266ae52014-02-10 17:46:57 -08001361 *arm_exidx_count = 0;
1362 return -1;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001363}
Elliott Hughes4eeb1f12013-10-25 17:38:02 -07001364#endif
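
// Each .ARM.exidx entry is two 32-bit words, a PREL31 offset to the function
// it covers plus one unwind word, which is why the count above divides
// p_memsz by 8. Layout sketch (names are illustrative, not from the EHABI
// headers):
#if 0
struct ExampleExidxEntry {
  uint32_t fn_offset;  // PREL31 offset to the covered function
  uint32_t unwind;     // inline unwind data, or a reference into .ARM.extab
};
static_assert(sizeof(ExampleExidxEntry) == 8, "matches the p_memsz / 8 above");
#endif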
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001365
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001366/* Return the address and size of the ELF file's .dynamic section in memory,
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001367 * or null if missing.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001368 *
1369 * Input:
1370 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001371 * phdr_count -> number of entries in the table
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001372 * load_bias -> load bias
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001373 * Output:
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001374 * dynamic -> address of table in memory (null on failure).
Ningsheng Jiane93be992014-09-16 15:22:10 +08001375 * dynamic_flags -> protection flags for section (unset on failure)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001376 * Return:
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001377 * void
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001378 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001379void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Ningsheng Jiane93be992014-09-16 15:22:10 +08001380 ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
1381 ElfW(Word)* dynamic_flags) {
Dmitriy Ivanov498eb182014-09-05 14:57:59 -07001382 *dynamic = nullptr;
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001383 for (size_t i = 0; i < phdr_count; ++i) {
1384 const ElfW(Phdr)& phdr = phdr_table[i];
1385 if (phdr.p_type == PT_DYNAMIC) {
1386 *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
Ningsheng Jiane93be992014-09-16 15:22:10 +08001387 if (dynamic_flags) {
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001388 *dynamic_flags = phdr.p_flags;
Ningsheng Jiane93be992014-09-16 15:22:10 +08001389 }
Dmitriy Ivanov14669a92014-09-05 16:42:53 -07001390 return;
1391 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001392 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001393}
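
// Illustrative sketch: once PT_DYNAMIC has been found, callers walk the table
// until the DT_NULL terminator, e.g. to look up DT_SONAME or DT_NEEDED.
#if 0
static void example_walk_dynamic(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                 ElfW(Addr) load_bias) {
  ElfW(Dyn)* dynamic = nullptr;
  phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
                                 &dynamic, nullptr);
  if (dynamic == nullptr) {
    return;
  }
  for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
    // Inspect d->d_tag and d->d_un.d_val / d->d_un.d_ptr here.
  }
}
#endif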
1394
Evgenii Stepanovd640b222015-07-10 17:54:01 -07001395/* Return the program interpreter string, or nullptr if missing.
1396 *
1397 * Input:
1398 * phdr_table -> program header table
1399 * phdr_count -> number of entries in the table
1400 * load_bias -> load bias
1401 * Return:
1402 * pointer to the program interpreter string.
1403 */
Tamas Petz8d55d182020-02-24 14:15:25 +01001404const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Evgenii Stepanovd640b222015-07-10 17:54:01 -07001405 ElfW(Addr) load_bias) {
1406 for (size_t i = 0; i < phdr_count; ++i) {
1407 const ElfW(Phdr)& phdr = phdr_table[i];
1408 if (phdr.p_type == PT_INTERP) {
1409 return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
1410 }
1411 }
1412 return nullptr;
1413}
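
// Illustrative usage (the example interpreter path is an assumption; the
// actual value depends on how the binary was linked):
#if 0
static bool example_has_interpreter(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias) {
  const char* interp =
      phdr_table_get_interpreter_name(phdr_table, phdr_count, load_bias);
  return interp != nullptr;  // e.g. "/system/bin/linker64" on 64-bit Android
}
#endif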
1414
Robert Grosse4544d9f2014-10-15 14:32:19 -07001415// Sets loaded_phdr_ to the address of the program header table as it appears
1416// in the loaded segments in memory. This is in contrast with phdr_table_,
1417// which is temporary and will be released before the library is relocated.
Elliott Hughes650be4e2013-03-05 18:47:58 -08001418bool ElfReader::FindPhdr() {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001419 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001420
Elliott Hughes650be4e2013-03-05 18:47:58 -08001421 // If there is a PT_PHDR, use it directly.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001422 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001423 if (phdr->p_type == PT_PHDR) {
1424 return CheckPhdr(load_bias_ + phdr->p_vaddr);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001425 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001426 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001427
Elliott Hughes650be4e2013-03-05 18:47:58 -08001428 // Otherwise, check the first loadable segment. If its file offset
1429 // is 0, it starts with the ELF header, and we can trivially find the
1430 // loaded program header from it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001431 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001432 if (phdr->p_type == PT_LOAD) {
1433 if (phdr->p_offset == 0) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001434 ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001435 const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001436 ElfW(Addr) offset = ehdr->e_phoff;
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001437 return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001438 }
1439 break;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001440 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001441 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001442
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001443 DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -08001444 return false;
1445}
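
// Illustrative sketch of the fallback above: when the first PT_LOAD maps file
// offset 0, the ELF header itself is in memory, so the loaded program header
// table can be recovered from e_phoff.
#if 0
static const ElfW(Phdr)* example_phdr_from_ehdr(ElfW(Addr) load_bias,
                                                const ElfW(Phdr)* first_load) {
  ElfW(Addr) elf_addr = load_bias + first_load->p_vaddr;  // ELF header address
  const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
  return reinterpret_cast<const ElfW(Phdr)*>(elf_addr + ehdr->e_phoff);
}
#endif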
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001446
Tamas Petz8d55d182020-02-24 14:15:25 +01001447// Tries to find .note.gnu.property section.
1448// It is not considered an error if such section is missing.
1449bool ElfReader::FindGnuPropertySection() {
1450#if defined(__aarch64__)
1451 note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
1452#endif
1453 return true;
1454}
1455
Elliott Hughes650be4e2013-03-05 18:47:58 -08001456// Ensures that our program header is actually within a loadable
1457// segment. This should help catch badly-formed ELF files that
1458// would cause the linker to crash later when trying to access it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001459bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
1460 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
1461 ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
Dmitriy Ivanovcf1cbbe2015-10-19 16:57:46 -07001462 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001463 if (phdr->p_type != PT_LOAD) {
1464 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001465 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001466 ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
1467 ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -08001468 if (seg_start <= loaded && loaded_end <= seg_end) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001469 loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001470 return true;
1471 }
1472 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001473 DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
1474 name_.c_str(), reinterpret_cast<void*>(loaded));
Elliott Hughes650be4e2013-03-05 18:47:58 -08001475 return false;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001476}