/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_asm_note.h"
#include "private/CFIShadow.h" // For kLibraryAlignment
#include "private/elf_note.h"

#include <android-base/file.h>
#include <android-base/properties.h>

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__riscv)
  return EM_RISCV;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset -> segment file offset
    p_filesz -> segment file size
    p_memsz  -> segment memory size (always >= p_filesz)
    p_vaddr  -> segment's virtual address
    p_flags  -> segment flags (e.g. readable, writable, executable)
    p_align  -> segment's in-memory and in-file alignment

  We will ignore the p_paddr field of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead, the
  loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap-around at UINT32_MAX for large p_vaddr values.)

  Note that phdr0_load_address must itself start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + page_offset(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

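// Worked example of the load_bias computation described above (illustrative
// only, assuming a 4096-byte page size): if the loader places the first
// segment from the note's example (p_vaddr 0x30000) at
// phdr0_load_address = 0xa0030000, then
//
//     load_bias = 0xa0030000 - page_start(0x30000)
//               = 0xa0030000 - 0x30000
//               = 0xa0000000
//
// and every p_vaddr in the file is biased by 0xa0000000, e.g. the second
// segment (p_vaddr 0x40000) ends up at 0xa0040000.
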
static const size_t kPageSize = page_size();

/*
 * Generic PMD size calculation:
 *  - Each page table (PT) is of size 1 page.
 *  - Each page table entry (PTE) is of size 64 bits.
 *  - Each PTE locates one physical page frame (PFN) of size 1 page.
 *  - A PMD entry locates 1 page table (PT).
 *
 *   PMD size = Num entries in a PT * page_size
 */
static const size_t kPmdSize = (kPageSize / sizeof(uint64_t)) * kPageSize;
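// For example (illustrative arithmetic only): with 4KiB pages,
// kPmdSize = (4096 / 8) * 4096 = 2MiB; with 16KiB pages,
// kPmdSize = (16384 / 8) * 16384 = 32MiB.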

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection() &&
      ReadPadSegmentNote()) {
    did_read_ = true;
  }

  if (kPageSize == 0x4000 && phdr_table_get_minimum_alignment(phdr_table_, phdr_num_) == 0x1000) {
    // This prop needs to be read on 16KiB devices for each ELF where min_palign is 4KiB.
    // It cannot be cached since the developer may toggle app compat on/off.
    // This check will be removed once app compat is made the default on 16KiB devices.
    should_use_16kib_app_compat_ =
        ::android::base::GetBoolProperty("bionic.linker.16kb.app_compat.enabled", false) ||
        get_16kb_appcompat_mode();
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  bool reserveSuccess = ReserveAddressSpace(address_space);
  if (reserveSuccess && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ =
          (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_, should_pad_segments_,
                                       should_use_16kib_app_compat_, &note_gnu_property_) == 0);
    }
#endif
  }
  if (reserveSuccess && !did_load_) {
    if (load_start_ != nullptr && load_size_ != 0) {
      if (!mapped_by_caller_) {
        munmap(load_start_, load_size_);
      }
    }
  }

  return did_load_;
}

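// Illustrative usage sketch (not part of the linker): the expected call order
// for ElfReader, assuming an already-opened fd for the ELF file; the file name
// and the fd/file_size variables below are hypothetical, and the
// default-initialized address space parameters are an assumption. Read()
// parses and validates the headers; Load() reserves the address range and
// maps the PT_LOAD segments.
//
//   ElfReader reader;
//   address_space_params params = {};  // assumed default: the linker picks the address
//   bool ok = reader.Read("/system/lib64/libfoo.so", fd, /*file_offset=*/0, file_size) &&
//             reader.Load(&params);
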
const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_RISCV) return "EM_RISCV";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called to
  // check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none of
  // which can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %m", name_.c_str());
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %m", name_.c_str());
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure the dynamic_shdr offset and size match the PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %m", name_.c_str());
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %m", name_.c_str());
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = page_start(min_vaddr);
  max_vaddr = page_end(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

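// Worked example for phdr_table_get_load_size() above (illustrative only,
// assuming a 4096-byte page size): for the two segments in the technical note
// at the top of this file (vaddr 0x30000/memsz 0x4000 and vaddr 0x40000/memsz
// 0x8000), min_vaddr = page_start(0x30000) = 0x30000 and
// max_vaddr = page_end(0x48000) = 0x48000, so the function returns
// 0x48000 - 0x30000 = 0x18000 bytes to reserve.
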
// Returns the maximum p_align associated with a loadable segment in the ELF
// program header table. Used to determine whether the file should be loaded at
// a specific virtual address alignment for use with huge pages.
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t maximum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    maximum_alignment = std::max(maximum_alignment, static_cast<size_t>(phdr->p_align));
  }

#if defined(__LP64__)
  return maximum_alignment;
#else
  return page_size();
#endif
}

// Returns the minimum p_align associated with a loadable segment in the ELF
// program header table. Used to determine if the program alignment is compatible
// with the page size of this system.
size_t phdr_table_get_minimum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t minimum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    if (phdr->p_align <= 1) {
      continue;
    }

    minimum_alignment = std::min(minimum_alignment, static_cast<size_t>(phdr->p_align));
  }

  return minimum_alignment;
}

// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
                                         void** out_gap_start, size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Reserve enough space to properly align the library's start address.
  mapping_align = std::max(mapping_align, start_align);
  if (mapping_align == page_size()) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match the second level
  // page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible huge pages before
    // it to improve address randomization and make it harder to locate this library code by
    // probing.
    munmap(mmap_ptr, mmap_size);
    mapping_align = std::max(mapping_align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, mapping_align);
  uint8_t* last = align_down(gap_start, mapping_align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
  uint8_t* start = first + n * start_align;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
  // to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  if (should_use_16kib_app_compat_) {
    // Reserve additional space for aligning the permission boundary in compat loading.
    // Up to kPageSize-kCompatPageSize additional space is needed, but reservation
    // is done with mmap which gives kPageSize multiple-sized reservations.
    load_size_ += kPageSize;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    size_t start_alignment = page_size();
    if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
      size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
      // Limit alignment to PMD size as other alignments reduce the number of
      // bits available for ASLR for no benefit.
      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : page_size();
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                        &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;

  if (should_use_16kib_app_compat_) {
    // In compat mode make the initial mapping RW since the ELF contents will be read
    // into it; instead of mapped over it.
    mprotect(reinterpret_cast<void*>(start), load_size_, PROT_READ | PROT_WRITE);
  }

  return true;
}

/*
 * Returns true if the kernel supports page size migration, else false.
 */
bool page_size_migration_supported() {
  static bool pgsize_migration_enabled = []() {
    std::string enabled;
    if (!android::base::ReadFileToString("/sys/kernel/mm/pgsize_migration/enabled", &enabled)) {
      return false;
    }
    return enabled.find("1") != std::string::npos;
  }();
  return pgsize_migration_enabled;
}

// Find the ELF note of type NT_ANDROID_TYPE_PAD_SEGMENT and check that the desc value is 1.
bool ElfReader::ReadPadSegmentNote() {
  if (!page_size_migration_supported()) {
    // Don't attempt to read the note, since segment extension isn't
    // supported; but return true so that loading can continue normally.
    return true;
  }

  // The ELF can have multiple PT_NOTE's, check them all.
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_NOTE) {
      continue;
    }

    // Some obfuscated ELFs may contain "empty" PT_NOTE program headers that don't
    // point to any part of the ELF (p_memsz == 0). Skip these since there is
    // nothing to decode. See: b/324468126
    if (phdr->p_memsz == 0) {
      continue;
    }

    // If the PT_NOTE extends beyond the file, the ELF is doing something
    // strange -- obfuscation, embedding hidden loaders, ...
    //
    // In that case it doesn't contain the pad_segment note. Skip it to avoid
    // a SIGBUS from accesses beyond the file.
    off64_t note_end_off = file_offset_ + phdr->p_offset + phdr->p_filesz;
    if (note_end_off > file_size_) {
      continue;
    }

    // note_fragment is scoped to within the loop so that there is
    // at most 1 PT_NOTE mapped at any time during this search.
    MappedFileFragment note_fragment;
    if (!note_fragment.Map(fd_, file_offset_, phdr->p_offset, phdr->p_memsz)) {
      DL_ERR("\"%s\": PT_NOTE mmap(nullptr, %p, PROT_READ, MAP_PRIVATE, %d, %p) failed: %m",
             name_.c_str(), reinterpret_cast<void*>(phdr->p_memsz), fd_,
             reinterpret_cast<void*>(page_start(file_offset_ + phdr->p_offset)));
      return false;
    }

    const ElfW(Nhdr)* note_hdr = nullptr;
    const char* note_desc = nullptr;
    if (!__get_elf_note(NT_ANDROID_TYPE_PAD_SEGMENT, "Android",
                        reinterpret_cast<ElfW(Addr)>(note_fragment.data()),
                        phdr, &note_hdr, &note_desc)) {
      continue;
    }

    if (note_hdr->n_descsz != sizeof(ElfW(Word))) {
      DL_ERR("\"%s\" NT_ANDROID_TYPE_PAD_SEGMENT note has unexpected n_descsz: %u",
             name_.c_str(), reinterpret_cast<unsigned int>(note_hdr->n_descsz));
      return false;
    }

    // 1 == enabled, 0 == disabled
    should_pad_segments_ = *reinterpret_cast<const ElfW(Word)*>(note_desc) == 1;
    return true;
  }

  return true;
}

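// Illustrative sketch of the note that ReadPadSegmentNote() above looks for,
// derived from the checks in that function (not an authoritative description
// of the build-system output): a PT_NOTE segment containing an ElfW(Nhdr)
// whose name is "Android", whose type is NT_ANDROID_TYPE_PAD_SEGMENT, and
// whose 4-byte desc word is 1 (enabled) or 0 (disabled):
//
//   n_namesz = 8   ("Android" plus the terminating NUL)
//   n_descsz = 4   (sizeof(ElfW(Word)))
//   n_type   = NT_ANDROID_TYPE_PAD_SEGMENT
//   name     = "Android"
//   desc     = 1
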
static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            size_t phdr_idx, ElfW(Addr)* p_memsz,
                                            ElfW(Addr)* p_filesz, bool should_pad_segments,
                                            bool should_use_16kib_app_compat) {
  // NOTE: Segment extension is only applicable where the ELF's max-page-size > runtime page size,
  // and exists to save kernel VMA slab memory. 16KiB compat mode is the exact opposite scenario.
  if (should_use_16kib_app_compat) {
    return;
  }

  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
  const ElfW(Phdr)* next = nullptr;
  size_t next_idx = phdr_idx + 1;

  // Don't do segment extension for p_align > 64KiB; such ELFs already exist in the
  // field, e.g. 2MiB p_align for THPs, and are relatively small in number.
  //
  // The kernel can only represent padding for p_align up to 64KiB. This is because
  // the kernel uses 4 available bits in the vm_area_struct to represent padding
  // extent; and so cannot enable mitigations to avoid breaking app compatibility for
  // p_aligns > 64KiB.
  //
  // Don't perform segment extension on these to avoid app compatibility issues.
  if (phdr->p_align <= kPageSize || phdr->p_align > 64*1024 || !should_pad_segments) {
    return;
  }

  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
    next = &phdr_table[next_idx];
  }

  // If this is the last LOAD segment, no extension is needed.
  if (!next || *p_memsz != *p_filesz) {
    return;
  }

  ElfW(Addr) next_start = page_start(next->p_vaddr);
  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);

  // If adjacent segment mappings overlap, no extension is needed.
  if (curr_end >= next_start) {
    return;
  }

  // Extend the LOAD segment mapping to be contiguous with that of
  // the next LOAD segment.
  ElfW(Addr) extend = next_start - curr_end;
  *p_memsz += extend;
  *p_filesz += extend;
}

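// Worked example for _extend_load_segment_vma() above (illustrative only,
// assuming a 4KiB runtime page size and a 16KiB max-page-size ELF with
// segment padding enabled): if segment i ends at p_vaddr + p_memsz = 0x5800
// and segment i+1 starts at p_vaddr = 0x8000, then
// curr_end = page_end(0x5800) = 0x6000 and next_start = page_start(0x8000) =
// 0x8000, so p_memsz and p_filesz are both extended by 0x2000 and the mapping
// of segment i runs right up to the start of segment i+1.
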
bool ElfReader::MapSegment(size_t seg_idx, size_t len) {
  const ElfW(Phdr)* phdr = &phdr_table_[seg_idx];

  void* start = reinterpret_cast<void*>(page_start(phdr->p_vaddr + load_bias_));

  // The ELF could be being loaded directly from a zipped APK;
  // the zip offset must be added to find the segment offset.
  const ElfW(Addr) offset = file_offset_ + page_start(phdr->p_offset);

  int prot = PFLAGS_TO_PROT(phdr->p_flags);

  void* seg_addr = mmap64(start, len, prot, MAP_FIXED | MAP_PRIVATE, fd_, offset);

  if (seg_addr == MAP_FAILED) {
    DL_ERR("couldn't map \"%s\" segment %zd: %m", name_.c_str(), seg_idx);
    return false;
  }

  // Mark segments as huge page eligible if they meet the requirements.
  if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
      get_transparent_hugepages_supported()) {
    madvise(seg_addr, len, MADV_HUGEPAGE);
  }

  return true;
}

void ElfReader::ZeroFillSegment(const ElfW(Phdr)* phdr) {
  // NOTE: In 16KiB app compat mode, the ELF mapping is anonymous, meaning that
  // RW segments are COW-ed from the kernel's zero page. So there is no need to
  // explicitly zero-fill until the last page's limit.
  if (should_use_16kib_app_compat_) {
    return;
  }

  ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
  uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;

  // If the segment is writable, and does not end on a page boundary,
  // zero-fill it until the page limit.
  //
  // Do not attempt to zero the extended region past the first partial page,
  // since doing so may:
  //   1) Result in a SIGBUS, as the region is not backed by the underlying
  //      file.
  //   2) Break the COW backing, faulting in new anon pages for a region
  //      that will not be used.
  if ((phdr->p_flags & PF_W) != 0 && page_offset(unextended_seg_file_end) > 0) {
    memset(reinterpret_cast<void*>(unextended_seg_file_end), 0,
           kPageSize - page_offset(unextended_seg_file_end));
  }
}

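// Worked example for ZeroFillSegment() above (illustrative only, assuming a
// 4096-byte runtime page size): if a writable segment's file content ends at
// unextended_seg_file_end = 0x7000001234, then page_offset(0x7000001234) =
// 0x234 and the memset() zeroes 0x1000 - 0x234 = 0xdcc bytes, i.e. up to the
// page boundary at 0x7000002000.
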
void ElfReader::DropPaddingPages(const ElfW(Phdr)* phdr, uint64_t seg_file_end) {
  // NOTE: Padding pages are only applicable where the ELF's max-page-size > runtime page size;
  // 16KiB compat mode is the exact opposite scenario.
  if (should_use_16kib_app_compat_) {
    return;
  }

  ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
  uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;

  uint64_t pad_start = page_end(unextended_seg_file_end);
  uint64_t pad_end = page_end(seg_file_end);
  CHECK(pad_start <= pad_end);

  uint64_t pad_len = pad_end - pad_start;
  if (pad_len == 0 || !page_size_migration_supported()) {
    return;
  }

  // Pages may be brought in due to readahead.
  // Drop the padding (zero) pages, to avoid reclaim work later.
  //
  // NOTE: The madvise() here is special, as it also serves to hint to the
  // kernel the portion of the LOAD segment that is padding.
  //
  // See: [1] https://android-review.googlesource.com/c/kernel/common/+/3032411
  //      [2] https://android-review.googlesource.com/c/kernel/common/+/3048835
  if (madvise(reinterpret_cast<void*>(pad_start), pad_len, MADV_DONTNEED)) {
    DL_WARN("\"%s\": madvise(0x%" PRIx64 ", 0x%" PRIx64 ", MADV_DONTNEED) failed: %m",
            name_.c_str(), pad_start, pad_len);
  }
}

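// Worked example for DropPaddingPages() above (illustrative only, assuming a
// 4KiB runtime page size and a segment that was extended by
// _extend_load_segment_vma()): if the file content ends at 0x7000001234 and
// the extended mapping ends at seg_file_end = 0x7000004000, then
// pad_start = page_end(0x7000001234) = 0x7000002000 and
// pad_end = page_end(0x7000004000) = 0x7000004000, so the 0x2000 bytes of
// zero padding in between are released with MADV_DONTNEED.
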
bool ElfReader::MapBssSection(const ElfW(Phdr)* phdr, ElfW(Addr) seg_page_end,
                              ElfW(Addr) seg_file_end) {
  // NOTE: We do not need to handle .bss in 16KiB compat mode since the mapping
  // reservation is anonymous and RW to begin with.
  if (should_use_16kib_app_compat_) {
    return true;
  }

  // seg_file_end is now the first page address after the file content.
  seg_file_end = page_end(seg_file_end);

  if (seg_page_end <= seg_file_end) {
    return true;
  }

  // If seg_page_end is larger than seg_file_end, we need to zero
  // anything between them. This is done by using a private anonymous
  // map for all extra pages.
  size_t zeromap_size = seg_page_end - seg_file_end;
  void* zeromap =
      mmap(reinterpret_cast<void*>(seg_file_end), zeromap_size, PFLAGS_TO_PROT(phdr->p_flags),
           MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (zeromap == MAP_FAILED) {
    DL_ERR("couldn't map .bss section for \"%s\": %m", name_.c_str());
    return false;
  }

  // Set the VMA name using prctl.
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");

  return true;
}

bool ElfReader::LoadSegments() {
  // NOTE: The compat (legacy) page size (4096) must be used when aligning
  // the 4KiB segments for loading in compat mode. The larger 16KiB page size
  // would lead to overwriting adjacent segments since the ELF's segment(s)
  // are not 16KiB aligned.
  size_t seg_align = should_use_16kib_app_compat_ ? kCompatPageSize : kPageSize;

  size_t min_palign = phdr_table_get_minimum_alignment(phdr_table_, phdr_num_);
  // Only enforce this on 16 KB systems with app compat disabled.
  // Apps may rely on undefined behavior here on 4 KB systems,
  // which was the norm before this change was introduced.
  if (kPageSize >= 16384 && min_palign < kPageSize && !should_use_16kib_app_compat_) {
    DL_ERR("\"%s\" program alignment (%zu) cannot be smaller than system page size (%zu)",
           name_.c_str(), min_palign, kPageSize);
    return false;
  }

  if (!Setup16KiBAppCompat()) {
    DL_ERR("\"%s\" failed to setup 16KiB App Compat", name_.c_str());
    return false;
  }

  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_,
                             should_use_16kib_app_compat_);

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + p_memsz;

    ElfW(Addr) seg_page_end = align_up(seg_end, seg_align);

    ElfW(Addr) seg_file_end = seg_start + p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + p_filesz;

    ElfW(Addr) file_page_start = align_down(file_start, seg_align);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      // Pass the file_length, since it may have been extended by _extend_load_segment_vma().
      if (should_use_16kib_app_compat_) {
        if (!CompatMapSegment(i, file_length)) {
          return false;
        }
      } else {
        if (!MapSegment(i, file_length)) {
          return false;
        }
      }
    }

    ZeroFillSegment(phdr);

    DropPaddingPages(phdr, seg_file_end);

    if (!MapBssSection(phdr, seg_page_end, seg_file_end)) {
      return false;
    }
  }
  return true;
}

Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001101/* Used internally to set the protection bits of all loaded segments,
1102 * with optional extra flags (i.e. really PROT_WRITE). Used by
1103 * phdr_table_protect_segments and phdr_table_unprotect_segments.
1104 */
1105static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Kalesh Singh4084b552024-03-13 13:35:49 -07001106 ElfW(Addr) load_bias, int extra_prot_flags,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001107 bool should_pad_segments, bool should_use_16kib_app_compat) {
Kalesh Singh4084b552024-03-13 13:35:49 -07001108 for (size_t i = 0; i < phdr_count; ++i) {
1109 const ElfW(Phdr)* phdr = &phdr_table[i];
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001110
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001111 if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
1112 continue;
1113 }
1114
Kalesh Singh4084b552024-03-13 13:35:49 -07001115 ElfW(Addr) p_memsz = phdr->p_memsz;
1116 ElfW(Addr) p_filesz = phdr->p_filesz;
Kalesh Singhb23787f2024-09-05 08:22:06 +00001117 _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments,
1118 should_use_16kib_app_compat);
Kalesh Singh4084b552024-03-13 13:35:49 -07001119
1120 ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
1121 ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001122
Tamas Petz8d55d182020-02-24 14:15:25 +01001123 int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
1124 if ((prot & PROT_WRITE) != 0) {
Nick Kralevich8fdb3412015-04-01 16:57:50 -07001125 // make sure we're never simultaneously writable / executable
1126 prot &= ~PROT_EXEC;
1127 }
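    // For example (hypothetical case): an r-x segment being unprotected for
    // relocation (extra_prot_flags == PROT_WRITE) ends up rw- here, never rwx.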
Tamas Petz8d55d182020-02-24 14:15:25 +01001128#if defined(__aarch64__)
1129 if ((prot & PROT_EXEC) == 0) {
1130 // Though it is not specified, don't add PROT_BTI if the segment is not
1131 // executable.
1132 prot &= ~PROT_BTI;
1133 }
1134#endif
Nick Kralevich8fdb3412015-04-01 16:57:50 -07001135
Tamas Petz8d55d182020-02-24 14:15:25 +01001136 int ret =
1137 mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001138 if (ret < 0) {
1139 return -1;
1140 }
1141 }
1142 return 0;
1143}
1144
1145/* Restore the original protection modes for all loadable segments.
1146 * You should only call this after phdr_table_unprotect_segments and
1147 * applying all relocations.
1148 *
Tamas Petz8d55d182020-02-24 14:15:25 +01001149 * AArch64: also called from linker_main and ElfReader::Load to apply
1150 * PROT_BTI to the loaded main executable and other loaded shared objects.
1151 *
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001152 * Input:
1153 * phdr_table -> program header table
1154 * phdr_count -> number of entries in tables
1155 * load_bias -> load bias
Kalesh Singh4084b552024-03-13 13:35:49 -07001156 * should_pad_segments -> Are segments extended to avoid gaps in the memory map
Kalesh Singhb23787f2024-09-05 08:22:06 +00001157 * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
Tamas Petz8d55d182020-02-24 14:15:25 +01001158 * prop -> GnuPropertySection or nullptr
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001159 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001160 * 0 on success, -1 on failure (error code in errno).
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001161 */
Tamas Petz8d55d182020-02-24 14:15:25 +01001162int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Kalesh Singh4084b552024-03-13 13:35:49 -07001163 ElfW(Addr) load_bias, bool should_pad_segments,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001164 bool should_use_16kib_app_compat,
Kalesh Singh4084b552024-03-13 13:35:49 -07001165 const GnuPropertySection* prop __unused) {
Tamas Petz8d55d182020-02-24 14:15:25 +01001166 int prot = 0;
1167#if defined(__aarch64__)
1168 if ((prop != nullptr) && prop->IsBTICompatible()) {
1169 prot |= PROT_BTI;
1170 }
1171#endif
Kalesh Singhb23787f2024-09-05 08:22:06 +00001172 return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments,
1173 should_use_16kib_app_compat);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001174}
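// A minimal usage sketch (illustrative only; the soinfo accessors below are
// assumptions, not necessarily the exact call site in linker.cpp): once all
// relocations have been applied, the original protections are restored with:
//
//   if (phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias,
//                                   si->should_pad_segments(),
//                                   si->should_use_16kib_app_compat(),
//                                   &note_gnu_property) < 0) {
//     DL_ERR("can't protect segments: %s", strerror(errno));
//   }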
1175
1176/* Change the protection of all loaded segments in memory to writable.
1177 * This is useful before performing relocations. Once completed, you
1178 * will have to call phdr_table_protect_segments to restore the original
1179 * protection flags on all segments.
1180 *
1181 * Note that some writable segments can also have their content turned
1182 * to read-only by calling phdr_table_protect_gnu_relro. This is no
1183 * performed here.
1184 *
1185 * Input:
1186 * phdr_table -> program header table
1187 * phdr_count -> number of entries in tables
1188 * load_bias -> load bias
Kalesh Singh4084b552024-03-13 13:35:49 -07001189 * should_pad_segments -> Are segments extended to avoid gaps in the memory map
Kalesh Singhb23787f2024-09-05 08:22:06 +00001190 * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001191 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001192 * 0 on success, -1 on failure (error code in errno).
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001193 */
Kalesh Singhb23787f2024-09-05 08:22:06 +00001194int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1195 ElfW(Addr) load_bias, bool should_pad_segments,
1196 bool should_use_16kib_app_compat) {
Kalesh Singh4084b552024-03-13 13:35:49 -07001197 return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001198 should_pad_segments, should_use_16kib_app_compat);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001199}
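// Typical lifecycle sketch (illustrative; relocate_library() is a hypothetical
// stand-in for the linker's real relocation step):
//
//   phdr_table_unprotect_segments(phdr, count, bias, pad, compat);      // RO -> RW
//   relocate_library(...);                                              // patch absolute addresses
//   phdr_table_protect_segments(phdr, count, bias, pad, compat, prop);  // restore original prot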
1200
Kalesh Singh702d9b02024-03-13 13:38:04 -07001201static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
1202 const ElfW(Phdr)* phdr_table, size_t phdr_count,
1203 ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001204 bool should_pad_segments,
1205 bool should_use_16kib_app_compat) {
Kalesh Singh702d9b02024-03-13 13:38:04 -07001206 // Find the index and phdr of the LOAD containing the GNU_RELRO segment
1207 for (size_t index = 0; index < phdr_count; ++index) {
1208 const ElfW(Phdr)* phdr = &phdr_table[index];
1209
1210 if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
1211 // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
1212 // LOAD segment mem size, we need to protect only a partial region of the
1213 // LOAD segment and therefore cannot avoid a VMA split.
1214 //
1215 // Note: Don't check the page-aligned mem sizes since the extended protection
1216 // may incorrectly write protect non-relocation data.
1217 //
1218 // Example:
1219 //
1220 // |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
1221 // ----------------------------------------------------------------
1222 // | | | | |
1223 // SEG X | RO | RO | RW | | SEG Y
1224 // | | | | |
1225 // ----------------------------------------------------------------
1226 // | | |
1227 // | | |
1228 // | | |
1229 // relro_vaddr relro_vaddr relro_vaddr
1230 // (load_vaddr) + +
1231 // relro_memsz load_memsz
1232 //
1233 // ----------------------------------------------------------------
1234 // | PAGE | PAGE |
1235 // ----------------------------------------------------------------
1236 // | Potential |
1237 // |----- Extended RO ----|
1238 // | Protection |
1239 //
1240 // If the check below uses page aligned mem sizes it will cause incorrect write
1241 // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
1242 if (relro_phdr->p_memsz < phdr->p_memsz) {
1243 return;
1244 }
1245
1246 ElfW(Addr) p_memsz = phdr->p_memsz;
1247 ElfW(Addr) p_filesz = phdr->p_filesz;
1248
1249 // Attempt extending the VMA (mprotect range). Without extending the range,
1250 // mprotect will only RO protect a part of the extended RW LOAD segment, which
1251 // will leave an extra split RW VMA (the gap).
1252 _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001253 should_pad_segments, should_use_16kib_app_compat);
Kalesh Singh702d9b02024-03-13 13:38:04 -07001254
1255 *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
1256 return;
1257 }
1258 }
1259}
1260
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001261/* Used internally by phdr_table_protect_gnu_relro and
1262 * phdr_table_unprotect_gnu_relro.
1263 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001264static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Kalesh Singh702d9b02024-03-13 13:38:04 -07001265 ElfW(Addr) load_bias, int prot_flags,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001266 bool should_pad_segments,
1267 bool should_use_16kib_app_compat) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001268 const ElfW(Phdr)* phdr = phdr_table;
1269 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001270
Elliott Hughes0266ae52014-02-10 17:46:57 -08001271 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1272 if (phdr->p_type != PT_GNU_RELRO) {
1273 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001274 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001275
1276 // Tricky: what happens when the relro segment does not start
1277 // or end at page boundaries? We're going to be over-protective
1278 // here and mark every page touched by the segment as read-only.
1279
1280 // This seems to match Ian Lance Taylor's description of the
1281 // feature at http://www.airs.com/blog/archives/189.
1282
1283 // Extract:
1284 // Note that the current dynamic linker code will only work
1285 // correctly if the PT_GNU_RELRO segment starts on a page
1286 // boundary. This is because the dynamic linker rounds the
1287 // p_vaddr field down to the previous page boundary. If
1288 // there is anything on the page which should not be read-only,
1289 // the program is likely to fail at runtime. So in effect the
1290 // linker must only emit a PT_GNU_RELRO segment if it ensures
1291 // that it starts on a page boundary.
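    // Worked example (hypothetical numbers, 4096-byte pages): with
    // p_vaddr = 0x10020 and p_memsz = 0x2000,
    //   seg_page_start = page_start(0x10020) + load_bias = 0x10000 + load_bias
    //   seg_page_end   = page_end(0x12020)   + load_bias = 0x13000 + load_bias
    // so the mprotect below covers three full pages even though the RELRO
    // region itself starts and ends mid-page.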
Zheng Pan9535c322024-02-14 00:04:10 +00001292 ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1293 ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
Kalesh Singh702d9b02024-03-13 13:38:04 -07001294 _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001295 should_pad_segments, should_use_16kib_app_compat);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001296
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001297 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
Elliott Hughes0266ae52014-02-10 17:46:57 -08001298 seg_page_end - seg_page_start,
1299 prot_flags);
1300 if (ret < 0) {
1301 return -1;
1302 }
1303 }
1304 return 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001305}
1306
1307/* Apply GNU relro protection if specified by the program header. This will
1308 * turn some of the pages of a writable PT_LOAD segment to read-only, as
1309 * specified by one or more PT_GNU_RELRO segments. This must always be
1310 * performed after relocations.
1311 *
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001312 * The areas typically covered are .got and .data.rel.ro; these are
1313 * read-only from the program's POV, but contain absolute addresses
1314 * that need to be relocated before use.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001315 *
1316 * Input:
1317 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001318 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001319 * load_bias -> load bias
Kalesh Singh702d9b02024-03-13 13:38:04 -07001320 * should_pad_segments -> Were segments extended to avoid gaps in the memory map
Kalesh Singhb23787f2024-09-05 08:22:06 +00001321 * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001322 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001323 * 0 on success, -1 on failure (error code in errno).
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001324 */
Kalesh Singh702d9b02024-03-13 13:38:04 -07001325int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001326 ElfW(Addr) load_bias, bool should_pad_segments,
1327 bool should_use_16kib_app_compat) {
Kalesh Singh702d9b02024-03-13 13:38:04 -07001328 return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001329 should_pad_segments, should_use_16kib_app_compat);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001330}
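// Expected ordering sketch (illustrative; the soinfo accessors are assumptions):
//
//   // ... all relocations applied ...
//   if (phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias,
//                                    si->should_pad_segments(),
//                                    si->should_use_16kib_app_compat()) < 0) {
//     DL_ERR("can't enable GNU RELRO protection: %s", strerror(errno));
//   }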
1331
Kalesh Singhce1c3cf2024-09-30 13:26:23 -07001332/*
1333 * Apply RX protection to the compat relro region of the ELF being loaded in
1334 * 16KiB compat mode.
1335 *
1336 * Input:
1337 * start -> start address of the compat relro region.
1338 * size -> size of the compat relro region in bytes.
1339 * Return:
1340 * 0 on success, -1 on failure (error code in errno).
1341 */
1342int phdr_table_protect_gnu_relro_16kib_compat(ElfW(Addr) start, ElfW(Addr) size) {
1343 return mprotect(reinterpret_cast<void*>(start), size, PROT_READ | PROT_EXEC);
1344}
1345
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001346/* Serialize the GNU relro segments to the given file descriptor. This can be
1347 * performed after relocations to allow another process to later share the
1348 * relocated segment, if it was loaded at the same address.
1349 *
1350 * Input:
1351 * phdr_table -> program header table
1352 * phdr_count -> number of entries in tables
1353 * load_bias -> load bias
1354 * fd -> writable file descriptor to use
Torne (Richard Coles)fa9f7f22019-04-02 17:04:42 -04001355 * file_offset -> pointer to offset into file descriptor to use/update
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001356 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001357 * 0 on success, -1 on failure (error code in errno).
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001358 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001359int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
1360 size_t phdr_count,
1361 ElfW(Addr) load_bias,
Torne (Richard Coles)fa9f7f22019-04-02 17:04:42 -04001362 int fd,
1363 size_t* file_offset) {
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001364 const ElfW(Phdr)* phdr = phdr_table;
1365 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001366
1367 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1368 if (phdr->p_type != PT_GNU_RELRO) {
1369 continue;
1370 }
1371
Peter Collingbournebb11ee62022-05-02 12:26:16 -07001372 ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1373 ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001374 ssize_t size = seg_page_end - seg_page_start;
1375
1376 ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
1377 if (written != size) {
1378 return -1;
1379 }
1380 void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
Torne (Richard Coles)fa9f7f22019-04-02 17:04:42 -04001381 MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001382 if (map == MAP_FAILED) {
1383 return -1;
1384 }
Torne (Richard Coles)fa9f7f22019-04-02 17:04:42 -04001385 *file_offset += size;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001386 }
1387 return 0;
1388}
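// Caller sketch (illustrative; the file path and soinfo accessors are assumptions,
// and in practice this path is driven by android_dlopen_ext with
// ANDROID_DLEXT_WRITE_RELRO): serialize the relocated RELRO pages so another
// process can later reuse them via phdr_table_map_gnu_relro().
//
//   int fd = open("/data/local/tmp/libfoo.relro", O_RDWR | O_CREAT | O_TRUNC, 0600);
//   size_t file_offset = 0;
//   if (fd == -1 ||
//       phdr_table_serialize_gnu_relro(si->phdr, si->phnum, si->load_bias,
//                                      fd, &file_offset) < 0) {
//     DL_ERR("failed to serialize GNU RELRO: %s", strerror(errno));
//   }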
1389
1390/* Where possible, replace the GNU relro segments with mappings of the given
1391 * file descriptor. This can be performed after relocations to allow a file
1392 * previously created by phdr_table_serialize_gnu_relro in another process to
1393 * replace the dirty relocated pages, saving memory, if it was loaded at the
1394 * same address. We have to compare the data before we map over it, since some
1395 * parts of the relro segment may not be identical due to other libraries in
1396 * the process being loaded at different addresses.
1397 *
1398 * Input:
1399 * phdr_table -> program header table
1400 * phdr_count -> number of entries in tables
1401 * load_bias -> load bias
1402 * fd -> readable file descriptor to use
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001403 * file_offset -> pointer to offset into file descriptor to use/update
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001404 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001405 * 0 on success, -1 on failure (error code in errno).
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001406 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001407int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
1408 size_t phdr_count,
1409 ElfW(Addr) load_bias,
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001410 int fd,
1411 size_t* file_offset) {
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001412 // Map the file at a temporary location so we can compare its contents.
1413 struct stat file_stat;
1414 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
1415 return -1;
1416 }
1417 off_t file_size = file_stat.st_size;
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001418 void* temp_mapping = nullptr;
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +01001419 if (file_size > 0) {
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001420 temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +01001421 if (temp_mapping == MAP_FAILED) {
1422 return -1;
1423 }
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001424 }
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001425
1426 // Iterate over the relro segments and compare/remap the pages.
1427 const ElfW(Phdr)* phdr = phdr_table;
1428 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
1429
1430 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1431 if (phdr->p_type != PT_GNU_RELRO) {
1432 continue;
1433 }
1434
Peter Collingbournebb11ee62022-05-02 12:26:16 -07001435 ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1436 ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001437
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001438 char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001439 char* mem_base = reinterpret_cast<char*>(seg_page_start);
1440 size_t match_offset = 0;
1441 size_t size = seg_page_end - seg_page_start;
1442
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001443 if (file_size - *file_offset < size) {
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +01001444 // File is too short to compare to this segment. The contents are likely
1445 // different as well (it's probably for a different library version) so
1446 // just don't bother checking.
1447 break;
1448 }
1449
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001450 while (match_offset < size) {
1451 // Skip over dissimilar pages.
1452 while (match_offset < size &&
Peter Collingbournebb11ee62022-05-02 12:26:16 -07001453 memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
1454 match_offset += page_size();
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001455 }
1456
1457 // Count similar pages.
1458 size_t mismatch_offset = match_offset;
1459 while (mismatch_offset < size &&
Peter Collingbournebb11ee62022-05-02 12:26:16 -07001460 memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
1461 mismatch_offset += page_size();
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001462 }
1463
1464 // Map over similar pages.
1465 if (mismatch_offset > match_offset) {
1466 void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001467 PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001468 if (map == MAP_FAILED) {
1469 munmap(temp_mapping, file_size);
1470 return -1;
1471 }
1472 }
1473
1474 match_offset = mismatch_offset;
1475 }
1476
1477 // Add to the base file offset in case there are multiple relro segments.
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001478 *file_offset += size;
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001479 }
1480 munmap(temp_mapping, file_size);
1481 return 0;
1482}
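// Counterpart sketch (illustrative; matches the serialize example above and is
// driven in practice by ANDROID_DLEXT_USE_RELRO): a second process that loaded the
// same library at the same address replaces its dirty relocated pages with clean
// file-backed ones wherever the contents compare equal.
//
//   int fd = open("/data/local/tmp/libfoo.relro", O_RDONLY);
//   size_t file_offset = 0;
//   if (fd == -1 ||
//       phdr_table_map_gnu_relro(si->phdr, si->phnum, si->load_bias,
//                                fd, &file_offset) < 0) {
//     DL_ERR("failed to map shared GNU RELRO: %s", strerror(errno));
//   }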
1483
1484
Elliott Hughes4eeb1f12013-10-25 17:38:02 -07001485#if defined(__arm__)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001486/* Return the address and size of the .ARM.exidx section in memory,
1487 * if present.
1488 *
1489 * Input:
1490 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001491 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001492 * load_bias -> load bias
1493 * Output:
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001494 * arm_exidx -> address of table in memory (null on failure).
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001495 * arm_exidx_count -> number of items in table (0 on failure).
1496 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001497 * 0 on success, -1 on failure (_no_ error code in errno)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001498 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001499int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1500 ElfW(Addr) load_bias,
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001501 ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001502 const ElfW(Phdr)* phdr = phdr_table;
1503 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001504
Elliott Hughes0266ae52014-02-10 17:46:57 -08001505 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1506 if (phdr->p_type != PT_ARM_EXIDX) {
1507 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001508 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001509
1510 *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001511 *arm_exidx_count = phdr->p_memsz / 8;
Elliott Hughes0266ae52014-02-10 17:46:57 -08001512 return 0;
1513 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001514 *arm_exidx = nullptr;
Elliott Hughes0266ae52014-02-10 17:46:57 -08001515 *arm_exidx_count = 0;
1516 return -1;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001517}
Elliott Hughes4eeb1f12013-10-25 17:38:02 -07001518#endif
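// Illustrative lookup sketch (not the actual unwinder wiring): on ARM EHABI targets
// the unwinder asks the linker for the exception index table of the object that
// contains a given PC, roughly:
//
//   ElfW(Addr)* exidx = nullptr;
//   size_t exidx_count = 0;
//   if (phdr_table_get_arm_exidx(si->phdr, si->phnum, si->load_bias,
//                                &exidx, &exidx_count) == 0) {
//     // Each of the exidx_count entries is an 8-byte (offset, data) pair.
//   }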
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001519
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001520/* Return the address and size of the ELF file's .dynamic section in memory,
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001521 * or null if missing.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001522 *
1523 * Input:
1524 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001525 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001526 * load_bias -> load bias
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001527 * Output:
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001528 * dynamic -> address of table in memory (null on failure).
Ningsheng Jiane93be992014-09-16 15:22:10 +08001529 * dynamic_flags -> protection flags for section (unset on failure)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001530 * Return:
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001531 * void
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001532 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001533void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Ningsheng Jiane93be992014-09-16 15:22:10 +08001534 ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
1535 ElfW(Word)* dynamic_flags) {
Dmitriy Ivanov498eb182014-09-05 14:57:59 -07001536 *dynamic = nullptr;
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001537 for (size_t i = 0; i<phdr_count; ++i) {
1538 const ElfW(Phdr)& phdr = phdr_table[i];
1539 if (phdr.p_type == PT_DYNAMIC) {
1540 *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
Ningsheng Jiane93be992014-09-16 15:22:10 +08001541 if (dynamic_flags) {
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001542 *dynamic_flags = phdr.p_flags;
Ningsheng Jiane93be992014-09-16 15:22:10 +08001543 }
Dmitriy Ivanov14669a92014-09-05 16:42:53 -07001544 return;
1545 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001546 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001547}
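// Usage sketch (illustrative; the caller shown is an assumption): locate PT_DYNAMIC
// and walk its entries until DT_NULL.
//
//   ElfW(Dyn)* dynamic = nullptr;
//   ElfW(Word) dynamic_flags = 0;
//   phdr_table_get_dynamic_section(phdr, phdr_count, load_bias, &dynamic, &dynamic_flags);
//   if (dynamic != nullptr) {
//     for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
//       // e.g. DT_NEEDED entries carry string-table offsets of required libraries.
//     }
//   }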
1548
Evgenii Stepanovd640b222015-07-10 17:54:01 -07001549/* Return the program interpreter string, or nullptr if missing.
1550 *
1551 * Input:
1552 * phdr_table -> program header table
1553 * phdr_count -> number of entries in tables
1554 * load_bias -> load bias
1555 * Return:
1556 * pointer to the program interpreter string.
1557 */
Tamas Petz8d55d182020-02-24 14:15:25 +01001558const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Evgenii Stepanovd640b222015-07-10 17:54:01 -07001559 ElfW(Addr) load_bias) {
1560 for (size_t i = 0; i<phdr_count; ++i) {
1561 const ElfW(Phdr)& phdr = phdr_table[i];
1562 if (phdr.p_type == PT_INTERP) {
1563 return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
1564 }
1565 }
1566 return nullptr;
1567}
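// Usage sketch (illustrative): a dynamically linked executable typically yields
// something like "/system/bin/linker64"; the linker itself and static executables
// have no PT_INTERP, so nullptr is returned.
//
//   const char* interp = phdr_table_get_interpreter_name(phdr, phdr_count, load_bias);
//   if (interp != nullptr) {
//     // e.g. compare against the linker's own path.
//   }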
1568
Robert Grosse4544d9f2014-10-15 14:32:19 -07001569// Sets loaded_phdr_ to the address of the program header table as it appears
1570// in the loaded segments in memory. This is in contrast with phdr_table_,
1571// which is temporary and will be released before the library is relocated.
Elliott Hughes650be4e2013-03-05 18:47:58 -08001572bool ElfReader::FindPhdr() {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001573 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001574
Elliott Hughes650be4e2013-03-05 18:47:58 -08001575 // If there is a PT_PHDR, use it directly.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001576 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001577 if (phdr->p_type == PT_PHDR) {
1578 return CheckPhdr(load_bias_ + phdr->p_vaddr);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001579 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001580 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001581
Elliott Hughes650be4e2013-03-05 18:47:58 -08001582 // Otherwise, check the first loadable segment. If its file offset
1583 // is 0, it starts with the ELF header, and we can trivially find the
1584 // loaded program header from it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001585 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001586 if (phdr->p_type == PT_LOAD) {
1587 if (phdr->p_offset == 0) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001588 ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001589 const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001590 ElfW(Addr) offset = ehdr->e_phoff;
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001591 return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001592 }
1593 break;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001594 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001595 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001596
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001597 DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -08001598 return false;
1599}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001600
Tamas Petz8d55d182020-02-24 14:15:25 +01001601// Tries to find .note.gnu.property section.
1602// It is not considered an error if such section is missing.
1603bool ElfReader::FindGnuPropertySection() {
1604#if defined(__aarch64__)
1605 note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
1606#endif
1607 return true;
1608}
1609
Elliott Hughes650be4e2013-03-05 18:47:58 -08001610// Ensures that our program header is actually within a loadable
1611// segment. This should help catch badly-formed ELF files that
1612// would cause the linker to crash later when trying to access it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001613bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
1614 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
1615 ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
Dmitriy Ivanovcf1cbbe2015-10-19 16:57:46 -07001616 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001617 if (phdr->p_type != PT_LOAD) {
1618 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001619 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001620 ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
1621 ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -08001622 if (seg_start <= loaded && loaded_end <= seg_end) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001623 loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001624 return true;
1625 }
1626 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001627 DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
1628 name_.c_str(), reinterpret_cast<void*>(loaded));
Elliott Hughes650be4e2013-03-05 18:47:58 -08001629 return false;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001630}