/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_asm_note.h"
#include "private/CFIShadow.h" // For kLibraryAlignment
#include "private/elf_note.h"

#include <android-base/file.h>
#include <android-base/properties.h>

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__riscv)
  return EM_RISCV;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset -> segment file offset
    p_filesz -> segment file size
    p_memsz  -> segment memory size (always >= p_filesz)
    p_vaddr  -> segment's virtual address
    p_flags  -> segment flags (e.g. readable, writable, executable)
    p_align  -> segment's in-memory and in-file alignment

  We will ignore the p_paddr field of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then will load all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address, and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap around UINT32_MAX for possible large p_vaddr values).

  And that the phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + page_offset(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
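
/*
  Illustrative sketch (assuming 4KiB pages): applying the formula above to the
  example segments, the first segment's page-aligned mapping starts at
  phdr0_load_address = 0xa0030000 while phdr0->p_vaddr = 0x30000, so:

      load_bias = 0xa0030000 - page_start(0x30000)
                = 0xa0030000 - 0x30000
                = 0xa0000000

  and every p_vaddr read from the file maps to p_vaddr + 0xa0000000 in memory.
 */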

static const size_t kPageSize = page_size();

/*
 * Generic PMD size calculation:
 *  - Each page table (PT) is of size 1 page.
 *  - Each page table entry (PTE) is of size 64 bits.
 *  - Each PTE locates one physical page frame (PFN) of size 1 page.
 *  - A PMD entry locates 1 page table (PT)
 *
 *   PMD size = Num entries in a PT * page_size
 */
static const size_t kPmdSize = (kPageSize / sizeof(uint64_t)) * kPageSize;
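
// Worked example of the formula above (illustrative; follows from the 64-bit
// PTE size assumed in the comment):
//   4KiB pages:  (4096 / 8)  * 4096  =  512 * 4KiB  =  2MiB
//   16KiB pages: (16384 / 8) * 16384 = 2048 * 16KiB = 32MiB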

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection() &&
      ReadPadSegmentNote()) {
    did_read_ = true;
  }

  if (kPageSize == 0x4000 && phdr_table_get_minimum_alignment(phdr_table_, phdr_num_) == 0x1000) {
    // This prop needs to be read on 16KiB devices for each ELF where min_palign is 4KiB.
    // It cannot be cached since the developer may toggle app compat on/off.
    // This check will be removed once app compat is made the default on 16KiB devices.
    should_use_16kib_app_compat_ =
        ::android::base::GetBoolProperty("bionic.linker.16kb.app_compat.enabled", false);
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  bool reserveSuccess = ReserveAddressSpace(address_space);
  if (reserveSuccess && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A, loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ =
          (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_, should_pad_segments_,
                                       should_use_16kib_app_compat_, &note_gnu_property_) == 0);
    }
#endif
  }
  if (reserveSuccess && !did_load_) {
    if (load_start_ != nullptr && load_size_ != 0) {
      if (!mapped_by_caller_) {
        munmap(load_start_, load_size_);
      }
    }
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_RISCV) return "EM_RISCV";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called to
  // check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none of
  // which can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %m", name_.c_str());
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %m", name_.c_str());
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure the dynamic_shdr offset and size match the PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %m", name_.c_str());
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %m", name_.c_str());
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = page_start(min_vaddr);
  max_vaddr = page_end(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
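
// Illustrative example (assuming 4KiB pages), reusing the two segments from the
// technical note at the top of this file:
//   segment 0: vaddr 0x30000, memsz 0x4000  -> 0x30000...0x34000
//   segment 1: vaddr 0x40000, memsz 0x8000  -> 0x40000...0x48000
//   min_vaddr = page_start(0x30000) = 0x30000
//   max_vaddr = page_end(0x48000)   = 0x48000
//   load size = 0x48000 - 0x30000   = 0x18000 (96KiB)
// The unused 0x34000...0x40000 gap is included, since the reservation must
// preserve the segments' relative layout.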

// Returns the maximum p_align associated with a loadable segment in the ELF
// program header table. Used to determine whether the file should be loaded at
// a specific virtual address alignment for use with huge pages.
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t maximum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    maximum_alignment = std::max(maximum_alignment, static_cast<size_t>(phdr->p_align));
  }

#if defined(__LP64__)
  return maximum_alignment;
#else
  return page_size();
#endif
}

// Returns the minimum p_align associated with a loadable segment in the ELF
// program header table. Used to determine if the program alignment is compatible
// with the page size of this system.
size_t phdr_table_get_minimum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t minimum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    if (phdr->p_align <= 1) {
      continue;
    }

    minimum_alignment = std::min(minimum_alignment, static_cast<size_t>(phdr->p_align));
  }

  return minimum_alignment;
}
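
// Illustrative example: an ELF whose PT_LOAD segments all use p_align == 0x1000
// has a minimum alignment of 4KiB. That matches page_size() on a 4KiB-page
// device, but is smaller than the runtime page size on a 16KiB-page device,
// which is the case ElfReader::Read() checks before consulting the 16KiB
// app-compat property and that LoadSegments() otherwise rejects.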

// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
                                         void** out_gap_start, size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Reserve enough space to properly align the library's start address.
  mapping_align = std::max(mapping_align, start_align);
  if (mapping_align == page_size()) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match the second level
  // page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible huge pages before it
    // to improve address randomization and make it harder to locate this library code by probing.
    munmap(mmap_ptr, mmap_size);
    mapping_align = std::max(mapping_align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, mapping_align);
  uint8_t* last = align_down(gap_start, mapping_align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
  uint8_t* start = first + n * start_align;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
  // to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  if (should_use_16kib_app_compat_) {
    // Reserve additional space for aligning the permission boundary in compat loading.
    // Up to kPageSize-kCompatPageSize additional space is needed, but the reservation
    // is done with mmap, which gives kPageSize multiple-sized reservations.
    load_size_ += kPageSize;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    size_t start_alignment = page_size();
    if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
      size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
      // Limit alignment to PMD size as other alignments reduce the number of
      // bits available for ASLR for no benefit.
      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : page_size();
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                        &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;

  if (should_use_16kib_app_compat_) {
    // In compat mode make the initial mapping RW, since the ELF contents will be
    // read into it instead of being mapped over it.
    mprotect(reinterpret_cast<void*>(start), load_size_, PROT_READ | PROT_WRITE);
  }

  return true;
}

/*
 * Returns true if the kernel supports page size migration, else false.
 */
bool page_size_migration_supported() {
  static bool pgsize_migration_enabled = []() {
    std::string enabled;
    if (!android::base::ReadFileToString("/sys/kernel/mm/pgsize_migration/enabled", &enabled)) {
      return false;
    }
    return enabled.find("1") != std::string::npos;
  }();
  return pgsize_migration_enabled;
}

// Find the ELF note of type NT_ANDROID_TYPE_PAD_SEGMENT and check that the desc value is 1.
bool ElfReader::ReadPadSegmentNote() {
  if (!page_size_migration_supported()) {
    // Don't attempt to read the note, since segment extension isn't
    // supported; but return true so that loading can continue normally.
    return true;
  }

  // The ELF can have multiple PT_NOTE's, check them all
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_NOTE) {
      continue;
    }

    // Some obfuscated ELFs may contain "empty" PT_NOTE program headers that don't
    // point to any part of the ELF (p_memsz == 0). Skip these since there is
    // nothing to decode. See: b/324468126
    if (phdr->p_memsz == 0) {
      continue;
    }

    // If the PT_NOTE extends beyond the file, the ELF is doing something
    // strange -- obfuscation, embedding hidden loaders, ...
    //
    // It doesn't contain the pad_segment note. Skip it to avoid SIGBUS
    // by accesses beyond the file.
    off64_t note_end_off = file_offset_ + phdr->p_offset + phdr->p_filesz;
    if (note_end_off > file_size_) {
      continue;
    }

    // note_fragment is scoped to within the loop so that there is
    // at most 1 PT_NOTE mapped at any time during this search.
    MappedFileFragment note_fragment;
    if (!note_fragment.Map(fd_, file_offset_, phdr->p_offset, phdr->p_memsz)) {
      DL_ERR("\"%s\": PT_NOTE mmap(nullptr, %p, PROT_READ, MAP_PRIVATE, %d, %p) failed: %m",
             name_.c_str(), reinterpret_cast<void*>(phdr->p_memsz), fd_,
             reinterpret_cast<void*>(page_start(file_offset_ + phdr->p_offset)));
      return false;
    }

    const ElfW(Nhdr)* note_hdr = nullptr;
    const char* note_desc = nullptr;
    if (!__get_elf_note(NT_ANDROID_TYPE_PAD_SEGMENT, "Android",
                        reinterpret_cast<ElfW(Addr)>(note_fragment.data()),
                        phdr, &note_hdr, &note_desc)) {
      continue;
    }

    if (note_hdr->n_descsz != sizeof(ElfW(Word))) {
      DL_ERR("\"%s\" NT_ANDROID_TYPE_PAD_SEGMENT note has unexpected n_descsz: %u",
             name_.c_str(), reinterpret_cast<unsigned int>(note_hdr->n_descsz));
      return false;
    }

    // 1 == enabled, 0 == disabled
    should_pad_segments_ = *reinterpret_cast<const ElfW(Word)*>(note_desc) == 1;
    return true;
  }

  return true;
}

static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            size_t phdr_idx, ElfW(Addr)* p_memsz,
                                            ElfW(Addr)* p_filesz, bool should_pad_segments,
                                            bool should_use_16kib_app_compat) {
  // NOTE: Segment extension is only applicable where the ELF's max-page-size > runtime page size,
  // to save kernel VMA slab memory. 16KiB compat mode is the exact opposite scenario.
  if (should_use_16kib_app_compat) {
    return;
  }

  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
  const ElfW(Phdr)* next = nullptr;
  size_t next_idx = phdr_idx + 1;

  // Don't do segment extension for p_align > 64KiB; such ELFs already existed in the
  // field, e.g. 2MiB p_align for THPs, and are relatively small in number.
  //
  // The kernel can only represent padding for p_align up to 64KiB. This is because
  // the kernel uses 4 available bits in the vm_area_struct to represent padding
  // extent; and so cannot enable mitigations to avoid breaking app compatibility for
  // p_aligns > 64KiB.
  //
  // Don't perform segment extension on these to avoid app compatibility issues.
  if (phdr->p_align <= kPageSize || phdr->p_align > 64*1024 || !should_pad_segments) {
    return;
  }

  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
    next = &phdr_table[next_idx];
  }

  // If this is the last LOAD segment, no extension is needed
  if (!next || *p_memsz != *p_filesz) {
    return;
  }

  ElfW(Addr) next_start = page_start(next->p_vaddr);
  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);

  // If adjacent segment mappings overlap, no extension is needed.
  if (curr_end >= next_start) {
    return;
  }

  // Extend the LOAD segment mapping to be contiguous with that of
  // the next LOAD segment.
  ElfW(Addr) extend = next_start - curr_end;
  *p_memsz += extend;
  *p_filesz += extend;
}
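
// Illustrative example (hypothetical addresses, assuming a 4KiB-page device and
// an ELF built with a larger max-page-size): if the current PT_LOAD's extent
// ends at curr_end = page_end(p_vaddr + p_memsz) = 0x35000 and the next PT_LOAD
// begins at next_start = page_start(next->p_vaddr) = 0x38000, then
// extend = 0x3000 and both p_memsz and p_filesz grow by 0x3000, so the two
// mappings become contiguous and no gap is left in the memory map.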

bool ElfReader::MapSegment(size_t seg_idx, size_t len) {
  const ElfW(Phdr)* phdr = &phdr_table_[seg_idx];

  void* start = reinterpret_cast<void*>(page_start(phdr->p_vaddr + load_bias_));

  // The ELF may be loaded directly from a zipped APK, so the zip offset
  // must be added to find the segment offset.
  const ElfW(Addr) offset = file_offset_ + page_start(phdr->p_offset);

  int prot = PFLAGS_TO_PROT(phdr->p_flags);

  void* seg_addr = mmap64(start, len, prot, MAP_FIXED | MAP_PRIVATE, fd_, offset);

  if (seg_addr == MAP_FAILED) {
    DL_ERR("couldn't map \"%s\" segment %zd: %m", name_.c_str(), seg_idx);
    return false;
  }

  // Mark segments as huge page eligible if they meet the requirements
  if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
      get_transparent_hugepages_supported()) {
    madvise(seg_addr, len, MADV_HUGEPAGE);
  }

  return true;
}

void ElfReader::ZeroFillSegment(const ElfW(Phdr)* phdr) {
  // NOTE: In 16KiB app compat mode, the ELF mapping is anonymous, meaning that
  // RW segments are COW-ed from the kernel's zero page. So there is no need to
  // explicitly zero-fill until the last page's limit.
  if (should_use_16kib_app_compat_) {
    return;
  }

  ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
  uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;

  // If the segment is writable, and does not end on a page boundary,
  // zero-fill it until the page limit.
  //
  // Do not attempt to zero the extended region past the first partial page,
  // since doing so may:
  //   1) Result in a SIGBUS, as the region is not backed by the underlying
  //      file.
  //   2) Break the COW backing, faulting in new anon pages for a region
  //      that will not be used.
  if ((phdr->p_flags & PF_W) != 0 && page_offset(unextended_seg_file_end) > 0) {
    memset(reinterpret_cast<void*>(unextended_seg_file_end), 0,
           kPageSize - page_offset(unextended_seg_file_end));
  }
}
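
// Illustrative example (assuming 4KiB pages): for a writable segment whose file
// content ends at address 0x7f001234, page_offset(0x7f001234) = 0x234, so the
// 0x1000 - 0x234 = 0xdcc bytes from 0x7f001234 up to the page boundary are
// zeroed in place; pages beyond that are handled by MapBssSection().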

void ElfReader::DropPaddingPages(const ElfW(Phdr)* phdr, uint64_t seg_file_end) {
  // NOTE: Padding pages are only applicable where the ELF's max-page-size > runtime page size;
  // 16KiB compat mode is the exact opposite scenario.
  if (should_use_16kib_app_compat_) {
    return;
  }

  ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
  uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;

  uint64_t pad_start = page_end(unextended_seg_file_end);
  uint64_t pad_end = page_end(seg_file_end);
  CHECK(pad_start <= pad_end);

  uint64_t pad_len = pad_end - pad_start;
  if (pad_len == 0 || !page_size_migration_supported()) {
    return;
  }

  // Pages may be brought in due to readahead.
  // Drop the padding (zero) pages, to avoid reclaim work later.
  //
  // NOTE: The madvise() here is special, as it also serves to hint to the
  // kernel the portion of the LOAD segment that is padding.
  //
  // See: [1] https://android-review.googlesource.com/c/kernel/common/+/3032411
  //      [2] https://android-review.googlesource.com/c/kernel/common/+/3048835
  if (madvise(reinterpret_cast<void*>(pad_start), pad_len, MADV_DONTNEED)) {
    DL_WARN("\"%s\": madvise(0x%" PRIx64 ", 0x%" PRIx64 ", MADV_DONTNEED) failed: %m",
            name_.c_str(), pad_start, pad_len);
  }
}

bool ElfReader::MapBssSection(const ElfW(Phdr)* phdr, ElfW(Addr) seg_page_end,
                              ElfW(Addr) seg_file_end) {
  // NOTE: We do not need to handle .bss in 16KiB compat mode since the mapping
  // reservation is anonymous and RW to begin with.
  if (should_use_16kib_app_compat_) {
    return true;
  }

  // seg_file_end is now the first page address after the file content.
  seg_file_end = page_end(seg_file_end);

  if (seg_page_end <= seg_file_end) {
    return true;
  }

  // If seg_page_end is larger than seg_file_end, we need to zero
  // anything between them. This is done by using a private anonymous
  // map for all extra pages
  size_t zeromap_size = seg_page_end - seg_file_end;
  void* zeromap =
      mmap(reinterpret_cast<void*>(seg_file_end), zeromap_size, PFLAGS_TO_PROT(phdr->p_flags),
           MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (zeromap == MAP_FAILED) {
    DL_ERR("couldn't map .bss section for \"%s\": %m", name_.c_str());
    return false;
  }

  // Set the VMA name using prctl
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");

  return true;
}

bool ElfReader::LoadSegments() {
  // NOTE: The compat (legacy) page size (4096) must be used when aligning
  // the 4KiB segments for loading in compat mode. The larger 16KiB page size
  // would lead to overwriting adjacent segments since the ELF's segment(s)
  // are not 16KiB aligned.
  size_t seg_align = should_use_16kib_app_compat_ ? kCompatPageSize : kPageSize;

  size_t min_palign = phdr_table_get_minimum_alignment(phdr_table_, phdr_num_);
  // Only enforce this on 16 KB systems with app compat disabled.
  // Apps may rely on undefined behavior here on 4 KB systems,
  // which was the norm before this change was introduced.
  if (kPageSize >= 16384 && min_palign < kPageSize && !should_use_16kib_app_compat_) {
    DL_ERR("\"%s\" program alignment (%zu) cannot be smaller than system page size (%zu)",
           name_.c_str(), min_palign, kPageSize);
    return false;
  }

  if (!Setup16KiBAppCompat()) {
    DL_ERR("\"%s\" failed to setup 16KiB App Compat", name_.c_str());
    return false;
  }

  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_,
                             should_use_16kib_app_compat_);

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + p_memsz;

    ElfW(Addr) seg_page_end = align_up(seg_end, seg_align);

    ElfW(Addr) seg_file_end = seg_start + p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + p_filesz;

    ElfW(Addr) file_page_start = align_down(file_start, seg_align);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      // Pass the file_length, since it may have been extended by _extend_load_segment_vma().
      if (should_use_16kib_app_compat_) {
        if (!CompatMapSegment(i, file_length)) {
          return false;
        }
      } else {
        if (!MapSegment(i, file_length)) {
          return false;
        }
      }
    }

    ZeroFillSegment(phdr);

    DropPaddingPages(phdr, seg_file_end);

    if (!MapBssSection(phdr, seg_page_end, seg_file_end)) {
      return false;
    }
  }
  return true;
}

Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001100/* Used internally. Used to set the protection bits of all loaded segments
1101 * with optional extra flags (i.e. really PROT_WRITE). Used by
1102 * phdr_table_protect_segments and phdr_table_unprotect_segments.
1103 */
1104static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Kalesh Singh4084b552024-03-13 13:35:49 -07001105 ElfW(Addr) load_bias, int extra_prot_flags,
Kalesh Singhb23787f2024-09-05 08:22:06 +00001106 bool should_pad_segments, bool should_use_16kib_app_compat) {
Kalesh Singh4084b552024-03-13 13:35:49 -07001107 for (size_t i = 0; i < phdr_count; ++i) {
1108 const ElfW(Phdr)* phdr = &phdr_table[i];
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001109
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001110 if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
1111 continue;
1112 }
1113
Kalesh Singh4084b552024-03-13 13:35:49 -07001114 ElfW(Addr) p_memsz = phdr->p_memsz;
1115 ElfW(Addr) p_filesz = phdr->p_filesz;
Kalesh Singhb23787f2024-09-05 08:22:06 +00001116 _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments,
1117 should_use_16kib_app_compat);
Kalesh Singh4084b552024-03-13 13:35:49 -07001118
1119 ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
1120 ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001121
Tamas Petz8d55d182020-02-24 14:15:25 +01001122 int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
1123 if ((prot & PROT_WRITE) != 0) {
Nick Kralevich8fdb3412015-04-01 16:57:50 -07001124 // make sure we're never simultaneously writable / executable
1125 prot &= ~PROT_EXEC;
1126 }
#if defined(__aarch64__)
    if ((prot & PROT_EXEC) == 0) {
      // Though it is not specified, don't add PROT_BTI if the segment is not
      // executable.
      prot &= ~PROT_BTI;
    }
#endif

    int ret =
        mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * AArch64: also called from linker_main and ElfReader::Load to apply
 * PROT_BTI for loaded main so and other so-s.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
 *   should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
 *   prop        -> GnuPropertySection or nullptr
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, bool should_pad_segments,
                                bool should_use_16kib_app_compat,
                                const GnuPropertySection* prop __unused) {
  int prot = 0;
#if defined(__aarch64__)
  if ((prop != nullptr) && prop->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments,
                                   should_use_16kib_app_compat);
}
1174
1175/* Change the protection of all loaded segments in memory to writable.
1176 * This is useful before performing relocations. Once completed, you
1177 * will have to call phdr_table_protect_segments to restore the original
1178 * protection flags on all segments.
1179 *
1180 * Note that some writable segments can also have their content turned
1181 * to read-only by calling phdr_table_protect_gnu_relro. This is no
1182 * performed here.
1183 *
1184 * Input:
1185 * phdr_table -> program header table
1186 * phdr_count -> number of entries in tables
1187 * load_bias -> load bias
Kalesh Singh4084b552024-03-13 13:35:49 -07001188 * should_pad_segments -> Are segments extended to avoid gaps in the memory map
Kalesh Singhb23787f2024-09-05 08:22:06 +00001189 * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001190 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001191 * 0 on success, -1 on failure (error code in errno).
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +00001192 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                  ElfW(Addr) load_bias, bool should_pad_segments,
                                  bool should_use_16kib_app_compat) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
                                   should_pad_segments, should_use_16kib_app_compat);
}
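
// Usage sketch (illustrative only, not an actual call site in this file): a
// caller that must patch otherwise read-only segments, e.g. for text
// relocations, would pair the two helpers around the relocation work. The
// variable names below are hypothetical.
//
//   if (phdr_table_unprotect_segments(phdr, phdr_count, load_bias,
//                                     should_pad_segments,
//                                     should_use_16kib_app_compat) < 0) {
//     return false;  // mprotect failed; errno describes why.
//   }
//   // ... apply relocations that write into read-only segments ...
//   if (phdr_table_protect_segments(phdr, phdr_count, load_bias,
//                                   should_pad_segments,
//                                   should_use_16kib_app_compat,
//                                   /*prop=*/nullptr) < 0) {
//     return false;  // could not restore the original protections.
//   }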

static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
                                              bool should_pad_segments,
                                              bool should_use_16kib_app_compat) {
  // Find the index and phdr of the LOAD containing the GNU_RELRO segment
  for (size_t index = 0; index < phdr_count; ++index) {
    const ElfW(Phdr)* phdr = &phdr_table[index];

    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
      // LOAD segment mem size, we need to protect only a partial region of the
      // LOAD segment and therefore cannot avoid a VMA split.
      //
      // Note: Don't check the page-aligned mem sizes since the extended protection
      // may incorrectly write protect non-relocation data.
      //
      // Example:
      //
      //               |---- 3K ----|-- 1K --|---- 3K ----|-- 1K --|
      //       ----------------------------------------------------------------
      //               |            |        |            |        |
      //        SEG X  |     RO     |   RO   |     RW     |        |  SEG Y
      //               |            |        |            |        |
      //       ----------------------------------------------------------------
      //                            |        |            |
      //                            |        |            |
      //                            |        |            |
      //                     relro_vaddr  relro_vaddr  relro_vaddr
      //                     (load_vaddr)      +            +
      //                                  relro_memsz   load_memsz
      //
      //       ----------------------------------------------------------------
      //               |         PAGE         |         PAGE         |
      //       ----------------------------------------------------------------
      //                                      |       Potential      |
      //                                      |----- Extended RO ----|
      //                                      |       Protection     |
      //
      // If the check below uses page-aligned mem sizes it will cause incorrect write
      // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
      if (relro_phdr->p_memsz < phdr->p_memsz) {
        return;
      }

      ElfW(Addr) p_memsz = phdr->p_memsz;
      ElfW(Addr) p_filesz = phdr->p_filesz;

      // Attempt extending the VMA (mprotect range). Without extending the range,
      // mprotect will only RO protect a part of the extended RW LOAD segment, which
      // will leave an extra split RW VMA (the gap).
      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
                               should_pad_segments, should_use_16kib_app_compat);

      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
      return;
    }
  }
}

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags,
                                          bool should_pad_segments,
                                          bool should_use_16kib_app_compat) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
                               should_pad_segments, should_use_16kib_app_compat);

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
 *   should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                 ElfW(Addr) load_bias, bool should_pad_segments,
                                 bool should_use_16kib_app_compat) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
                                        should_pad_segments, should_use_16kib_app_compat);
}
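
// Usage sketch (illustrative only): the expected sequence is to apply all
// relocations first and only then seal the relro region. Variable names are
// hypothetical.
//
//   // ... all relocations have been applied at this point ...
//   if (phdr_table_protect_gnu_relro(phdr, phdr_count, load_bias,
//                                    should_pad_segments,
//                                    should_use_16kib_app_compat) < 0) {
//     // mprotect failed (errno is set); the load should be treated as failed.
//     return false;
//   }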

/*
 * Apply RX protection to the compat relro region of the ELF being loaded in
 * 16KiB compat mode.
 *
 * Input:
 *   start  -> start address of the compat relro region.
 *   size   -> size of the compat relro region in bytes.
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro_16kib_compat(ElfW(Addr) start, ElfW(Addr) size) {
  return mprotect(reinterpret_cast<void*>(start), size, PROT_READ | PROT_EXEC);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd,
                             size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
        match_offset += page_size();
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
        mismatch_offset += page_size();
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
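
// Usage sketch (illustrative only): phdr_table_serialize_gnu_relro and
// phdr_table_map_gnu_relro are intended to be used as a pair across processes
// that load the same library at the same address, so that identical relro
// pages become clean file-backed mappings. File descriptor handling and
// variable names here are hypothetical.
//
//   // Writer process, after relocations: dump the relro pages.
//   size_t offset = 0;
//   phdr_table_serialize_gnu_relro(phdr, phdr_count, load_bias, relro_fd, &offset);
//
//   // Reader process, same load address, after its own relocations:
//   size_t offset = 0;
//   phdr_table_map_gnu_relro(phdr, phdr_count, load_bias, relro_fd, &offset);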


#if defined(__arm__)
/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif
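
// Usage sketch (illustrative only, 32-bit ARM): exception-unwinding support
// code can look the table up once per library and cache it. Variable names
// are hypothetical.
//
//   ElfW(Addr)* exidx = nullptr;
//   size_t exidx_count = 0;
//   if (phdr_table_get_arm_exidx(phdr, phdr_count, load_bias,
//                                &exidx, &exidx_count) == 0) {
//     // exidx points to exidx_count 8-byte .ARM.exidx entries.
//   }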

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}
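
// Usage sketch (illustrative only): a caller typically walks the returned
// dynamic table until DT_NULL. Variable names are hypothetical.
//
//   ElfW(Dyn)* dynamic = nullptr;
//   ElfW(Word) dynamic_flags = 0;
//   phdr_table_get_dynamic_section(phdr, phdr_count, load_bias,
//                                  &dynamic, &dynamic_flags);
//   if (dynamic != nullptr) {
//     for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
//       // e.g. DT_NEEDED entries name the library's direct dependencies.
//     }
//   }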

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}
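
// Usage sketch (illustrative only): PT_INTERP, when present, names the
// requested program interpreter (e.g. "/system/bin/linker64" on 64-bit
// Android). Variable names are hypothetical.
//
//   const char* interp = phdr_table_get_interpreter_name(phdr, phdr_count, load_bias);
//   if (interp != nullptr) {
//     // interp is a NUL-terminated path inside the mapped segment.
//   }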

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Tries to find the .note.gnu.property section.
// It is not considered an error if such a section is missing.
bool ElfReader::FindGnuPropertySection() {
#if defined(__aarch64__)
  note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
#endif
  return true;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}