/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_asm_note.h"
#include "private/CFIShadow.h" // For kLibraryAlignment
#include "private/elf_note.h"

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__riscv)
  return EM_RISCV;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)
    p_align   -> segment's in-memory and in-file alignment

  We will ignore the p_paddr field of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

      0x30000...0x34000
      0x40000...0x48000

  If the loader decides to load the image so that the first segment ends up
  at address 0xa0030000 (i.e. with a bias of 0xa0000000), then the segments'
  load address ranges will be:

      0xa0030000...0xa0034000
      0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

      load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap-around past UINT32_MAX for large p_vaddr values.)

  Note that phdr0_load_address must itself start at a page boundary, with
  the segment's real content starting at:

      phdr0_load_address + page_offset(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
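
// Worked example of the bias computation above (a sketch using the sample
// segments from this note and an assumed 4KiB page size; the numbers are
// illustrative, not the behavior of any particular device):
//
//   phdr0_load_address = 0xa0030000   // where the first segment was mapped
//   phdr0->p_vaddr     = 0x30000
//   load_bias          = 0xa0030000 - page_start(0x30000) = 0xa0000000
//
// The second segment (p_vaddr = 0x40000) then lands at
// 0x40000 + load_bias = 0xa0040000, preserving the original layout.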

#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
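
// Compile-time sanity checks implied by the two macros above (added here as
// an illustration; PF_* and PROT_* are integer constants from the ELF and
// <sys/mman.h> headers this file already pulls in):
static_assert(PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC),
              "R+X segment flags should map to PROT_READ|PROT_EXEC");
static_assert(PFLAGS_TO_PROT(PF_W) == PROT_WRITE,
              "W segment flags should map to PROT_WRITE");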

// Default PMD size for x86_64 and aarch64 (2MB).
static constexpr size_t kPmdSize = (1UL << 21);

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection() &&
      ReadPadSegmentNote()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  bool reserveSuccess = ReserveAddressSpace(address_space);
  if (reserveSuccess && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A, loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
                                               &note_gnu_property_) == 0);
    }
#endif
  }
  if (reserveSuccess && !did_load_) {
    if (load_start_ != nullptr && load_size_ != 0) {
      if (!mapped_by_caller_) {
        munmap(load_start_, load_size_);
      }
    }
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_RISCV) return "EM_RISCV";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none
  // of them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure the dynamic_shdr offset and size match the PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = page_start(min_vaddr);
  max_vaddr = page_end(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
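
// Worked example (the two sample segments from the technical note at the top
// of this file, assuming 4KiB pages): min_vaddr = page_start(0x30000) =
// 0x30000 and max_vaddr = page_end(0x48000) = 0x48000, so the size to
// reserve is 0x48000 - 0x30000 = 0x18000 bytes.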

// Returns the maximum p_align associated with a loadable segment in the ELF
// program header table. Used to determine whether the file should be loaded at
// a specific virtual address alignment for use with huge pages.
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t maximum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    if (phdr->p_align > maximum_alignment) {
      maximum_alignment = phdr->p_align;
    }
  }

#if defined(__LP64__)
  return maximum_alignment;
#else
  return page_size();
#endif
}

// Reserve a virtual address range such that if its limits were extended to
// the next 2**align boundary, it would not overlap with any existing mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
                                         void** out_gap_start, size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Reserve enough space to properly align the library's start address.
  mapping_align = std::max(mapping_align, start_align);
  if (mapping_align == page_size()) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match the second level
  // page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible huge pages before
    // it to improve address randomization and make it harder to locate this library code by
    // probing.
    munmap(mmap_ptr, mmap_size);
    mapping_align = std::max(mapping_align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, mapping_align);
  uint8_t* last = align_down(gap_start, mapping_align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
  uint8_t* start = first + n * start_align;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
  // to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}
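
// Worked example of the padding arithmetic above (a sketch assuming 4KiB
// pages, mapping_align = 2MiB, size = 0x5000, and no gap insertion):
// mmap_size = align_up(0x5000, 0x200000) + 0x200000 - 0x1000 = 0x3ff000,
// which guarantees the PROT_NONE reservation contains at least one 2MiB-
// aligned start with room for the full size; the unused slop around the
// randomly chosen start is then unmapped before returning.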

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    size_t start_alignment = page_size();
    if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
      size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
      // Limit alignment to PMD size as other alignments reduce the number of
      // bits available for ASLR for no benefit.
      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : page_size();
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                        &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

// Find the ELF note of type NT_ANDROID_TYPE_PAD_SEGMENT and check that the desc value is 1.
bool ElfReader::ReadPadSegmentNote() {
  // The ELF can have multiple PT_NOTE's, check them all
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_NOTE) {
      continue;
    }

    // note_fragment is scoped to within the loop so that there is
    // at most 1 PT_NOTE mapped at any time during this search.
    MappedFileFragment note_fragment;
    if (!note_fragment.Map(fd_, file_offset_, phdr->p_offset, phdr->p_memsz)) {
      DL_ERR("\"%s\" note mmap failed: %s", name_.c_str(), strerror(errno));
      return false;
    }

    const ElfW(Nhdr)* note_hdr = nullptr;
    const char* note_desc = nullptr;
    if (!__get_elf_note(NT_ANDROID_TYPE_PAD_SEGMENT, "Android",
                        reinterpret_cast<ElfW(Addr)>(note_fragment.data()),
                        phdr, &note_hdr, &note_desc)) {
      continue;
    }

    if (note_hdr->n_descsz != sizeof(ElfW(Word))) {
      DL_ERR("\"%s\" NT_ANDROID_TYPE_PAD_SEGMENT note has unexpected n_descsz: %u",
             name_.c_str(), reinterpret_cast<unsigned int>(note_hdr->n_descsz));
      return false;
    }

    // 1 == enabled, 0 == disabled
    should_pad_segments_ = *reinterpret_cast<const ElfW(Word)*>(note_desc) == 1;
    return true;
  }

  return true;
}
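
// A sketch of the note this function looks for, following the generic ELF
// note layout (the emitting macro itself lives in bionic_asm_note.h; the
// values shown are illustrative):
//
//   ElfW(Nhdr) { n_namesz = 8, n_descsz = 4, n_type = NT_ANDROID_TYPE_PAD_SEGMENT }
//   name: "Android\0"              // 8 bytes, matching n_namesz
//   desc: ElfW(Word), 1 or 0       // 1 == pad segments, 0 == don't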

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = page_start(seg_start);
    ElfW(Addr) seg_page_end = page_end(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = page_start(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }

      // Mark segments as huge page eligible if they meet the requirements
      // (executable and PMD aligned).
      if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
          get_transparent_hugepages_supported()) {
        madvise(seg_addr, file_length, MADV_HUGEPAGE);
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
    }

    seg_file_end = page_end(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}
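
// Worked example of the zero-fill logic above (assuming 4KiB pages and a
// page-aligned seg_start): for a writable segment with p_filesz = 0x1100 and
// p_memsz = 0x4000, the memset() clears bytes [0x1100, 0x2000) of the last
// file-backed page, and the anonymous mapping covers [0x2000, 0x4000),
// which is then named ".bss" via prctl().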

/* Used internally to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
    if ((prot & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }
#if defined(__aarch64__)
    if ((prot & PROT_EXEC) == 0) {
      // Though it is not specified, don't add PROT_BTI if the segment is not
      // executable.
      prot &= ~PROT_BTI;
    }
#endif

    int ret =
        mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * AArch64: also called from linker_main and ElfReader::Load to apply
 * PROT_BTI for loaded main so and other so-s.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   prop        -> GnuPropertySection or nullptr
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
  int prot = 0;
#if defined(__aarch64__)
  if ((prop != nullptr) && prop->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
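
// Typical call sequence around relocation, per the comments above (a sketch;
// the real call sites are in the linker's relocation code, and error
// handling is elided here):
//
//   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
//   // ... apply relocations ...
//   phdr_table_protect_segments(phdr_table, phdr_count, load_bias, prop);
//   phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);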

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

971/* Apply GNU relro protection if specified by the program header. This will
972 * turn some of the pages of a writable PT_LOAD segment to read-only, as
973 * specified by one or more PT_GNU_RELRO segments. This must be always
974 * performed after relocations.
975 *
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200976 * The areas typically covered are .got and .data.rel.ro, these are
977 * read-only from the program's POV, but contain absolute addresses
978 * that need to be relocated before use.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200979 *
980 * Input:
981 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -0700982 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200983 * load_bias -> load bias
984 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +0000985 * 0 on success, -1 on failure (error code in errno).
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200986 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700987int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
988 size_t phdr_count, ElfW(Addr) load_bias) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800989 return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200990}
991
/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}
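
// A sketch of how the serialize/map pair is meant to be used for RELRO
// sharing across processes (fd setup is elided; both processes must have
// loaded the library at the same address for the remapping to succeed):
//
//   // Process A, after applying relocations:
//   size_t offset = 0;
//   phdr_table_serialize_gnu_relro(phdr_table, phdr_count, load_bias, fd, &offset);
//
//   // Process B, after relocating the same library at the same address:
//   size_t offset = 0;
//   phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias, fd, &offset);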
1035
1036/* Where possible, replace the GNU relro segments with mappings of the given
1037 * file descriptor. This can be performed after relocations to allow a file
1038 * previously created by phdr_table_serialize_gnu_relro in another process to
1039 * replace the dirty relocated pages, saving memory, if it was loaded at the
1040 * same address. We have to compare the data before we map over it, since some
1041 * parts of the relro segment may not be identical due to other libraries in
1042 * the process being loaded at different addresses.
1043 *
1044 * Input:
1045 * phdr_table -> program header table
1046 * phdr_count -> number of entries in tables
1047 * load_bias -> load bias
1048 * fd -> readable file descriptor to use
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001049 * file_offset -> pointer to offset into file descriptor to use/update
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001050 * Return:
Mitch Phillips117e45e2023-10-20 13:32:33 +00001051 * 0 on success, -1 on failure (error code in errno).
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +00001052 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001053int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
1054 size_t phdr_count,
1055 ElfW(Addr) load_bias,
Torne (Richard Coles)efbe9a52018-10-17 15:59:38 -04001056 int fd,
1057 size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
        match_offset += page_size();
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
        mismatch_offset += page_size();
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
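
// Usage sketch (hypothetical caller, not part of the linker): a second
// process that loaded the same library at the same address can replace its
// dirty relro pages with clean, shareable mappings of the file produced by
// phdr_table_serialize_gnu_relro. kRelroFile is an assumed path.
//
//   size_t relro_offset = 0;
//   int relro_fd = TEMP_FAILURE_RETRY(open(kRelroFile, O_RDONLY));
//   if (relro_fd == -1 ||
//       phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias,
//                                relro_fd, &relro_offset) == -1) {
//     // Comparison or mapping failed; the private pages remain in place.
//   }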


#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    // Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes.
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif
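
// Usage sketch (hypothetical caller, not part of the linker): an ARM stack
// unwinder asks for the exception-index table of the library containing a
// given PC; the soinfo fields named below are an assumption here.
//
//   ElfW(Addr)* exidx = nullptr;
//   size_t exidx_count = 0;
//   if (phdr_table_get_arm_exidx(si->phdr, si->phnum, si->load_bias,
//                                &exidx, &exidx_count) == 0) {
//     // exidx points at exidx_count 8-byte entries, binary-searchable by PC.
//   }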

/* Return the address and protection flags of the ELF file's .dynamic section
 * in memory, or null if missing.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}
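
// Usage sketch (hypothetical caller, not part of the linker): the segment is
// typically probed right after loading, before relocation.
//
//   ElfW(Dyn)* dynamic = nullptr;
//   ElfW(Word) dynamic_flags = 0;
//   phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
//                                  &dynamic, &dynamic_flags);
//   if (dynamic == nullptr) {
//     // No PT_DYNAMIC: a static executable or a malformed file.
//   } else if ((dynamic_flags & PF_W) != 0) {
//     // PT_DYNAMIC is writable; callers may want to warn about this.
//   }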

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}
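
// Usage sketch (hypothetical caller, not part of the linker): PT_INTERP holds
// the path of the dynamic linker an executable requests, e.g.
// "/system/bin/linker64" on 64-bit Android.
//
//   const char* interp =
//       phdr_table_get_interpreter_name(phdr_table, phdr_count, load_bias);
//   if (interp != nullptr) {
//     // interp is a NUL-terminated path inside the mapped PT_INTERP segment.
//   }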

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}
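
// Worked example of the fallback above (values are illustrative assumptions):
// if the first PT_LOAD has p_offset == 0 and p_vaddr == 0, the ELF header is
// mapped at load_bias_, and for the common layout where the program header
// table immediately follows the header, e_phoff == sizeof(ElfW(Ehdr)), so the
// loaded table starts at load_bias_ + sizeof(ElfW(Ehdr)).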

// Tries to find the .note.gnu.property section.
// It is not considered an error if such a section is missing.
bool ElfReader::FindGnuPropertySection() {
#if defined(__aarch64__)
  note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
#endif
  return true;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}
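
// Worked example of the bounds check (illustrative assumptions): with
// phdr_num_ == 8 on LP64, the table occupies 8 * sizeof(Elf64_Phdr) ==
// 8 * 56 == 448 bytes, so `loaded` is accepted only if the whole range
// [loaded, loaded + 448) lies within the file-backed part of a PT_LOAD
// segment (p_filesz, not p_memsz, bounds the check above).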