blob: 42c29c8c2f1d39b6e4466688883abc411f85c773 [file] [log] [blame]
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
Elliott Hughes650be4e2013-03-05 18:47:58 -080029#include "linker_phdr.h"
30
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020031#include <errno.h>
Elliott Hughes05fc1d72015-01-28 18:02:33 -080032#include <string.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020033#include <sys/mman.h>
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +000034#include <sys/types.h>
35#include <sys/stat.h>
36#include <unistd.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020037
Elliott Hughes650be4e2013-03-05 18:47:58 -080038#include "linker.h"
Elliott Hughes4cc5a602016-11-15 16:54:16 -080039#include "linker_dlwarning.h"
Dimitry Ivanov48ec2882016-08-04 11:50:36 -070040#include "linker_globals.h"
Elliott Hughes650be4e2013-03-05 18:47:58 -080041#include "linker_debug.h"
Dmitriy Ivanov3c524812015-11-20 17:28:12 -080042#include "linker_utils.h"
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020043
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080044#include "private/bionic_prctl.h"
Evgenii Stepanov0a3637d2016-07-06 13:20:59 -070045#include "private/CFIShadow.h" // For kLibraryAlignment
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080046
// Returns the ELF e_machine value matching the architecture this linker was
// built for. Used by VerifyElfHeader() to reject libraries built for a
// different ABI.
static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#else
  // Previously the function simply fell off the end here (undefined
  // behavior for a non-void function). Fail the build instead.
#error "Unsupported target architecture"
#endif
}
60
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020061/**
62 TECHNICAL NOTE ON ELF LOADING.
63
64 An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
66 be mapped into the process' address space.
67
68 Each loadable segment has the following important properties:
69
70 p_offset -> segment file offset
71 p_filesz -> segment file size
72 p_memsz -> segment memory size (always >= p_filesz)
73 p_vaddr -> segment's virtual address
74 p_flags -> segment flags (e.g. readable, writable, executable)
75
Elliott Hughes0266ae52014-02-10 17:46:57 -080076 We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020077
78 The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
79 ranges of virtual addresses. A few rules apply:
80
81 - the virtual address ranges should not overlap.
82
83 - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
84 between them should always be initialized to 0.
85
86 - ranges do not necessarily start or end at page boundaries. Two distinct
87 segments can have their start and end on the same page. In this case, the
88 page inherits the mapping flags of the latter segment.
89
  Finally, the real load address of each segment is not its p_vaddr. Instead the
91 loader decides where to load the first segment, then will load all others
92 relative to the first one to respect the initial range layout.
93
94 For example, consider the following list:
95
96 [ offset:0, filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
97 [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],
98
99 This corresponds to two segments that cover these virtual address ranges:
100
101 0x30000...0x34000
102 0x40000...0x48000
103
104 If the loader decides to load the first segment at address 0xa0000000
105 then the segments' load address ranges will be:
106
107 0xa0030000...0xa0034000
108 0xa0040000...0xa0048000
109
110 In other words, all segments must be loaded at an address that has the same
111 constant offset from their p_vaddr value. This offset is computed as the
112 difference between the first segment's load address, and its p_vaddr value.
113
114 However, in practice, segments do _not_ start at page boundaries. Since we
115 can only memory-map at page boundaries, this means that the bias is
116 computed as:
117
118 load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
119
120 (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
121 possible wrap around UINT32_MAX for possible large p_vaddr values).
122
123 And that the phdr0_load_address must start at a page boundary, with
124 the segment's real content starting at:
125
126 phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
127
128 Note that ELF requires the following condition to make the mmap()-ing work:
129
130 PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
131
132 The load_bias must be added to any p_vaddr value read from the ELF file to
133 determine the corresponding memory address.
134
135 **/
136
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800137#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200138#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
139 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
140 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
141
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700142ElfReader::ElfReader()
143 : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
144 phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
Dimitry Ivanovf45b0e92016-01-15 11:13:35 -0800145 strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
146 mapped_by_caller_(false) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700147}
148
149bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
150 CHECK(!did_read_);
151 CHECK(!did_load_);
152 name_ = name;
153 fd_ = fd;
154 file_offset_ = file_offset;
155 file_size_ = file_size;
156
157 if (ReadElfHeader() &&
158 VerifyElfHeader() &&
159 ReadProgramHeaders() &&
160 ReadSectionHeaders() &&
161 ReadDynamicSection()) {
162 did_read_ = true;
163 }
164
165 return did_read_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200166}
167
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000168bool ElfReader::Load(const android_dlextinfo* extinfo) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700169 CHECK(did_read_);
170 CHECK(!did_load_);
171 if (ReserveAddressSpace(extinfo) &&
172 LoadSegments() &&
173 FindPhdr()) {
174 did_load_ = true;
175 }
176
177 return did_load_;
178}
179
180const char* ElfReader::get_string(ElfW(Word) index) const {
181 CHECK(strtab_ != nullptr);
182 CHECK(index < strtab_size_);
183
184 return strtab_ + index;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800185}
186
187bool ElfReader::ReadElfHeader() {
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700188 ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800189 if (rc < 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700190 DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800191 return false;
192 }
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700193
Elliott Hughes650be4e2013-03-05 18:47:58 -0800194 if (rc != sizeof(header_)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700195 DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
Elliott Hughesc6200592013-09-30 18:43:46 -0700196 static_cast<size_t>(rc));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800197 return false;
198 }
199 return true;
200}
201
// Maps an ELF e_machine value to its symbolic name, for diagnostics only.
// Unknown values yield the placeholder "EM_???".
static const char* EM_to_string(int em) {
  switch (em) {
    case EM_386:     return "EM_386";
    case EM_AARCH64: return "EM_AARCH64";
    case EM_ARM:     return "EM_ARM";
    case EM_MIPS:    return "EM_MIPS";
    case EM_X86_64:  return "EM_X86_64";
    default:         return "EM_???";
  }
}
210
// Validates header_ against what this linker can load: correct ELF magic,
// the ELF class and endianness this build expects, ET_DYN file type, current
// ELF version, a matching e_machine, and (more leniently for apps targeting
// pre-O SDKs) sane section-header metadata.
bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  // Only little-endian objects are supported.
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  // The linker only loads shared objects / PIE binaries (ET_DYN).
  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  // Reject libraries built for a different architecture.
  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d (%s)", name_.c_str(), header_.e_machine,
           EM_to_string(header_.e_machine));
    return false;
  }

  // Section-header sanity: a hard error for apps targeting Android O or
  // newer, but only a warning (plus a recorded dlwarning) for older targets,
  // to keep previously-loadable libraries working.
  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
            name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN("\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}
286
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700287bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800288 off64_t range_start;
289 off64_t range_end;
290
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700291 // Only header can be located at the 0 offset... This function called to
292 // check DYNSYM and DYNAMIC sections and phdr/shdr - none of them can be
Dimitry Ivanovebe5af72016-07-14 11:15:44 -0700293 // at offset 0.
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700294
295 return offset > 0 &&
296 safe_add(&range_start, file_offset_, offset) &&
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800297 safe_add(&range_end, range_start, size) &&
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700298 (range_start < file_size_) &&
299 (range_end <= file_size_) &&
300 ((offset % alignment) == 0);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800301}
302
Elliott Hughes650be4e2013-03-05 18:47:58 -0800303// Loads the program header table from an ELF file into a read-only private
304// anonymous mmap-ed block.
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700305bool ElfReader::ReadProgramHeaders() {
Elliott Hughes650be4e2013-03-05 18:47:58 -0800306 phdr_num_ = header_.e_phnum;
307
308 // Like the kernel, we only accept program header tables that
309 // are smaller than 64KiB.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800310 if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700311 DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800312 return false;
313 }
314
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800315 // Boundary checks
316 size_t size = phdr_num_ * sizeof(ElfW(Phdr));
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700317 if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
318 DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
319 name_.c_str(),
320 static_cast<size_t>(header_.e_phoff),
321 size);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800322 return false;
323 }
324
325 if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700326 DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800327 return false;
328 }
329
Dmitriy Ivanovcf1cbbe2015-10-19 16:57:46 -0700330 phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
Elliott Hughes650be4e2013-03-05 18:47:58 -0800331 return true;
332}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200333
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700334bool ElfReader::ReadSectionHeaders() {
335 shdr_num_ = header_.e_shnum;
336
Dmitriy Ivanovb76123f2015-11-20 10:42:02 -0800337 if (shdr_num_ == 0) {
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700338 DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
Dmitriy Ivanovb76123f2015-11-20 10:42:02 -0800339 return false;
340 }
341
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800342 size_t size = shdr_num_ * sizeof(ElfW(Shdr));
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700343 if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
344 DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
345 name_.c_str(),
346 static_cast<size_t>(header_.e_shoff),
347 size);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800348 return false;
349 }
350
351 if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700352 DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
353 return false;
354 }
355
356 shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
357 return true;
358}
359
// Locates the .dynamic section, cross-checks it against the PT_DYNAMIC
// program header, and maps both .dynamic and the string table it links to
// (dynamic_/strtab_/strtab_size_). Mismatches between the section and the
// phdr are hard errors for apps targeting Android O+, warnings otherwise.
bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_ [i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr
  // (if there are multiple PT_DYNAMIC entries, the last one wins).
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  // Offset mismatch: fatal for O+ targets, warning (plus dlwarning) otherwise.
  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN("\"%s\" .dynamic section has invalid offset: 0x%zx, "
            "expected to match PT_DYNAMIC offset: 0x%zx",
            name_.c_str(),
            static_cast<size_t>(dynamic_shdr->sh_offset),
            pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  // Size mismatch: same O+/pre-O policy as above.
  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN("\"%s\" .dynamic section has invalid size: 0x%zx, "
            "expected to match PT_DYNAMIC filesz: 0x%zx",
            name_.c_str(),
            static_cast<size_t>(dynamic_shdr->sh_size),
            pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  // sh_link must name a valid section: the dynamic string table.
  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  // Bounds/alignment-check and map .dynamic itself.
  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  // Bounds-check and map the linked string table.
  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}
462
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800463/* Returns the size of the extent of all the possibly non-contiguous
464 * loadable segments in an ELF program header table. This corresponds
465 * to the page-aligned size in bytes that needs to be reserved in the
466 * process' address space. If there are no loadable segments, 0 is
467 * returned.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200468 *
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700469 * If out_min_vaddr or out_max_vaddr are not null, they will be
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800470 * set to the minimum and maximum addresses of pages to be reserved,
471 * or 0 if there is nothing to load.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200472 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800473size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
474 ElfW(Addr)* out_min_vaddr,
475 ElfW(Addr)* out_max_vaddr) {
476 ElfW(Addr) min_vaddr = UINTPTR_MAX;
477 ElfW(Addr) max_vaddr = 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200478
Elliott Hughes0266ae52014-02-10 17:46:57 -0800479 bool found_pt_load = false;
480 for (size_t i = 0; i < phdr_count; ++i) {
481 const ElfW(Phdr)* phdr = &phdr_table[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200482
Elliott Hughes0266ae52014-02-10 17:46:57 -0800483 if (phdr->p_type != PT_LOAD) {
484 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200485 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800486 found_pt_load = true;
487
488 if (phdr->p_vaddr < min_vaddr) {
489 min_vaddr = phdr->p_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200490 }
491
Elliott Hughes0266ae52014-02-10 17:46:57 -0800492 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
493 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
494 }
495 }
496 if (!found_pt_load) {
497 min_vaddr = 0;
498 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200499
Elliott Hughes0266ae52014-02-10 17:46:57 -0800500 min_vaddr = PAGE_START(min_vaddr);
501 max_vaddr = PAGE_END(max_vaddr);
502
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700503 if (out_min_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800504 *out_min_vaddr = min_vaddr;
505 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700506 if (out_max_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800507 *out_max_vaddr = max_vaddr;
508 }
509 return max_vaddr - min_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200510}
511
// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
//
// Returns the start of the reservation, or nullptr if mmap() fails. The
// caller owns the returned mapping (PROT_NONE, size bytes).
static void* ReserveAligned(void* hint, size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Address hint is only used in Art for the image mapping, and it is pretty important. Don't mess
  // with it.
  // FIXME: try an aligned allocation and fall back to plain mmap() if the former does not provide a
  // mapping at the requested address?
  if (align == PAGE_SIZE || hint != nullptr) {
    // No extra alignment needed (or a hint we must honor as-is): plain mmap.
    void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

  // first/last bound the candidate page-aligned start positions inside the
  // oversized reservation; pick one at random (load-address randomization),
  // then trim the unused head and tail back off with munmap.
  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;
  size_t n = arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}
545
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
//
// extinfo (may be null) can supply a caller-reserved region
// (ANDROID_DLEXT_RESERVED_ADDRESS[_HINT]) or address constraints
// (ANDROID_DLEXT_FORCE_FIXED_VADDR / LOAD_AT_FIXED_ADDRESS). On success,
// sets load_start_, load_size_ and load_bias_.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  // addr is the (page-aligned) lowest p_vaddr; used below to compute load_bias_.
  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  // reserved_hint == true means any caller-provided region is only a hint:
  // falling back to our own reservation is allowed if it is too small.
  size_t reserved_size = 0;
  bool reserved_hint = true;
  bool strict_hint = false;
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    // FORCE_FIXED_VADDR loads at the library's own (nonzero) p_vaddr;
    // LOAD_AT_FIXED_ADDRESS requires the mapping to land exactly at
    // reserved_addr (strict_hint enforces that below).
    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    // Caller-provided region absent or too small: a hard error unless it was
    // only a hint, in which case we make our own reservation.
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size - load_size_, load_size_, name_.c_str());
      return false;
    }
    start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    // Use the caller's region. Only reachable when extinfo set reserved_size
    // (load_size_ > 0 > no flags case), so extinfo is non-null here.
    start = extinfo->reserved_addr;
    mapped_by_caller_ = true;
  }

  load_start_ = start;
  // Bias to add to p_vaddr values to get actual memory addresses.
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}
607
Elliott Hughes650be4e2013-03-05 18:47:58 -0800608bool ElfReader::LoadSegments() {
609 for (size_t i = 0; i < phdr_num_; ++i) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800610 const ElfW(Phdr)* phdr = &phdr_table_[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200611
Elliott Hughes650be4e2013-03-05 18:47:58 -0800612 if (phdr->p_type != PT_LOAD) {
613 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200614 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800615
616 // Segment addresses in memory.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800617 ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
618 ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800619
Elliott Hughes0266ae52014-02-10 17:46:57 -0800620 ElfW(Addr) seg_page_start = PAGE_START(seg_start);
621 ElfW(Addr) seg_page_end = PAGE_END(seg_end);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800622
Elliott Hughes0266ae52014-02-10 17:46:57 -0800623 ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800624
625 // File offsets.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800626 ElfW(Addr) file_start = phdr->p_offset;
627 ElfW(Addr) file_end = file_start + phdr->p_filesz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800628
Elliott Hughes0266ae52014-02-10 17:46:57 -0800629 ElfW(Addr) file_page_start = PAGE_START(file_start);
630 ElfW(Addr) file_length = file_end - file_page_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800631
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700632 if (file_size_ <= 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700633 DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700634 return false;
635 }
636
skvalex93ce3542015-08-20 01:06:42 +0300637 if (file_end > static_cast<size_t>(file_size_)) {
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700638 DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
639 " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700640 name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700641 reinterpret_cast<void*>(phdr->p_filesz),
642 reinterpret_cast<void*>(file_end), file_size_);
643 return false;
644 }
645
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700646 if (file_length != 0) {
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700647 int prot = PFLAGS_TO_PROT(phdr->p_flags);
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700648 if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
Elliott Hughes4cc5a602016-11-15 16:54:16 -0800649 // W + E PT_LOAD segments are not allowed in O.
Elliott Hughes5bc78c82016-11-16 11:35:43 -0800650 if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
Elliott Hughes4cc5a602016-11-15 16:54:16 -0800651 DL_ERR_AND_LOG("\"%s\": W + E load segments are not allowed", name_.c_str());
652 return false;
653 }
654 DL_WARN("\"%s\": W + E load segments are not allowed", name_.c_str());
655 add_dlwarning(name_.c_str(), "W+E load segments");
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700656 }
657
Dmitriy Ivanov07e5bc12014-10-03 17:52:44 -0700658 void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700659 file_length,
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700660 prot,
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700661 MAP_FIXED|MAP_PRIVATE,
662 fd_,
Dmitriy Ivanov07e5bc12014-10-03 17:52:44 -0700663 file_offset_ + file_page_start);
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700664 if (seg_addr == MAP_FAILED) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700665 DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700666 return false;
667 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800668 }
669
670 // if the segment is writable, and does not end on a page boundary,
671 // zero-fill it until the page limit.
672 if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800673 memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800674 }
675
676 seg_file_end = PAGE_END(seg_file_end);
677
678 // seg_file_end is now the first page address after the file
679 // content. If seg_end is larger, we need to zero anything
680 // between them. This is done by using a private anonymous
681 // map for all extra pages.
682 if (seg_page_end > seg_file_end) {
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -0800683 size_t zeromap_size = seg_page_end - seg_file_end;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800684 void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -0800685 zeromap_size,
Elliott Hughes650be4e2013-03-05 18:47:58 -0800686 PFLAGS_TO_PROT(phdr->p_flags),
687 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
688 -1,
689 0);
690 if (zeromap == MAP_FAILED) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700691 DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800692 return false;
693 }
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -0800694
695 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
Elliott Hughes650be4e2013-03-05 18:47:58 -0800696 }
697 }
698 return true;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200699}
700
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000701/* Used internally. Used to set the protection bits of all loaded segments
702 * with optional extra flags (i.e. really PROT_WRITE). Used by
703 * phdr_table_protect_segments and phdr_table_unprotect_segments.
704 */
705static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
706 ElfW(Addr) load_bias, int extra_prot_flags) {
707 const ElfW(Phdr)* phdr = phdr_table;
708 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
709
710 for (; phdr < phdr_limit; phdr++) {
711 if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
712 continue;
713 }
714
715 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
716 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
717
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700718 int prot = PFLAGS_TO_PROT(phdr->p_flags);
719 if ((extra_prot_flags & PROT_WRITE) != 0) {
720 // make sure we're never simultaneously writable / executable
721 prot &= ~PROT_EXEC;
722 }
723
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000724 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
725 seg_page_end - seg_page_start,
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700726 prot | extra_prot_flags);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000727 if (ret < 0) {
728 return -1;
729 }
730 }
731 return 0;
732}
733
/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}
749
/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
770
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200771/* Used internally by phdr_table_protect_gnu_relro and
772 * phdr_table_unprotect_gnu_relro.
773 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800774static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
775 ElfW(Addr) load_bias, int prot_flags) {
776 const ElfW(Phdr)* phdr = phdr_table;
777 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200778
Elliott Hughes0266ae52014-02-10 17:46:57 -0800779 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
780 if (phdr->p_type != PT_GNU_RELRO) {
781 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200782 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800783
784 // Tricky: what happens when the relro segment does not start
785 // or end at page boundaries? We're going to be over-protective
786 // here and put every page touched by the segment as read-only.
787
788 // This seems to match Ian Lance Taylor's description of the
789 // feature at http://www.airs.com/blog/archives/189.
790
791 // Extract:
792 // Note that the current dynamic linker code will only work
793 // correctly if the PT_GNU_RELRO segment starts on a page
794 // boundary. This is because the dynamic linker rounds the
795 // p_vaddr field down to the previous page boundary. If
796 // there is anything on the page which should not be read-only,
797 // the program is likely to fail at runtime. So in effect the
798 // linker must only emit a PT_GNU_RELRO segment if it ensures
799 // that it starts on a page boundary.
800 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
801 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
802
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800803 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
Elliott Hughes0266ae52014-02-10 17:46:57 -0800804 seg_page_end - seg_page_start,
805 prot_flags);
806 if (ret < 0) {
807 return -1;
808 }
809 }
810 return 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200811}
812
/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must be always
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro, these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}
833
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000834/* Serialize the GNU relro segments to the given file descriptor. This can be
835 * performed after relocations to allow another process to later share the
836 * relocated segment, if it was loaded at the same address.
837 *
838 * Input:
839 * phdr_table -> program header table
840 * phdr_count -> number of entries in tables
841 * load_bias -> load bias
842 * fd -> writable file descriptor to use
843 * Return:
844 * 0 on error, -1 on failure (error code in errno).
845 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700846int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
847 size_t phdr_count,
848 ElfW(Addr) load_bias,
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000849 int fd) {
850 const ElfW(Phdr)* phdr = phdr_table;
851 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
852 ssize_t file_offset = 0;
853
854 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
855 if (phdr->p_type != PT_GNU_RELRO) {
856 continue;
857 }
858
859 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
860 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
861 ssize_t size = seg_page_end - seg_page_start;
862
863 ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
864 if (written != size) {
865 return -1;
866 }
867 void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
868 MAP_PRIVATE|MAP_FIXED, fd, file_offset);
869 if (map == MAP_FAILED) {
870 return -1;
871 }
872 file_offset += size;
873 }
874 return 0;
875}
876
/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  // An empty file leaves temp_mapping null; the size check inside the loop
  // then breaks out before temp_mapping is ever dereferenced.
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  // Offset within fd of the current segment's serialized bytes; advanced by
  // one page-rounded segment size per relro segment, mirroring the layout
  // written by phdr_table_serialize_gnu_relro.
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    // NOTE(review): off_t minus size_t is evaluated as unsigned here; this
    // is safe only because file_offset never exceeds file_size (it advances
    // by `size` only after this check has passed for that segment).
    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      // NOTE(review): the mmap file offset is match_offset, i.e. relative to
      // the start of the FILE, not to this segment's serialized bytes. For a
      // second PT_GNU_RELRO segment this should presumably be
      // file_offset + match_offset — confirm; in practice a single relro
      // segment appears to be assumed.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
969
970
#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    // Each exception-index table entry is two 32-bit words (8 bytes),
    // per the ARM EHABI.
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001010
/* Locate the ELF file's .dynamic section in memory.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null if no PT_DYNAMIC
 *                    entry is found).
 *   dynamic_flags -> p_flags of the PT_DYNAMIC entry (may be null;
 *                    left unset when the entry is missing).
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  const ElfW(Phdr)* limit = phdr_table + phdr_count;
  for (const ElfW(Phdr)* entry = phdr_table; entry != limit; ++entry) {
    if (entry->p_type != PT_DYNAMIC) {
      continue;
    }
    // First PT_DYNAMIC entry wins.
    *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + entry->p_vaddr);
    if (dynamic_flags != nullptr) {
      *dynamic_flags = entry->p_flags;
    }
    return;
  }
}
1039
/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   pointer to the in-memory PT_INTERP string, or nullptr when the
 *   table has no PT_INTERP entry.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr) * phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  const ElfW(Phdr)* limit = phdr_table + phdr_count;
  for (const ElfW(Phdr)* entry = phdr_table; entry != limit; ++entry) {
    if (entry->p_type == PT_INTERP) {
      // The segment's virtual address points directly at the NUL-terminated
      // interpreter path.
      return reinterpret_cast<const char*>(load_bias + entry->p_vaddr);
    }
  }
  return nullptr;
}
1059
// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        // The mapped segment begins with the ELF header, so the loaded
        // program header table lives at e_phoff bytes past it (e_phoff
        // is a file offset, which coincides with the in-memory offset
        // when the segment starts at file offset 0).
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      // Only the first PT_LOAD is considered: if it does not start at
      // file offset 0, the ELF header was not mapped and the loaded
      // table cannot be located this way.
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001091
Elliott Hughes650be4e2013-03-05 18:47:58 -08001092// Ensures that our program header is actually within a loadable
1093// segment. This should help catch badly-formed ELF files that
1094// would cause the linker to crash later when trying to access it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001095bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
1096 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
1097 ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
Dmitriy Ivanovcf1cbbe2015-10-19 16:57:46 -07001098 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001099 if (phdr->p_type != PT_LOAD) {
1100 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001101 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001102 ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
1103 ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -08001104 if (seg_start <= loaded && loaded_end <= seg_end) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001105 loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001106 return true;
1107 }
1108 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001109 DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
1110 name_.c_str(), reinterpret_cast<void*>(loaded));
Elliott Hughes650be4e2013-03-05 18:47:58 -08001111 return false;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001112}