blob: 72549cc73f428bc85c61b30b77d81f48590ff412 [file] [log] [blame]
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
Elliott Hughes650be4e2013-03-05 18:47:58 -080029#include "linker_phdr.h"
30
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020031#include <errno.h>
Elliott Hughes05fc1d72015-01-28 18:02:33 -080032#include <string.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020033#include <sys/mman.h>
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +000034#include <sys/types.h>
35#include <sys/stat.h>
36#include <unistd.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020037
Elliott Hughes650be4e2013-03-05 18:47:58 -080038#include "linker.h"
Elliott Hughes4cc5a602016-11-15 16:54:16 -080039#include "linker_dlwarning.h"
Dimitry Ivanov48ec2882016-08-04 11:50:36 -070040#include "linker_globals.h"
Elliott Hughes650be4e2013-03-05 18:47:58 -080041#include "linker_debug.h"
Dmitriy Ivanov3c524812015-11-20 17:28:12 -080042#include "linker_utils.h"
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020043
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080044#include "private/bionic_prctl.h"
Evgenii Stepanov0a3637d2016-07-06 13:20:59 -070045#include "private/CFIShadow.h" // For kLibraryAlignment
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080046
// Returns the ELF e_machine value for the architecture this linker was built
// for. VerifyElfHeader() rejects any object file whose e_machine differs.
static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#else
// Previously there was no #else branch: on an unlisted architecture the
// function fell off the end of a non-void function (undefined behavior,
// caught only if building with warnings-as-errors). Fail loudly at
// compile time instead.
#error "Unsupported architecture: add an EM_* value for this target"
#endif
}
60
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020061/**
62 TECHNICAL NOTE ON ELF LOADING.
63
64 An ELF file's program header table contains one or more PT_LOAD
65 segments, which corresponds to portions of the file that need to
66 be mapped into the process' address space.
67
68 Each loadable segment has the following important properties:
69
70 p_offset -> segment file offset
71 p_filesz -> segment file size
72 p_memsz -> segment memory size (always >= p_filesz)
73 p_vaddr -> segment's virtual address
74 p_flags -> segment flags (e.g. readable, writable, executable)
75
Elliott Hughes0266ae52014-02-10 17:46:57 -080076 We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020077
78 The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
79 ranges of virtual addresses. A few rules apply:
80
81 - the virtual address ranges should not overlap.
82
83 - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
84 between them should always be initialized to 0.
85
86 - ranges do not necessarily start or end at page boundaries. Two distinct
87 segments can have their start and end on the same page. In this case, the
88 page inherits the mapping flags of the latter segment.
89
90 Finally, the real load addrs of each segment is not p_vaddr. Instead the
91 loader decides where to load the first segment, then will load all others
92 relative to the first one to respect the initial range layout.
93
94 For example, consider the following list:
95
96 [ offset:0, filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
97 [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],
98
99 This corresponds to two segments that cover these virtual address ranges:
100
101 0x30000...0x34000
102 0x40000...0x48000
103
104 If the loader decides to load the first segment at address 0xa0000000
105 then the segments' load address ranges will be:
106
107 0xa0030000...0xa0034000
108 0xa0040000...0xa0048000
109
110 In other words, all segments must be loaded at an address that has the same
111 constant offset from their p_vaddr value. This offset is computed as the
112 difference between the first segment's load address, and its p_vaddr value.
113
114 However, in practice, segments do _not_ start at page boundaries. Since we
115 can only memory-map at page boundaries, this means that the bias is
116 computed as:
117
118 load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
119
120 (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
121 possible wrap around UINT32_MAX for possible large p_vaddr values).
122
123 And that the phdr0_load_address must start at a page boundary, with
124 the segment's real content starting at:
125
126 phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
127
128 Note that ELF requires the following condition to make the mmap()-ing work:
129
130 PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
131
132 The load_bias must be added to any p_vaddr value read from the ELF file to
133 determine the corresponding memory address.
134
135 **/
136
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800137#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200138#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
139 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
140 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
141
// Default-constructs an ElfReader with all state cleared. The object becomes
// usable only after a successful Read() (and, for loaded segments, Load()).
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}
148
149bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
150 CHECK(!did_read_);
151 CHECK(!did_load_);
152 name_ = name;
153 fd_ = fd;
154 file_offset_ = file_offset;
155 file_size_ = file_size;
156
157 if (ReadElfHeader() &&
158 VerifyElfHeader() &&
159 ReadProgramHeaders() &&
160 ReadSectionHeaders() &&
161 ReadDynamicSection()) {
162 did_read_ = true;
163 }
164
165 return did_read_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200166}
167
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000168bool ElfReader::Load(const android_dlextinfo* extinfo) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700169 CHECK(did_read_);
170 CHECK(!did_load_);
171 if (ReserveAddressSpace(extinfo) &&
172 LoadSegments() &&
173 FindPhdr()) {
174 did_load_ = true;
175 }
176
177 return did_load_;
178}
179
// Returns a pointer into the mapped dynamic string table at |index|.
// CHECK-fails if the strtab has not been mapped yet (see ReadDynamicSection)
// or if |index| is out of bounds of the mapped table.
const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}
186
187bool ElfReader::ReadElfHeader() {
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700188 ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800189 if (rc < 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700190 DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800191 return false;
192 }
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700193
Elliott Hughes650be4e2013-03-05 18:47:58 -0800194 if (rc != sizeof(header_)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700195 DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
Elliott Hughesc6200592013-09-30 18:43:46 -0700196 static_cast<size_t>(rc));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800197 return false;
198 }
199 return true;
200}
201
// Validates header_ against what this linker can load: correct magic,
// matching 32/64-bit class, little-endian, ET_DYN, current ELF version, and
// the e_machine of the architecture we were built for. Malformed section
// header metadata (e_shentsize / e_shstrndx) is a hard error only for apps
// targeting Android O or later; older targets get a warning for
// compatibility.
bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  // Only shared objects / PIE (ET_DYN) are accepted here.
  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_.c_str(), header_.e_machine);
    return false;
  }

  // Section header entries of the wrong size would make later shdr parsing
  // read garbage.
  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    // Pre-O: warn and record the problem for dlwarning reporting, but load anyway.
    DL_WARN("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
            name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN("\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}
276
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700277bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800278 off64_t range_start;
279 off64_t range_end;
280
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700281 // Only header can be located at the 0 offset... This function called to
282 // check DYNSYM and DYNAMIC sections and phdr/shdr - none of them can be
Dimitry Ivanovebe5af72016-07-14 11:15:44 -0700283 // at offset 0.
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700284
285 return offset > 0 &&
286 safe_add(&range_start, file_offset_, offset) &&
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800287 safe_add(&range_end, range_start, size) &&
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700288 (range_start < file_size_) &&
289 (range_end <= file_size_) &&
290 ((offset % alignment) == 0);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800291}
292
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block (via phdr_fragment_). On success phdr_table_ and
// phdr_num_ describe the mapped table.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks: the whole table must lie inside the file and be
  // properly aligned before we try to map it.
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200323
// Maps the section header table (via shdr_fragment_) after validating that it
// exists and fits within the file. On success shdr_table_ and shdr_num_
// describe the mapped table.
bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}
349
// Locates the .dynamic section via the section headers, cross-checks it
// against the PT_DYNAMIC program header, then maps both the dynamic entries
// and the string table referenced by its sh_link. Mismatches between SHT and
// PT views are hard errors only for apps targeting Android O or later;
// earlier targets get warnings for compatibility.
bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_ [i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr.
  // Note: the loop does not break, so if multiple PT_DYNAMIC entries exist
  // the last one wins; if none exist both values stay 0 and the comparisons
  // below will flag the mismatch.
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    // Hard error for O+, warning (plus dlwarning record) for older targets.
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN("\"%s\" .dynamic section has invalid offset: 0x%zx, "
            "expected to match PT_DYNAMIC offset: 0x%zx",
            name_.c_str(),
            static_cast<size_t>(dynamic_shdr->sh_offset),
            pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    // Same O+ gating as the offset check above.
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN("\"%s\" .dynamic section has invalid size: 0x%zx, "
            "expected to match PT_DYNAMIC filesz: 0x%zx",
            name_.c_str(),
            static_cast<size_t>(dynamic_shdr->sh_size),
            pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  // sh_link of a SHT_DYNAMIC section is the index of its string table.
  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  // Validate file ranges before mapping either table.
  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}
452
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800453/* Returns the size of the extent of all the possibly non-contiguous
454 * loadable segments in an ELF program header table. This corresponds
455 * to the page-aligned size in bytes that needs to be reserved in the
456 * process' address space. If there are no loadable segments, 0 is
457 * returned.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200458 *
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700459 * If out_min_vaddr or out_max_vaddr are not null, they will be
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800460 * set to the minimum and maximum addresses of pages to be reserved,
461 * or 0 if there is nothing to load.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200462 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800463size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
464 ElfW(Addr)* out_min_vaddr,
465 ElfW(Addr)* out_max_vaddr) {
466 ElfW(Addr) min_vaddr = UINTPTR_MAX;
467 ElfW(Addr) max_vaddr = 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200468
Elliott Hughes0266ae52014-02-10 17:46:57 -0800469 bool found_pt_load = false;
470 for (size_t i = 0; i < phdr_count; ++i) {
471 const ElfW(Phdr)* phdr = &phdr_table[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200472
Elliott Hughes0266ae52014-02-10 17:46:57 -0800473 if (phdr->p_type != PT_LOAD) {
474 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200475 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800476 found_pt_load = true;
477
478 if (phdr->p_vaddr < min_vaddr) {
479 min_vaddr = phdr->p_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200480 }
481
Elliott Hughes0266ae52014-02-10 17:46:57 -0800482 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
483 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
484 }
485 }
486 if (!found_pt_load) {
487 min_vaddr = 0;
488 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200489
Elliott Hughes0266ae52014-02-10 17:46:57 -0800490 min_vaddr = PAGE_START(min_vaddr);
491 max_vaddr = PAGE_END(max_vaddr);
492
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700493 if (out_min_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800494 *out_min_vaddr = min_vaddr;
495 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700496 if (out_max_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800497 *out_max_vaddr = max_vaddr;
498 }
499 return max_vaddr - min_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200500}
501
// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
static void* ReserveAligned(void* hint, size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Address hint is only used in Art for the image mapping, and it is pretty important. Don't mess
  // with it.
  // FIXME: try an aligned allocation and fall back to plain mmap() if the former does not provide a
  // mapping at the requested address?
  if (align == PAGE_SIZE || hint != nullptr) {
    // Page alignment is what mmap gives us anyway, so no over-allocation is
    // needed in that case (or when the caller insists on a specific hint).
    void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

  // Pick a random page-aligned start between the first and last positions at
  // which an align-bounded region of |size| bytes still fits in the mapping,
  // then trim the unused head and tail of the over-allocation.
  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;
  size_t n = arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}
535
Elliott Hughes650be4e2013-03-05 18:47:58 -0800536// Reserve a virtual address range big enough to hold all loadable
537// segments of a program header table. This is done by creating a
538// private anonymous mmap() with PROT_NONE.
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000539bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800540 ElfW(Addr) min_vaddr;
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800541 load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800542 if (load_size_ == 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700543 DL_ERR("\"%s\" has no loadable segments", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -0800544 return false;
545 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200546
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800547 uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000548 void* start;
549 size_t reserved_size = 0;
550 bool reserved_hint = true;
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700551 bool strict_hint = false;
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700552 // Assume position independent executable by default.
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700553 void* mmap_hint = nullptr;
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000554
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700555 if (extinfo != nullptr) {
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000556 if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
557 reserved_size = extinfo->reserved_size;
558 reserved_hint = false;
559 } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
560 reserved_size = extinfo->reserved_size;
561 }
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700562
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700563 if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700564 mmap_hint = addr;
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700565 } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
566 mmap_hint = extinfo->reserved_addr;
567 strict_hint = true;
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700568 }
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000569 }
570
571 if (load_size_ > reserved_size) {
572 if (!reserved_hint) {
573 DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700574 reserved_size - load_size_, load_size_, name_.c_str());
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000575 return false;
576 }
Evgenii Stepanovd13e9a62016-07-15 16:31:42 -0700577 start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
578 if (start == nullptr) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700579 DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000580 return false;
581 }
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700582 if (strict_hint && (start != mmap_hint)) {
583 munmap(start, load_size_);
584 DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
585 load_size_, mmap_hint, name_.c_str());
586 return false;
587 }
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000588 } else {
589 start = extinfo->reserved_addr;
Dimitry Ivanovf45b0e92016-01-15 11:13:35 -0800590 mapped_by_caller_ = true;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800591 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200592
Elliott Hughes650be4e2013-03-05 18:47:58 -0800593 load_start_ = start;
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800594 load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800595 return true;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200596}
597
Elliott Hughes650be4e2013-03-05 18:47:58 -0800598bool ElfReader::LoadSegments() {
599 for (size_t i = 0; i < phdr_num_; ++i) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800600 const ElfW(Phdr)* phdr = &phdr_table_[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200601
Elliott Hughes650be4e2013-03-05 18:47:58 -0800602 if (phdr->p_type != PT_LOAD) {
603 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200604 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800605
606 // Segment addresses in memory.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800607 ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
608 ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800609
Elliott Hughes0266ae52014-02-10 17:46:57 -0800610 ElfW(Addr) seg_page_start = PAGE_START(seg_start);
611 ElfW(Addr) seg_page_end = PAGE_END(seg_end);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800612
Elliott Hughes0266ae52014-02-10 17:46:57 -0800613 ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800614
615 // File offsets.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800616 ElfW(Addr) file_start = phdr->p_offset;
617 ElfW(Addr) file_end = file_start + phdr->p_filesz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800618
Elliott Hughes0266ae52014-02-10 17:46:57 -0800619 ElfW(Addr) file_page_start = PAGE_START(file_start);
620 ElfW(Addr) file_length = file_end - file_page_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800621
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700622 if (file_size_ <= 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700623 DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700624 return false;
625 }
626
skvalex93ce3542015-08-20 01:06:42 +0300627 if (file_end > static_cast<size_t>(file_size_)) {
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700628 DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
629 " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700630 name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700631 reinterpret_cast<void*>(phdr->p_filesz),
632 reinterpret_cast<void*>(file_end), file_size_);
633 return false;
634 }
635
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700636 if (file_length != 0) {
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700637 int prot = PFLAGS_TO_PROT(phdr->p_flags);
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700638 if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
Elliott Hughes4cc5a602016-11-15 16:54:16 -0800639 // W + E PT_LOAD segments are not allowed in O.
Elliott Hughes5bc78c82016-11-16 11:35:43 -0800640 if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
Elliott Hughes4cc5a602016-11-15 16:54:16 -0800641 DL_ERR_AND_LOG("\"%s\": W + E load segments are not allowed", name_.c_str());
642 return false;
643 }
644 DL_WARN("\"%s\": W + E load segments are not allowed", name_.c_str());
645 add_dlwarning(name_.c_str(), "W+E load segments");
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700646 }
647
Dmitriy Ivanov07e5bc12014-10-03 17:52:44 -0700648 void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700649 file_length,
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700650 prot,
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700651 MAP_FIXED|MAP_PRIVATE,
652 fd_,
Dmitriy Ivanov07e5bc12014-10-03 17:52:44 -0700653 file_offset_ + file_page_start);
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700654 if (seg_addr == MAP_FAILED) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700655 DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700656 return false;
657 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800658 }
659
660 // if the segment is writable, and does not end on a page boundary,
661 // zero-fill it until the page limit.
662 if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800663 memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800664 }
665
666 seg_file_end = PAGE_END(seg_file_end);
667
668 // seg_file_end is now the first page address after the file
669 // content. If seg_end is larger, we need to zero anything
670 // between them. This is done by using a private anonymous
671 // map for all extra pages.
672 if (seg_page_end > seg_file_end) {
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -0800673 size_t zeromap_size = seg_page_end - seg_file_end;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800674 void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -0800675 zeromap_size,
Elliott Hughes650be4e2013-03-05 18:47:58 -0800676 PFLAGS_TO_PROT(phdr->p_flags),
677 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
678 -1,
679 0);
680 if (zeromap == MAP_FAILED) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700681 DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800682 return false;
683 }
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -0800684
685 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
Elliott Hughes650be4e2013-03-05 18:47:58 -0800686 }
687 }
688 return true;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200689}
690
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000691/* Used internally. Used to set the protection bits of all loaded segments
692 * with optional extra flags (i.e. really PROT_WRITE). Used by
693 * phdr_table_protect_segments and phdr_table_unprotect_segments.
694 */
695static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
696 ElfW(Addr) load_bias, int extra_prot_flags) {
697 const ElfW(Phdr)* phdr = phdr_table;
698 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
699
700 for (; phdr < phdr_limit; phdr++) {
701 if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
702 continue;
703 }
704
705 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
706 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
707
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700708 int prot = PFLAGS_TO_PROT(phdr->p_flags);
709 if ((extra_prot_flags & PROT_WRITE) != 0) {
710 // make sure we're never simultaneously writable / executable
711 prot &= ~PROT_EXEC;
712 }
713
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000714 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
715 seg_page_end - seg_page_start,
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700716 prot | extra_prot_flags);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000717 if (ret < 0) {
718 return -1;
719 }
720 }
721 return 0;
722}
723
/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}
739
/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
760
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200761/* Used internally by phdr_table_protect_gnu_relro and
762 * phdr_table_unprotect_gnu_relro.
763 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800764static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
765 ElfW(Addr) load_bias, int prot_flags) {
766 const ElfW(Phdr)* phdr = phdr_table;
767 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200768
Elliott Hughes0266ae52014-02-10 17:46:57 -0800769 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
770 if (phdr->p_type != PT_GNU_RELRO) {
771 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200772 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800773
774 // Tricky: what happens when the relro segment does not start
775 // or end at page boundaries? We're going to be over-protective
776 // here and put every page touched by the segment as read-only.
777
778 // This seems to match Ian Lance Taylor's description of the
779 // feature at http://www.airs.com/blog/archives/189.
780
781 // Extract:
782 // Note that the current dynamic linker code will only work
783 // correctly if the PT_GNU_RELRO segment starts on a page
784 // boundary. This is because the dynamic linker rounds the
785 // p_vaddr field down to the previous page boundary. If
786 // there is anything on the page which should not be read-only,
787 // the program is likely to fail at runtime. So in effect the
788 // linker must only emit a PT_GNU_RELRO segment if it ensures
789 // that it starts on a page boundary.
790 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
791 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
792
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800793 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
Elliott Hughes0266ae52014-02-10 17:46:57 -0800794 seg_page_end - seg_page_start,
795 prot_flags);
796 if (ret < 0) {
797 return -1;
798 }
799 }
800 return 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200801}
802
/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must be always
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro, these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}
823
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000824/* Serialize the GNU relro segments to the given file descriptor. This can be
825 * performed after relocations to allow another process to later share the
826 * relocated segment, if it was loaded at the same address.
827 *
828 * Input:
829 * phdr_table -> program header table
830 * phdr_count -> number of entries in tables
831 * load_bias -> load bias
832 * fd -> writable file descriptor to use
833 * Return:
834 * 0 on error, -1 on failure (error code in errno).
835 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700836int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
837 size_t phdr_count,
838 ElfW(Addr) load_bias,
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000839 int fd) {
840 const ElfW(Phdr)* phdr = phdr_table;
841 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
842 ssize_t file_offset = 0;
843
844 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
845 if (phdr->p_type != PT_GNU_RELRO) {
846 continue;
847 }
848
849 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
850 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
851 ssize_t size = seg_page_end - seg_page_start;
852
853 ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
854 if (written != size) {
855 return -1;
856 }
857 void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
858 MAP_PRIVATE|MAP_FIXED, fd, file_offset);
859 if (map == MAP_FAILED) {
860 return -1;
861 }
862 file_offset += size;
863 }
864 return 0;
865}
866
867/* Where possible, replace the GNU relro segments with mappings of the given
868 * file descriptor. This can be performed after relocations to allow a file
869 * previously created by phdr_table_serialize_gnu_relro in another process to
870 * replace the dirty relocated pages, saving memory, if it was loaded at the
871 * same address. We have to compare the data before we map over it, since some
872 * parts of the relro segment may not be identical due to other libraries in
873 * the process being loaded at different addresses.
874 *
875 * Input:
876 * phdr_table -> program header table
877 * phdr_count -> number of entries in tables
878 * load_bias -> load bias
879 * fd -> readable file descriptor to use
880 * Return:
881 * 0 on error, -1 on failure (error code in errno).
882 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700883int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
884 size_t phdr_count,
885 ElfW(Addr) load_bias,
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000886 int fd) {
887 // Map the file at a temporary location so we can compare its contents.
888 struct stat file_stat;
889 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
890 return -1;
891 }
892 off_t file_size = file_stat.st_size;
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700893 void* temp_mapping = nullptr;
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100894 if (file_size > 0) {
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700895 temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100896 if (temp_mapping == MAP_FAILED) {
897 return -1;
898 }
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000899 }
900 size_t file_offset = 0;
901
902 // Iterate over the relro segments and compare/remap the pages.
903 const ElfW(Phdr)* phdr = phdr_table;
904 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
905
906 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
907 if (phdr->p_type != PT_GNU_RELRO) {
908 continue;
909 }
910
911 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
912 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
913
914 char* file_base = static_cast<char*>(temp_mapping) + file_offset;
915 char* mem_base = reinterpret_cast<char*>(seg_page_start);
916 size_t match_offset = 0;
917 size_t size = seg_page_end - seg_page_start;
918
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100919 if (file_size - file_offset < size) {
920 // File is too short to compare to this segment. The contents are likely
921 // different as well (it's probably for a different library version) so
922 // just don't bother checking.
923 break;
924 }
925
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000926 while (match_offset < size) {
927 // Skip over dissimilar pages.
928 while (match_offset < size &&
929 memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
930 match_offset += PAGE_SIZE;
931 }
932
933 // Count similar pages.
934 size_t mismatch_offset = match_offset;
935 while (mismatch_offset < size &&
936 memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
937 mismatch_offset += PAGE_SIZE;
938 }
939
940 // Map over similar pages.
941 if (mismatch_offset > match_offset) {
942 void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
943 PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
944 if (map == MAP_FAILED) {
945 munmap(temp_mapping, file_size);
946 return -1;
947 }
948 }
949
950 match_offset = mismatch_offset;
951 }
952
953 // Add to the base file offset in case there are multiple relro segments.
954 file_offset += size;
955 }
956 munmap(temp_mapping, file_size);
957 return 0;
958}
959
960
Elliott Hughes4eeb1f12013-10-25 17:38:02 -0700961#if defined(__arm__)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200962
963# ifndef PT_ARM_EXIDX
964# define PT_ARM_EXIDX 0x70000001 /* .ARM.exidx segment */
965# endif
966
967/* Return the address and size of the .ARM.exidx section in memory,
968 * if present.
969 *
970 * Input:
971 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -0700972 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200973 * load_bias -> load bias
974 * Output:
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700975 * arm_exidx -> address of table in memory (null on failure).
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200976 * arm_exidx_count -> number of items in table (0 on failure).
977 * Return:
978 * 0 on error, -1 on failure (_no_ error code in errno)
979 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800980int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
981 ElfW(Addr) load_bias,
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -0800982 ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800983 const ElfW(Phdr)* phdr = phdr_table;
984 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200985
Elliott Hughes0266ae52014-02-10 17:46:57 -0800986 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
987 if (phdr->p_type != PT_ARM_EXIDX) {
988 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200989 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800990
991 *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -0800992 *arm_exidx_count = phdr->p_memsz / 8;
Elliott Hughes0266ae52014-02-10 17:46:57 -0800993 return 0;
994 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700995 *arm_exidx = nullptr;
Elliott Hughes0266ae52014-02-10 17:46:57 -0800996 *arm_exidx_count = 0;
997 return -1;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200998}
Elliott Hughes4eeb1f12013-10-25 17:38:02 -0700999#endif
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001000
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001001/* Return the address and size of the ELF file's .dynamic section in memory,
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001002 * or null if missing.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001003 *
1004 * Input:
1005 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001006 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001007 * load_bias -> load bias
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001008 * Output:
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001009 * dynamic -> address of table in memory (null on failure).
Ningsheng Jiane93be992014-09-16 15:22:10 +08001010 * dynamic_flags -> protection flags for section (unset on failure)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001011 * Return:
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001012 * void
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001013 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001014void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Ningsheng Jiane93be992014-09-16 15:22:10 +08001015 ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
1016 ElfW(Word)* dynamic_flags) {
Dmitriy Ivanov498eb182014-09-05 14:57:59 -07001017 *dynamic = nullptr;
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001018 for (size_t i = 0; i<phdr_count; ++i) {
1019 const ElfW(Phdr)& phdr = phdr_table[i];
1020 if (phdr.p_type == PT_DYNAMIC) {
1021 *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
Ningsheng Jiane93be992014-09-16 15:22:10 +08001022 if (dynamic_flags) {
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001023 *dynamic_flags = phdr.p_flags;
Ningsheng Jiane93be992014-09-16 15:22:10 +08001024 }
Dmitriy Ivanov14669a92014-09-05 16:42:53 -07001025 return;
1026 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001027 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001028}
1029
Evgenii Stepanovd640b222015-07-10 17:54:01 -07001030/* Return the program interpreter string, or nullptr if missing.
1031 *
1032 * Input:
1033 * phdr_table -> program header table
1034 * phdr_count -> number of entries in tables
1035 * load_bias -> load bias
1036 * Return:
1037 * pointer to the program interpreter string.
1038 */
1039const char* phdr_table_get_interpreter_name(const ElfW(Phdr) * phdr_table, size_t phdr_count,
1040 ElfW(Addr) load_bias) {
1041 for (size_t i = 0; i<phdr_count; ++i) {
1042 const ElfW(Phdr)& phdr = phdr_table[i];
1043 if (phdr.p_type == PT_INTERP) {
1044 return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
1045 }
1046 }
1047 return nullptr;
1048}
1049
Robert Grosse4544d9f2014-10-15 14:32:19 -07001050// Sets loaded_phdr_ to the address of the program header table as it appears
1051// in the loaded segments in memory. This is in contrast with phdr_table_,
1052// which is temporary and will be released before the library is relocated.
Elliott Hughes650be4e2013-03-05 18:47:58 -08001053bool ElfReader::FindPhdr() {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001054 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001055
Elliott Hughes650be4e2013-03-05 18:47:58 -08001056 // If there is a PT_PHDR, use it directly.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001057 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001058 if (phdr->p_type == PT_PHDR) {
1059 return CheckPhdr(load_bias_ + phdr->p_vaddr);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001060 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001061 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001062
Elliott Hughes650be4e2013-03-05 18:47:58 -08001063 // Otherwise, check the first loadable segment. If its file offset
1064 // is 0, it starts with the ELF header, and we can trivially find the
1065 // loaded program header from it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001066 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001067 if (phdr->p_type == PT_LOAD) {
1068 if (phdr->p_offset == 0) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001069 ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001070 const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001071 ElfW(Addr) offset = ehdr->e_phoff;
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001072 return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001073 }
1074 break;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001075 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001076 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001077
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001078 DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -08001079 return false;
1080}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001081
Elliott Hughes650be4e2013-03-05 18:47:58 -08001082// Ensures that our program header is actually within a loadable
1083// segment. This should help catch badly-formed ELF files that
1084// would cause the linker to crash later when trying to access it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001085bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
1086 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
1087 ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
Dmitriy Ivanovcf1cbbe2015-10-19 16:57:46 -07001088 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001089 if (phdr->p_type != PT_LOAD) {
1090 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001091 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001092 ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
1093 ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -08001094 if (seg_start <= loaded && loaded_end <= seg_end) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001095 loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001096 return true;
1097 }
1098 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001099 DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
1100 name_.c_str(), reinterpret_cast<void*>(loaded));
Elliott Hughes650be4e2013-03-05 18:47:58 -08001101 return false;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001102}