blob: a9873c483478985b754d1b37af86ee7e5f4a6a56 [file] [log] [blame]
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
Elliott Hughes650be4e2013-03-05 18:47:58 -080029#include "linker_phdr.h"
30
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020031#include <errno.h>
Elliott Hughes05fc1d72015-01-28 18:02:33 -080032#include <string.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020033#include <sys/mman.h>
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +000034#include <sys/types.h>
35#include <sys/stat.h>
36#include <unistd.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020037
Elliott Hughes650be4e2013-03-05 18:47:58 -080038#include "linker.h"
Elliott Hughes4cc5a602016-11-15 16:54:16 -080039#include "linker_dlwarning.h"
Dimitry Ivanov48ec2882016-08-04 11:50:36 -070040#include "linker_globals.h"
Elliott Hughes650be4e2013-03-05 18:47:58 -080041#include "linker_debug.h"
Dmitriy Ivanov3c524812015-11-20 17:28:12 -080042#include "linker_utils.h"
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020043
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080044#include "private/bionic_prctl.h"
Evgenii Stepanov0a3637d2016-07-06 13:20:59 -070045#include "private/CFIShadow.h" // For kLibraryAlignment
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080046
// Returns the ELF e_machine value for the architecture this linker was
// compiled for. VerifyElfHeader() compares this against a library's
// e_machine to reject libraries built for a different ABI.
static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#else
  // Previously the function simply fell off the end here, which is undefined
  // behavior for a non-void function. Fail at compile time instead.
#error "Unsupported target architecture"
#endif
}
60
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020061/**
62 TECHNICAL NOTE ON ELF LOADING.
63
64 An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.
67
68 Each loadable segment has the following important properties:
69
70 p_offset -> segment file offset
71 p_filesz -> segment file size
72 p_memsz -> segment memory size (always >= p_filesz)
73 p_vaddr -> segment's virtual address
74 p_flags -> segment flags (e.g. readable, writable, executable)
75
Elliott Hughes0266ae52014-02-10 17:46:57 -080076 We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020077
78 The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
79 ranges of virtual addresses. A few rules apply:
80
81 - the virtual address ranges should not overlap.
82
83 - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
84 between them should always be initialized to 0.
85
86 - ranges do not necessarily start or end at page boundaries. Two distinct
87 segments can have their start and end on the same page. In this case, the
88 page inherits the mapping flags of the latter segment.
89
  Finally, the real load address of each segment is not p_vaddr. Instead the
91 loader decides where to load the first segment, then will load all others
92 relative to the first one to respect the initial range layout.
93
94 For example, consider the following list:
95
96 [ offset:0, filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
97 [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],
98
99 This corresponds to two segments that cover these virtual address ranges:
100
101 0x30000...0x34000
102 0x40000...0x48000
103
104 If the loader decides to load the first segment at address 0xa0000000
105 then the segments' load address ranges will be:
106
107 0xa0030000...0xa0034000
108 0xa0040000...0xa0048000
109
110 In other words, all segments must be loaded at an address that has the same
111 constant offset from their p_vaddr value. This offset is computed as the
112 difference between the first segment's load address, and its p_vaddr value.
113
114 However, in practice, segments do _not_ start at page boundaries. Since we
115 can only memory-map at page boundaries, this means that the bias is
116 computed as:
117
118 load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
119
120 (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
121 possible wrap around UINT32_MAX for possible large p_vaddr values).
122
123 And that the phdr0_load_address must start at a page boundary, with
124 the segment's real content starting at:
125
126 phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
127
128 Note that ELF requires the following condition to make the mmap()-ing work:
129
130 PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
131
132 The load_bias must be added to any p_vaddr value read from the ELF file to
133 determine the corresponding memory address.
134
135 **/
136
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800137#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200138#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
139 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
140 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
141
// Constructs an ElfReader with no file attached and all parsed state cleared.
// Read() and Load() fill these fields in later; the did_read_/did_load_ flags
// make those two calls idempotent.
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}
148
149bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
Jiyong Park02586a22017-05-20 01:01:24 +0900150 if (did_read_) {
151 return true;
152 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700153 name_ = name;
154 fd_ = fd;
155 file_offset_ = file_offset;
156 file_size_ = file_size;
157
158 if (ReadElfHeader() &&
159 VerifyElfHeader() &&
160 ReadProgramHeaders() &&
161 ReadSectionHeaders() &&
162 ReadDynamicSection()) {
163 did_read_ = true;
164 }
165
166 return did_read_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200167}
168
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000169bool ElfReader::Load(const android_dlextinfo* extinfo) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700170 CHECK(did_read_);
Jiyong Park02586a22017-05-20 01:01:24 +0900171 if (did_load_) {
172 return true;
173 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700174 if (ReserveAddressSpace(extinfo) &&
175 LoadSegments() &&
176 FindPhdr()) {
177 did_load_ = true;
178 }
179
180 return did_load_;
181}
182
// Returns the NUL-terminated string at |index| in the mapped dynamic string
// table. Only valid after ReadDynamicSection() has set strtab_/strtab_size_;
// both preconditions are enforced with CHECK.
const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}
189
190bool ElfReader::ReadElfHeader() {
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700191 ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800192 if (rc < 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700193 DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800194 return false;
195 }
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700196
Elliott Hughes650be4e2013-03-05 18:47:58 -0800197 if (rc != sizeof(header_)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700198 DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
Elliott Hughesc6200592013-09-30 18:43:46 -0700199 static_cast<size_t>(rc));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800200 return false;
201 }
202 return true;
203}
204
// Maps an ELF e_machine value to its symbolic name, for diagnostics only.
// Unknown values yield the placeholder "EM_???".
static const char* EM_to_string(int em) {
  switch (em) {
    case EM_386:     return "EM_386";
    case EM_AARCH64: return "EM_AARCH64";
    case EM_ARM:     return "EM_ARM";
    case EM_MIPS:    return "EM_MIPS";
    case EM_X86_64:  return "EM_X86_64";
    default:         return "EM_???";
  }
}
213
// Validates the ELF header read by ReadElfHeader(): magic, class, endianness,
// e_type, e_version, e_machine, and (softly) the section-header fields.
// Hard failures set dlerror and return false. The two section-header checks
// at the bottom are only fatal for apps targeting Android O or later;
// older-targeting apps get a warning plus a dlwarning entry instead.
bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  // Only little-endian objects are supported.
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  // Only shared objects (ET_DYN) can be loaded here.
  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  // Reject libraries built for a different architecture.
  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d (%s)", name_.c_str(), header_.e_machine,
           EM_to_string(header_.e_machine));
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    // Pre-O apps: warn and continue for backwards compatibility.
    DL_WARN("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
            name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    // Pre-O apps: warn and continue for backwards compatibility.
    DL_WARN("\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}
289
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700290bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800291 off64_t range_start;
292 off64_t range_end;
293
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700294 // Only header can be located at the 0 offset... This function called to
295 // check DYNSYM and DYNAMIC sections and phdr/shdr - none of them can be
Dimitry Ivanovebe5af72016-07-14 11:15:44 -0700296 // at offset 0.
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700297
298 return offset > 0 &&
299 safe_add(&range_start, file_offset_, offset) &&
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800300 safe_add(&range_end, range_start, size) &&
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700301 (range_start < file_size_) &&
302 (range_end <= file_size_) &&
303 ((offset % alignment) == 0);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800304}
305
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
// On success phdr_num_/phdr_table_ describe the mapped table; on failure
// dlerror is set and false is returned.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks: the whole table must lie inside the file and be
  // properly aligned for ElfW(Phdr) access.
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  // The fragment owns the mapping; phdr_table_ is a borrowed view into it.
  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200336
// Maps the section header table (mirrors ReadProgramHeaders above).
// On success shdr_num_/shdr_table_ describe the mapped table; on failure
// dlerror is set and false is returned.
bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  // Section headers are required here (ReadDynamicSection needs them).
  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  // Boundary checks: the whole table must lie inside the file and be
  // properly aligned for ElfW(Shdr) access.
  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  // The fragment owns the mapping; shdr_table_ is a borrowed view into it.
  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}
362
// Locates the .dynamic section, cross-checks it against the PT_DYNAMIC
// program header, and maps both .dynamic and its linked string table.
// On success dynamic_, strtab_ and strtab_size_ are set. Mismatches between
// the section header and PT_DYNAMIC are fatal only for apps targeting
// Android O or later; older-targeting apps get warnings instead.
bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_ [i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr
  // (no break: if several PT_DYNAMIC entries exist, the last one wins).
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    // Fatal only for apps targeting Android O or above; warn otherwise.
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN("\"%s\" .dynamic section has invalid offset: 0x%zx, "
            "expected to match PT_DYNAMIC offset: 0x%zx",
            name_.c_str(),
            static_cast<size_t>(dynamic_shdr->sh_offset),
            pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    // Fatal only for apps targeting Android O or above; warn otherwise.
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN("\"%s\" .dynamic section has invalid size: 0x%zx, "
            "expected to match PT_DYNAMIC filesz: 0x%zx",
            name_.c_str(),
            static_cast<size_t>(dynamic_shdr->sh_size),
            pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  // 2. Resolve the string table linked from .dynamic via sh_link.
  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  // 3. Bounds-check and map .dynamic.
  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  // 4. Bounds-check and map the string table.
  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}
465
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800466/* Returns the size of the extent of all the possibly non-contiguous
467 * loadable segments in an ELF program header table. This corresponds
468 * to the page-aligned size in bytes that needs to be reserved in the
469 * process' address space. If there are no loadable segments, 0 is
470 * returned.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200471 *
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700472 * If out_min_vaddr or out_max_vaddr are not null, they will be
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800473 * set to the minimum and maximum addresses of pages to be reserved,
474 * or 0 if there is nothing to load.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200475 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800476size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
477 ElfW(Addr)* out_min_vaddr,
478 ElfW(Addr)* out_max_vaddr) {
479 ElfW(Addr) min_vaddr = UINTPTR_MAX;
480 ElfW(Addr) max_vaddr = 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200481
Elliott Hughes0266ae52014-02-10 17:46:57 -0800482 bool found_pt_load = false;
483 for (size_t i = 0; i < phdr_count; ++i) {
484 const ElfW(Phdr)* phdr = &phdr_table[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200485
Elliott Hughes0266ae52014-02-10 17:46:57 -0800486 if (phdr->p_type != PT_LOAD) {
487 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200488 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800489 found_pt_load = true;
490
491 if (phdr->p_vaddr < min_vaddr) {
492 min_vaddr = phdr->p_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200493 }
494
Elliott Hughes0266ae52014-02-10 17:46:57 -0800495 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
496 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
497 }
498 }
499 if (!found_pt_load) {
500 min_vaddr = 0;
501 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200502
Elliott Hughes0266ae52014-02-10 17:46:57 -0800503 min_vaddr = PAGE_START(min_vaddr);
504 max_vaddr = PAGE_END(max_vaddr);
505
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700506 if (out_min_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800507 *out_min_vaddr = min_vaddr;
508 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700509 if (out_max_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800510 *out_max_vaddr = max_vaddr;
511 }
512 return max_vaddr - min_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200513}
514
// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
// The reservation is PROT_NONE; returns nullptr on mmap failure.
static void* ReserveAligned(void* hint, size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Address hint is only used in Art for the image mapping, and it is pretty important. Don't mess
  // with it.
  // FIXME: try an aligned allocation and fall back to plain mmap() if the former does not provide a
  // mapping at the requested address?
  if (align == PAGE_SIZE || hint != nullptr) {
    // Page alignment (or an explicit hint) needs no over-allocation trick.
    void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

  // Pick a random page-aligned start within [first, last] (arc4random_uniform
  // adds entropy beyond the kernel's mmap randomization), then trim the
  // unused head and tail of the over-sized mapping.
  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;
  size_t n = arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}
548
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
// extinfo (may be null) can supply a caller-provided region
// (ANDROID_DLEXT_RESERVED_ADDRESS[_HINT]) or a fixed load address
// (ANDROID_DLEXT_FORCE_FIXED_VADDR / ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS).
// On success sets load_start_, load_size_ and load_bias_.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;  // true => caller's region is optional, not mandatory
  bool strict_hint = false;   // true => mapping MUST land exactly at mmap_hint
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      // Prelinked library: try to honor its built-in min vaddr.
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    // Caller's region (if any) is too small; with a mandatory reservation
    // that's an error, otherwise fall back to reserving our own region.
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size - load_size_, load_size_, name_.c_str());
      return false;
    }
    start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      // Fixed-address load didn't get its address: undo and fail.
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    // Use the caller-supplied region; note it so we don't munmap it later.
    start = extinfo->reserved_addr;
    mapped_by_caller_ = true;
  }

  load_start_ = start;
  // Bias from the ELF-file vaddrs to the actual load addresses.
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}
610
Elliott Hughes650be4e2013-03-05 18:47:58 -0800611bool ElfReader::LoadSegments() {
612 for (size_t i = 0; i < phdr_num_; ++i) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800613 const ElfW(Phdr)* phdr = &phdr_table_[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200614
Elliott Hughes650be4e2013-03-05 18:47:58 -0800615 if (phdr->p_type != PT_LOAD) {
616 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200617 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800618
619 // Segment addresses in memory.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800620 ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
621 ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800622
Elliott Hughes0266ae52014-02-10 17:46:57 -0800623 ElfW(Addr) seg_page_start = PAGE_START(seg_start);
624 ElfW(Addr) seg_page_end = PAGE_END(seg_end);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800625
Elliott Hughes0266ae52014-02-10 17:46:57 -0800626 ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800627
628 // File offsets.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800629 ElfW(Addr) file_start = phdr->p_offset;
630 ElfW(Addr) file_end = file_start + phdr->p_filesz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800631
Elliott Hughes0266ae52014-02-10 17:46:57 -0800632 ElfW(Addr) file_page_start = PAGE_START(file_start);
633 ElfW(Addr) file_length = file_end - file_page_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800634
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700635 if (file_size_ <= 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700636 DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700637 return false;
638 }
639
skvalex93ce3542015-08-20 01:06:42 +0300640 if (file_end > static_cast<size_t>(file_size_)) {
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700641 DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
642 " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700643 name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
Dmitriy Ivanov3f987f52015-06-25 15:51:41 -0700644 reinterpret_cast<void*>(phdr->p_filesz),
645 reinterpret_cast<void*>(file_end), file_size_);
646 return false;
647 }
648
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700649 if (file_length != 0) {
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700650 int prot = PFLAGS_TO_PROT(phdr->p_flags);
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700651 if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
Elliott Hughes4cc5a602016-11-15 16:54:16 -0800652 // W + E PT_LOAD segments are not allowed in O.
Elliott Hughes5bc78c82016-11-16 11:35:43 -0800653 if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
Elliott Hughes4cc5a602016-11-15 16:54:16 -0800654 DL_ERR_AND_LOG("\"%s\": W + E load segments are not allowed", name_.c_str());
655 return false;
656 }
657 DL_WARN("\"%s\": W + E load segments are not allowed", name_.c_str());
658 add_dlwarning(name_.c_str(), "W+E load segments");
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700659 }
660
Dmitriy Ivanov07e5bc12014-10-03 17:52:44 -0700661 void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700662 file_length,
Dimitry Ivanov9700bab2016-08-10 18:54:06 -0700663 prot,
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700664 MAP_FIXED|MAP_PRIVATE,
665 fd_,
Dmitriy Ivanov07e5bc12014-10-03 17:52:44 -0700666 file_offset_ + file_page_start);
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700667 if (seg_addr == MAP_FAILED) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700668 DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700669 return false;
670 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800671 }
672
673 // if the segment is writable, and does not end on a page boundary,
674 // zero-fill it until the page limit.
675 if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800676 memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800677 }
678
679 seg_file_end = PAGE_END(seg_file_end);
680
681 // seg_file_end is now the first page address after the file
682 // content. If seg_end is larger, we need to zero anything
683 // between them. This is done by using a private anonymous
684 // map for all extra pages.
685 if (seg_page_end > seg_file_end) {
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -0800686 size_t zeromap_size = seg_page_end - seg_file_end;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800687 void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -0800688 zeromap_size,
Elliott Hughes650be4e2013-03-05 18:47:58 -0800689 PFLAGS_TO_PROT(phdr->p_flags),
690 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
691 -1,
692 0);
693 if (zeromap == MAP_FAILED) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700694 DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800695 return false;
696 }
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -0800697
698 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
Elliott Hughes650be4e2013-03-05 18:47:58 -0800699 }
700 }
701 return true;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200702}
703
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000704/* Used internally. Used to set the protection bits of all loaded segments
705 * with optional extra flags (i.e. really PROT_WRITE). Used by
706 * phdr_table_protect_segments and phdr_table_unprotect_segments.
707 */
708static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
709 ElfW(Addr) load_bias, int extra_prot_flags) {
710 const ElfW(Phdr)* phdr = phdr_table;
711 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
712
713 for (; phdr < phdr_limit; phdr++) {
714 if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
715 continue;
716 }
717
718 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
719 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
720
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700721 int prot = PFLAGS_TO_PROT(phdr->p_flags);
722 if ((extra_prot_flags & PROT_WRITE) != 0) {
723 // make sure we're never simultaneously writable / executable
724 prot &= ~PROT_EXEC;
725 }
726
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000727 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
728 seg_page_end - seg_page_start,
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700729 prot | extra_prot_flags);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000730 if (ret < 0) {
731 return -1;
732 }
733 }
734 return 0;
735}
736
/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in tables
 *   load_bias -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}
752
/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in tables
 *   load_bias -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
773
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200774/* Used internally by phdr_table_protect_gnu_relro and
775 * phdr_table_unprotect_gnu_relro.
776 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800777static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
778 ElfW(Addr) load_bias, int prot_flags) {
779 const ElfW(Phdr)* phdr = phdr_table;
780 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200781
Elliott Hughes0266ae52014-02-10 17:46:57 -0800782 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
783 if (phdr->p_type != PT_GNU_RELRO) {
784 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200785 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800786
787 // Tricky: what happens when the relro segment does not start
788 // or end at page boundaries? We're going to be over-protective
789 // here and put every page touched by the segment as read-only.
790
791 // This seems to match Ian Lance Taylor's description of the
792 // feature at http://www.airs.com/blog/archives/189.
793
794 // Extract:
795 // Note that the current dynamic linker code will only work
796 // correctly if the PT_GNU_RELRO segment starts on a page
797 // boundary. This is because the dynamic linker rounds the
798 // p_vaddr field down to the previous page boundary. If
799 // there is anything on the page which should not be read-only,
800 // the program is likely to fail at runtime. So in effect the
801 // linker must only emit a PT_GNU_RELRO segment if it ensures
802 // that it starts on a page boundary.
803 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
804 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
805
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800806 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
Elliott Hughes0266ae52014-02-10 17:46:57 -0800807 seg_page_end - seg_page_start,
808 prot_flags);
809 if (ret < 0) {
810 return -1;
811 }
812 }
813 return 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200814}
815
/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must be always
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro, these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in tables
 *   load_bias -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}
836
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000837/* Serialize the GNU relro segments to the given file descriptor. This can be
838 * performed after relocations to allow another process to later share the
839 * relocated segment, if it was loaded at the same address.
840 *
841 * Input:
842 * phdr_table -> program header table
843 * phdr_count -> number of entries in tables
844 * load_bias -> load bias
845 * fd -> writable file descriptor to use
846 * Return:
847 * 0 on error, -1 on failure (error code in errno).
848 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700849int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
850 size_t phdr_count,
851 ElfW(Addr) load_bias,
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000852 int fd) {
853 const ElfW(Phdr)* phdr = phdr_table;
854 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
855 ssize_t file_offset = 0;
856
857 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
858 if (phdr->p_type != PT_GNU_RELRO) {
859 continue;
860 }
861
862 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
863 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
864 ssize_t size = seg_page_end - seg_page_start;
865
866 ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
867 if (written != size) {
868 return -1;
869 }
870 void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
871 MAP_PRIVATE|MAP_FIXED, fd, file_offset);
872 if (map == MAP_FAILED) {
873 return -1;
874 }
875 file_offset += size;
876 }
877 return 0;
878}
879
880/* Where possible, replace the GNU relro segments with mappings of the given
881 * file descriptor. This can be performed after relocations to allow a file
882 * previously created by phdr_table_serialize_gnu_relro in another process to
883 * replace the dirty relocated pages, saving memory, if it was loaded at the
884 * same address. We have to compare the data before we map over it, since some
885 * parts of the relro segment may not be identical due to other libraries in
886 * the process being loaded at different addresses.
887 *
888 * Input:
889 * phdr_table -> program header table
890 * phdr_count -> number of entries in tables
891 * load_bias -> load bias
892 * fd -> readable file descriptor to use
893 * Return:
894 * 0 on error, -1 on failure (error code in errno).
895 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700896int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
897 size_t phdr_count,
898 ElfW(Addr) load_bias,
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000899 int fd) {
900 // Map the file at a temporary location so we can compare its contents.
901 struct stat file_stat;
902 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
903 return -1;
904 }
905 off_t file_size = file_stat.st_size;
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700906 void* temp_mapping = nullptr;
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100907 if (file_size > 0) {
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700908 temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100909 if (temp_mapping == MAP_FAILED) {
910 return -1;
911 }
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000912 }
913 size_t file_offset = 0;
914
915 // Iterate over the relro segments and compare/remap the pages.
916 const ElfW(Phdr)* phdr = phdr_table;
917 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
918
919 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
920 if (phdr->p_type != PT_GNU_RELRO) {
921 continue;
922 }
923
924 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
925 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
926
927 char* file_base = static_cast<char*>(temp_mapping) + file_offset;
928 char* mem_base = reinterpret_cast<char*>(seg_page_start);
929 size_t match_offset = 0;
930 size_t size = seg_page_end - seg_page_start;
931
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100932 if (file_size - file_offset < size) {
933 // File is too short to compare to this segment. The contents are likely
934 // different as well (it's probably for a different library version) so
935 // just don't bother checking.
936 break;
937 }
938
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000939 while (match_offset < size) {
940 // Skip over dissimilar pages.
941 while (match_offset < size &&
942 memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
943 match_offset += PAGE_SIZE;
944 }
945
946 // Count similar pages.
947 size_t mismatch_offset = match_offset;
948 while (mismatch_offset < size &&
949 memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
950 mismatch_offset += PAGE_SIZE;
951 }
952
953 // Map over similar pages.
954 if (mismatch_offset > match_offset) {
955 void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
956 PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
957 if (map == MAP_FAILED) {
958 munmap(temp_mapping, file_size);
959 return -1;
960 }
961 }
962
963 match_offset = mismatch_offset;
964 }
965
966 // Add to the base file offset in case there are multiple relro segments.
967 file_offset += size;
968 }
969 munmap(temp_mapping, file_size);
970 return 0;
971}
972
973
Elliott Hughes4eeb1f12013-10-25 17:38:02 -0700974#if defined(__arm__)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200975
976# ifndef PT_ARM_EXIDX
977# define PT_ARM_EXIDX 0x70000001 /* .ARM.exidx segment */
978# endif
979
/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in tables
 *   load_bias -> load bias
 * Output:
 *   arm_exidx -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_ARM_EXIDX) {
      *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr.p_vaddr);
      // Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes.
      *arm_exidx_count = phdr.p_memsz / 8;
      return 0;
    }
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
Elliott Hughes4eeb1f12013-10-25 17:38:02 -07001012#endif
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001013
/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in tables
 *   load_bias -> load bias
 * Output:
 *   dynamic -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  const ElfW(Phdr)* limit = phdr_table + phdr_count;
  for (const ElfW(Phdr)* phdr = phdr_table; phdr != limit; ++phdr) {
    if (phdr->p_type != PT_DYNAMIC) {
      continue;
    }
    // First PT_DYNAMIC entry wins.
    *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr->p_vaddr);
    if (dynamic_flags != nullptr) {
      *dynamic_flags = phdr->p_flags;
    }
    return;
  }
}
1042
/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in tables
 *   load_bias -> load bias
 * Return:
 *   pointer to the program interpreter string, or nullptr if there is no
 *   PT_INTERP entry.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr) * phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  const ElfW(Phdr)* limit = phdr_table + phdr_count;
  for (const ElfW(Phdr)* phdr = phdr_table; phdr != limit; ++phdr) {
    if (phdr->p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr->p_vaddr);
    }
  }
  return nullptr;
}
1062
Robert Grosse4544d9f2014-10-15 14:32:19 -07001063// Sets loaded_phdr_ to the address of the program header table as it appears
1064// in the loaded segments in memory. This is in contrast with phdr_table_,
1065// which is temporary and will be released before the library is relocated.
Elliott Hughes650be4e2013-03-05 18:47:58 -08001066bool ElfReader::FindPhdr() {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001067 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001068
Elliott Hughes650be4e2013-03-05 18:47:58 -08001069 // If there is a PT_PHDR, use it directly.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001070 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001071 if (phdr->p_type == PT_PHDR) {
1072 return CheckPhdr(load_bias_ + phdr->p_vaddr);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001073 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001074 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001075
Elliott Hughes650be4e2013-03-05 18:47:58 -08001076 // Otherwise, check the first loadable segment. If its file offset
1077 // is 0, it starts with the ELF header, and we can trivially find the
1078 // loaded program header from it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001079 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001080 if (phdr->p_type == PT_LOAD) {
1081 if (phdr->p_offset == 0) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001082 ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001083 const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001084 ElfW(Addr) offset = ehdr->e_phoff;
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001085 return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001086 }
1087 break;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001088 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001089 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001090
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001091 DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -08001092 return false;
1093}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001094
Elliott Hughes650be4e2013-03-05 18:47:58 -08001095// Ensures that our program header is actually within a loadable
1096// segment. This should help catch badly-formed ELF files that
1097// would cause the linker to crash later when trying to access it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001098bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
1099 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
1100 ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
Dmitriy Ivanovcf1cbbe2015-10-19 16:57:46 -07001101 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001102 if (phdr->p_type != PT_LOAD) {
1103 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001104 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001105 ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
1106 ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -08001107 if (seg_start <= loaded && loaded_end <= seg_end) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001108 loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001109 return true;
1110 }
1111 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001112 DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
1113 name_.c_str(), reinterpret_cast<void*>(loaded));
Elliott Hughes650be4e2013-03-05 18:47:58 -08001114 return false;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001115}