blob: a5eab44ecf5bc256b0b4bdb853440f5563abdc1f [file] [log] [blame]
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
Elliott Hughes650be4e2013-03-05 18:47:58 -080029#include "linker_phdr.h"
30
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020031#include <errno.h>
Elliott Hughes05fc1d72015-01-28 18:02:33 -080032#include <string.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020033#include <sys/mman.h>
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +000034#include <sys/types.h>
35#include <sys/stat.h>
36#include <unistd.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020037
Elliott Hughes650be4e2013-03-05 18:47:58 -080038#include "linker.h"
Elliott Hughes4cc5a602016-11-15 16:54:16 -080039#include "linker_dlwarning.h"
Dimitry Ivanov48ec2882016-08-04 11:50:36 -070040#include "linker_globals.h"
Elliott Hughes650be4e2013-03-05 18:47:58 -080041#include "linker_debug.h"
Dmitriy Ivanov3c524812015-11-20 17:28:12 -080042#include "linker_utils.h"
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020043
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080044#include "private/bionic_prctl.h"
Evgenii Stepanov0a3637d2016-07-06 13:20:59 -070045#include "private/CFIShadow.h" // For kLibraryAlignment
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080046
// Returns the ELF e_machine value that matches the architecture this
// linker was compiled for. Used by VerifyElfHeader() to reject libraries
// built for a different ABI.
static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#else
// Previously the function simply fell off the end (undefined behavior) when
// built for an architecture not listed above; fail at compile time instead.
#error "Unsupported target architecture"
#endif
}
60
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020061/**
62 TECHNICAL NOTE ON ELF LOADING.
63
64 An ELF file's program header table contains one or more PT_LOAD
 65 segments, which correspond to portions of the file that need to
66 be mapped into the process' address space.
67
68 Each loadable segment has the following important properties:
69
70 p_offset -> segment file offset
71 p_filesz -> segment file size
72 p_memsz -> segment memory size (always >= p_filesz)
73 p_vaddr -> segment's virtual address
74 p_flags -> segment flags (e.g. readable, writable, executable)
75
Elliott Hughes0266ae52014-02-10 17:46:57 -080076 We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020077
78 The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
79 ranges of virtual addresses. A few rules apply:
80
81 - the virtual address ranges should not overlap.
82
83 - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
84 between them should always be initialized to 0.
85
86 - ranges do not necessarily start or end at page boundaries. Two distinct
87 segments can have their start and end on the same page. In this case, the
88 page inherits the mapping flags of the latter segment.
89
 90 Finally, the real load address of each segment is not p_vaddr. Instead the
91 loader decides where to load the first segment, then will load all others
92 relative to the first one to respect the initial range layout.
93
94 For example, consider the following list:
95
96 [ offset:0, filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
97 [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],
98
99 This corresponds to two segments that cover these virtual address ranges:
100
101 0x30000...0x34000
102 0x40000...0x48000
103
104 If the loader decides to load the first segment at address 0xa0000000
105 then the segments' load address ranges will be:
106
107 0xa0030000...0xa0034000
108 0xa0040000...0xa0048000
109
110 In other words, all segments must be loaded at an address that has the same
111 constant offset from their p_vaddr value. This offset is computed as the
112 difference between the first segment's load address, and its p_vaddr value.
113
114 However, in practice, segments do _not_ start at page boundaries. Since we
115 can only memory-map at page boundaries, this means that the bias is
116 computed as:
117
118 load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
119
120 (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
121 possible wrap around UINT32_MAX for possible large p_vaddr values).
122
123 And that the phdr0_load_address must start at a page boundary, with
124 the segment's real content starting at:
125
126 phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
127
128 Note that ELF requires the following condition to make the mmap()-ing work:
129
130 PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
131
132 The load_bias must be added to any p_vaddr value read from the ELF file to
133 determine the corresponding memory address.
134
135 **/
136
// Translate a program header's p_flags bitmask (PF_R/PF_W/PF_X) into the
// PROT_* bits expected by mmap()/mprotect(). MAYBE_MAP_FLAG contributes
// 'to' iff the flag bit 'from' is set in 'x'.
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
141
// Construct an ElfReader in its pristine state: nothing read, nothing
// loaded, no file attached (fd_ == -1). Callers attach a file via Read()
// and map it via Load().
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}
148
149bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
Jiyong Park02586a22017-05-20 01:01:24 +0900150 if (did_read_) {
151 return true;
152 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700153 name_ = name;
154 fd_ = fd;
155 file_offset_ = file_offset;
156 file_size_ = file_size;
157
158 if (ReadElfHeader() &&
159 VerifyElfHeader() &&
160 ReadProgramHeaders() &&
161 ReadSectionHeaders() &&
162 ReadDynamicSection()) {
163 did_read_ = true;
164 }
165
166 return did_read_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200167}
168
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000169bool ElfReader::Load(const android_dlextinfo* extinfo) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700170 CHECK(did_read_);
Jiyong Park02586a22017-05-20 01:01:24 +0900171 if (did_load_) {
172 return true;
173 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700174 if (ReserveAddressSpace(extinfo) &&
175 LoadSegments() &&
176 FindPhdr()) {
177 did_load_ = true;
178 }
179
180 return did_load_;
181}
182
// Returns the NUL-terminated string at `index` bytes into the dynamic
// string table. Preconditions (CHECK-enforced): ReadDynamicSection() has
// mapped the strtab, and the index lies within it.
const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}
189
190bool ElfReader::ReadElfHeader() {
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700191 ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800192 if (rc < 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700193 DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800194 return false;
195 }
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700196
Elliott Hughes650be4e2013-03-05 18:47:58 -0800197 if (rc != sizeof(header_)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700198 DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
Elliott Hughesc6200592013-09-30 18:43:46 -0700199 static_cast<size_t>(rc));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800200 return false;
201 }
202 return true;
203}
204
// Human-readable name for the ELF e_machine values we might plausibly see,
// used to make "wrong architecture" diagnostics self-explanatory.
static const char* EM_to_string(int em) {
  switch (em) {
    case EM_386:     return "EM_386";
    case EM_AARCH64: return "EM_AARCH64";
    case EM_ARM:     return "EM_ARM";
    case EM_MIPS:    return "EM_MIPS";
    case EM_X86_64:  return "EM_X86_64";
    default:         return "EM_???";
  }
}
213
// Sanity-check the ELF header read by ReadElfHeader(): magic, class,
// endianness, type, version and machine. Some historically-tolerated
// defects (bad e_shentsize, zero e_shstrndx) only become hard errors for
// apps targeting Android O (API 26) and above; older apps get a warning.
bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  // All Android ABIs are little-endian.
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  // Only shared objects / PIE executables are loadable (not ET_EXEC).
  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  // Reject libraries built for a different architecture.
  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d (%s)", name_.c_str(), header_.e_machine,
           EM_to_string(header_.e_machine));
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    // Pre-O apps: warn (and record for dlwarning UI) but keep loading.
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    // Pre-O apps: warn but keep loading.
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}
293
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700294bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800295 off64_t range_start;
296 off64_t range_end;
297
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700298 // Only header can be located at the 0 offset... This function called to
299 // check DYNSYM and DYNAMIC sections and phdr/shdr - none of them can be
Dimitry Ivanovebe5af72016-07-14 11:15:44 -0700300 // at offset 0.
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700301
302 return offset > 0 &&
303 safe_add(&range_start, file_offset_, offset) &&
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800304 safe_add(&range_end, range_start, size) &&
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700305 (range_start < file_size_) &&
306 (range_end <= file_size_) &&
307 ((offset % alignment) == 0);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800308}
309
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks: the whole table must lie inside the file and be
  // aligned for ElfW(Phdr) access.
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  // The MappedFileFragment owns the mapping and unmaps it on destruction.
  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200340
// Maps the section header table, mirroring ReadProgramHeaders(): bounds-
// and alignment-check the table against the file, then mmap it read-only.
bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  // Section headers are required: ReadDynamicSection() locates .dynamic
  // and its string table through them.
  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}
366
// Locates the .dynamic section via the section headers, cross-checks it
// against the PT_DYNAMIC program header, and maps both .dynamic and the
// string table it links to (sh_link). Mismatches between shdr and phdr
// views are hard errors for apps targeting Android O+, warnings otherwise.
bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_ [i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  // Offset mismatch: fatal for O+ apps, warning (plus dlwarning record) before.
  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  // Size mismatch: same O+ gating as above.
  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  // sh_link indexes the string table section; validate before dereferencing.
  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}
473
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800474/* Returns the size of the extent of all the possibly non-contiguous
475 * loadable segments in an ELF program header table. This corresponds
476 * to the page-aligned size in bytes that needs to be reserved in the
477 * process' address space. If there are no loadable segments, 0 is
478 * returned.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200479 *
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700480 * If out_min_vaddr or out_max_vaddr are not null, they will be
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800481 * set to the minimum and maximum addresses of pages to be reserved,
482 * or 0 if there is nothing to load.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200483 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800484size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
485 ElfW(Addr)* out_min_vaddr,
486 ElfW(Addr)* out_max_vaddr) {
487 ElfW(Addr) min_vaddr = UINTPTR_MAX;
488 ElfW(Addr) max_vaddr = 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200489
Elliott Hughes0266ae52014-02-10 17:46:57 -0800490 bool found_pt_load = false;
491 for (size_t i = 0; i < phdr_count; ++i) {
492 const ElfW(Phdr)* phdr = &phdr_table[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200493
Elliott Hughes0266ae52014-02-10 17:46:57 -0800494 if (phdr->p_type != PT_LOAD) {
495 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200496 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800497 found_pt_load = true;
498
499 if (phdr->p_vaddr < min_vaddr) {
500 min_vaddr = phdr->p_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200501 }
502
Elliott Hughes0266ae52014-02-10 17:46:57 -0800503 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
504 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
505 }
506 }
507 if (!found_pt_load) {
508 min_vaddr = 0;
509 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200510
Elliott Hughes0266ae52014-02-10 17:46:57 -0800511 min_vaddr = PAGE_START(min_vaddr);
512 max_vaddr = PAGE_END(max_vaddr);
513
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700514 if (out_min_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800515 *out_min_vaddr = min_vaddr;
516 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700517 if (out_max_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800518 *out_max_vaddr = max_vaddr;
519 }
520 return max_vaddr - min_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200521}
522
// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
// Returns the start of the reservation, or nullptr if mmap failed. When no
// hint is given and align > PAGE_SIZE, the result is additionally
// randomized among the page-granular positions that satisfy the alignment.
static void* ReserveAligned(void* hint, size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Address hint is only used in Art for the image mapping, and it is pretty important. Don't mess
  // with it.
  // FIXME: try an aligned allocation and fall back to plain mmap() if the former does not provide a
  // mapping at the requested address?
  if (align == PAGE_SIZE || hint != nullptr) {
    void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

  // Pick a random page-aligned start inside [first, last] such that
  // [start, start + size) still fits in the over-sized mapping, then trim
  // the unused head and tail of the reservation.
  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;
  size_t n = arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}
556
Elliott Hughes650be4e2013-03-05 18:47:58 -0800557// Reserve a virtual address range big enough to hold all loadable
558// segments of a program header table. This is done by creating a
559// private anonymous mmap() with PROT_NONE.
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000560bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800561 ElfW(Addr) min_vaddr;
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800562 load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800563 if (load_size_ == 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700564 DL_ERR("\"%s\" has no loadable segments", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -0800565 return false;
566 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200567
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800568 uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000569 void* start;
570 size_t reserved_size = 0;
571 bool reserved_hint = true;
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700572 bool strict_hint = false;
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700573 // Assume position independent executable by default.
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700574 void* mmap_hint = nullptr;
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000575
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700576 if (extinfo != nullptr) {
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000577 if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
578 reserved_size = extinfo->reserved_size;
579 reserved_hint = false;
580 } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
581 reserved_size = extinfo->reserved_size;
582 }
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700583
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700584 if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700585 mmap_hint = addr;
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700586 } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
587 mmap_hint = extinfo->reserved_addr;
588 strict_hint = true;
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700589 }
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000590 }
591
592 if (load_size_ > reserved_size) {
593 if (!reserved_hint) {
594 DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700595 reserved_size - load_size_, load_size_, name_.c_str());
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000596 return false;
597 }
Evgenii Stepanovd13e9a62016-07-15 16:31:42 -0700598 start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
599 if (start == nullptr) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700600 DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000601 return false;
602 }
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700603 if (strict_hint && (start != mmap_hint)) {
604 munmap(start, load_size_);
605 DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
606 load_size_, mmap_hint, name_.c_str());
607 return false;
608 }
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000609 } else {
610 start = extinfo->reserved_addr;
Dimitry Ivanovf45b0e92016-01-15 11:13:35 -0800611 mapped_by_caller_ = true;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800612 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200613
Elliott Hughes650be4e2013-03-05 18:47:58 -0800614 load_start_ = start;
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800615 load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800616 return true;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200617}
618
// Map every PT_LOAD segment of the ELF file into memory at
// load_bias_ + p_vaddr. File-backed pages come from fd_ (starting at
// file_offset_ within it); the tail of each writable segment is
// zero-filled and any extra .bss pages get an anonymous mapping.
// Returns true on success, false (with DL_ERR set) on failure.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    // End of the file-backed portion of the segment in memory.
    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    // Reject empty/unreadable files before trusting p_offset/p_filesz.
    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    // NOTE(review): p_offset + p_filesz could wrap for a hostile ELF
    // header; this bounds check assumes no overflow — TODO confirm
    // upstream hardening covers that case.
    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        // Pre-O targets only get a warning for the insecure layout.
        DL_WARN_documented_change(__ANDROID_API_O__,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      // mmap64: file_offset_ is where this ELF image begins inside fd_
      // (non-zero when the library is embedded in a larger file), so the
      // effective offset can exceed 32 bits.
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      // Label the anonymous pages so they show up as ".bss" in /proc maps.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}
714
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000715/* Used internally. Used to set the protection bits of all loaded segments
716 * with optional extra flags (i.e. really PROT_WRITE). Used by
717 * phdr_table_protect_segments and phdr_table_unprotect_segments.
718 */
719static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
720 ElfW(Addr) load_bias, int extra_prot_flags) {
721 const ElfW(Phdr)* phdr = phdr_table;
722 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
723
724 for (; phdr < phdr_limit; phdr++) {
725 if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
726 continue;
727 }
728
729 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
730 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
731
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700732 int prot = PFLAGS_TO_PROT(phdr->p_flags);
733 if ((extra_prot_flags & PROT_WRITE) != 0) {
734 // make sure we're never simultaneously writable / executable
735 prot &= ~PROT_EXEC;
736 }
737
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000738 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
739 seg_page_end - seg_page_start,
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700740 prot | extra_prot_flags);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000741 if (ret < 0) {
742 return -1;
743 }
744 }
745 return 0;
746}
747
/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}
763
/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
784
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200785/* Used internally by phdr_table_protect_gnu_relro and
786 * phdr_table_unprotect_gnu_relro.
787 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800788static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
789 ElfW(Addr) load_bias, int prot_flags) {
790 const ElfW(Phdr)* phdr = phdr_table;
791 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200792
Elliott Hughes0266ae52014-02-10 17:46:57 -0800793 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
794 if (phdr->p_type != PT_GNU_RELRO) {
795 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200796 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800797
798 // Tricky: what happens when the relro segment does not start
799 // or end at page boundaries? We're going to be over-protective
800 // here and put every page touched by the segment as read-only.
801
802 // This seems to match Ian Lance Taylor's description of the
803 // feature at http://www.airs.com/blog/archives/189.
804
805 // Extract:
806 // Note that the current dynamic linker code will only work
807 // correctly if the PT_GNU_RELRO segment starts on a page
808 // boundary. This is because the dynamic linker rounds the
809 // p_vaddr field down to the previous page boundary. If
810 // there is anything on the page which should not be read-only,
811 // the program is likely to fail at runtime. So in effect the
812 // linker must only emit a PT_GNU_RELRO segment if it ensures
813 // that it starts on a page boundary.
814 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
815 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
816
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800817 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
Elliott Hughes0266ae52014-02-10 17:46:57 -0800818 seg_page_end - seg_page_start,
819 prot_flags);
820 if (ret < 0) {
821 return -1;
822 }
823 }
824 return 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200825}
826
/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must be always
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro, these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}
847
/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  // Running offset into fd: relro segments are written back-to-back,
  // each padded out to whole pages.
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    // Dump the segment's current (relocated) contents to the file...
    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    // ...then immediately remap the same address range from the file, so
    // these pages become file-backed and shareable with other processes.
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}
890
891/* Where possible, replace the GNU relro segments with mappings of the given
892 * file descriptor. This can be performed after relocations to allow a file
893 * previously created by phdr_table_serialize_gnu_relro in another process to
894 * replace the dirty relocated pages, saving memory, if it was loaded at the
895 * same address. We have to compare the data before we map over it, since some
896 * parts of the relro segment may not be identical due to other libraries in
897 * the process being loaded at different addresses.
898 *
899 * Input:
900 * phdr_table -> program header table
901 * phdr_count -> number of entries in tables
902 * load_bias -> load bias
903 * fd -> readable file descriptor to use
904 * Return:
905 * 0 on error, -1 on failure (error code in errno).
906 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700907int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
908 size_t phdr_count,
909 ElfW(Addr) load_bias,
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000910 int fd) {
911 // Map the file at a temporary location so we can compare its contents.
912 struct stat file_stat;
913 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
914 return -1;
915 }
916 off_t file_size = file_stat.st_size;
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700917 void* temp_mapping = nullptr;
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100918 if (file_size > 0) {
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700919 temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100920 if (temp_mapping == MAP_FAILED) {
921 return -1;
922 }
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000923 }
924 size_t file_offset = 0;
925
926 // Iterate over the relro segments and compare/remap the pages.
927 const ElfW(Phdr)* phdr = phdr_table;
928 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
929
930 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
931 if (phdr->p_type != PT_GNU_RELRO) {
932 continue;
933 }
934
935 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
936 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
937
938 char* file_base = static_cast<char*>(temp_mapping) + file_offset;
939 char* mem_base = reinterpret_cast<char*>(seg_page_start);
940 size_t match_offset = 0;
941 size_t size = seg_page_end - seg_page_start;
942
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100943 if (file_size - file_offset < size) {
944 // File is too short to compare to this segment. The contents are likely
945 // different as well (it's probably for a different library version) so
946 // just don't bother checking.
947 break;
948 }
949
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000950 while (match_offset < size) {
951 // Skip over dissimilar pages.
952 while (match_offset < size &&
953 memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
954 match_offset += PAGE_SIZE;
955 }
956
957 // Count similar pages.
958 size_t mismatch_offset = match_offset;
959 while (mismatch_offset < size &&
960 memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
961 mismatch_offset += PAGE_SIZE;
962 }
963
964 // Map over similar pages.
965 if (mismatch_offset > match_offset) {
966 void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
967 PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
968 if (map == MAP_FAILED) {
969 munmap(temp_mapping, file_size);
970 return -1;
971 }
972 }
973
974 match_offset = mismatch_offset;
975 }
976
977 // Add to the base file offset in case there are multiple relro segments.
978 file_offset += size;
979 }
980 munmap(temp_mapping, file_size);
981 return 0;
982}
983
984
#if defined(__arm__)

# ifndef PT_ARM_EXIDX
#  define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
# endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    // Each .ARM.exidx entry is 8 bytes (two 32-bit words, per the ARM EHABI).
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001024
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001025/* Return the address and size of the ELF file's .dynamic section in memory,
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001026 * or null if missing.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001027 *
1028 * Input:
1029 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -07001030 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001031 * load_bias -> load bias
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001032 * Output:
Dmitriy Ivanov851135b2014-08-29 12:02:36 -07001033 * dynamic -> address of table in memory (null on failure).
Ningsheng Jiane93be992014-09-16 15:22:10 +08001034 * dynamic_flags -> protection flags for section (unset on failure)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001035 * Return:
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +02001036 * void
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001037 */
Elliott Hughes0266ae52014-02-10 17:46:57 -08001038void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
Ningsheng Jiane93be992014-09-16 15:22:10 +08001039 ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
1040 ElfW(Word)* dynamic_flags) {
Dmitriy Ivanov498eb182014-09-05 14:57:59 -07001041 *dynamic = nullptr;
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001042 for (size_t i = 0; i<phdr_count; ++i) {
1043 const ElfW(Phdr)& phdr = phdr_table[i];
1044 if (phdr.p_type == PT_DYNAMIC) {
1045 *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
Ningsheng Jiane93be992014-09-16 15:22:10 +08001046 if (dynamic_flags) {
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -07001047 *dynamic_flags = phdr.p_flags;
Ningsheng Jiane93be992014-09-16 15:22:10 +08001048 }
Dmitriy Ivanov14669a92014-09-05 16:42:53 -07001049 return;
1050 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001051 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001052}
1053
Evgenii Stepanovd640b222015-07-10 17:54:01 -07001054/* Return the program interpreter string, or nullptr if missing.
1055 *
1056 * Input:
1057 * phdr_table -> program header table
1058 * phdr_count -> number of entries in tables
1059 * load_bias -> load bias
1060 * Return:
1061 * pointer to the program interpreter string.
1062 */
1063const char* phdr_table_get_interpreter_name(const ElfW(Phdr) * phdr_table, size_t phdr_count,
1064 ElfW(Addr) load_bias) {
1065 for (size_t i = 0; i<phdr_count; ++i) {
1066 const ElfW(Phdr)& phdr = phdr_table[i];
1067 if (phdr.p_type == PT_INTERP) {
1068 return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
1069 }
1070 }
1071 return nullptr;
1072}
1073
Robert Grosse4544d9f2014-10-15 14:32:19 -07001074// Sets loaded_phdr_ to the address of the program header table as it appears
1075// in the loaded segments in memory. This is in contrast with phdr_table_,
1076// which is temporary and will be released before the library is relocated.
Elliott Hughes650be4e2013-03-05 18:47:58 -08001077bool ElfReader::FindPhdr() {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001078 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001079
Elliott Hughes650be4e2013-03-05 18:47:58 -08001080 // If there is a PT_PHDR, use it directly.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001081 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001082 if (phdr->p_type == PT_PHDR) {
1083 return CheckPhdr(load_bias_ + phdr->p_vaddr);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001084 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001085 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001086
Elliott Hughes650be4e2013-03-05 18:47:58 -08001087 // Otherwise, check the first loadable segment. If its file offset
1088 // is 0, it starts with the ELF header, and we can trivially find the
1089 // loaded program header from it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001090 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001091 if (phdr->p_type == PT_LOAD) {
1092 if (phdr->p_offset == 0) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001093 ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001094 const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001095 ElfW(Addr) offset = ehdr->e_phoff;
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001096 return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001097 }
1098 break;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001099 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001100 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001101
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001102 DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -08001103 return false;
1104}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001105
// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  // End (exclusive) of the candidate in-memory program header table.
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    // The bound uses p_filesz rather than p_memsz — presumably so the
    // phdr table must fall within the file-backed part of the segment,
    // not zero-filled .bss pages. NOTE(review): confirm intentional.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      // Candidate table lies fully inside this loadable segment; accept it.
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}