blob: 8bf4c94c0ec89a97f9f2b40439f8e3d5660b886e [file] [log] [blame]
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
Elliott Hughes650be4e2013-03-05 18:47:58 -080029#include "linker_phdr.h"
30
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020031#include <errno.h>
Elliott Hughes05fc1d72015-01-28 18:02:33 -080032#include <string.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020033#include <sys/mman.h>
Elliott Hughes99d54652018-08-22 10:36:23 -070034#include <sys/prctl.h>
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +000035#include <sys/types.h>
36#include <sys/stat.h>
37#include <unistd.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020038
Elliott Hughes650be4e2013-03-05 18:47:58 -080039#include "linker.h"
Elliott Hughes4cc5a602016-11-15 16:54:16 -080040#include "linker_dlwarning.h"
Dimitry Ivanov48ec2882016-08-04 11:50:36 -070041#include "linker_globals.h"
Elliott Hughes650be4e2013-03-05 18:47:58 -080042#include "linker_debug.h"
Dmitriy Ivanov3c524812015-11-20 17:28:12 -080043#include "linker_utils.h"
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020044
Evgenii Stepanov0a3637d2016-07-06 13:20:59 -070045#include "private/CFIShadow.h" // For kLibraryAlignment
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080046
// Returns the ELF e_machine value matching the architecture this linker
// was itself compiled for; used to reject libraries built for other ABIs.
static int GetTargetElfMachine() {
#if defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__arm__)
  return EM_ARM;
#elif defined(__x86_64__)
  return EM_X86_64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#endif
}
60
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020061/**
62 TECHNICAL NOTE ON ELF LOADING.
63
 An ELF file's program header table contains one or more PT_LOAD
 segments, which correspond to portions of the file that need to
 be mapped into the process' address space.
67
68 Each loadable segment has the following important properties:
69
70 p_offset -> segment file offset
71 p_filesz -> segment file size
72 p_memsz -> segment memory size (always >= p_filesz)
73 p_vaddr -> segment's virtual address
74 p_flags -> segment flags (e.g. readable, writable, executable)
75
Elliott Hughes0266ae52014-02-10 17:46:57 -080076 We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020077
78 The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
79 ranges of virtual addresses. A few rules apply:
80
81 - the virtual address ranges should not overlap.
82
83 - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
84 between them should always be initialized to 0.
85
86 - ranges do not necessarily start or end at page boundaries. Two distinct
87 segments can have their start and end on the same page. In this case, the
88 page inherits the mapping flags of the latter segment.
89
 Finally, the real load address of each segment is not p_vaddr. Instead the
91 loader decides where to load the first segment, then will load all others
92 relative to the first one to respect the initial range layout.
93
94 For example, consider the following list:
95
96 [ offset:0, filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
97 [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],
98
99 This corresponds to two segments that cover these virtual address ranges:
100
101 0x30000...0x34000
102 0x40000...0x48000
103
104 If the loader decides to load the first segment at address 0xa0000000
105 then the segments' load address ranges will be:
106
107 0xa0030000...0xa0034000
108 0xa0040000...0xa0048000
109
110 In other words, all segments must be loaded at an address that has the same
111 constant offset from their p_vaddr value. This offset is computed as the
112 difference between the first segment's load address, and its p_vaddr value.
113
114 However, in practice, segments do _not_ start at page boundaries. Since we
115 can only memory-map at page boundaries, this means that the bias is
116 computed as:
117
118 load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
119
120 (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
121 possible wrap around UINT32_MAX for possible large p_vaddr values).
122
123 And that the phdr0_load_address must start at a page boundary, with
124 the segment's real content starting at:
125
126 phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
127
128 Note that ELF requires the following condition to make the mmap()-ing work:
129
130 PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
131
132 The load_bias must be added to any p_vaddr value read from the ELF file to
133 determine the corresponding memory address.
134
135 **/
136
// Expands to |to| if flag |from| is set in |x|, else 0. Building block for
// translating ELF segment flag bits into mmap() protection bits.
#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
// Converts a program header's p_flags (PF_X/PF_R/PF_W) into the equivalent
// mmap()/mprotect() protection value (PROT_EXEC/PROT_READ/PROT_WRITE).
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
141
// Constructs an empty reader: no file attached (fd_ == -1), nothing read,
// nothing loaded. All real work happens later in Read() and Load().
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}
148
149bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
Jiyong Park02586a22017-05-20 01:01:24 +0900150 if (did_read_) {
151 return true;
152 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700153 name_ = name;
154 fd_ = fd;
155 file_offset_ = file_offset;
156 file_size_ = file_size;
157
158 if (ReadElfHeader() &&
159 VerifyElfHeader() &&
160 ReadProgramHeaders() &&
161 ReadSectionHeaders() &&
162 ReadDynamicSection()) {
163 did_read_ = true;
164 }
165
166 return did_read_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200167}
168
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000169bool ElfReader::Load(const android_dlextinfo* extinfo) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700170 CHECK(did_read_);
Jiyong Park02586a22017-05-20 01:01:24 +0900171 if (did_load_) {
172 return true;
173 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700174 if (ReserveAddressSpace(extinfo) &&
175 LoadSegments() &&
176 FindPhdr()) {
177 did_load_ = true;
178 }
179
180 return did_load_;
181}
182
// Returns the NUL-terminated string at |index| in the dynamic string table.
// Only valid after a successful Read(); CHECKs that the table is mapped and
// that |index| points inside it.
// NOTE(review): the bound check only guarantees the string *starts* inside
// the mapped table; termination before the end of the mapping relies on a
// well-formed string table — confirm callers can tolerate this.
const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}
189
190bool ElfReader::ReadElfHeader() {
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700191 ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800192 if (rc < 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700193 DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800194 return false;
195 }
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700196
Elliott Hughes650be4e2013-03-05 18:47:58 -0800197 if (rc != sizeof(header_)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700198 DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
Elliott Hughesc6200592013-09-30 18:43:46 -0700199 static_cast<size_t>(rc));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800200 return false;
201 }
202 return true;
203}
204
// Maps an ELF e_machine value to its symbolic name for diagnostics;
// unknown values become "EM_???".
static const char* EM_to_string(int em) {
  switch (em) {
    case EM_386:     return "EM_386";
    case EM_AARCH64: return "EM_AARCH64";
    case EM_ARM:     return "EM_ARM";
    case EM_MIPS:    return "EM_MIPS";
    case EM_X86_64:  return "EM_X86_64";
    default:         return "EM_???";
  }
}
213
// Validates the ELF header previously read by ReadElfHeader().
// Checks, in order: magic bytes, ELF class (word size), endianness,
// object type, ELF version, target machine, and section-header geometry.
// Returns false (with the dlerror buffer set via DL_ERR*) on any mismatch.
bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  // Only little-endian objects are supported.
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  // The dynamic linker only loads shared objects / PIE binaries (ET_DYN).
  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  // Reject libraries built for a different architecture.
  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  // Section-header geometry problems are a hard error only for apps
  // targeting Android O (API 26) or above; older apps get a warning so
  // that pre-existing (malformed) binaries keep working.
  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}
296
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700297bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800298 off64_t range_start;
299 off64_t range_end;
300
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700301 // Only header can be located at the 0 offset... This function called to
302 // check DYNSYM and DYNAMIC sections and phdr/shdr - none of them can be
Dimitry Ivanovebe5af72016-07-14 11:15:44 -0700303 // at offset 0.
Dimitry Ivanov0c9d30f2016-07-13 17:06:36 -0700304
305 return offset > 0 &&
306 safe_add(&range_start, file_offset_, offset) &&
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800307 safe_add(&range_end, range_start, size) &&
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700308 (range_start < file_size_) &&
309 (range_end <= file_size_) &&
310 ((offset % alignment) == 0);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800311}
312
Elliott Hughes650be4e2013-03-05 18:47:58 -0800313// Loads the program header table from an ELF file into a read-only private
314// anonymous mmap-ed block.
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700315bool ElfReader::ReadProgramHeaders() {
Elliott Hughes650be4e2013-03-05 18:47:58 -0800316 phdr_num_ = header_.e_phnum;
317
318 // Like the kernel, we only accept program header tables that
319 // are smaller than 64KiB.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800320 if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700321 DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800322 return false;
323 }
324
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800325 // Boundary checks
326 size_t size = phdr_num_ * sizeof(ElfW(Phdr));
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700327 if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
328 DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
329 name_.c_str(),
330 static_cast<size_t>(header_.e_phoff),
331 size);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800332 return false;
333 }
334
335 if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700336 DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800337 return false;
338 }
339
Dmitriy Ivanovcf1cbbe2015-10-19 16:57:46 -0700340 phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
Elliott Hughes650be4e2013-03-05 18:47:58 -0800341 return true;
342}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200343
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700344bool ElfReader::ReadSectionHeaders() {
345 shdr_num_ = header_.e_shnum;
346
Dmitriy Ivanovb76123f2015-11-20 10:42:02 -0800347 if (shdr_num_ == 0) {
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700348 DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
Dmitriy Ivanovb76123f2015-11-20 10:42:02 -0800349 return false;
350 }
351
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800352 size_t size = shdr_num_ * sizeof(ElfW(Shdr));
Dimitry Ivanovbd906752016-08-08 17:12:18 -0700353 if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
354 DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
355 name_.c_str(),
356 static_cast<size_t>(header_.e_shoff),
357 size);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800358 return false;
359 }
360
361 if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700362 DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
363 return false;
364 }
365
366 shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
367 return true;
368}
369
// Locates the .dynamic section through the section headers, cross-checks
// it against the PT_DYNAMIC program header (a hard error only for apps
// targeting Android O / API 26 or above, a warning otherwise), validates
// its linked string table, then maps both .dynamic and that string table,
// setting dynamic_, strtab_ and strtab_size_.
bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_ [i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr
  // NOTE(review): this loop has no break, so it keeps the *last* PT_DYNAMIC
  // entry, while the section loop above takes the *first* SHT_DYNAMIC —
  // presumably well-formed files have exactly one of each; confirm.
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  // Offset mismatch: fatal for API >= 26, warning (plus dlwarning) otherwise.
  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  // Size mismatch: same API-level-gated policy as the offset check.
  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  // sh_link must be a valid section index naming the dynamic string table.
  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  // Both sections must lie fully within the file before mapping them.
  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}
476
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800477/* Returns the size of the extent of all the possibly non-contiguous
478 * loadable segments in an ELF program header table. This corresponds
479 * to the page-aligned size in bytes that needs to be reserved in the
480 * process' address space. If there are no loadable segments, 0 is
481 * returned.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200482 *
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700483 * If out_min_vaddr or out_max_vaddr are not null, they will be
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800484 * set to the minimum and maximum addresses of pages to be reserved,
485 * or 0 if there is nothing to load.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200486 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800487size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
488 ElfW(Addr)* out_min_vaddr,
489 ElfW(Addr)* out_max_vaddr) {
490 ElfW(Addr) min_vaddr = UINTPTR_MAX;
491 ElfW(Addr) max_vaddr = 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200492
Elliott Hughes0266ae52014-02-10 17:46:57 -0800493 bool found_pt_load = false;
494 for (size_t i = 0; i < phdr_count; ++i) {
495 const ElfW(Phdr)* phdr = &phdr_table[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200496
Elliott Hughes0266ae52014-02-10 17:46:57 -0800497 if (phdr->p_type != PT_LOAD) {
498 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200499 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800500 found_pt_load = true;
501
502 if (phdr->p_vaddr < min_vaddr) {
503 min_vaddr = phdr->p_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200504 }
505
Elliott Hughes0266ae52014-02-10 17:46:57 -0800506 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
507 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
508 }
509 }
510 if (!found_pt_load) {
511 min_vaddr = 0;
512 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200513
Elliott Hughes0266ae52014-02-10 17:46:57 -0800514 min_vaddr = PAGE_START(min_vaddr);
515 max_vaddr = PAGE_END(max_vaddr);
516
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700517 if (out_min_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800518 *out_min_vaddr = min_vaddr;
519 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700520 if (out_max_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800521 *out_max_vaddr = max_vaddr;
522 }
523 return max_vaddr - min_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200524}
525
Evgenii Stepanovd13e9a62016-07-15 16:31:42 -0700526// Reserve a virtual address range such that if it's limits were extended to the next 2**align
527// boundary, it would not overlap with any existing mappings.
528static void* ReserveAligned(void* hint, size_t size, size_t align) {
529 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
530 // Address hint is only used in Art for the image mapping, and it is pretty important. Don't mess
531 // with it.
532 // FIXME: try an aligned allocation and fall back to plain mmap() if the former does not provide a
533 // mapping at the requested address?
534 if (align == PAGE_SIZE || hint != nullptr) {
535 void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
536 if (mmap_ptr == MAP_FAILED) {
537 return nullptr;
538 }
539 return mmap_ptr;
540 }
541
542 // Allocate enough space so that the end of the desired region aligned up is still inside the
543 // mapping.
544 size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
545 uint8_t* mmap_ptr =
546 reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
547 if (mmap_ptr == MAP_FAILED) {
548 return nullptr;
549 }
550
551 uint8_t* first = align_up(mmap_ptr, align);
552 uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;
Jiyong Park31cd08f2018-06-01 19:18:56 +0900553
554 // arc4random* is not available in init because /dev/urandom hasn't yet been
555 // created. Don't randomize then.
556 size_t n = is_init() ? 0 : arc4random_uniform((last - first) / PAGE_SIZE + 1);
Evgenii Stepanovd13e9a62016-07-15 16:31:42 -0700557 uint8_t* start = first + n * PAGE_SIZE;
558 munmap(mmap_ptr, start - mmap_ptr);
559 munmap(start + size, mmap_ptr + mmap_size - (start + size));
560 return start;
561}
562
Elliott Hughes650be4e2013-03-05 18:47:58 -0800563// Reserve a virtual address range big enough to hold all loadable
564// segments of a program header table. This is done by creating a
565// private anonymous mmap() with PROT_NONE.
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000566bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800567 ElfW(Addr) min_vaddr;
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800568 load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800569 if (load_size_ == 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700570 DL_ERR("\"%s\" has no loadable segments", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -0800571 return false;
572 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200573
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800574 uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000575 void* start;
576 size_t reserved_size = 0;
577 bool reserved_hint = true;
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700578 bool strict_hint = false;
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700579 // Assume position independent executable by default.
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700580 void* mmap_hint = nullptr;
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000581
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700582 if (extinfo != nullptr) {
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000583 if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
584 reserved_size = extinfo->reserved_size;
585 reserved_hint = false;
586 } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
587 reserved_size = extinfo->reserved_size;
588 }
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700589
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700590 if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700591 mmap_hint = addr;
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700592 } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
593 mmap_hint = extinfo->reserved_addr;
594 strict_hint = true;
Dmitriy Ivanov8a116282015-06-05 22:16:23 -0700595 }
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000596 }
597
598 if (load_size_ > reserved_size) {
599 if (!reserved_hint) {
600 DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700601 reserved_size - load_size_, load_size_, name_.c_str());
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000602 return false;
603 }
Evgenii Stepanovd13e9a62016-07-15 16:31:42 -0700604 start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
605 if (start == nullptr) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700606 DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000607 return false;
608 }
Dmitriy Ivanov126af752015-10-07 16:34:20 -0700609 if (strict_hint && (start != mmap_hint)) {
610 munmap(start, load_size_);
611 DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
612 load_size_, mmap_hint, name_.c_str());
613 return false;
614 }
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000615 } else {
616 start = extinfo->reserved_addr;
Dimitry Ivanovf45b0e92016-01-15 11:13:35 -0800617 mapped_by_caller_ = true;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800618 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200619
Elliott Hughes650be4e2013-03-05 18:47:58 -0800620 load_start_ = start;
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800621 load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800622 return true;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200623}
624
// Maps every PT_LOAD segment of the ELF file into memory at
// load_bias_ + p_vaddr. File-backed pages come straight from fd_;
// the tail of the last file-backed page and any whole pages implied by
// p_memsz > p_filesz (classically .bss) are zero-filled.
// Returns true on success; on failure returns false with the error
// recorded via DL_ERR.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    // Only PT_LOAD entries describe mappable segments.
    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    // Page-aligned span the segment will occupy in memory.
    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    // First byte in memory past the file-backed portion of the segment.
    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    // The mmap below starts at the page containing p_offset, so the
    // mapping length is measured from that page boundary.
    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    // Reject segments whose file extent lies past the end of the file;
    // guards against truncated or maliciously crafted ELF headers.
    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
          " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
          name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
          reinterpret_cast<void*>(phdr->p_filesz),
          reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        // Pre-O targets only get a warning (plus a dlwarning for tooling).
        DL_WARN_documented_change(__ANDROID_API_O__,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      // mmap64: file_offset_ (offset of the ELF within its container,
      // e.g. an APK) plus the page-aligned segment offset can exceed
      // 32 bits.
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      // Label the anonymous pages so they show up as ".bss" in
      // /proc/<pid>/maps, which helps memory attribution tools.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}
720
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000721/* Used internally. Used to set the protection bits of all loaded segments
722 * with optional extra flags (i.e. really PROT_WRITE). Used by
723 * phdr_table_protect_segments and phdr_table_unprotect_segments.
724 */
725static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
726 ElfW(Addr) load_bias, int extra_prot_flags) {
727 const ElfW(Phdr)* phdr = phdr_table;
728 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
729
730 for (; phdr < phdr_limit; phdr++) {
731 if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
732 continue;
733 }
734
735 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
736 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
737
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700738 int prot = PFLAGS_TO_PROT(phdr->p_flags);
739 if ((extra_prot_flags & PROT_WRITE) != 0) {
740 // make sure we're never simultaneously writable / executable
741 prot &= ~PROT_EXEC;
742 }
743
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000744 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
745 seg_page_end - seg_page_start,
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700746 prot | extra_prot_flags);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000747 if (ret < 0) {
748 return -1;
749 }
750 }
751 return 0;
752}
753
754/* Restore the original protection modes for all loadable segments.
755 * You should only call this after phdr_table_unprotect_segments and
756 * applying all relocations.
757 *
758 * Input:
759 * phdr_table -> program header table
760 * phdr_count -> number of entries in tables
761 * load_bias -> load bias
762 * Return:
763 * 0 on error, -1 on failure (error code in errno).
764 */
765int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
766 size_t phdr_count, ElfW(Addr) load_bias) {
767 return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
768}
769
770/* Change the protection of all loaded segments in memory to writable.
771 * This is useful before performing relocations. Once completed, you
772 * will have to call phdr_table_protect_segments to restore the original
773 * protection flags on all segments.
774 *
775 * Note that some writable segments can also have their content turned
776 * to read-only by calling phdr_table_protect_gnu_relro. This is no
777 * performed here.
778 *
779 * Input:
780 * phdr_table -> program header table
781 * phdr_count -> number of entries in tables
782 * load_bias -> load bias
783 * Return:
784 * 0 on error, -1 on failure (error code in errno).
785 */
786int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
787 size_t phdr_count, ElfW(Addr) load_bias) {
788 return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
789}
790
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200791/* Used internally by phdr_table_protect_gnu_relro and
792 * phdr_table_unprotect_gnu_relro.
793 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800794static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
795 ElfW(Addr) load_bias, int prot_flags) {
796 const ElfW(Phdr)* phdr = phdr_table;
797 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200798
Elliott Hughes0266ae52014-02-10 17:46:57 -0800799 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
800 if (phdr->p_type != PT_GNU_RELRO) {
801 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200802 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800803
804 // Tricky: what happens when the relro segment does not start
805 // or end at page boundaries? We're going to be over-protective
806 // here and put every page touched by the segment as read-only.
807
808 // This seems to match Ian Lance Taylor's description of the
809 // feature at http://www.airs.com/blog/archives/189.
810
811 // Extract:
812 // Note that the current dynamic linker code will only work
813 // correctly if the PT_GNU_RELRO segment starts on a page
814 // boundary. This is because the dynamic linker rounds the
815 // p_vaddr field down to the previous page boundary. If
816 // there is anything on the page which should not be read-only,
817 // the program is likely to fail at runtime. So in effect the
818 // linker must only emit a PT_GNU_RELRO segment if it ensures
819 // that it starts on a page boundary.
820 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
821 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
822
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800823 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
Elliott Hughes0266ae52014-02-10 17:46:57 -0800824 seg_page_end - seg_page_start,
825 prot_flags);
826 if (ret < 0) {
827 return -1;
828 }
829 }
830 return 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200831}
832
833/* Apply GNU relro protection if specified by the program header. This will
834 * turn some of the pages of a writable PT_LOAD segment to read-only, as
835 * specified by one or more PT_GNU_RELRO segments. This must be always
836 * performed after relocations.
837 *
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200838 * The areas typically covered are .got and .data.rel.ro, these are
839 * read-only from the program's POV, but contain absolute addresses
840 * that need to be relocated before use.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200841 *
842 * Input:
843 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -0700844 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200845 * load_bias -> load bias
846 * Return:
847 * 0 on error, -1 on failure (error code in errno).
848 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700849int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
850 size_t phdr_count, ElfW(Addr) load_bias) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800851 return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200852}
853
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000854/* Serialize the GNU relro segments to the given file descriptor. This can be
855 * performed after relocations to allow another process to later share the
856 * relocated segment, if it was loaded at the same address.
857 *
858 * Input:
859 * phdr_table -> program header table
860 * phdr_count -> number of entries in tables
861 * load_bias -> load bias
862 * fd -> writable file descriptor to use
863 * Return:
864 * 0 on error, -1 on failure (error code in errno).
865 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700866int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
867 size_t phdr_count,
868 ElfW(Addr) load_bias,
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000869 int fd) {
870 const ElfW(Phdr)* phdr = phdr_table;
871 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
872 ssize_t file_offset = 0;
873
874 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
875 if (phdr->p_type != PT_GNU_RELRO) {
876 continue;
877 }
878
879 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
880 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
881 ssize_t size = seg_page_end - seg_page_start;
882
883 ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
884 if (written != size) {
885 return -1;
886 }
887 void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
888 MAP_PRIVATE|MAP_FIXED, fd, file_offset);
889 if (map == MAP_FAILED) {
890 return -1;
891 }
892 file_offset += size;
893 }
894 return 0;
895}
896
897/* Where possible, replace the GNU relro segments with mappings of the given
898 * file descriptor. This can be performed after relocations to allow a file
899 * previously created by phdr_table_serialize_gnu_relro in another process to
900 * replace the dirty relocated pages, saving memory, if it was loaded at the
901 * same address. We have to compare the data before we map over it, since some
902 * parts of the relro segment may not be identical due to other libraries in
903 * the process being loaded at different addresses.
904 *
905 * Input:
906 * phdr_table -> program header table
907 * phdr_count -> number of entries in tables
908 * load_bias -> load bias
909 * fd -> readable file descriptor to use
910 * Return:
911 * 0 on error, -1 on failure (error code in errno).
912 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700913int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
914 size_t phdr_count,
915 ElfW(Addr) load_bias,
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000916 int fd) {
917 // Map the file at a temporary location so we can compare its contents.
918 struct stat file_stat;
919 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
920 return -1;
921 }
922 off_t file_size = file_stat.st_size;
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700923 void* temp_mapping = nullptr;
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100924 if (file_size > 0) {
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700925 temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100926 if (temp_mapping == MAP_FAILED) {
927 return -1;
928 }
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000929 }
930 size_t file_offset = 0;
931
932 // Iterate over the relro segments and compare/remap the pages.
933 const ElfW(Phdr)* phdr = phdr_table;
934 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
935
936 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
937 if (phdr->p_type != PT_GNU_RELRO) {
938 continue;
939 }
940
941 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
942 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
943
944 char* file_base = static_cast<char*>(temp_mapping) + file_offset;
945 char* mem_base = reinterpret_cast<char*>(seg_page_start);
946 size_t match_offset = 0;
947 size_t size = seg_page_end - seg_page_start;
948
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100949 if (file_size - file_offset < size) {
950 // File is too short to compare to this segment. The contents are likely
951 // different as well (it's probably for a different library version) so
952 // just don't bother checking.
953 break;
954 }
955
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000956 while (match_offset < size) {
957 // Skip over dissimilar pages.
958 while (match_offset < size &&
959 memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
960 match_offset += PAGE_SIZE;
961 }
962
963 // Count similar pages.
964 size_t mismatch_offset = match_offset;
965 while (mismatch_offset < size &&
966 memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
967 mismatch_offset += PAGE_SIZE;
968 }
969
970 // Map over similar pages.
971 if (mismatch_offset > match_offset) {
972 void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
973 PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
974 if (map == MAP_FAILED) {
975 munmap(temp_mapping, file_size);
976 return -1;
977 }
978 }
979
980 match_offset = mismatch_offset;
981 }
982
983 // Add to the base file offset in case there are multiple relro segments.
984 file_offset += size;
985 }
986 munmap(temp_mapping, file_size);
987 return 0;
988}
989
990
#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& entry = phdr_table[i];
    if (entry.p_type != PT_ARM_EXIDX) {
      continue;
    }
    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + entry.p_vaddr);
    // Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes.
    *arm_exidx_count = entry.p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001030
/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  const ElfW(Phdr)* limit = phdr_table + phdr_count;
  for (const ElfW(Phdr)* entry = phdr_table; entry != limit; ++entry) {
    if (entry->p_type != PT_DYNAMIC) {
      continue;
    }
    // First PT_DYNAMIC entry wins.
    *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + entry->p_vaddr);
    if (dynamic_flags != nullptr) {
      *dynamic_flags = entry->p_flags;
    }
    return;
  }
}
1059
/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr) * phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  const ElfW(Phdr)* limit = phdr_table + phdr_count;
  for (const ElfW(Phdr)* entry = phdr_table; entry != limit; ++entry) {
    if (entry->p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + entry->p_vaddr);
    }
  }
  return nullptr;
}
1079
Robert Grosse4544d9f2014-10-15 14:32:19 -07001080// Sets loaded_phdr_ to the address of the program header table as it appears
1081// in the loaded segments in memory. This is in contrast with phdr_table_,
1082// which is temporary and will be released before the library is relocated.
Elliott Hughes650be4e2013-03-05 18:47:58 -08001083bool ElfReader::FindPhdr() {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001084 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001085
Elliott Hughes650be4e2013-03-05 18:47:58 -08001086 // If there is a PT_PHDR, use it directly.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001087 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001088 if (phdr->p_type == PT_PHDR) {
1089 return CheckPhdr(load_bias_ + phdr->p_vaddr);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001090 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001091 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001092
Elliott Hughes650be4e2013-03-05 18:47:58 -08001093 // Otherwise, check the first loadable segment. If its file offset
1094 // is 0, it starts with the ELF header, and we can trivially find the
1095 // loaded program header from it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001096 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001097 if (phdr->p_type == PT_LOAD) {
1098 if (phdr->p_offset == 0) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001099 ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001100 const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001101 ElfW(Addr) offset = ehdr->e_phoff;
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001102 return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001103 }
1104 break;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001105 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001106 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001107
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001108 DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -08001109 return false;
1110}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001111
Elliott Hughes650be4e2013-03-05 18:47:58 -08001112// Ensures that our program header is actually within a loadable
1113// segment. This should help catch badly-formed ELF files that
1114// would cause the linker to crash later when trying to access it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001115bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
1116 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
1117 ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
Dmitriy Ivanovcf1cbbe2015-10-19 16:57:46 -07001118 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001119 if (phdr->p_type != PT_LOAD) {
1120 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001121 }
Elliott Hughes0266ae52014-02-10 17:46:57 -08001122 ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
1123 ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -08001124 if (seg_start <= loaded && loaded_end <= seg_end) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001125 loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001126 return true;
1127 }
1128 }
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001129 DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
1130 name_.c_str(), reinterpret_cast<void*>(loaded));
Elliott Hughes650be4e2013-03-05 18:47:58 -08001131 return false;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001132}