/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_prctl.h"

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: the value must be treated as a 32-bit unsigned integer, to deal with
  possible wrap-around past UINT32_MAX for large p_vaddr values).

  And phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

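// A worked example of the bias computation above (illustrative only, reusing
// the sample segments from the note): if the first segment's content ends up
// at phdr0_load_address = 0xa0030000 with phdr0->p_vaddr = 0x30000, then
//
//   load_bias = 0xa0030000 - PAGE_START(0x30000) = 0xa0030000 - 0x30000
//             = 0xa0000000
//
// and every p_vaddr in the file maps to p_vaddr + 0xa0000000 in memory.
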
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
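// For example, a typical read-only executable .text segment has
// p_flags == (PF_R | PF_X), so PFLAGS_TO_PROT(PF_R | PF_X) evaluates to
// (PROT_EXEC | PROT_READ), which is what gets passed to mmap()/mprotect().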

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  CHECK(!did_read_);
  CHECK(!did_load_);
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  CHECK(did_read_);
  CHECK(!did_load_);
  if (ReserveAddressSpace(extinfo) &&
      LoadSegments() &&
      FindPhdr()) {
    did_load_ = true;
  }

  return did_load_;
}
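
// A minimal usage sketch (illustrative only; the load_start()/load_size()
// accessors are assumed from the ElfReader declaration in linker_phdr.h):
//
//   ElfReader reader;
//   if (reader.Read(path, fd, 0, file_size) && reader.Load(nullptr)) {
//     // reader.load_start() / reader.load_size() describe the mapping.
//   }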

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_.c_str(), header_.e_machine);
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
            name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN("\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none
  // of them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers).
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size match the PT_DYNAMIC phdr.
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    DL_ERR("\"%s\" .dynamic section has invalid offset: 0x%zx, "
           "expected to match PT_DYNAMIC offset: 0x%zx",
           name_.c_str(),
           static_cast<size_t>(dynamic_shdr->sh_offset),
           pt_dynamic_offset);
    return false;
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    DL_ERR("\"%s\" .dynamic section has invalid size: 0x%zx, "
           "expected to match PT_DYNAMIC filesz: 0x%zx",
           name_.c_str(),
           static_cast<size_t>(dynamic_shdr->sh_size),
           pt_dynamic_filesz);
    return false;
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
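/* Worked example (illustrative): for the two sample segments in the note at
 * the top of this file - vaddr 0x30000/memsz 0x4000 and vaddr 0x40000/memsz
 * 0x8000 - min_vaddr = PAGE_START(0x30000) = 0x30000, max_vaddr =
 * PAGE_END(0x48000) = 0x48000, and the returned load size is 0x18000 bytes.
 */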
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

// Reserve a virtual address range such that if its limits were extended to the
// next 2**align boundary, it would not overlap with any existing mappings.
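// Worked example (illustrative): with PAGE_SIZE = 0x1000, size = 0x5000 and
// align = 0x10000, mmap_size below is align_up(0x5000, 0x10000) + 0x10000 -
// 0x1000 = 0x1f000, which guarantees the over-sized reservation contains a
// 0x10000-aligned start address with room for all 0x5000 bytes after it.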
static void* ReserveAligned(void* hint, size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Address hint is only used in ART for the image mapping, and it is pretty
  // important. Don't mess with it.
  // FIXME: try an aligned allocation and fall back to plain mmap() if the
  // former does not provide a mapping at the requested address?
  if (align == PAGE_SIZE || hint != nullptr) {
    void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is
  // still inside the mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

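  // Pick a random page-aligned start inside the over-sized reservation such
  // that [start, start + size) still fits before the last align boundary,
  // then unmap the unused space before and after it.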
  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;
  size_t n = arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;
  bool strict_hint = false;
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size - load_size_, load_size_, name_.c_str());
      return false;
    }
    start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
    mapped_by_caller_ = true;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;
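
    // Worked example (illustrative, 4KiB pages): for p_vaddr = 0x30100,
    // p_offset = 0x4100, p_filesz = 0x1e00 and load_bias_ = 0xa0000000,
    // seg_start = 0xa0030100, seg_page_start = 0xa0030000, file_page_start =
    // 0x4000 and file_length = 0x5f00 - 0x4000 = 0x1f00, so the mmap64()
    // below maps 0x1f00 bytes at 0xa0030000 starting at file offset 0x4000.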

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
          DL_ERR_AND_LOG("\"%s\": W + E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN("\"%s\": W + E load segments are not allowed", name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally. Used to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // Make sure we're never simultaneously writable / executable.
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
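
// A typical call sequence, per the comments above (illustrative; the actual
// call sites live elsewhere in the linker):
//
//   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
//   // ... apply relocations ...
//   phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
//   phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);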

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}
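
// Sketch of the intended pairing (illustrative): one process serializes its
// relocated relro pages with phdr_table_serialize_gnu_relro(), and another
// process that loaded the same library at the same address later calls
// phdr_table_map_gnu_relro() on the resulting file to replace its dirty
// pages with clean, shareable ones. In practice this is driven by
// android_dlopen_ext() with the ANDROID_DLEXT_WRITE_RELRO and
// ANDROID_DLEXT_USE_RELRO flags.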

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}


#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   dynamic        -> address of table in memory (null on failure).
 *   dynamic_flags  -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}