AArch64: Linker64 support for AArch64
Add support for AArch64 to the linker64 target.
Change-Id: I8dfd9711278f6706063e91f626b6007ea7a3dd6e
Signed-off-by: Marcus Oakland <marcus.oakland@arm.com>
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 6ebba8e..81ca2f5 100755
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -31,6 +31,7 @@
#include <fcntl.h>
#include <linux/auxvec.h>
#include <pthread.h>
+#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -138,10 +139,18 @@
#if COUNT_PAGES
static unsigned bitmask[4096];
+#if defined(__LP64__)
+#define MARK(offset) \
+ do { \
+ if ((((offset) >> 12) >> 5) < 4096) \
+ bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
+ } while(0)
+#else
#define MARK(offset) \
do { \
bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
} while(0)
+#endif
#else
#define MARK(x) do {} while (0)
#endif
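
For reference, a minimal standalone sketch (not part of the patch, with hypothetical names) of the index arithmetic the new LP64 MARK macro performs, assuming 4 KiB pages and 32-bit unsigned entries in bitmask[]:

    #include <stdint.h>

    static unsigned bitmask_sketch[4096];

    // offset >> 12 gives the page number (4 KiB pages); >> 5 selects the
    // 32-bit word in the bitmask; & 31 selects the bit within that word.
    static inline void mark_page(uintptr_t offset) {
      uintptr_t page = offset >> 12;
      uintptr_t word = page >> 5;
      if (word < 4096) {              // same bounds check as the macro above
        bitmask_sketch[word] |= 1u << (page & 31);
      }
    }
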
@@ -889,7 +898,20 @@
*/
switch (type) {
-#if defined(__x86_64__)
+#if defined(__aarch64__)
+ case R_AARCH64_JUMP_SLOT:
+ case R_AARCH64_GLOB_DAT:
+ case R_AARCH64_ABS64:
+ case R_AARCH64_ABS32:
+ case R_AARCH64_ABS16:
+ case R_AARCH64_RELATIVE:
+ /*
+ * sym_addr was initialized to zero above, or the relocation
+ * code below does not care about the value of sym_addr.
+ * No need to do anything.
+ */
+ break;
+#elif defined(__x86_64__)
case R_X86_64_JUMP_SLOT:
case R_X86_64_GLOB_DAT:
case R_X86_64_32:
@@ -900,7 +922,6 @@
sym_addr = reloc;
break;
#endif
-
default:
DL_ERR("unknown weak reloc type %d @ %p (%d)", type, rela, (int) (rela - start));
return -1;
@@ -915,7 +936,197 @@
}
switch (type) {
-#if defined(__x86_64__)
+#if defined(__aarch64__)
+ case R_AARCH64_JUMP_SLOT:
+ count_relocation(kRelocAbsolute);
+ MARK(rela->r_offset);
+ TRACE_TYPE(RELO, "RELO JMP_SLOT %16lx <- %16lx %s\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ sym_name);
+ *reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend);
+ break;
+ case R_AARCH64_GLOB_DAT:
+ count_relocation(kRelocAbsolute);
+ MARK(rela->r_offset);
+ TRACE_TYPE(RELO, "RELO GLOB_DAT %16lx <- %16lx %s\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ sym_name);
+ *reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend);
+ break;
+ case R_AARCH64_ABS64:
+ count_relocation(kRelocAbsolute);
+ MARK(rela->r_offset);
+ TRACE_TYPE(RELO, "RELO ABS64 %16lx <- %16lx %s\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ sym_name);
+ *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
+ break;
+ case R_AARCH64_ABS32:
+ count_relocation(kRelocAbsolute);
+ MARK(rela->r_offset);
+ TRACE_TYPE(RELO, "RELO ABS32 %16lx <- %16lx %s\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ sym_name);
+ if ((static_cast<Elf_Addr>(INT32_MIN) <=
+ (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) &&
+ ((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <=
+ static_cast<Elf_Addr>(UINT32_MAX))) {
+ *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
+ } else {
+ DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
+ (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
+ static_cast<Elf_Addr>(INT32_MIN),
+ static_cast<Elf_Addr>(UINT32_MAX));
+ return -1;
+ }
+ break;
+ case R_AARCH64_ABS16:
+ count_relocation(kRelocAbsolute);
+ MARK(rela->r_offset);
+ TRACE_TYPE(RELO, "RELO ABS16 %16lx <- %16lx %s\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ sym_name);
+ if ((static_cast<Elf_Addr>(INT16_MIN) <=
+ (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) &&
+ ((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <=
+ static_cast<Elf_Addr>(UINT16_MAX))) {
+ *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
+ } else {
+ DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
+ (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
+ static_cast<Elf_Addr>(INT16_MIN),
+ static_cast<Elf_Addr>(UINT16_MAX));
+ return -1;
+ }
+ break;
+ case R_AARCH64_PREL64:
+ count_relocation(kRelocRelative);
+ MARK(rela->r_offset);
+ TRACE_TYPE(RELO, "RELO REL64 %16lx <- %16lx - %16lx %s\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ rela->r_offset,
+ sym_name);
+ *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
+ break;
+ case R_AARCH64_PREL32:
+ count_relocation(kRelocRelative);
+ MARK(rela->r_offset);
+ TRACE_TYPE(RELO, "RELO REL32 %16lx <- %16lx - %16lx %s\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ rela->r_offset, sym_name);
+ if ((static_cast<Elf_Addr>(INT32_MIN) <=
+ (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
+ ((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
+ static_cast<Elf_Addr>(UINT32_MAX))) {
+ *reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
+ } else {
+ DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
+ (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
+ static_cast<Elf_Addr>(INT32_MIN),
+ static_cast<Elf_Addr>(UINT32_MAX));
+ return -1;
+ }
+ break;
+ case R_AARCH64_PREL16:
+ count_relocation(kRelocRelative);
+ MARK(rela->r_offset);
+ TRACE_TYPE(RELO, "RELO REL16 %16lx <- %16lx - %16lx %s\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ rela->r_offset, sym_name);
+ if ((static_cast<Elf_Addr>(INT16_MIN) <=
+ (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
+ ((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
+ static_cast<Elf_Addr>(UINT16_MAX))) {
+ *reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
+ } else {
+ DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
+ (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
+ static_cast<Elf_Addr>(INT16_MIN),
+ static_cast<Elf_Addr>(UINT16_MAX));
+ return -1;
+ }
+ break;
+
+ case R_AARCH64_RELATIVE:
+ count_relocation(kRelocRelative);
+ MARK(rela->r_offset);
+ if (sym) {
+ DL_ERR("odd RELATIVE form...");
+ return -1;
+ }
+ TRACE_TYPE(RELO, "RELO RELATIVE %16lx <- %16lx\n",
+ reloc,
+ (si->base + rela->r_addend));
+ *reinterpret_cast<Elf_Addr*>(reloc) = (si->base + rela->r_addend);
+ break;
+
+ case R_AARCH64_COPY:
+ if ((si->flags & FLAG_EXE) == 0) {
+ /*
+ * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
+ *
+ * Section 4.7.1.10 "Dynamic relocations"
+ * R_AARCH64_COPY may only appear in executable objects where e_type is
+ * set to ET_EXEC.
+ *
+ * FLAG_EXE is set for both ET_DYN and ET_EXEC executables.
+ * We should explicitly disallow ET_DYN executables from having
+ * R_AARCH64_COPY relocations.
+ */
+ DL_ERR("%s R_AARCH64_COPY relocations only supported for ET_EXEC", si->name);
+ return -1;
+ }
+ count_relocation(kRelocCopy);
+ MARK(rela->r_offset);
+ TRACE_TYPE(RELO, "RELO COPY %16lx <- %ld @ %16lx %s\n",
+ reloc,
+ s->st_size,
+ (sym_addr + rela->r_addend),
+ sym_name);
+ if (reloc == (sym_addr + rela->r_addend)) {
+ Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
+
+ if (src == NULL) {
+ DL_ERR("%s R_AARCH64_COPY relocation source cannot be resolved", si->name);
+ return -1;
+ }
+ if (lsi->has_DT_SYMBOLIC) {
+ DL_ERR("%s invalid R_AARCH64_COPY relocation against DT_SYMBOLIC shared "
+ "library %s (built with -Bsymbolic?)", si->name, lsi->name);
+ return -1;
+ }
+ if (s->st_size < src->st_size) {
+ DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%ld < %ld)",
+ si->name, s->st_size, src->st_size);
+ return -1;
+ }
+ memcpy((void*)reloc, (void*)(src->st_value + lsi->load_bias), src->st_size);
+ } else {
+ DL_ERR("%s R_AARCH64_COPY relocation target cannot be resolved", si->name);
+ return -1;
+ }
+ break;
+ case R_AARCH64_TLS_TPREL64:
+ TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16lx <- %16lx - %16lx\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ rela->r_offset);
+ break;
+ case R_AARCH64_TLS_DTPREL32:
+ TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16lx <- %16lx - %16lx\n",
+ reloc,
+ (sym_addr + rela->r_addend),
+ rela->r_offset);
+ break;
+#elif defined(__x86_64__)
case R_X86_64_JUMP_SLOT:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
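
For context (and not part of the patch), a minimal sketch of the RELA arithmetic the AArch64 cases above implement, using hypothetical names: S is the resolved symbol address (sym_addr), A the addend (rela->r_addend), P the place (rela->r_offset), and B the load base (si->base).

    #include <stdint.h>

    // Stand-ins for the R_AARCH64_* groups handled above; the real
    // constants come from the ELF headers.
    enum RelocKind { kAbsolute, kPcRelative, kRelativeToBase };

    static uint64_t computed_value(RelocKind kind, uint64_t S, uint64_t A,
                                   uint64_t P, uint64_t B) {
      switch (kind) {
        case kAbsolute:       return S + A;      // ABS64/ABS32/ABS16, GLOB_DAT, JUMP_SLOT
        case kPcRelative:     return S + A - P;  // PREL64/PREL32/PREL16
        case kRelativeToBase: return B + A;      // RELATIVE
      }
      return 0;
    }

The 32- and 16-bit variants in the hunk above additionally range-check the result against INT32_MIN..UINT32_MAX and INT16_MIN..UINT16_MAX before storing it.
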
@@ -964,6 +1175,7 @@
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend - reloc;
break;
#endif
+
default:
DL_ERR("unknown reloc type %d @ %p (%d)", type, rela, (int) (rela - start));
return -1;
@@ -1934,7 +2146,11 @@
for (n = 0; n < 4096; n++) {
if (bitmask[n]) {
unsigned x = bitmask[n];
+#if defined(__LP64__)
+ for (i = 0; i < 32; i++) {
+#else
for (i = 0; i < 8; i++) {
+#endif
if (x & 1) {
count++;
}
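
A small sketch (again not part of the patch) of the widened counting pass above, assuming the same 4096-entry bitmask; under LP64 each entry now carries 32 page bits instead of 8:

    static unsigned count_marked_pages(const unsigned (&bits)[4096]) {
      unsigned count = 0;
      for (unsigned n = 0; n < 4096; ++n) {
        unsigned x = bits[n];
        for (int i = 0; i < 32; ++i) {   // 8 on 32-bit builds, as in the loop above
          count += x & 1;
          x >>= 1;
        }
      }
      return count;
    }
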
@@ -1990,7 +2206,6 @@
KernelArgumentBlock args(raw_args);
Elf_Addr linker_addr = args.getauxval(AT_BASE);
-
Elf_Ehdr* elf_hdr = reinterpret_cast<Elf_Ehdr*>(linker_addr);
Elf_Phdr* phdr = (Elf_Phdr*)((unsigned char*) linker_addr + elf_hdr->e_phoff);
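
Finally, for context (not part of the patch, names hypothetical), a minimal sketch of the startup step in the last hunk: the linker reads its own load address from AT_BASE in the auxiliary vector and locates its program-header table via e_phoff.

    #include <elf.h>
    #include <stddef.h>
    #include <stdint.h>

    // at_base is the AT_BASE auxv value, i.e. the address of the linker's
    // own ELF header.
    static const Elf64_Phdr* linker_phdr_table(uintptr_t at_base, size_t* count) {
      const Elf64_Ehdr* ehdr = reinterpret_cast<const Elf64_Ehdr*>(at_base);
      *count = ehdr->e_phnum;
      return reinterpret_cast<const Elf64_Phdr*>(at_base + ehdr->e_phoff);
    }
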