Linker support for MTE globals.

This patch adds the necessary bionic code for the linker to protect
global data using MTE.

The implementation is described in the MemtagABI addendum to the
AArch64 ELF ABI:
https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst

In summary, this patch includes:

1. When MTE globals are requested, the linker maps writable SHF_ALLOC
   sections as anonymous pages with PROT_MTE (copying the file contents
   into the anonymous mapping), rather than using a file-backed private
   mapping. This is required because file-backed mappings are not
   necessarily backed by tag-capable memory in the kernel. For sections
   already mapped by the kernel when the linker is invoked via PT_INTERP,
   we unmap the contents, remap a PROT_MTE anonymous mapping in its
   place, and reload the file contents from disk.

2. When MTE globals are requested, the linker tags regions of global
   memory (as described by SHT_AARCH64_MEMTAG_GLOBALS_DYNAMIC) with
   random tags, while ensuring that adjacent globals are never tagged
   with the same memory tag (to provide deterministic overflow
   detection). A worked example of the descriptor encoding follows this
   list.

3. Changes to RELATIVE, ABS64, and GLOB_DAT relocations to load and
   store tags in the right places. This ensures that address tags are
   materialized into the GOT entries as well. These changes are a
   functional no-op for existing binaries and on non-MTE-capable
   hardware.
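
As a worked example of the descriptor encoding (the offsets here are
invented for illustration): tagging a 16-byte global at offset 0x0 and a
48-byte global at offset 0x40 takes two ULEB128 descriptors, each packing
a granule step in its upper bits and a granule count in its low 3 bits:

  (0 << 3) | 1   // advance 0 granules, then tag 1 granule  (0x00-0x0f)
  (3 << 3) | 3   // advance 3 granules, then tag 3 granules (0x40-0x6f)

A granule count of 0 means the run is too long to encode inline; the real
count (minus one) follows as a separate ULEB128 value.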

Bug: N/A
Test: atest bionic-unit-tests CtsBionicTestCases --test-filter=*Memtag*

Change-Id: Id7b1a925339b14949d5a8f607dd86928624bda0e
diff --git a/linker/Android.bp b/linker/Android.bp
index 0ccd16d..4e158e4 100644
--- a/linker/Android.bp
+++ b/linker/Android.bp
@@ -328,6 +328,10 @@
 
     sanitize: {
         hwaddress: false,
+        // TODO(mitchp): For now, disable MTE globals in the linker. MTE globals
+        // change the relocation semantics, and global variables that are touched
+        // before the linker has had a chance to relocate itself have to be annotated.
+        memtag_globals: false,
     },
 
     static_libs: [
diff --git a/linker/dlfcn.cpp b/linker/dlfcn.cpp
index fee19f4..82f2728 100644
--- a/linker/dlfcn.cpp
+++ b/linker/dlfcn.cpp
@@ -331,6 +331,7 @@
     __libdl_info->gnu_bloom_filter_ = linker_si.gnu_bloom_filter_;
     __libdl_info->gnu_bucket_ = linker_si.gnu_bucket_;
     __libdl_info->gnu_chain_ = linker_si.gnu_chain_;
+    __libdl_info->memtag_dynamic_entries_ = linker_si.memtag_dynamic_entries_;
 
     __libdl_info->ref_count_ = 1;
     __libdl_info->strtab_size_ = linker_si.strtab_size_;
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 135eaa3..13ce6ef 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -51,6 +51,7 @@
 #include <android-base/scopeguard.h>
 #include <async_safe/log.h>
 #include <bionic/pthread_internal.h>
+#include <platform/bionic/mte.h>
 
 // Private C library headers.
 
@@ -2339,7 +2340,7 @@
         void* tls_block = get_tls_block_for_this_thread(tls_module, /*should_alloc=*/true);
         *symbol = static_cast<char*>(tls_block) + sym->st_value;
       } else {
-        *symbol = reinterpret_cast<void*>(found->resolve_symbol_address(sym));
+        *symbol = get_tagged_address(reinterpret_cast<void*>(found->resolve_symbol_address(sym)));
       }
       failure_guard.Disable();
       LD_LOG(kLogDlsym,
@@ -2770,8 +2771,11 @@
 }
 
 void soinfo::apply_relr_reloc(ElfW(Addr) offset) {
-  ElfW(Addr) address = offset + load_bias;
-  *reinterpret_cast<ElfW(Addr)*>(address) += load_bias;
+  ElfW(Addr)* tagged_address = reinterpret_cast<ElfW(Addr)*>(
+      get_tagged_address(reinterpret_cast<void*>(offset + load_bias)));
+  ElfW(Addr) tagged_result = reinterpret_cast<ElfW(Addr)>(
+      get_tagged_address(reinterpret_cast<void*>(*tagged_address + load_bias)));
+  *tagged_address = tagged_result;
 }
 
 // Process relocations in SHT_RELR section (experimental).
@@ -3304,6 +3308,18 @@
   // it each time we look up a symbol with a version.
   if (!validate_verdef_section(this)) return false;
 
+  // MTE globals requires remapping data segments with PROT_MTE as anonymous mappings, because
+  // file-backed mappings may not be backed by tag-capable memory (see "MAP_ANONYMOUS" in
+  // https://www.kernel.org/doc/html/latest/arch/arm64/memory-tagging-extension.html). This is only
+  // done if the binary has MTE globals (as evidenced by its dynamic table entries), as it destroys
+  // page sharing. It's also only done on devices that support MTE, because remapping pages is
+  // unnecessary on non-MTE devices (which may still run code built with MTE globals).
+  if (mte_supported() && memtag_globals() && memtag_globalssz() &&
+      remap_memtag_globals_segments(phdr, phnum, base) == 0) {
+    tag_globals();
+    protect_memtag_globals_ro_segments(phdr, phnum, base);
+  }
+
   flags_ |= FLAG_PRELINKED;
   return true;
 }
@@ -3375,6 +3391,14 @@
     return false;
   }
 
+  if (mte_supported() && memtag_globals() && memtag_globalssz()) {
+    // The linker's full path is not available until the main executable is loaded, as it's
+    // obtained from DT_INTERP. We manually rename the linker's segments later, but set a
+    // best-effort name here in case something needs debugging before the main executable loads.
+    const char* soname = is_linker() ? "linker" : get_realpath();
+    name_memtag_globals_segments(phdr, phnum, base, soname);
+  }
+
   /* Handle serializing/sharing the RELRO segment */
   if (extinfo && (extinfo->flags & ANDROID_DLEXT_WRITE_RELRO)) {
     if (phdr_table_serialize_gnu_relro(phdr, phnum, load_bias,
@@ -3407,6 +3431,47 @@
   return true;
 }
 
+// https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#global-variable-tagging
+void soinfo::tag_globals() {
+  if (is_linked()) return;
+  if (flags_ & FLAG_GLOBALS_TAGGED) return;
+  flags_ |= FLAG_GLOBALS_TAGGED;
+
+  constexpr size_t kTagGranuleSize = 16;
+  const uint8_t* descriptor_stream = reinterpret_cast<const uint8_t*>(memtag_globals());
+
+  if (memtag_globalssz() == 0) {
+    DL_ERR("Invalid memtag descriptor pool size: %zu", memtag_globalssz());
+    return;
+  }
+
+  uint64_t addr = 0;
+  uleb128_decoder decoder(descriptor_stream, memtag_globalssz());
+  // Don't ever generate tag zero, to easily distinguish between tagged and
+  // untagged globals in register/tag dumps.
+  uint64_t last_tag_mask = 1;
+  constexpr uint64_t kMemtagStepVarintReservedBits = 3;
+
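+  // Each ULEB128 value in the descriptor stream encodes the distance (in granules) from the end
+  // of the previous tagged region to the start of the next one in its upper bits, and the length
+  // of that region (in granules) in its low three bits. A length of zero means the region is too
+  // long to encode inline, and the real length (minus one) follows as a separate ULEB128 value.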
+  while (decoder.has_bytes()) {
+    uint64_t value = decoder.pop_front();
+    uint64_t step = value >> kMemtagStepVarintReservedBits;
+    uint64_t granules_to_tag = value & ((1 << kMemtagStepVarintReservedBits) - 1);
+    if (granules_to_tag == 0) {
+      granules_to_tag = decoder.pop_front() + 1;
+    }
+
+    addr += step * kTagGranuleSize;
+    void* tagged_addr = insert_random_tag(reinterpret_cast<void*>(addr + load_bias), last_tag_mask);
+    uint64_t tag = (reinterpret_cast<uint64_t>(tagged_addr) >> 56) & 0x0f;
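+    // Exclude the tag we just used from the next pick, so adjacent globals never share a tag and
+    // a linear overflow from one global into its neighbor is caught deterministically.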
+    last_tag_mask = 1 | (1 << tag);
+
+    for (size_t k = 0; k < granules_to_tag; k++) {
+      auto* granule = static_cast<uint8_t*>(tagged_addr) + k * kTagGranuleSize;
+      set_memory_tag(static_cast<void*>(granule));
+    }
+    addr += granules_to_tag * kTagGranuleSize;
+  }
+}
+
 static std::vector<android_namespace_t*> init_default_namespace_no_config(bool is_asan, bool is_hwasan) {
   g_default_namespace.set_isolated(false);
   auto default_ld_paths = is_asan ? kAsanDefaultLdPaths : (
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index 5f5eba4..c1a8929 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -44,7 +44,9 @@
 #include "linker_tls.h"
 #include "linker_utils.h"
 
+#include "platform/bionic/macros.h"
 #include "private/KernelArgumentBlock.h"
+#include "private/bionic_auxv.h"
 #include "private/bionic_call_ifunc_resolver.h"
 #include "private/bionic_globals.h"
 #include "private/bionic_tls.h"
@@ -78,7 +80,8 @@
 // TODO (dimtiry): remove somain, rename solist to solist_head
 static soinfo* solist;
 static soinfo* sonext;
-static soinfo* somain; // main process, always the one after libdl_info
+// main process, always the one after libdl_info
+BIONIC_USED_BEFORE_LINKER_RELOCATES static soinfo* somain;
 static soinfo* solinker;
 static soinfo* vdso; // vdso if present
 
@@ -390,9 +393,16 @@
     interp = kFallbackLinkerPath;
   }
   solinker->set_realpath(interp);
+  if (solinker->memtag_globals() && solinker->memtag_globalssz()) {
+    name_memtag_globals_segments(solinker->phdr, solinker->phnum, solinker->load_bias,
+                                 solinker->get_realpath());
+  }
   init_link_map_head(*solinker);
 
 #if defined(__aarch64__)
+  __libc_init_mte(somain->memtag_dynamic_entries(), somain->phdr, somain->phnum, somain->load_bias,
+                  args.argv);
+
   if (exe_to_load == nullptr) {
     // Kernel does not add PROT_BTI to executable pages of the loaded ELF.
     // Apply appropriate protections here if it is needed.
@@ -404,9 +414,6 @@
                      strerror(errno));
     }
   }
-
-  __libc_init_mte(somain->memtag_dynamic_entries(), somain->phdr, somain->phnum, somain->load_bias,
-                  args.argv);
 #endif
 
   // Register the main executable and the linker upfront to have
@@ -605,7 +612,7 @@
 const unsigned kRelSzTag = DT_RELSZ;
 #endif
 
-extern __LIBC_HIDDEN__ ElfW(Ehdr) __ehdr_start;
+BIONIC_USED_BEFORE_LINKER_RELOCATES extern __LIBC_HIDDEN__ ElfW(Ehdr) __ehdr_start;
 
 static void call_ifunc_resolvers_for_section(RelType* begin, RelType* end) {
   auto ehdr = reinterpret_cast<ElfW(Addr)>(&__ehdr_start);
@@ -663,6 +670,16 @@
   }
 }
 
+// Remapping MTE globals segments happens before the linker has relocated itself, so it can't use
+// memcpy() from string.h. This function is compiled with -ffreestanding.
+void linker_memcpy(void* dest, const void* src, size_t n) {
+  char* dest_bytes = reinterpret_cast<char*>(dest);
+  const char* src_bytes = reinterpret_cast<const char*>(src);
+  for (size_t i = 0; i < n; ++i) {
+    dest_bytes[i] = src_bytes[i];
+  }
+}
+
 // Detect an attempt to run the linker on itself. e.g.:
 //   /system/bin/linker64 /system/bin/linker64
 // Use priority-1 to run this constructor before other constructors.
diff --git a/linker/linker_main.h b/linker/linker_main.h
index 724f43c..53fc58b 100644
--- a/linker/linker_main.h
+++ b/linker/linker_main.h
@@ -70,3 +70,5 @@
 soinfo* solist_get_head();
 soinfo* solist_get_somain();
 soinfo* solist_get_vdso();
+
+void linker_memcpy(void* dest, const void* src, size_t n);
\ No newline at end of file
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 0ad0fd5..bec6994 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -36,10 +36,12 @@
 #include <sys/stat.h>
 #include <unistd.h>
 
+#include "android-base/unique_fd.h"
 #include "linker.h"
+#include "linker_debug.h"
 #include "linker_dlwarning.h"
 #include "linker_globals.h"
-#include "linker_debug.h"
+#include "linker_main.h"
 #include "linker_utils.h"
 
 #include "private/CFIShadow.h" // For kLibraryAlignment
@@ -862,6 +864,108 @@
   return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
 }
 
+static bool segment_needs_memtag_globals_remapping(const ElfW(Phdr) * phdr) {
+  // For now, MTE globals is only supported on writable data segments.
+  return phdr->p_type == PT_LOAD && !(phdr->p_flags & PF_X) && (phdr->p_flags & PF_W);
+}
+
+/* When MTE globals are requested by the binary, and when the hardware supports
+ * it, remap the executable's PT_LOAD data pages to have PROT_MTE.
+ *
+ * Input:
+ *   phdr_table  -> program header table
+ *   phdr_count  -> number of entries in the table
+ *   load_bias   -> load bias
+ * Return:
+ *   0 on success, -1 on failure (error code in errno).
+ */
+int remap_memtag_globals_segments(const ElfW(Phdr) * phdr_table, size_t phdr_count,
+                                  ElfW(Addr) load_bias) {
+  for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_table + phdr_count; phdr++) {
+    if (!segment_needs_memtag_globals_remapping(phdr)) {
+      continue;
+    }
+
+    uintptr_t seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    uintptr_t seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    size_t seg_page_aligned_size = seg_page_end - seg_page_start;
+
+    int prot = PFLAGS_TO_PROT(phdr->p_flags);
+    // For anonymous private mappings, it may be possible to simply mprotect()
+    // the PROT_MTE flag over the top. For file-based mappings, this will fail,
+    // and we'll need to fall back. We also allow PROT_WRITE here to allow
+    // writing memory tags (in `soinfo::tag_globals()`), and set these sections
+    // back to read-only after tags are applied (similar to RELRO).
+#if defined(__aarch64__)
+    prot |= PROT_MTE;
+#endif  // defined(__aarch64__)
+    if (mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_aligned_size,
+                 prot | PROT_WRITE) == 0) {
+      continue;
+    }
+
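+    // mprotect() failed, so this is a file-backed mapping that has to be replaced with an
+    // anonymous one. Stash the contents in a scratch mapping first, as the MAP_FIXED mapping
+    // below destroys the original contents.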
+    void* mapping_copy = mmap(nullptr, seg_page_aligned_size, PROT_READ | PROT_WRITE,
+                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    if (mapping_copy == MAP_FAILED) return -1;
+    linker_memcpy(mapping_copy, reinterpret_cast<void*>(seg_page_start), seg_page_aligned_size);
+
+    void* seg_addr = mmap(reinterpret_cast<void*>(seg_page_start), seg_page_aligned_size,
+                          prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    if (seg_addr == MAP_FAILED) return -1;
+
+    linker_memcpy(seg_addr, mapping_copy, seg_page_aligned_size);
+    munmap(mapping_copy, seg_page_aligned_size);
+  }
+
+  return 0;
+}
+
+void protect_memtag_globals_ro_segments(const ElfW(Phdr) * phdr_table, size_t phdr_count,
+                                        ElfW(Addr) load_bias) {
+  for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_table + phdr_count; phdr++) {
+    int prot = PFLAGS_TO_PROT(phdr->p_flags);
+    if (!segment_needs_memtag_globals_remapping(phdr) || (prot & PROT_WRITE)) {
+      continue;
+    }
+
+#if defined(__aarch64__)
+    prot |= PROT_MTE;
+#endif  // defined(__aarch64__)
+
+    uintptr_t seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    uintptr_t seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    size_t seg_page_aligned_size = seg_page_end - seg_page_start;
+    mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_aligned_size, prot);
+  }
+}
+
+void name_memtag_globals_segments(const ElfW(Phdr) * phdr_table, size_t phdr_count,
+                                  ElfW(Addr) load_bias, const char* soname) {
+  for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_table + phdr_count; phdr++) {
+    if (!segment_needs_memtag_globals_remapping(phdr)) {
+      continue;
+    }
+
+    uintptr_t seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    uintptr_t seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    size_t seg_page_aligned_size = seg_page_end - seg_page_start;
+
+    // For file-based mappings that we're now forcing to be anonymous mappings, set the VMA name to
+    // make debugging easier. The previous Android-kernel-specific implementation captured the name
+    // by pointer from userspace, which meant we had to keep the name alive in memory permanently.
+    // Since android13-5.10 (https://android-review.googlesource.com/c/kernel/common/+/1934723),
+    // though, we use the upstream kernel implementation
+    // (https://github.com/torvalds/linux/commit/9a10064f5625d5572c3626c1516e0bebc6c9fe9b), which
+    // copies the name into kernel memory. It's a safe bet that any device running Android 14 is
+    // using a kernel >= 5.10.
+    constexpr unsigned kVmaNameLimit = 80;
+    char vma_name[kVmaNameLimit];
+    async_safe_format_buffer(vma_name, kVmaNameLimit, "memtag:%s+0x%" PRIxPTR, soname,
+                             page_start(phdr->p_vaddr));
+    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<void*>(seg_page_start),
+          seg_page_aligned_size, vma_name);
+  }
+}
+
 /* Change the protection of all loaded segments in memory to writable.
  * This is useful before performing relocations. Once completed, you
  * will have to call phdr_table_protect_segments to restore the original
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 98bf020..092ea5d 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -148,3 +148,12 @@
 
 const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                             ElfW(Addr) load_bias);
+
+int remap_memtag_globals_segments(const ElfW(Phdr) * phdr_table, size_t phdr_count,
+                                  ElfW(Addr) load_bias);
+
+void protect_memtag_globals_ro_segments(const ElfW(Phdr) * phdr_table, size_t phdr_count,
+                                        ElfW(Addr) load_bias);
+
+void name_memtag_globals_segments(const ElfW(Phdr) * phdr_table, size_t phdr_count,
+                                  ElfW(Addr) load_bias, const char* soname);
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index 952dade..1ee7bc6 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -44,6 +44,8 @@
 #include "linker_soinfo.h"
 #include "private/bionic_globals.h"
 
+#include <platform/bionic/mte.h>
+
 static bool is_tls_reloc(ElfW(Word) type) {
   switch (type) {
     case R_GENERIC_TLS_DTPMOD:
@@ -157,12 +159,19 @@
 
 static bool process_relocation_general(Relocator& relocator, const rel_t& reloc);
 
+static ElfW(Addr) apply_memtag(ElfW(Addr) sym_addr) {
+  if (sym_addr == 0) return sym_addr;  // Handle undefined weak symbols.
+
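+  // On MTE-enabled hardware, get_tagged_address() loads the allocation tag for this address (via
+  // LDG) into the pointer's top byte; on other hardware it returns the address unchanged.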
+  return reinterpret_cast<ElfW(Addr)>(get_tagged_address(reinterpret_cast<void*>(sym_addr)));
+}
+
 template <RelocMode Mode>
 __attribute__((always_inline))
 static bool process_relocation_impl(Relocator& relocator, const rel_t& reloc) {
   constexpr bool IsGeneral = Mode == RelocMode::General;
 
-  void* const rel_target = reinterpret_cast<void*>(reloc.r_offset + relocator.si->load_bias);
+  void* const rel_target =
+      reinterpret_cast<void*>(apply_memtag(reloc.r_offset + relocator.si->load_bias));
   const uint32_t r_type = ELFW(R_TYPE)(reloc.r_info);
   const uint32_t r_sym = ELFW(R_SYM)(reloc.r_info);
 
@@ -325,7 +334,7 @@
     // common in non-platform binaries.
     if (r_type == R_GENERIC_ABSOLUTE) {
       count_relocation_if<IsGeneral>(kRelocAbsolute);
-      const ElfW(Addr) result = sym_addr + get_addend_rel();
+      const ElfW(Addr) result = apply_memtag(sym_addr) + get_addend_rel();
       trace_reloc("RELO ABSOLUTE %16p <- %16p %s",
                   rel_target, reinterpret_cast<void*>(result), sym_name);
       *static_cast<ElfW(Addr)*>(rel_target) = result;
@@ -335,16 +344,29 @@
       // document (IHI0044F) specifies that R_ARM_GLOB_DAT has an addend, but Bionic isn't adding
       // it.
       count_relocation_if<IsGeneral>(kRelocAbsolute);
-      const ElfW(Addr) result = sym_addr + get_addend_norel();
-      trace_reloc("RELO GLOB_DAT %16p <- %16p %s",
-                  rel_target, reinterpret_cast<void*>(result), sym_name);
+      ElfW(Addr) result = apply_memtag(sym_addr) + get_addend_norel();
+      trace_reloc("RELO GLOB_DAT %16p <- %16p %s", rel_target, reinterpret_cast<void*>(result),
+                  sym_name);
       *static_cast<ElfW(Addr)*>(rel_target) = result;
       return true;
     } else if (r_type == R_GENERIC_RELATIVE) {
       // In practice, r_sym is always zero, but if it weren't, the linker would still look up the
       // referenced symbol (and abort if the symbol isn't found), even though it isn't used.
       count_relocation_if<IsGeneral>(kRelocRelative);
-      const ElfW(Addr) result = relocator.si->load_bias + get_addend_rel();
+      ElfW(Addr) result;
+      // MTE globals reuses the place bits as additional tag-derivation metadata for
+      // R_AARCH64_RELATIVE relocations, which makes it incompatible with
+      // `-Wl,--apply-dynamic-relocs`. lld enforces this, but nothing stops Android binaries
+      // (particularly prebuilts) from building with this linker flag if they're not built with
+      // MTE globals. Thus, don't use the new relocation semantics if this DSO doesn't have MTE
+      // globals.
+      if (relocator.si->memtag_globals() && relocator.si->memtag_globalssz()) {
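+        // Per the MemtagABI, the place holds a signed offset such that (load_bias + addend +
+        // offset) points into the tagged region of the target object. Tag that address, then
+        // subtract the offset back off, so the result points at the intended target but carries
+        // the object's memory tag (even if the target itself, e.g. one past the end of an array,
+        // falls outside the tagged granules).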
+        int64_t* place = static_cast<int64_t*>(rel_target);
+        int64_t offset = *place;
+        result = apply_memtag(relocator.si->load_bias + get_addend_rel() + offset) - offset;
+      } else {
+        result = relocator.si->load_bias + get_addend_rel();
+      }
       trace_reloc("RELO RELATIVE %16p <- %16p",
                   rel_target, reinterpret_cast<void*>(result));
       *static_cast<ElfW(Addr)*>(rel_target) = result;
diff --git a/linker/linker_sleb128.h b/linker/linker_sleb128.h
index 6bb3199..f48fda8 100644
--- a/linker/linker_sleb128.h
+++ b/linker/linker_sleb128.h
@@ -69,3 +69,32 @@
   const uint8_t* current_;
   const uint8_t* const end_;
 };
+
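+// Decodes a stream of unsigned LEB128 values, as used by the descriptor pool referenced by
+// DT_AARCH64_MEMTAG_GLOBALS.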
+class uleb128_decoder {
+ public:
+  uleb128_decoder(const uint8_t* buffer, size_t count) : current_(buffer), end_(buffer + count) {}
+
+  uint64_t pop_front() {
+    uint64_t value = 0;
+
+    size_t shift = 0;
+    uint8_t byte;
+
+    do {
+      if (current_ >= end_) {
+        async_safe_fatal("uleb128_decoder ran out of bounds");
+      }
+      byte = *current_++;
+      value |= (static_cast<uint64_t>(byte & 127) << shift);
+      shift += 7;
+    } while (byte & 128);
+
+    return value;
+  }
+
+  bool has_bytes() { return current_ < end_; }
+
+ private:
+  const uint8_t* current_;
+  const uint8_t* const end_;
+};
diff --git a/linker/linker_soinfo.h b/linker/linker_soinfo.h
index 622719d..092cbcc 100644
--- a/linker/linker_soinfo.h
+++ b/linker/linker_soinfo.h
@@ -66,9 +66,10 @@
                                          // soinfo is executed and this flag is
                                          // unset.
 #define FLAG_PRELINKED        0x00000400 // prelink_image has successfully processed this soinfo
+#define FLAG_GLOBALS_TAGGED   0x00000800 // globals have been tagged by MTE.
 #define FLAG_NEW_SOINFO       0x40000000 // new soinfo format
 
-#define SOINFO_VERSION 6
+#define SOINFO_VERSION 7
 
 ElfW(Addr) call_ifunc_resolver(ElfW(Addr) resolver_addr);
 
@@ -257,6 +258,8 @@
                   const android_dlextinfo* extinfo, size_t* relro_fd_offset);
   bool protect_relro();
 
+  void tag_globals();
+
   void add_child(soinfo* child);
   void remove_all_links();