Revert "Linker support for MTE globals."

Revert submission 2709995

Reason for revert: linker crash in soinfo::apply_relr_reloc

Reverted changes: /q/submissionid:2709995

Bug: 314038442
Change-Id: I2c6ad7f46fb1174f009253602ad08ceb36aa7d71
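
For reviewers unfamiliar with the crash site: a minimal sketch of how
RELR-style relocations are applied in general (illustrative only; the
function name and structure here are assumptions, not the actual
soinfo::apply_relr_reloc). The relocator writes through every decoded
address, so a store through an address that is not yet usable will fault:

    #include <cstddef>
    #include <cstdint>

    // Illustrative RELR walk: even entries encode an address to relocate;
    // odd entries are bitmaps whose set bits relocate the following words.
    static void apply_relr(const uintptr_t* relr, size_t count,
                           uintptr_t load_bias) {
      constexpr size_t kWordSize = sizeof(uintptr_t);
      constexpr size_t kBitmapBits = 8 * kWordSize - 1;  // 63 on LP64
      uintptr_t base = 0;
      for (size_t i = 0; i < count; ++i) {
        uintptr_t entry = relr[i];
        if ((entry & 1) == 0) {
          // Address entry: relocate this word, then continue after it.
          *reinterpret_cast<uintptr_t*>(load_bias + entry) += load_bias;
          base = load_bias + entry + kWordSize;
        } else {
          // Bitmap entry: bit j relocates the word at base + j*kWordSize.
          uintptr_t bitmap = entry >> 1;
          for (size_t j = 0; j < kBitmapBits; ++j) {
            if ((bitmap >> j) & 1) {
              // Stores like this fault if the computed address is bad.
              *reinterpret_cast<uintptr_t*>(base + j * kWordSize) += load_bias;
            }
          }
          base += kBitmapBits * kWordSize;
        }
      }
    }
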
diff --git a/libc/bionic/__libc_init_main_thread.cpp b/libc/bionic/__libc_init_main_thread.cpp
index 0d557f1..1b539f2 100644
--- a/libc/bionic/__libc_init_main_thread.cpp
+++ b/libc/bionic/__libc_init_main_thread.cpp
@@ -44,7 +44,7 @@
 // Declared in "private/bionic_ssp.h".
 uintptr_t __stack_chk_guard = 0;
 
-BIONIC_USED_BEFORE_LINKER_RELOCATES static pthread_internal_t main_thread;
+static pthread_internal_t main_thread;
 
 // Setup for the main thread. For dynamic executables, this is called by the
 // linker _before_ libc is mapped in memory. This means that all writes to
diff --git a/libc/bionic/bionic_call_ifunc_resolver.cpp b/libc/bionic/bionic_call_ifunc_resolver.cpp
index 0b12088..3cfb8b5 100644
--- a/libc/bionic/bionic_call_ifunc_resolver.cpp
+++ b/libc/bionic/bionic_call_ifunc_resolver.cpp
@@ -30,7 +30,6 @@
 #include <sys/auxv.h>
 #include <sys/ifunc.h>
 
-#include "bionic/macros.h"
 #include "private/bionic_auxv.h"
 
 // This code is called in the linker before it has been relocated, so minimize calls into other
@@ -40,8 +39,8 @@
 ElfW(Addr) __bionic_call_ifunc_resolver(ElfW(Addr) resolver_addr) {
 #if defined(__aarch64__)
   typedef ElfW(Addr) (*ifunc_resolver_t)(uint64_t, __ifunc_arg_t*);
-  BIONIC_USED_BEFORE_LINKER_RELOCATES static __ifunc_arg_t arg;
-  BIONIC_USED_BEFORE_LINKER_RELOCATES static bool initialized = false;
+  static __ifunc_arg_t arg;
+  static bool initialized = false;
   if (!initialized) {
     initialized = true;
     arg._size = sizeof(__ifunc_arg_t);
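
The hunk above initializes a static __ifunc_arg_t once before handing it
to resolvers. For context, a sketch of the resolver side of that ABI
(memcpy_plain, memcpy_mte, and resolve_memcpy are hypothetical names,
not bionic functions):

    #include <stddef.h>
    #include <stdint.h>
    #include <asm/hwcap.h>  // HWCAP2_MTE
    #include <sys/ifunc.h>

    extern "C" void* memcpy_plain(void* dst, const void* src, size_t n);
    extern "C" void* memcpy_mte(void* dst, const void* src, size_t n);

    using memcpy_t = void* (*)(void*, const void*, size_t);

    // When the loader sets _IFUNC_ARG_HWCAP in the first argument, the
    // second argument points at a valid __ifunc_arg_t.
    extern "C" memcpy_t resolve_memcpy(uint64_t hwcap, __ifunc_arg_t* arg) {
      if ((hwcap & _IFUNC_ARG_HWCAP) && (arg->_hwcap2 & HWCAP2_MTE)) {
        return memcpy_mte;  // MTE-aware implementation
      }
      return memcpy_plain;
    }
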
diff --git a/libc/bionic/libc_init_static.cpp b/libc/bionic/libc_init_static.cpp
index 181a729..f46d702 100644
--- a/libc/bionic/libc_init_static.cpp
+++ b/libc/bionic/libc_init_static.cpp
@@ -410,7 +410,6 @@
   // We did not enable MTE, so we do not need to arm the upgrade timer.
   __libc_shared_globals()->heap_tagging_upgrade_timer_sec = 0;
 }
-
 #else   // __aarch64__
 void __libc_init_mte(const memtag_dynamic_entries_t*, const void*, size_t, uintptr_t, void*) {}
 #endif  // __aarch64__
@@ -505,6 +504,6 @@
 // compiled with -ffreestanding to avoid implicit string.h function calls. (It shouldn't strictly
 // be necessary, though.)
 __LIBC_HIDDEN__ libc_shared_globals* __libc_shared_globals() {
-  BIONIC_USED_BEFORE_LINKER_RELOCATES static libc_shared_globals globals;
+  static libc_shared_globals globals;
   return &globals;
 }
diff --git a/libc/platform/bionic/macros.h b/libc/platform/bionic/macros.h
index b781731..93268c1 100644
--- a/libc/platform/bionic/macros.h
+++ b/libc/platform/bionic/macros.h
@@ -97,17 +97,3 @@
 static inline T* _Nonnull untag_address(T* _Nonnull p) {
   return reinterpret_cast<T*>(untag_address(reinterpret_cast<uintptr_t>(p)));
 }
-
-// MTE globals protects internal and external global variables. One of the main
-// things that MTE globals does is force all global variable accesses to go
-// through the GOT. In the linker though, some global variables are accessed (or
-// address-taken) prior to relocations being processed. Because relocations
-// haven't run yet, the GOT entry hasn't been populated, and this leads to
-// crashes. Thus, any globals used by the linker prior to relocation should be
-// annotated with this attribute, which suppresses tagging of this global
-// variable, restoring the pc-relative address computation.
-#if __has_feature(memtag_globals)
-#define BIONIC_USED_BEFORE_LINKER_RELOCATES __attribute__((no_sanitize("memtag")))
-#else  // __has_feature(memtag_globals)
-#define BIONIC_USED_BEFORE_LINKER_RELOCATES
-#endif  // __has_feature(memtag_globals)
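
To make the removed comment concrete, a hedged sketch of the codegen
difference the attribute controls (g_tagged_global and g_early_global
are illustrative; the assembly in the comments is schematic and varies
by compiler version):

    static int g_tagged_global;

    int read_tagged() {
      // Under -fsanitize=memtag-globals this access goes through the GOT,
      // roughly:
      //   adrp x8, :got:g_tagged_global
      //   ldr  x8, [x8, :got_lo12:g_tagged_global]  // junk before relocation
      //   ldr  w0, [x8]
      return g_tagged_global;
    }

    __attribute__((no_sanitize("memtag"))) static int g_early_global;

    int read_early() {
      // The annotation restores the plain pc-relative sequence:
      //   adrp x8, g_early_global
      //   ldr  w0, [x8, :lo12:g_early_global]
      return g_early_global;
    }
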
diff --git a/libc/platform/bionic/mte.h b/libc/platform/bionic/mte.h
index dc25098..73cd821 100644
--- a/libc/platform/bionic/mte.h
+++ b/libc/platform/bionic/mte.h
@@ -28,7 +28,6 @@
 
 #pragma once
 
-#include <stddef.h>
 #include <sys/auxv.h>
 #include <sys/prctl.h>
 
@@ -47,36 +46,6 @@
   return supported;
 }
 
-inline void* get_tagged_address(const void* ptr) {
-#if defined(__aarch64__)
-  if (mte_supported()) {
-    __asm__ __volatile__(".arch_extension mte; ldg %0, [%0]" : "+r"(ptr));
-  }
-#endif  // aarch64
-  return const_cast<void*>(ptr);
-}
-
-// Inserts a random tag into `ptr`, using any of the set lower 16 bits in
-// `mask` to exclude the corresponding tag from being generated. Note: This does
-// not tag memory.
-inline void* insert_random_tag(const void* ptr, __attribute__((unused)) uint64_t mask = 0) {
-#if defined(__aarch64__)
-  if (mte_supported() && ptr) {
-    __asm__ __volatile__(".arch_extension mte; irg %0, %0, %1" : "+r"(ptr) : "r"(mask));
-  }
-#endif  // aarch64
-  return const_cast<void*>(ptr);
-}
-
-// Stores the address tag in `ptr` to the tag memory at `ptr`.
-inline void set_memory_tag(__attribute__((unused)) void* ptr) {
-#if defined(__aarch64__)
-  if (mte_supported()) {
-    __asm__ __volatile__(".arch_extension mte; stg %0, [%0]" : "+r"(ptr));
-  }
-#endif  // aarch64
-}
-
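
For reference, a minimal usage sketch of the three helpers removed
above (tag_granule is a hypothetical name; assumes p points at a
16-byte-aligned granule in an MTE-enabled mapping):

    // Tag one granule and return the pointer that must be used for all
    // subsequent accesses to it.
    static void* tag_granule(void* p) {
      void* tagged = insert_random_tag(p);  // IRG: pick a random logical tag
      set_memory_tag(tagged);               // STG: commit it to tag memory
      return tagged;
    }

    // A pointer whose tag was stripped can be re-derived from tag memory:
    //   void* usable = get_tagged_address(untagged_ptr);  // LDG
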
 #ifdef __aarch64__
 class ScopedDisableMTE {
   size_t prev_tco_;