Merge "Properly detect timeout in pthread_mutex_lock_timeout_np_impl"
diff --git a/ABI-bugs.txt b/ABI-bugs.txt
index 83ee952..51da9f0 100644
--- a/ABI-bugs.txt
+++ b/ABI-bugs.txt
@@ -8,8 +8,5 @@
   sigset_t is too small on ARM and x86 (but correct on MIPS), so support
   for real-time signals is broken. http://b/5828899
 
-  Too few TLS slots mean we can't allocate 128 pthread_key_t instances,
-  which POSIX says should be the minimum.
-
   atexit(3) handlers registered by a shared library aren't called on
   dlclose(3); this only affects ARM. http://b/4998315
diff --git a/benchmarks/Android.mk b/benchmarks/Android.mk
new file mode 100644
index 0000000..f64c108
--- /dev/null
+++ b/benchmarks/Android.mk
@@ -0,0 +1,50 @@
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifneq ($(BUILD_TINY_ANDROID), true)
+
+LOCAL_PATH := $(call my-dir)
+
+# -----------------------------------------------------------------------------
+# Benchmarks.
+# -----------------------------------------------------------------------------
+
+benchmark_c_flags = \
+    -O2 \
+    -Wall -Wextra \
+    -Werror \
+    -fno-builtin \
+    -std=gnu++11 \
+
+benchmark_src_files = \
+    benchmark_main.cpp \
+    math_benchmark.cpp \
+    property_benchmark.cpp \
+    string_benchmark.cpp \
+    time_benchmark.cpp \
+
+# Build benchmarks for the device (with bionic's .so). Run with:
+#   adb shell bionic-benchmarks
+include $(CLEAR_VARS)
+LOCAL_MODULE := bionic-benchmarks
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+LOCAL_CFLAGS += $(benchmark_c_flags)
+LOCAL_C_INCLUDES += external/stlport/stlport bionic/ bionic/libstdc++/include
+LOCAL_SHARED_LIBRARIES += libstlport
+LOCAL_SRC_FILES := $(benchmark_src_files)
+include $(BUILD_EXECUTABLE)
+
+endif # !BUILD_TINY_ANDROID
diff --git a/tests/benchmark.h b/benchmarks/benchmark.h
similarity index 100%
rename from tests/benchmark.h
rename to benchmarks/benchmark.h
diff --git a/tests/benchmark_main.cpp b/benchmarks/benchmark_main.cpp
similarity index 97%
rename from tests/benchmark_main.cpp
rename to benchmarks/benchmark_main.cpp
index e15a688..d8b8e58 100644
--- a/tests/benchmark_main.cpp
+++ b/benchmarks/benchmark_main.cpp
@@ -23,6 +23,8 @@
 #include <string>
 #include <map>
 
+#include <inttypes.h>
+
 static int64_t gBytesProcessed;
 static int64_t gBenchmarkTotalTimeNs;
 static int64_t gBenchmarkStartTimeNs;
@@ -162,8 +164,8 @@
     snprintf(full_name, sizeof(full_name), "%s", name_);
   }
 
-  printf("%-20s %10lld %10lld%s\n", full_name,
-         static_cast<int64_t>(iterations), gBenchmarkTotalTimeNs/iterations, throughput);
+  printf("%-20s %10d %10" PRId64 "%s\n", full_name,
+         iterations, gBenchmarkTotalTimeNs/iterations, throughput);
   fflush(stdout);
 }
 
diff --git a/tests/math_benchmark.cpp b/benchmarks/math_benchmark.cpp
similarity index 100%
rename from tests/math_benchmark.cpp
rename to benchmarks/math_benchmark.cpp
diff --git a/tests/property_benchmark.cpp b/benchmarks/property_benchmark.cpp
similarity index 100%
rename from tests/property_benchmark.cpp
rename to benchmarks/property_benchmark.cpp
diff --git a/tests/string_benchmark.cpp b/benchmarks/string_benchmark.cpp
similarity index 100%
rename from tests/string_benchmark.cpp
rename to benchmarks/string_benchmark.cpp
diff --git a/tests/time_benchmark.cpp b/benchmarks/time_benchmark.cpp
similarity index 100%
rename from tests/time_benchmark.cpp
rename to benchmarks/time_benchmark.cpp
diff --git a/libc/Android.mk b/libc/Android.mk
index 962365e..7c098a8 100644
--- a/libc/Android.mk
+++ b/libc/Android.mk
@@ -1,4 +1,4 @@
-LOCAL_PATH:= $(call my-dir)
+LOCAL_PATH := $(call my-dir)
 
 include $(LOCAL_PATH)/arch-$(TARGET_ARCH)/syscalls.mk
 
@@ -506,6 +506,40 @@
 
 endif # x86_64
 
+ifeq ($(TARGET_ARCH), aarch64)
+# TODO: Replace C stubs with optimised assembly
+libc_common_src_files += \
+    bionic/memchr.c   \
+    bionic/memcmp.c   \
+    bionic/memcpy.c   \
+    bionic/memmove.c  \
+    bionic/memrchr.c  \
+    bionic/memset.c   \
+    bionic/strchr.cpp \
+    bionic/strnlen.c  \
+    string/bcopy.c    \
+    string/index.c    \
+    string/memcmp16.c \
+    string/strcat.c   \
+    string/strcmp.c   \
+    string/strcpy.c   \
+    string/strlcat.c  \
+    string/strlcpy.c  \
+    string/strlen.c   \
+    string/strncat.c  \
+    string/strncmp.c  \
+    string/strncpy.c  \
+    string/strrchr.c  \
+    upstream-freebsd/lib/libc/string/wcscat.c \
+    upstream-freebsd/lib/libc/string/wcschr.c \
+    upstream-freebsd/lib/libc/string/wcscmp.c \
+    upstream-freebsd/lib/libc/string/wcscpy.c \
+    upstream-freebsd/lib/libc/string/wcslen.c \
+    upstream-freebsd/lib/libc/string/wcsrchr.c \
+    upstream-freebsd/lib/libc/string/wmemcmp.c \
+
+endif # aarch64
+
 ifeq ($(TARGET_ARCH),arm)
   ifeq ($(strip $(TARGET_CPU_VARIANT)),)
     $(warning TARGET_ARCH is arm, but TARGET_CPU_VARIANT is not defined)
@@ -571,10 +605,16 @@
   libc_common_cflags += -DMALLOC_ALIGNMENT=$(BOARD_MALLOC_ALIGNMENT)
 endif
 
+# crtbrand.c needs <stdint.h> and a #define for the platform SDK version.
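+# (It embeds that version in a .note.android.ident section in every linked binary.)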
+libc_crt_target_cflags := \
+    -I$(LOCAL_PATH)/include \
+    -I$(LOCAL_PATH)/arch-$(TARGET_ARCH)/include \
+    -DPLATFORM_SDK_VERSION=$(PLATFORM_SDK_VERSION) \
+
 ifeq ($(TARGET_ARCH),arm)
   libc_common_cflags += -DSOFTFLOAT
   libc_common_cflags += -fstrict-aliasing
-  libc_crt_target_cflags := -mthumb-interwork
+  libc_crt_target_cflags += -mthumb-interwork
 endif # arm
 
 ifeq ($(TARGET_ARCH),mips)
@@ -582,16 +622,16 @@
     libc_common_cflags += -DSOFTFLOAT
   endif
   libc_common_cflags += -fstrict-aliasing
-  libc_crt_target_cflags := $(TARGET_GLOBAL_CFLAGS)
+  libc_crt_target_cflags += $(TARGET_GLOBAL_CFLAGS)
 endif # mips
 
 ifeq ($(TARGET_ARCH),x86)
-  libc_crt_target_cflags := -m32
+  libc_crt_target_cflags += -m32
   libc_crt_target_ldflags := -melf_i386
 endif # x86
 
 ifeq ($(TARGET_ARCH),x86_64)
-  libc_crt_target_cflags := -m64
+  libc_crt_target_cflags += -m64
   libc_crt_target_ldflags := -melf_x86_64
 endif # x86_64
 
@@ -602,12 +642,6 @@
     libc_common_cflags += -DANDROID_SMP=0
 endif
 
-# crtbrand.c needs <stdint.h> and a #define for the platform SDK version.
-libc_crt_target_cflags += \
-    -I$(LOCAL_PATH)/include  \
-    -I$(LOCAL_PATH)/arch-$(TARGET_ARCH)/include \
-    -DPLATFORM_SDK_VERSION=$(PLATFORM_SDK_VERSION) \
-
 # Define some common conlyflags
 libc_common_conlyflags := \
     -std=gnu99
@@ -628,7 +662,7 @@
 # which are needed to build all other objects (shared/static libs and
 # executables)
 # ==========================================================================
-# ARM, MIPS, and x86 all need crtbegin_so/crtend_so.
+# AArch64, ARM, MIPS, and x86 all need crtbegin_so/crtend_so.
 #
 # For x86, the .init section must point to a function that calls all
 # entries in the .ctors section. (on ARM this is done through the
@@ -641,12 +675,16 @@
 libc_crt_target_crtbegin_file := $(LOCAL_PATH)/arch-common/bionic/crtbegin.c
 libc_crt_target_crtbegin_so_file := $(LOCAL_PATH)/arch-common/bionic/crtbegin_so.c
 
+ifeq ($(TARGET_ARCH),aarch64)
+    libc_crt_target_so_cflags :=
+    libc_crt_target_crtbegin_file := $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtbegin.c
+endif
 ifeq ($(TARGET_ARCH),arm)
     libc_crt_target_so_cflags :=
 endif
 ifeq ($(TARGET_ARCH),mips)
     libc_crt_target_so_cflags := -fPIC
-libc_crt_target_crtbegin_file := $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtbegin.c
+    libc_crt_target_crtbegin_file := $(LOCAL_PATH)/arch-$(TARGET_ARCH)/bionic/crtbegin.c
 endif
 ifeq ($(TARGET_ARCH),$(filter $(TARGET_ARCH),x86 x86_64))
     libc_crt_target_so_cflags := -fPIC
@@ -869,7 +907,7 @@
 # ========================================================
 # libc_bionic.a - home-grown C library code
 # ========================================================
-#
+
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES := $(libc_bionic_src_files)
@@ -984,12 +1022,14 @@
 LOCAL_C_INCLUDES := $(libc_common_c_includes)
 
 LOCAL_SRC_FILES := \
-	$(libc_arch_dynamic_src_files) \
-	$(libc_static_common_src_files) \
-	bionic/dlmalloc.c \
-	bionic/malloc_debug_common.cpp \
-	bionic/pthread_debug.cpp \
-	bionic/libc_init_dynamic.cpp
+    $(libc_arch_dynamic_src_files) \
+    $(libc_static_common_src_files) \
+    bionic/dlmalloc.c \
+    bionic/malloc_debug_common.cpp \
+    bionic/debug_mapinfo.cpp \
+    bionic/debug_stacktrace.cpp \
+    bionic/pthread_debug.cpp \
+    bionic/libc_init_dynamic.cpp \
 
 ifeq ($(TARGET_ARCH),arm)
 	LOCAL_NO_CRT := true
diff --git a/libc/SYSCALLS.TXT b/libc/SYSCALLS.TXT
index 1277b1a..19b4d22 100644
--- a/libc/SYSCALLS.TXT
+++ b/libc/SYSCALLS.TXT
@@ -93,8 +93,6 @@
 ssize_t     pwrite64|pwrite(int, void*, size_t, off_t) aarch64,x86_64
 int         close(int)                      all
 pid_t       getpid()    all
-void*       mmap(void*, size_t, int, int, int, long)  aarch64,x86_64
-void*       __mmap2:mmap2(void*, size_t, int, int, int, long)   arm,mips,x86
 int         munmap(void*, size_t)  all
 void*       mremap(void*, size_t, size_t, unsigned long)  all
 int         msync(const void*, size_t, int)    all
@@ -159,6 +157,9 @@
 int truncate(const char*, off_t) arm,mips,x86
 int truncate64(const char*, off64_t) arm,mips,x86
 int truncate|truncate64(const char*, off_t) aarch64,x86_64
+# (mmap only gets two lines because we only used the 64-bit variant on 32-bit systems.)
+void* __mmap2:mmap2(void*, size_t, int, int, int, long)   arm,mips,x86
+void* mmap|mmap64(void*, size_t, int, int, int, off_t)  aarch64,x86_64
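+# (__mmap2 takes its offset in 4096-byte pages rather than bytes, so the C wrapper has to convert.)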
 
 # file system
 int     chdir(const char*)              all
diff --git a/libc/arch-aarch64/aarch64.mk b/libc/arch-aarch64/aarch64.mk
new file mode 100644
index 0000000..847db98
--- /dev/null
+++ b/libc/arch-aarch64/aarch64.mk
@@ -0,0 +1,14 @@
+_LIBC_ARCH_COMMON_SRC_FILES := \
+    arch-aarch64/bionic/__bionic_clone.S \
+    arch-aarch64/bionic/bzero_aarch64.c \
+    arch-aarch64/bionic/cacheflush_aarch64.c \
+    arch-aarch64/bionic/_exit_with_stack_teardown.S \
+    arch-aarch64/bionic/futex_aarch64.S \
+    arch-aarch64/bionic/__get_sp.S \
+    arch-aarch64/bionic/__rt_sigreturn.S \
+    arch-aarch64/bionic/_setjmp.S \
+    arch-aarch64/bionic/setjmp.S \
+    arch-aarch64/bionic/__set_tls.c \
+    arch-aarch64/bionic/sigsetjmp.S \
+    arch-aarch64/bionic/syscall.S \
+    arch-aarch64/bionic/vfork.S \
diff --git a/libc/arch-aarch64/bionic/__bionic_clone.S b/libc/arch-aarch64/bionic/__bionic_clone.S
new file mode 100644
index 0000000..3f160f5
--- /dev/null
+++ b/libc/arch-aarch64/bionic/__bionic_clone.S
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <private/bionic_asm.h>
+
+// pid_t __bionic_clone(int flags, void* child_stack, pid_t* parent_tid, void* tls, pid_t* child_tid, int (*fn)(void*), void* arg);
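+// The raw clone syscall only takes the first five arguments; fn (x5) and arg (x6) are stashed at
+// the top of the child stack and reloaded in the child before it branches to __bionic_clone_entry.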
+
+ENTRY(__bionic_clone)
+    stp     x29, x30, [sp, #-16]!
+    mov     x29,  sp
+    str     x8,       [sp, #-16]!
+
+    /* store the child function and its argument on the child stack */
+    stp     x5, x6, [x1, #-16]
+
+    /* sys_clone */
+    uxtw    x0, w0
+    mov     x8, __NR_clone
+    svc     #0
+
+    /* check for child/parent */
+    cbz     x0, 1f
+
+    ldr     x8,       [sp], #16
+    ldp     x29, x30, [sp], #16
+
+    cmn     x0, #(MAX_ERRNO + 1)
+    cneg    x0, x0, hi
+    b.hi    __set_errno
+
+    ret
+
+    /* thread initialization - set the end of the frame record chain */
+1:
+    mov     x29, xzr
+    ldp     x0, x1, [sp, #-16]
+    b       __bionic_clone_entry
+END(__bionic_clone)
diff --git a/libc/arch-aarch64/bionic/__get_sp.S b/libc/arch-aarch64/bionic/__get_sp.S
new file mode 100644
index 0000000..66a0ff1
--- /dev/null
+++ b/libc/arch-aarch64/bionic/__get_sp.S
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+ENTRY(__get_sp)
+    mov x0, sp
+    ret
+END(__get_sp)
diff --git a/libc/arch-aarch64/bionic/__rt_sigreturn.S b/libc/arch-aarch64/bionic/__rt_sigreturn.S
new file mode 100644
index 0000000..be5d1fa
--- /dev/null
+++ b/libc/arch-aarch64/bionic/__rt_sigreturn.S
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <asm/unistd.h>
+#include <machine/asm.h>
+
+ENTRY_PRIVATE(__rt_sigreturn)
+  mov     x8, __NR_rt_sigreturn
+  svc     #0
+END(__rt_sigreturn)
diff --git a/libc/arch-aarch64/bionic/__set_tls.c b/libc/arch-aarch64/bionic/__set_tls.c
new file mode 100644
index 0000000..16d2357
--- /dev/null
+++ b/libc/arch-aarch64/bionic/__set_tls.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
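+// tpidr_el0 is the EL0 read/write thread-ID register that AArch64 uses to hold the TLS pointer.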
+void __set_tls(void* tls) {
+  asm("msr tpidr_el0, %0" : : "r" (tls));
+}
diff --git a/libc/arch-aarch64/bionic/_exit_with_stack_teardown.S b/libc/arch-aarch64/bionic/_exit_with_stack_teardown.S
new file mode 100644
index 0000000..083bcb6
--- /dev/null
+++ b/libc/arch-aarch64/bionic/_exit_with_stack_teardown.S
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <asm/unistd.h>
+#include <machine/asm.h>
+
+// void _exit_with_stack_teardown(void* stackBase, size_t stackSize, int status)
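+// The stack being torn down is the one we are running on, so the exit status is kept in a
+// callee-saved register (w28) across the munmap call.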
+ENTRY(_exit_with_stack_teardown)
+  mov     w28, w2
+  mov     w8, __NR_munmap
+  svc     #0
+  // If munmap failed, we ignore the failure and exit anyway.
+
+  sxtw    x0, w28
+  mov     w8, __NR_exit
+  svc     #0
+  // The exit syscall does not return.
+END(_exit_with_stack_teardown)
diff --git a/libc/arch-aarch64/bionic/_setjmp.S b/libc/arch-aarch64/bionic/_setjmp.S
new file mode 100644
index 0000000..ba08940
--- /dev/null
+++ b/libc/arch-aarch64/bionic/_setjmp.S
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+#include <machine/setjmp.h>
+
+/*
+ * C library - _setjmp, _longjmp
+ *
+ * _longjmp(jmp_buf state, int value)
+ * will generate a "return(v)" from the last call to _setjmp(state) by restoring
+ * registers from the stack. The previous signal state is NOT restored.
+ *
+ * NOTE: x0 return value
+ *       x9-x15 temporary registers
+ */
+
+ENTRY(_setjmp)
+    /* store magic number */
+    ldr     w9, .L_setjmp_magic
+    str     w9, [x0, #(_JB_MAGIC * 4)]
+
+    /* store core registers */
+    mov     x10, sp
+    stp     x30, x10, [x0, #(_JB_CORE_BASE * 4 + 16 * 0)]
+    stp     x28, x29, [x0, #(_JB_CORE_BASE * 4 + 16 * 1)]
+    stp     x26, x27, [x0, #(_JB_CORE_BASE * 4 + 16 * 2)]
+    stp     x24, x25, [x0, #(_JB_CORE_BASE * 4 + 16 * 3)]
+    stp     x22, x23, [x0, #(_JB_CORE_BASE * 4 + 16 * 4)]
+    stp     x20, x21, [x0, #(_JB_CORE_BASE * 4 + 16 * 5)]
+    str     x19,      [x0, #(_JB_CORE_BASE * 4 + 16 * 6)]
+
+    /* store floating point registers */
+    stp     d14, d15, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 0)]
+    stp     d12, d13, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 1)]
+    stp     d10, d11, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 2)]
+    stp     d8,  d9,  [x0, #(_JB_FLOAT_BASE * 4 + 16 * 3)]
+
+    mov     w0, wzr
+    ret
+END(_setjmp)
+
+.L_setjmp_magic:
+    .word   _JB_MAGIC__SETJMP
+
+ENTRY(_longjmp)
+    /* check magic */
+    ldr     w9, .L_setjmp_magic
+    ldr     w10, [x0, #(_JB_MAGIC * 4)]
+    cmp     w9, w10
+    b.ne    botch
+
+    /* restore core registers */
+    ldp     x30, x10, [x0, #(_JB_CORE_BASE * 4 + 16 * 0)]
+    mov     sp, x10
+    ldp     x28, x29, [x0, #(_JB_CORE_BASE * 4 + 16 * 1)]
+    ldp     x26, x27, [x0, #(_JB_CORE_BASE * 4 + 16 * 2)]
+    ldp     x24, x25, [x0, #(_JB_CORE_BASE * 4 + 16 * 3)]
+    ldp     x22, x23, [x0, #(_JB_CORE_BASE * 4 + 16 * 4)]
+    ldp     x20, x21, [x0, #(_JB_CORE_BASE * 4 + 16 * 5)]
+    ldr     x19,      [x0, #(_JB_CORE_BASE * 4 + 16 * 6)]
+
+    /* restore floating point registers */
+    ldp     d14, d15, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 0)]
+    ldp     d12, d13, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 1)]
+    ldp     d10, d11, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 2)]
+    ldp     d8,  d9,  [x0, #(_JB_FLOAT_BASE * 4 + 16 * 3)]
+
+    /* validate sp (sp mod 16 = 0) and lr (lr mod 4 = 0) */
+    tst     x30, #3
+    b.ne    botch
+    mov     x10, sp
+    tst     x10, #15
+    b.ne    botch
+
+    /* set return value */
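+    /* (a value of 0 would look like a direct return from _setjmp, so it is turned into 1) */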
+    cmp     w1, wzr
+    csinc   w0, w1, wzr, ne
+    ret
+
+    /* validation failed, die die die */
+botch:
+    bl      PIC_SYM(_C_LABEL(longjmperror), PLT)
+    bl      PIC_SYM(_C_LABEL(abort), PLT)
+    b        . - 8       /* Cannot get here */
+END(_longjmp)
diff --git a/libc/arch-aarch64/bionic/bzero_aarch64.c b/libc/arch-aarch64/bionic/bzero_aarch64.c
new file mode 100644
index 0000000..d403d0c
--- /dev/null
+++ b/libc/arch-aarch64/bionic/bzero_aarch64.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+void bzero(void* s, size_t n) {
+  memset(s, '\0', n);
+}
diff --git a/libc/arch-aarch64/bionic/cacheflush_aarch64.c b/libc/arch-aarch64/bionic/cacheflush_aarch64.c
new file mode 100644
index 0000000..fbbd788
--- /dev/null
+++ b/libc/arch-aarch64/bionic/cacheflush_aarch64.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* TODO: We can implement a specialised cacheflush() */
+int cacheflush(long start, long end, long flags __attribute__((unused))) {
+  __builtin___clear_cache((char*) start, (char*) end);
+  return 0;
+}
diff --git a/libc/arch-aarch64/bionic/crtbegin.c b/libc/arch-aarch64/bionic/crtbegin.c
new file mode 100644
index 0000000..73d2010
--- /dev/null
+++ b/libc/arch-aarch64/bionic/crtbegin.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "../../bionic/libc_init_common.h"
+#include <stddef.h>
+#include <stdint.h>
+
+__attribute__ ((section (".preinit_array")))
+void (*__PREINIT_ARRAY__)(void) = (void (*)(void)) -1;
+
+__attribute__ ((section (".init_array")))
+void (*__INIT_ARRAY__)(void) = (void (*)(void)) -1;
+
+__attribute__ ((section (".fini_array")))
+void (*__FINI_ARRAY__)(void) = (void (*)(void)) -1;
+
+
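+/* Called from _start below with the kernel-prepared stack (argc, argv, envp, auxv) as raw_args. */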
+__LIBC_HIDDEN__ void do_aarch64_start(void* raw_args) {
+  structors_array_t array;
+  array.preinit_array = &__PREINIT_ARRAY__;
+  array.init_array = &__INIT_ARRAY__;
+  array.fini_array = &__FINI_ARRAY__;
+  __libc_init(raw_args, NULL, &main, &array);
+}
+
+/*
+ * Put the value of sp in x0 and call do_aarch64_start(). The latter will then
+ * be able to access the stack as prepared by the kernel's execve system
+ * call (via the first argument).
+ */
+__asm__ (
+"        .text                      \n"
+"        .align  2                  \n"
+"        .global _start             \n"
+"        .hidden _start             \n"
+"        .type   _start, %function  \n"
+"_start:                            \n"
+"        add     x0, sp, xzr        \n"
+"        b       do_aarch64_start   \n"
+"        .size   _start, .-_start   \n"
+);
+
+#include "../../arch-common/bionic/__dso_handle.h"
+#include "../../arch-common/bionic/atexit.h"
diff --git a/libc/arch-aarch64/bionic/futex_aarch64.S b/libc/arch-aarch64/bionic/futex_aarch64.S
new file mode 100644
index 0000000..c25d2dd
--- /dev/null
+++ b/libc/arch-aarch64/bionic/futex_aarch64.S
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <asm/unistd.h>
+#include <linux/err.h>
+#include <machine/asm.h>
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+/*
+ * Syscall interface for fast userspace locks
+ *
+ * int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
+ * int __futex_wake(volatile void *ftx, int count);
+ * int __futex_syscall3(volatile void *ftx, int op, int val);
+ * int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout);
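+ *
+ * All of these funnel into the kernel's futex(uaddr, op, val, timeout) call; a failing
+ * syscall returns -errno, which is converted into errno via __set_errno.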
+ */
+
+ENTRY(__futex_syscall4)
+    /* create AArch64 PCS frame pointer */
+    stp    x29, x30, [sp, #-16]!
+    mov    x29,  sp
+
+    /* store x8 */
+    str    x8,       [sp, #-16]!
+
+    /* syscall No. in x8 */
+    mov     x8, __NR_futex
+    svc     #0
+
+    /* restore x8 */
+    ldr     x8,       [sp], #16
+    ldp     x29, x30, [sp], #16
+
+    /* check if syscall returned successfully */
+    cmn     x0, #(MAX_ERRNO + 1)
+    cneg    x0, x0, hi
+    b.hi    __set_errno
+
+    ret
+END(__futex_syscall4)
+
+ENTRY(__futex_syscall3)
+    /* __futex_syscall4 but with fewer arguments */
+    b __futex_syscall4
+END(__futex_syscall3)
+
+ENTRY(__futex_wait)
+    /* create AArch64 PCS frame pointer */
+    stp    x29, x30, [sp, #-16]!
+    mov    x29,  sp
+
+    /* store x8 */
+    str    x8,       [sp, #-16]!
+
+    /* arrange arguments as the kernel expects them */
+    mov x3, x2
+    mov w2, w1
+    mov w1, #FUTEX_WAIT
+
+    /* syscall No. in X8 */
+    mov     x8, __NR_futex
+    svc     #0
+
+    /* restore x8 */
+    ldr     x8,       [sp], #16
+    ldp     x29, x30, [sp], #16
+
+    /* check if syscall returned successfully */
+    cmn     x0, #(MAX_ERRNO + 1)
+    cneg    x0, x0, hi
+    b.hi    __set_errno
+
+    ret
+END(__futex_wait)
+
+ENTRY(__futex_wake)
+    /* create AArch64 PCS frame pointer */
+    stp    x29, x30, [sp, #-16]!
+    mov    x29,  sp
+
+    /* store x8 */
+    str    x8,       [sp, #-16]!
+
+    /* arrange arguments as the kernel expects them */
+    mov w2, w1
+    mov w1, #FUTEX_WAKE
+
+    /* syscall No. in X8 */
+    mov     x8, __NR_futex
+    svc     #0
+
+    /* restore x8 */
+    ldr     x8,       [sp], #16
+    ldp     x29, x30, [sp], #16
+
+    /* check if syscall returned successfully */
+    cmn     x0, #(MAX_ERRNO + 1)
+    cneg    x0, x0, hi
+    b.hi    __set_errno
+
+    ret
+END(__futex_wake)
diff --git a/libc/arch-aarch64/bionic/setjmp.S b/libc/arch-aarch64/bionic/setjmp.S
new file mode 100644
index 0000000..faa854f
--- /dev/null
+++ b/libc/arch-aarch64/bionic/setjmp.S
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+#include <machine/setjmp.h>
+
+/*
+ * C library - setjmp, longjmp
+ *
+ * longjmp(jmp_buf state, int value)
+ * will generate a "return(value)" from the last call to setjmp(state) by restoring
+ * registers from the stack. The previous signal state is NOT restored.
+ *
+ * NOTE: x0 return value
+ *       x9-x15 temporary registers
+ */
+
+ENTRY(setjmp)
+    /* block all signals and retrieve the signal mask */
+    stp     x0, x30, [sp, #-16]!
+
+    mov     x0, xzr
+    bl      PIC_SYM(_C_LABEL(sigblock), PLT)
+    mov     w1, w0
+
+    ldp     x0, x30, [sp], #16
+
+    /* store signal mask */
+    str     w1, [x0, #(_JB_SIGMASK * 4)]
+
+    /* store magic number */
+    ldr     w9, .L_setjmp_magic
+    str     w9, [x0, #(_JB_MAGIC * 4)]
+
+    /* store core registers */
+    mov     x10, sp
+    stp     x30, x10, [x0, #(_JB_CORE_BASE * 4 + 16 * 0)]
+    stp     x28, x29, [x0, #(_JB_CORE_BASE * 4 + 16 * 1)]
+    stp     x26, x27, [x0, #(_JB_CORE_BASE * 4 + 16 * 2)]
+    stp     x24, x25, [x0, #(_JB_CORE_BASE * 4 + 16 * 3)]
+    stp     x22, x23, [x0, #(_JB_CORE_BASE * 4 + 16 * 4)]
+    stp     x20, x21, [x0, #(_JB_CORE_BASE * 4 + 16 * 5)]
+    str     x19,      [x0, #(_JB_CORE_BASE * 4 + 16 * 6)]
+
+    /* store floating point registers */
+    stp     d14, d15, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 0)]
+    stp     d12, d13, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 1)]
+    stp     d10, d11, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 2)]
+    stp     d8,  d9,  [x0, #(_JB_FLOAT_BASE * 4 + 16 * 3)]
+
+    mov     w0, wzr
+    ret
+END(setjmp)
+
+.L_setjmp_magic:
+    .word   _JB_MAGIC__SETJMP
+
+ENTRY(longjmp)
+    /* check magic */
+    ldr     w9, .L_setjmp_magic
+    ldr     w10, [x0, #(_JB_MAGIC * 4)]
+    cmp     w9, w10
+    b.ne    botch
+
+    /* restore core registers */
+    ldp     x30, x10, [x0, #(_JB_CORE_BASE * 4 + 16 * 0)]
+    mov     sp, x10
+    ldp     x28, x29, [x0, #(_JB_CORE_BASE * 4 + 16 * 1)]
+    ldp     x26, x27, [x0, #(_JB_CORE_BASE * 4 + 16 * 2)]
+    ldp     x24, x25, [x0, #(_JB_CORE_BASE * 4 + 16 * 3)]
+    ldp     x22, x23, [x0, #(_JB_CORE_BASE * 4 + 16 * 4)]
+    ldp     x20, x21, [x0, #(_JB_CORE_BASE * 4 + 16 * 5)]
+    ldr     x19,      [x0, #(_JB_CORE_BASE * 4 + 16 * 6)]
+
+    /* restore floating point registers */
+    ldp     d14, d15, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 0)]
+    ldp     d12, d13, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 1)]
+    ldp     d10, d11, [x0, #(_JB_FLOAT_BASE * 4 + 16 * 2)]
+    ldp     d8,  d9,  [x0, #(_JB_FLOAT_BASE * 4 + 16 * 3)]
+
+    /* validate sp (sp mod 16 = 0) and lr (lr mod 4 = 0) */
+    tst     x30, #3
+    b.ne    botch
+    mov     x10, sp
+    tst     x10, #15
+    b.ne    botch
+
+    /* set return value */
+    cmp     w1, wzr
+    csinc   w0, w1, wzr, ne
+    ret
+
+    /* validation failed, die die die */
+botch:
+    bl      PIC_SYM(_C_LABEL(longjmperror), PLT)
+    bl      PIC_SYM(_C_LABEL(abort), PLT)
+    b       . - 8       /* Cannot get here */
+END(longjmp)
diff --git a/libc/arch-aarch64/bionic/sigsetjmp.S b/libc/arch-aarch64/bionic/sigsetjmp.S
new file mode 100644
index 0000000..ffc8984
--- /dev/null
+++ b/libc/arch-aarch64/bionic/sigsetjmp.S
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+#include <machine/setjmp.h>
+
+/*
+ * int sigsetjmp(sigjmp_buf env, int savesigs);
+ * void siglongjmp(sigjmp_buf env, int val);
+ */
+
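+/*
+ * sigsetjmp() tail-calls _setjmp() when savesigs is 0 and setjmp() otherwise;
+ * siglongjmp() inspects the saved magic word to pick the matching longjmp.
+ */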
+ENTRY(sigsetjmp)
+    cbz     w1, PIC_SYM(_C_LABEL(_setjmp), PLT)
+    b       PIC_SYM(_C_LABEL(setjmp), PLT)
+END(sigsetjmp)
+
+.L_setjmp_magic:
+    .word   _JB_MAGIC__SETJMP
+
+ENTRY(siglongjmp)
+    ldr     w2, .L_setjmp_magic
+    ldr     w3, [x0]
+    cmp     w2, w3
+    b.eq    PIC_SYM(_C_LABEL(_longjmp), PLT)
+    b       PIC_SYM(_C_LABEL(longjmp), PLT)
+END(siglongjmp)
diff --git a/libc/arch-aarch64/bionic/syscall.S b/libc/arch-aarch64/bionic/syscall.S
new file mode 100644
index 0000000..f7ce010
--- /dev/null
+++ b/libc/arch-aarch64/bionic/syscall.S
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/err.h>
+#include <machine/asm.h>
+
+ENTRY(syscall)
+    /* create AAPCS frame pointer */
+    stp     x29, x30, [sp, #-16]!
+    mov     x29,  sp
+
+    /* store x8 */
+    str     x8,       [sp, #-16]!
+
+    /* Move syscall No. from x0 to x8 */
+    mov     x8, x0
+    /* Move syscall parameters from x1 thru x6 to x0 thru x5 */
+    mov     x0, x1
+    mov     x1, x2
+    mov     x2, x3
+    mov     x3, x4
+    mov     x4, x5
+    mov     x5, x6
+    svc     #0
+
+    /* restore x8 */
+    ldr     x8,       [sp], #16
+    ldp     x29, x30, [sp], #16
+
+    /* check if syscall returned successfully */
+    cmn     x0, #(MAX_ERRNO + 1)
+    cneg    x0, x0, hi
+    b.hi    __set_errno
+
+    ret
+END(syscall)
diff --git a/libc/arch-aarch64/bionic/vfork.S b/libc/arch-aarch64/bionic/vfork.S
new file mode 100644
index 0000000..964f38d
--- /dev/null
+++ b/libc/arch-aarch64/bionic/vfork.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <asm/unistd.h>
+#include <linux/err.h>
+#include <machine/asm.h>
+
+ENTRY(vfork)
+    mov     x0, #0x4111     /* CLONE_VM | CLONE_VFORK | SIGCHLD */
+    mov     x1, xzr
+    mov     x2, xzr
+    mov     x3, xzr
+    mov     x4, xzr
+
+    str     x8, [sp, #-16]!
+    mov     x8, __NR_clone
+
+    svc     #0
+    ldr     x8, [sp], #16
+
+    /* check if syscall returned successfully */
+    cmn     x0, #(MAX_ERRNO + 1)
+    cneg    x0, x0, hi
+    b.hi    __set_errno
+
+    ret
+END(vfork)
diff --git a/libc/arch-aarch64/include/machine/_types.h b/libc/arch-aarch64/include/machine/_types.h
new file mode 100644
index 0000000..0a462ab
--- /dev/null
+++ b/libc/arch-aarch64/include/machine/_types.h
@@ -0,0 +1,111 @@
+/*	$OpenBSD: _types.h,v 1.3 2006/02/14 18:12:58 miod Exp $	*/
+
+/*-
+ * Copyright (c) 1990, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)types.h	8.3 (Berkeley) 1/5/94
+ *	@(#)ansi.h	8.2 (Berkeley) 1/4/94
+ */
+
+#ifndef _AARCH64__TYPES_H_
+#define _AARCH64__TYPES_H_
+
+/* 7.18.1.1 Exact-width integer types */
+typedef	__signed char		__int8_t;
+typedef	unsigned char		__uint8_t;
+typedef	short			__int16_t;
+typedef	unsigned short		__uint16_t;
+typedef	int			__int32_t;
+typedef	unsigned int		__uint32_t;
+typedef	long			__int64_t;
+typedef	unsigned long		__uint64_t;
+
+/* 7.18.1.2 Minimum-width integer types */
+typedef	__int8_t		__int_least8_t;
+typedef	__uint8_t		__uint_least8_t;
+typedef	__int16_t		__int_least16_t;
+typedef	__uint16_t		__uint_least16_t;
+typedef	__int32_t		__int_least32_t;
+typedef	__uint32_t		__uint_least32_t;
+typedef	__int64_t		__int_least64_t;
+typedef	__uint64_t		__uint_least64_t;
+
+/* 7.18.1.3 Fastest minimum-width integer types */
+typedef	__int32_t		__int_fast8_t;
+typedef	__uint32_t		__uint_fast8_t;
+typedef	__int32_t		__int_fast16_t;
+typedef	__uint32_t		__uint_fast16_t;
+typedef	__int32_t		__int_fast32_t;
+typedef	__uint32_t		__uint_fast32_t;
+typedef	__int64_t		__int_fast64_t;
+typedef	__uint64_t		__uint_fast64_t;
+
+/* 7.18.1.4 Integer types capable of holding object pointers */
+typedef	long			__intptr_t;
+typedef	unsigned long		__uintptr_t;
+
+/* 7.18.1.5 Greatest-width integer types */
+typedef	__int64_t		__intmax_t;
+typedef	__uint64_t		__uintmax_t;
+
+/* Register size */
+typedef	__int64_t		__register_t;
+
+/* VM system types */
+typedef	unsigned long		__vaddr_t;
+typedef	unsigned long		__paddr_t;
+typedef	unsigned long		__vsize_t;
+typedef	unsigned long		__psize_t;
+
+/* Standard system types */
+typedef	long			__clock_t;
+typedef	int	    		__clockid_t;
+typedef	double			__double_t;
+typedef	float			__float_t;
+typedef	long			__ptrdiff_t;
+typedef	long			__time_t;
+typedef	int	    		__timer_t;
+#if defined(__GNUC__) && __GNUC__ >= 3
+typedef	__builtin_va_list	__va_list;
+#else
+typedef	char *			__va_list;
+#endif
+
+/* Wide character support types */
+#ifndef __cplusplus
+typedef	int			__wchar_t;
+#endif
+typedef	int			__wint_t;
+typedef	int			__rune_t;
+typedef	void *			__wctrans_t;
+typedef	void *			__wctype_t;
+
+#define _BYTE_ORDER _LITTLE_ENDIAN
+
+#endif  /* _AARCH64__TYPES_H_ */
+
diff --git a/libc/arch-aarch64/include/machine/asm.h b/libc/arch-aarch64/include/machine/asm.h
new file mode 100644
index 0000000..3f8b908
--- /dev/null
+++ b/libc/arch-aarch64/include/machine/asm.h
@@ -0,0 +1,128 @@
+/*	$OpenBSD: asm.h,v 1.1 2004/02/01 05:09:49 drahn Exp $	*/
+/*	$NetBSD: asm.h,v 1.4 2001/07/16 05:43:32 matt Exp $	*/
+
+/*
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from: @(#)asm.h	5.5 (Berkeley) 5/7/91
+ */
+
+#ifndef _AARCH64_ASM_H_
+#define _AARCH64_ASM_H_
+
+/* TODO: Add cfi directives for creating/restoring FP */
+#ifdef __ELF__
+# define	_C_LABEL(x)	x
+#else
+# ifdef __STDC__
+#  define	_C_LABEL(x)	_ ## x
+# else
+#  define	_C_LABEL(x)	_/**/x
+# endif
+#endif
+#define	_ASM_LABEL(x)	x
+
+#ifdef __STDC__
+# define	__CONCAT(x,y)	x ## y
+# define	__STRING(x)	#x
+#else
+# define	__CONCAT(x,y)	x/**/y
+# define	__STRING(x)	"x"
+#endif
+
+#ifndef _ALIGN_TEXT
+# define	_ALIGN_TEXT	.align 0
+#endif
+
+#define	_ASM_TYPE_FUNCTION	%function
+#define	_ASM_TYPE_OBJECT	%object
+#define	_ENTRY(x) \
+	.text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x: .cfi_startproc
+
+#define	_ASM_SIZE(x)	.size x, .-x;
+
+#define _END(x) \
+	.cfi_endproc; \
+	_ASM_SIZE(x)
+
+#define	ENTRY(y)	_ENTRY(_C_LABEL(y));
+#define	ENTRY_NP(y)	_ENTRY(_C_LABEL(y))
+#define	END(y)		_END(_C_LABEL(y))
+#define	ASENTRY(y)	_ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
+#define	ASENTRY_NP(y)	_ENTRY(_ASM_LABEL(y))
+#define	ASEND(y)	_END(_ASM_LABEL(y))
+
+#ifdef __ELF__
+#define	ENTRY_PRIVATE(y)  ENTRY(y); .hidden _C_LABEL(y)
+#else
+#define	ENTRY_PRIVATE(y)  ENTRY(y)
+#endif
+
+#define	ASMSTR		.asciz
+
+#if defined(__ELF__) && defined(PIC)
+#ifdef __STDC__
+#define	PIC_SYM(x,y)	x ## ( ## y ## )
+#else
+#define	PIC_SYM(x,y)	x/**/(/**/y/**/)
+#endif
+#else
+#define	PIC_SYM(x,y)	x
+#endif
+
+#ifdef __ELF__
+#define	RCSID(x)	.section ".ident"; .asciz x
+#else
+#define	RCSID(x)	.text; .asciz x
+#endif
+
+#ifdef __ELF__
+#define	WEAK_ALIAS(alias,sym)						\
+	.weak alias;							\
+	alias = sym
+#endif
+
+#ifdef __STDC__
+#define	WARN_REFERENCES(sym,msg)					\
+	.stabs msg ## ,30,0,0,0 ;					\
+	.stabs __STRING(_C_LABEL(sym)) ## ,1,0,0,0
+#elif defined(__ELF__)
+#define	WARN_REFERENCES(sym,msg)					\
+	.stabs msg,30,0,0,0 ;						\
+	.stabs __STRING(sym),1,0,0,0
+#else
+#define	WARN_REFERENCES(sym,msg)					\
+	.stabs msg,30,0,0,0 ;						\
+	.stabs __STRING(_/**/sym),1,0,0,0
+#endif /* __STDC__ */
+
+#endif /* _AARCH64_ASM_H_ */
+
diff --git a/libc/arch-aarch64/include/machine/elf_machdep.h b/libc/arch-aarch64/include/machine/elf_machdep.h
new file mode 100644
index 0000000..2bf8189
--- /dev/null
+++ b/libc/arch-aarch64/include/machine/elf_machdep.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _AARCH64_ELF_MACHDEP_H_
+#define _AARCH64_ELF_MACHDEP_H_
+
+#if defined(__AARCH64EB__)
+#define ELF64_MACHDEP_ENDIANNESS    ELFDATA2MSB
+#else
+#define ELF64_MACHDEP_ENDIANNESS    ELFDATA2LSB
+#endif
+
+#define ELF64_MACHDEP_ID_CASES                      \
+    case EM_AARCH64:                                \
+        break;
+
+#define ELF64_MACHDEP_ID    EM_AARCH64
+
+#define ARCH_ELFSIZE        64  /* MD native binary size */
+
+/* Null relocations */
+#define R_ARM_NONE                      0
+#define R_AARCH64_NONE                  256
+
+/* Static Data relocations */
+#define R_AARCH64_ABS64                 257
+#define R_AARCH64_ABS32                 258
+#define R_AARCH64_ABS16                 259
+#define R_AARCH64_PREL64                260
+#define R_AARCH64_PREL32                261
+#define R_AARCH64_PREL16                262
+
+#define R_AARCH64_MOVW_UABS_G0          263
+#define R_AARCH64_MOVW_UABS_G0_NC       264
+#define R_AARCH64_MOVW_UABS_G1          265
+#define R_AARCH64_MOVW_UABS_G1_NC       266
+#define R_AARCH64_MOVW_UABS_G2          267
+#define R_AARCH64_MOVW_UABS_G2_NC       268
+#define R_AARCH64_MOVW_UABS_G3          269
+#define R_AARCH64_MOVW_SABS_G0          270
+#define R_AARCH64_MOVW_SABS_G1          271
+#define R_AARCH64_MOVW_SABS_G2          272
+
+/* PC-relative addresses */
+#define R_AARCH64_LD_PREL_LO19          273
+#define R_AARCH64_ADR_PREL_LO21         274
+#define R_AARCH64_ADR_PREL_PG_HI21      275
+#define R_AARCH64_ADR_PREL_PG_HI21_NC   276
+#define R_AARCH64_ADD_ABS_LO12_NC       277
+#define R_AARCH64_LDST8_ABS_LO12_NC     278
+
+/* Control-flow relocations */
+#define R_AARCH64_TSTBR14               279
+#define R_AARCH64_CONDBR19              280
+#define R_AARCH64_JUMP26                282
+#define R_AARCH64_CALL26                283
+#define R_AARCH64_LDST16_ABS_LO12_NC    284
+#define R_AARCH64_LDST32_ABS_LO12_NC    285
+#define R_AARCH64_LDST64_ABS_LO12_NC    286
+#define R_AARCH64_LDST128_ABS_LO12_NC   299
+
+#define R_AARCH64_MOVW_PREL_G0          287
+#define R_AARCH64_MOVW_PREL_G0_NC       288
+#define R_AARCH64_MOVW_PREL_G1          289
+#define R_AARCH64_MOVW_PREL_G1_NC       290
+#define R_AARCH64_MOVW_PREL_G2          291
+#define R_AARCH64_MOVW_PREL_G2_NC       292
+#define R_AARCH64_MOVW_PREL_G3          293
+
+/* Dynamic relocations */
+#define R_AARCH64_COPY                  1024
+#define R_AARCH64_GLOB_DAT              1025    /* Create GOT entry.  */
+#define R_AARCH64_JUMP_SLOT             1026    /* Create PLT entry.  */
+#define R_AARCH64_RELATIVE              1027    /* Adjust by program base.  */
+#define R_AARCH64_TLS_TPREL64           1030
+#define R_AARCH64_TLS_DTPREL32          1031
+
+#define R_TYPE(name)        __CONCAT(R_AARCH64_,name)
+
+#endif /* _AARCH64_ELF_MACHDEP_H_ */
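The dynamic relocation constants at the end of this header are the ones a loader has to process at runtime. A schematic C sketch of how they might be dispatched follows; the helper and its arguments are hypothetical, not bionic's actual linker code:

    #include <stdint.h>

    /* Values from the header above (repeated so the sketch is standalone). */
    #define R_AARCH64_GLOB_DAT   1025
    #define R_AARCH64_JUMP_SLOT  1026
    #define R_AARCH64_RELATIVE   1027

    /* Hypothetical helper: apply one AArch64 dynamic relocation at 'where'.
     * Real linker code also handles symbol lookup, TLS relocations, etc. */
    static void apply_dynamic_reloc(uint64_t* where, uint32_t type,
                                    uint64_t sym_value, uint64_t load_bias,
                                    uint64_t addend) {
      switch (type) {
        case R_AARCH64_GLOB_DAT:    /* fill a GOT entry */
        case R_AARCH64_JUMP_SLOT:   /* fill a PLT entry */
          *where = sym_value + addend;
          break;
        case R_AARCH64_RELATIVE:    /* adjust by the load bias (program base) */
          *where = load_bias + addend;
          break;
        default:
          break;  /* other relocation types not handled in this sketch */
      }
    }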
diff --git a/libc/arch-aarch64/include/machine/endian.h b/libc/arch-aarch64/include/machine/endian.h
new file mode 100644
index 0000000..87a038d
--- /dev/null
+++ b/libc/arch-aarch64/include/machine/endian.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _AARCH64_ENDIAN_H_
+#define _AARCH64_ENDIAN_H_
+
+#include <sys/types.h>
+#include <sys/endian.h>
+
+#ifdef __GNUC__
+
+#define __swap16md(x) ({                                        \
+    register u_int16_t _x = (x);                                \
+    __asm volatile ("rev16 %0, %0" : "+r" (_x));                \
+    _x;                                                         \
+})
+
+/* Use GCC builtins */
+#define __swap32md(x) __builtin_bswap32(x)
+#define __swap64md(x) __builtin_bswap64(x)
+
+/* Tell sys/endian.h we have MD variants of the swap macros.  */
+#define MD_SWAP
+
+#endif  /* __GNUC__ */
+
+#if defined(__AARCH64EB__)
+#define _BYTE_ORDER _BIG_ENDIAN
+#else
+#define _BYTE_ORDER _LITTLE_ENDIAN
+#endif
+
+#endif /* _AARCH64_ENDIAN_H_ */
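The __swap16md/__swap32md/__swap64md definitions above only select the instruction used; semantically they are plain byte reversals. A small portable C sketch of the same operation, for reference:

    #include <stdint.h>
    #include <stdio.h>

    /* Equivalent of __swap32md() without the "rev"/__builtin_bswap32 fast path. */
    static uint32_t swap32(uint32_t x) {
      return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
             ((x << 8) & 0x00ff0000u) | (x << 24);
    }

    int main(void) {
      printf("%08x\n", swap32(0x11223344u));  /* prints 44332211 */
      return 0;
    }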
diff --git a/libc/arch-aarch64/include/machine/exec.h b/libc/arch-aarch64/include/machine/exec.h
new file mode 100644
index 0000000..7437626
--- /dev/null
+++ b/libc/arch-aarch64/include/machine/exec.h
@@ -0,0 +1,50 @@
+/*	$OpenBSD: exec.h,v 1.9 2003/04/17 03:42:14 drahn Exp $	*/
+/*	$NetBSD: exec.h,v 1.6 1994/10/27 04:16:05 cgd Exp $	*/
+
+/*
+ * Copyright (c) 1993 Christopher G. Demetriou
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AARCH64_EXEC_H_
+#define _AARCH64_EXEC_H_
+
+#define	__LDPGSZ		4096
+
+#define	NATIVE_EXEC_ELF
+
+#define	ARCH_ELFSIZE		64
+
+#define	ELF_TARG_CLASS		ELFCLASS64		/* 64-bit objects */
+#define	ELF_TARG_DATA		ELFDATA2LSB
+#define	ELF_TARG_MACH		EM_AARCH64
+
+#define	_NLIST_DO_AOUT
+#define	_NLIST_DO_ELF
+
+#define	_KERN_DO_AOUT
+#define	_KERN_DO_ELF64
+
+#endif  /* _AARCH64_EXEC_H_ */
diff --git a/libc/arch-aarch64/include/machine/ieee.h b/libc/arch-aarch64/include/machine/ieee.h
new file mode 100644
index 0000000..cf2c1fc
--- /dev/null
+++ b/libc/arch-aarch64/include/machine/ieee.h
@@ -0,0 +1,191 @@
+/*	$OpenBSD: ieee.h,v 1.1 2004/02/01 05:09:49 drahn Exp $	*/
+/*	$NetBSD: ieee.h,v 1.2 2001/02/21 17:43:50 bjh21 Exp $	*/
+
+/*
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)ieee.h	8.1 (Berkeley) 6/11/93
+ */
+
+/*
+ * ieee.h defines the machine-dependent layout of the machine's IEEE
+ * floating point.
+ */
+
+/*
+ * Define the number of bits in each fraction and exponent.
+ *
+ *		     k	         k+1
+ * Note that  1.0 x 2  == 0.1 x 2      and that denorms are represented
+ *
+ *					  (-exp_bias+1)
+ * as fractions that look like 0.fffff x 2             .  This means that
+ *
+ *			 -126
+ * the number 0.10000 x 2    , for instance, is the same as the normalized
+ *
+ *		-127			   -128
+ * float 1.0 x 2    .  Thus, to represent 2    , we need one leading zero
+ *
+ *				  -129
+ * in the fraction; to represent 2    , we need two, and so on.  This
+ *
+ *						     (-exp_bias-fracbits+1)
+ * implies that the smallest denormalized number is 2
+ *
+ * for whichever format we are talking about: for single precision, for
+ *
+ *						-126		-149
+ * instance, we get .00000000000000000000001 x 2    , or 1.0 x 2    , and
+ *
+ * -149 == -127 - 23 + 1.
+ */
+
+/*
+ * The ARM has two sets of FP data formats.  The FPA supports 32-bit, 64-bit
+ * and 96-bit IEEE formats, with the words in big-endian order.  VFP supports
+ * 32-bit and 64-bit IEEE formats with the words in the CPU's native byte
+ * order.
+ *
+ * The FPA also has two packed decimal formats, but we ignore them here.
+ */
+
+#define	SNG_EXPBITS	8
+#define	SNG_FRACBITS	23
+
+#define	DBL_EXPBITS	11
+#define	DBL_FRACBITS	52
+
+#ifndef __VFP_FP__
+#define	E80_EXPBITS	15
+#define	E80_FRACBITS	64
+
+#define	EXT_EXPBITS	15
+#define	EXT_FRACBITS	112
+#endif
+
+struct ieee_single {
+	u_int	sng_frac:23;
+	u_int	sng_exponent:8;
+	u_int	sng_sign:1;
+};
+
+#ifdef __VFP_FP__
+struct ieee_double {
+#ifdef __AARCH64EB__
+	u_int	dbl_sign:1;
+	u_int	dbl_exp:11;
+	u_int	dbl_frach:20;
+	u_int	dbl_fracl;
+#else /* !__AARCH64EB__ */
+	u_int	dbl_fracl;
+	u_int	dbl_frach:20;
+	u_int	dbl_exp:11;
+	u_int	dbl_sign:1;
+#endif /* !__AARCH64EB__ */
+};
+#else /* !__VFP_FP__ */
+struct ieee_double {
+	u_int	dbl_frach:20;
+	u_int	dbl_exp:11;
+	u_int	dbl_sign:1;
+	u_int	dbl_fracl;
+};
+
+union ieee_double_u {
+	double                  dblu_d;
+	struct ieee_double      dblu_dbl;
+};
+
+
+struct ieee_e80 {
+	u_int	e80_exp:15;
+	u_int	e80_zero:16;
+	u_int	e80_sign:1;
+	u_int	e80_frach:31;
+	u_int	e80_j:1;
+	u_int	e80_fracl;
+};
+
+struct ieee_ext {
+	u_int	ext_frach:16;
+	u_int	ext_exp:15;
+	u_int	ext_sign:1;
+	u_int	ext_frachm;
+	u_int	ext_fraclm;
+	u_int	ext_fracl;
+};
+#endif /* !__VFP_FP__ */
+
+/*
+ * Floats whose exponent is in [1..INFNAN) (of whatever type) are
+ * `normal'.  Floats whose exponent is INFNAN are either Inf or NaN.
+ * Floats whose exponent is zero are either zero (iff all fraction
+ * bits are zero) or subnormal values.
+ *
+ * A NaN is a `signalling NaN' if its QUIETNAN bit is clear in its
+ * high fraction; if the bit is set, it is a `quiet NaN'.
+ */
+#define	SNG_EXP_INFNAN	255
+#define	DBL_EXP_INFNAN	2047
+#ifndef __VFP_FP__
+#define	E80_EXP_INFNAN	32767
+#define	EXT_EXP_INFNAN	32767
+#endif /* !__VFP_FP__ */
+
+#if 0
+#define	SNG_QUIETNAN	(1 << 22)
+#define	DBL_QUIETNAN	(1 << 19)
+#ifndef __VFP_FP__
+#define	E80_QUIETNAN	(1 << 15)
+#define	EXT_QUIETNAN	(1 << 15)
+#endif /* !__VFP_FP__ */
+#endif
+
+/*
+ * Exponent biases.
+ */
+#define	SNG_EXP_BIAS	127
+#define	DBL_EXP_BIAS	1023
+#ifndef __VFP_FP__
+#define	E80_EXP_BIAS	16383
+#define	EXT_EXP_BIAS	16383
+#endif /* !__VFP_FP__ */
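To make the DBL_* widths and bias above concrete, here is a standalone C sketch (independent of the struct definitions, which depend on __VFP_FP__) that splits a double into sign, exponent, and fraction:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      double d = -1.5;
      uint64_t bits;
      memcpy(&bits, &d, sizeof bits);
      unsigned sign = (unsigned)(bits >> 63);
      unsigned exp  = (unsigned)((bits >> 52) & 0x7ff);   /* DBL_EXPBITS = 11 */
      uint64_t frac = bits & ((1ULL << 52) - 1);          /* DBL_FRACBITS = 52 */
      /* For -1.5: sign=1, exp=1023 (unbiased 0), fraction has only its top bit set. */
      printf("sign=%u exp=%u (unbiased %d) frac=%#llx\n",
             sign, exp, (int)exp - 1023 /* DBL_EXP_BIAS */,
             (unsigned long long)frac);
      return 0;
    }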
diff --git a/libc/arch-aarch64/include/machine/kernel.h b/libc/arch-aarch64/include/machine/kernel.h
new file mode 100644
index 0000000..070e704
--- /dev/null
+++ b/libc/arch-aarch64/include/machine/kernel.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARCH_AARCH64_KERNEL_H
+#define _ARCH_AARCH64_KERNEL_H
+
+/* This file contains kernel-specific definitions that were optimized out of
+   our processed kernel headers but are still useful. */
+
+typedef unsigned long   __kernel_blkcnt_t;
+typedef unsigned long   __kernel_blksize_t;
+
+/* these aren't really defined by the kernel headers though... */
+typedef unsigned long   __kernel_fsblkcnt_t;
+typedef unsigned long   __kernel_fsfilcnt_t;
+typedef unsigned int    __kernel_id_t;
+
+#endif /* _ARCH_AARCH64_KERNEL_H */
+
diff --git a/libc/arch-aarch64/include/machine/limits.h b/libc/arch-aarch64/include/machine/limits.h
new file mode 100644
index 0000000..ecddb01
--- /dev/null
+++ b/libc/arch-aarch64/include/machine/limits.h
@@ -0,0 +1,62 @@
+/*	$OpenBSD: limits.h,v 1.3 2006/01/06 22:48:46 millert Exp $	*/
+/*	$NetBSD: limits.h,v 1.4 2003/04/28 23:16:18 bjh21 Exp $	*/
+
+/*
+ * Copyright (c) 1988 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from: @(#)limits.h	7.2 (Berkeley) 6/28/90
+ */
+
+#ifndef _AARCH64_LIMITS_H_
+#define _AARCH64_LIMITS_H_
+
+#include <sys/cdefs.h>
+
+#define	MB_LEN_MAX	1		/* no multibyte characters */
+
+#define	LONGLONG_BIT	64
+#define	LONGLONG_MIN	(-9223372036854775807LL-1)
+#define	LONGLONG_MAX	9223372036854775807LL
+#define	ULONGLONG_MAX	18446744073709551615ULL
+
+#ifndef	SIZE_MAX
+#define	SIZE_MAX	ULONGLONG_MAX	/* max value for a size_t */
+#endif
+#ifndef SSIZE_MAX
+#define	SSIZE_MAX	LONGLONG_MAX		/* max value for a ssize_t */
+#endif
+
+#if __BSD_VISIBLE
+#define	SIZE_T_MAX	ULONG_MAX	/* max value for a size_t (historic) */
+
+#define	UQUAD_MAX	0xffffffffffffffffULL		/* max unsigned quad */
+#define	QUAD_MAX	0x7fffffffffffffffLL		/* max signed quad */
+#define	QUAD_MIN	(-0x7fffffffffffffffLL-1)	/* min signed quad */
+
+#endif /* __BSD_VISIBLE */
+#endif /* _AARCH64_LIMITS_H_ */
diff --git a/libc/arch-aarch64/include/machine/setjmp.h b/libc/arch-aarch64/include/machine/setjmp.h
new file mode 100644
index 0000000..1c237da
--- /dev/null
+++ b/libc/arch-aarch64/include/machine/setjmp.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * machine/setjmp.h: machine dependent setjmp-related information.
+ */
+
+/* _JBLEN is the size of a jmp_buf in longs (64-bit on AArch64). */
+#define _JBLEN 32
+
+/* According to the AArch64 PCS document, we need to save the following
+ * registers:
+ *
+ * Core     x19 - x30, sp (see section 5.1.1)
+ * VFP      d8 - d15 (see section 5.1.2)
+ *
+ * NOTE: All the registers saved here will have 64-bit values (except FPSR).
+ *       AAPCS mandates that the higher part of q registers does not need to
+ *       be saved by the callee.
+ */
+
+/* The structure of jmp_buf for AArch64:
+ *
+ * NOTE: _JBLEN is the size of jmp_buf in longs (64-bit on AArch64)! The table
+ *      below computes the offsets in words (32-bit).
+ *
+ *  word        name            description
+ *  0       magic           magic number
+ *  1       sigmask         signal mask (not used with _setjmp / _longjmp)
+ *  2       core_base       base of core registers (x19-x30, sp)
+ *  28      float_base      base of float registers (d8-d15)
+ *  44      reserved        reserved entries (room to grow)
+ *  64
+ *
+ *
+ *  NOTE: The instructions that load/store core/vfp registers expect 8-byte
+ *        alignment. Unlike the previous setjmp header for ARM, we do not
+ *        need to save status/control registers for VFP (it is not a
+ *        requirement for setjmp).
+ */
+
+#define _JB_MAGIC       0
+#define _JB_SIGMASK     (_JB_MAGIC+1)
+#define _JB_CORE_BASE   (_JB_SIGMASK+1)
+#define _JB_FLOAT_BASE  (_JB_CORE_BASE + (31-19+1)*2)
+
+#define _JB_MAGIC__SETJMP   0x53657200
+#define _JB_MAGIC_SETJMP    0x53657201
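The offset table in the comment can be re-derived from the macros; a compile-time restatement as a sketch (bionic itself does not contain these asserts, the negative-array-size trick is only used here to check the arithmetic):

    /* _JB_CORE_BASE  = 2
     * _JB_FLOAT_BASE = 2 + (31-19+1)*2 = 28   (x19-x30 + sp: 13 regs * 2 words)
     * floats end at   28 + 8*2        = 44   (d8-d15: 8 regs * 2 words)
     * all inside _JBLEN = 32 longs = 64 words. */
    typedef char check_core_base [(_JB_CORE_BASE  ==  2) ? 1 : -1];
    typedef char check_float_base[(_JB_FLOAT_BASE == 28) ? 1 : -1];
    typedef char check_total     [(_JB_FLOAT_BASE + 8*2 <= _JBLEN*2) ? 1 : -1];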
diff --git a/libc/arch-aarch64/syscalls/mmap.S b/libc/arch-aarch64/syscalls/mmap.S
index d9abd41..a2d181a 100644
--- a/libc/arch-aarch64/syscalls/mmap.S
+++ b/libc/arch-aarch64/syscalls/mmap.S
@@ -19,3 +19,6 @@
 
     ret
 END(mmap)
+
+    .globl _C_LABEL(mmap64)
+    .equ _C_LABEL(mmap64), _C_LABEL(mmap)
diff --git a/libc/arch-arm/bionic/memcmp16.S b/libc/arch-arm/bionic/memcmp16.S
index 825c94f..afbb1b0 100644
--- a/libc/arch-arm/bionic/memcmp16.S
+++ b/libc/arch-arm/bionic/memcmp16.S
@@ -32,15 +32,15 @@
 /*
  * Optimized memcmp16() for ARM9.
  * This would not be optimal on XScale or ARM11, where more prefetching
- * and use of PLD will be needed.
+ * and use of pld will be needed.
  * The 2 major optimzations here are
  * (1) The main loop compares 16 bytes at a time
  * (2) The loads are scheduled in a way they won't stall
  */
 
 ENTRY(__memcmp16)
-        PLD         (r0, #0)
-        PLD         (r1, #0)
+        pld         [r0, #0]
+        pld         [r1, #0]
 
         /* take of the case where length is nul or the buffers are the same */
         cmp         r0, r1
@@ -62,13 +62,13 @@
         bpl         0f
 
         /* small blocks (less then 12 words) */
-        PLD         (r0, #32)
-        PLD         (r1, #32)
+        pld         [r0, #32]
+        pld         [r1, #32]
 
 1:      ldrh        r0, [r3], #2
         ldrh        ip, [r1], #2
         subs        r0, r0, ip
-        bxne        lr        
+        bxne        lr
         subs        r2, r2, #1
         bne         1b
         bx          lr
@@ -79,11 +79,11 @@
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r4, 0
         .cfi_rel_offset lr, 4
-        
+
         /* align first pointer to word boundary */
         tst         r3, #2
         beq         0f
-        
+
         ldrh        r0, [r3], #2
         ldrh        ip, [r1], #2
         sub         r2, r2, #1
@@ -111,10 +111,10 @@
         ldr         ip, [r1]
         subs        r2, r2, #(16 + 2)
         bmi         1f
-        
+
 0:
-        PLD         (r3, #64)
-        PLD         (r1, #64)
+        pld         [r3, #64]
+        pld         [r1, #64]
         ldr         r0, [r3], #4
         ldr         lr, [r1, #4]!
         eors        r0, r0, ip
@@ -139,14 +139,14 @@
         ldreq       r0, [r3], #4
         ldreq       ip, [r1, #4]!
         eoreqs      r0, r0, lr
-        bne         2f        
+        bne         2f
         subs        r2, r2, #16
         bhs         0b
 
         /* do we have at least 2 words left? */
 1:      adds        r2, r2, #(16 - 2 + 2)
         bmi         4f
-        
+
         /* finish off 2 words at a time */
 3:      ldr         r0, [r3], #4
         ldr         ip, [r1], #4
@@ -195,8 +195,8 @@
         sub         r2, r2, #8
 
 6:
-        PLD         (r3, #64)
-        PLD         (r1, #64)
+        pld         [r3, #64]
+        pld         [r1, #64]
         mov         ip, lr, lsr #16
         ldr         lr, [r1], #4
         ldr         r0, [r3], #4
diff --git a/libc/arch-arm/bionic/memcpy.S b/libc/arch-arm/bionic/memcpy.S
index 0dc86d5..f25b3e3 100644
--- a/libc/arch-arm/bionic/memcpy.S
+++ b/libc/arch-arm/bionic/memcpy.S
@@ -352,9 +352,9 @@
 
         // preload the destination because we'll align it to a cache line
         // with small writes. Also start the source "pump".
-        PLD         (r0, #0)
-        PLD         (r1, #0)
-        PLD         (r1, #32)
+        pld         [r0, #0]
+        pld         [r1, #0]
+        pld         [r1, #32]
 
 		/* it simplifies things to take care of len<4 early */
 		cmp			r2, #4
@@ -442,7 +442,7 @@
         add         r12, r12, #64
 
 1:      ldmia       r1!, { r4-r11 }
-        PLD         (r12, #64)
+        pld         [r12, #64]
         subs        r2, r2, #32
 
         // NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
@@ -563,7 +563,7 @@
         ldr         r12, [r1], #4
 1:      mov         r4, r12
 		ldmia		r1!, {   r5,r6,r7,  r8,r9,r10,r11}
-        PLD         (r1, #64)
+        pld         [r1, #64]
         subs        r2, r2, #32
         ldrhs       r12, [r1], #4
 		orr			r3, r3, r4,		lsl #16
@@ -590,7 +590,7 @@
         ldr         r12, [r1], #4
 1:      mov         r4, r12
 		ldmia		r1!, {   r5,r6,r7,  r8,r9,r10,r11}
-        PLD         (r1, #64)
+        pld         [r1, #64]
 		subs		r2, r2, #32
         ldrhs       r12, [r1], #4
 		orr			r3, r3, r4,		lsl #24
@@ -617,7 +617,7 @@
         ldr         r12, [r1], #4
 1:      mov         r4, r12
 		ldmia		r1!, {   r5,r6,r7,  r8,r9,r10,r11}
-        PLD         (r1, #64)
+        pld         [r1, #64]
 		subs		r2, r2, #32
         ldrhs       r12, [r1], #4
 		orr			r3, r3, r4,		lsl #8
diff --git a/libc/arch-arm/bionic/strcmp.S b/libc/arch-arm/bionic/strcmp.S
index 764a531..42d41d1 100644
--- a/libc/arch-arm/bionic/strcmp.S
+++ b/libc/arch-arm/bionic/strcmp.S
@@ -52,8 +52,8 @@
 #define magic2(REG) REG, lsl #7
 
 ENTRY(strcmp)
-	PLD(r0, #0)
-	PLD(r1, #0)
+	pld	[r0, #0]
+	pld	[r1, #0]
 	eor	r2, r0, r1
 	tst	r2, #3
 
@@ -88,8 +88,8 @@
 	orr	r4, r4, r4, lsl #16
 	.p2align	2
 4:
-	PLD(r0, #8)
-	PLD(r1, #8)
+	pld	[r0, #8]
+	pld	[r1, #8]
 	sub	r2, ip, magic1(r4)
 	cmp	ip, r3
 	itttt	eq
diff --git a/libc/arch-arm/generic/bionic/memcpy.S b/libc/arch-arm/generic/bionic/memcpy.S
index 87ebc44..699b88d 100644
--- a/libc/arch-arm/generic/bionic/memcpy.S
+++ b/libc/arch-arm/generic/bionic/memcpy.S
@@ -57,9 +57,9 @@
 
         // preload the destination because we'll align it to a cache line
         // with small writes. Also start the source "pump".
-        PLD         (r0, #0)
-        PLD         (r1, #0)
-        PLD         (r1, #32)
+        pld         [r0, #0]
+        pld         [r1, #0]
+        pld         [r1, #32]
 
         /* it simplifies things to take care of len<4 early */
         cmp         r2, #4
@@ -147,7 +147,7 @@
         add         r12, r12, #64
 
 1:      ldmia       r1!, { r4-r11 }
-        PLD         (r12, #64)
+        pld         [r12, #64]
         subs        r2, r2, #32
 
         // NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
@@ -268,7 +268,7 @@
         ldr         r12, [r1], #4
 1:      mov         r4, r12
         ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
-        PLD         (r1, #64)
+        pld         [r1, #64]
         subs        r2, r2, #32
         ldrhs       r12, [r1], #4
         orr         r3, r3, r4,     lsl #16
@@ -295,7 +295,7 @@
         ldr         r12, [r1], #4
 1:      mov         r4, r12
         ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
-        PLD         (r1, #64)
+        pld         [r1, #64]
         subs        r2, r2, #32
         ldrhs       r12, [r1], #4
         orr         r3, r3, r4,     lsl #24
@@ -322,7 +322,7 @@
         ldr         r12, [r1], #4
 1:      mov         r4, r12
         ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
-        PLD         (r1, #64)
+        pld         [r1, #64]
         subs        r2, r2, #32
         ldrhs       r12, [r1], #4
         orr         r3, r3, r4,     lsl #8
diff --git a/libc/arch-arm/generic/bionic/strcmp.S b/libc/arch-arm/generic/bionic/strcmp.S
index 764a531..42d41d1 100644
--- a/libc/arch-arm/generic/bionic/strcmp.S
+++ b/libc/arch-arm/generic/bionic/strcmp.S
@@ -52,8 +52,8 @@
 #define magic2(REG) REG, lsl #7
 
 ENTRY(strcmp)
-	PLD(r0, #0)
-	PLD(r1, #0)
+	pld	[r0, #0]
+	pld	[r1, #0]
 	eor	r2, r0, r1
 	tst	r2, #3
 
@@ -88,8 +88,8 @@
 	orr	r4, r4, r4, lsl #16
 	.p2align	2
 4:
-	PLD(r0, #8)
-	PLD(r1, #8)
+	pld	[r0, #8]
+	pld	[r1, #8]
 	sub	r2, ip, magic1(r4)
 	cmp	ip, r3
 	itttt	eq
diff --git a/libc/arch-arm/generic/bionic/strcpy.S b/libc/arch-arm/generic/bionic/strcpy.S
index 21dafda..cc997f4 100644
--- a/libc/arch-arm/generic/bionic/strcpy.S
+++ b/libc/arch-arm/generic/bionic/strcpy.S
@@ -33,7 +33,7 @@
 #include <machine/asm.h>
 
 ENTRY(strcpy)
-	PLD(r1, #0)
+	pld	[r1, #0]
 	eor	r2, r0, r1
 	mov	ip, r0
 	tst	r2, #3
@@ -62,7 +62,7 @@
 	  load stalls.  */
 	.p2align 2
 2:
-	PLD(r1, #8)
+	pld	[r1, #8]
 	ldr	r4, [r1], #4
 	sub	r2, r3, r5
 	bics	r2, r2, r3
diff --git a/libc/arch-arm/generic/bionic/strlen.c b/libc/arch-arm/generic/bionic/strlen.c
index 824cf78..811e1e0 100644
--- a/libc/arch-arm/generic/bionic/strlen.c
+++ b/libc/arch-arm/generic/bionic/strlen.c
@@ -63,9 +63,7 @@
         "ldr     %[v], [%[s]], #4           \n"
         "sub     %[l], %[l], %[s]           \n"
         "0:                                 \n"
-#if __ARM_HAVE_PLD
         "pld     [%[s], #64]                \n"
-#endif
         "sub     %[t], %[v], %[mask], lsr #7\n"
         "and     %[t], %[t], %[mask]        \n"
         "bics    %[t], %[t], %[v]           \n"
diff --git a/libc/arch-arm/include/machine/cdefs.h b/libc/arch-arm/include/machine/cdefs.h
deleted file mode 100644
index 44f1542..0000000
--- a/libc/arch-arm/include/machine/cdefs.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*	$OpenBSD: cdefs.h,v 1.2 2005/11/24 20:46:44 deraadt Exp $	*/
-
-#ifndef	_MACHINE_CDEFS_H_
-#define	_MACHINE_CDEFS_H_
-
-#if defined(lint)
-#define __indr_reference(sym,alias)	__lint_equal__(sym,alias)
-#define __warn_references(sym,msg)
-#define __weak_alias(alias,sym)		__lint_equal__(sym,alias)
-#elif defined(__GNUC__) && defined(__STDC__)
-#define __weak_alias(alias,sym)					\
-	__asm__(".weak " __STRING(alias) " ; " __STRING(alias)	\
-	    " = " __STRING(sym));
-#define	__warn_references(sym,msg)				\
-	__asm__(".section .gnu.warning." __STRING(sym)		\
-	    " ; .ascii \"" msg "\" ; .text");
-#endif
-
-#endif /* !_MACHINE_CDEFS_H_ */
diff --git a/libc/arch-arm/include/machine/cpu-features.h b/libc/arch-arm/include/machine/cpu-features.h
index 80d3fda..fc5a8fd 100644
--- a/libc/arch-arm/include/machine/cpu-features.h
+++ b/libc/arch-arm/include/machine/cpu-features.h
@@ -34,133 +34,29 @@
  *
  * This is done to abstract us from the various ARM Architecture
  * quirks and alphabet soup.
- *
- * IMPORTANT: We have no intention to support anything below an ARMv4T !
  */
 
 /* __ARM_ARCH__ is a number corresponding to the ARM revision
- * we're going to support
- *
- * it looks like our toolchain doesn't define __ARM_ARCH__
+ * we're going to support. Our toolchain doesn't define __ARM_ARCH__,
+ * so try to guess it.
- *
- *
- *
  */
 #ifndef __ARM_ARCH__
-
 #  if defined __ARM_ARCH_7__   || defined __ARM_ARCH_7A__ || \
-      defined __ARM_ARCH_7R__  || defined __ARM_ARCH_7M__
-
+        defined __ARM_ARCH_7R__  || defined __ARM_ARCH_7M__
 #    define __ARM_ARCH__ 7
-
 #  elif defined __ARM_ARCH_6__   || defined __ARM_ARCH_6J__ || \
-      defined __ARM_ARCH_6K__  || defined __ARM_ARCH_6Z__ || \
-      defined __ARM_ARCH_6KZ__ || defined __ARM_ARCH_6T2__
-#
+        defined __ARM_ARCH_6K__  || defined __ARM_ARCH_6Z__ || \
+        defined __ARM_ARCH_6KZ__ || defined __ARM_ARCH_6T2__
 #    define __ARM_ARCH__ 6
-#
-#  elif defined __ARM_ARCH_5__ || defined __ARM_ARCH_5T__ || \
-        defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__
-#
-#    define __ARM_ARCH__ 5
-#
-#  elif defined __ARM_ARCH_4T__
-#
-#    define __ARM_ARCH__ 4
-#
-#  elif defined __ARM_ARCH_4__
-#    error ARMv4 is not supported, please use ARMv4T at a minimum
 #  else
 #    error Unknown or unsupported ARM architecture
 #  endif
 #endif
 
-/* experimental feature used to check that our ARMv4 workarounds
- * work correctly without a real ARMv4 machine */
-#ifdef BIONIC_EXPERIMENTAL_FORCE_ARMV4
-#  undef  __ARM_ARCH__
-#  define __ARM_ARCH__  4
-#endif
-
-/* define __ARM_HAVE_5TE if we have the ARMv5TE instructions */
-#if __ARM_ARCH__ > 5
-#  define  __ARM_HAVE_5TE  1
-#elif __ARM_ARCH__ == 5
-#  if defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__
-#    define __ARM_HAVE_5TE  1
-#  endif
-#endif
-
-/* instructions introduced in ARMv5 */
-#if __ARM_ARCH__ >= 5
-#  define  __ARM_HAVE_BLX  1
-#  define  __ARM_HAVE_CLZ  1
-#  define  __ARM_HAVE_LDC2 1
-#  define  __ARM_HAVE_MCR2 1
-#  define  __ARM_HAVE_MRC2 1
-#  define  __ARM_HAVE_STC2 1
-#endif
-
-/* ARMv5TE introduces a few instructions */
-#if __ARM_HAVE_5TE
-#  define  __ARM_HAVE_PLD   1
-#  define  __ARM_HAVE_MCRR  1
-#  define  __ARM_HAVE_MRRC  1
-#endif
-
 /* define __ARM_HAVE_HALFWORD_MULTIPLY when half-word multiply instructions
  * this means variants of: smul, smulw, smla, smlaw, smlal
  */
-#if __ARM_HAVE_5TE
-#  define  __ARM_HAVE_HALFWORD_MULTIPLY  1
-#endif
-
-/* define __ARM_HAVE_PAIR_LOAD_STORE when 64-bit memory loads and stored
- * into/from a pair of 32-bit registers is supported throuhg 'ldrd' and 'strd'
- */
-#if __ARM_HAVE_5TE
-#  define  __ARM_HAVE_PAIR_LOAD_STORE 1
-#endif
-
-/* define __ARM_HAVE_SATURATED_ARITHMETIC is you have the saturated integer
- * arithmetic instructions: qdd, qdadd, qsub, qdsub
- */
-#if __ARM_HAVE_5TE
-#  define  __ARM_HAVE_SATURATED_ARITHMETIC 1
-#endif
-
-/* define __ARM_HAVE_PC_INTERWORK when a direct assignment to the
- * pc register will switch into thumb/ARM mode depending on bit 0
- * of the new instruction address. Before ARMv5, this was not the
- * case, and you have to write:
- *
- *     mov  r0, [<some address>]
- *     bx   r0
- *
- * instead of:
- *
- *     ldr  pc, [<some address>]
- *
- * note that this affects any instruction that explicitly changes the
- * value of the pc register, including ldm { ...,pc } or 'add pc, #offset'
- */
-#if __ARM_ARCH__ >= 5
-#  define __ARM_HAVE_PC_INTERWORK
-#endif
-
-/* define __ARM_HAVE_LDREX_STREX for ARMv6 and ARMv7 architecture to be
- * used in replacement of deprecated swp instruction
- */
-#if __ARM_ARCH__ >= 6
-#  define __ARM_HAVE_LDREX_STREX
-#endif
-
-/* define __ARM_HAVE_DMB for ARMv7 architecture
- */
-#if __ARM_ARCH__ >= 7
-#  define __ARM_HAVE_DMB
-#endif
+#define  __ARM_HAVE_HALFWORD_MULTIPLY  1
 
 /* define __ARM_HAVE_LDREXD for ARMv7 architecture
  * (also present in ARMv6K, but not implemented in ARMv7-M, neither of which
@@ -184,18 +80,4 @@
 #  define __ARM_HAVE_NEON
 #endif
 
-/* Assembly-only macros */
-#ifdef __ASSEMBLY__
-
-/* define a handy PLD(address) macro since the cache preload
- * is an optional opcode
- */
-#if __ARM_HAVE_PLD
-#  define  PLD(reg,offset)    pld    [reg, offset]
-#else
-#  define  PLD(reg,offset)    /* nothing */
-#endif
-
-#endif /* ! __ASSEMBLY__ */
-
 #endif /* _ARM_MACHINE_CPU_FEATURES_H */
diff --git a/libc/arch-arm/include/machine/endian.h b/libc/arch-arm/include/machine/endian.h
index 7cba3b9..8d9723d 100644
--- a/libc/arch-arm/include/machine/endian.h
+++ b/libc/arch-arm/include/machine/endian.h
@@ -33,15 +33,6 @@
 
 #ifdef __GNUC__
 
-/*
- * REV and REV16 weren't available on ARM5 or ARM4.
- * We don't include <machine/cpu-features.h> because it pollutes the
- * namespace with macros like PLD.
- */
-#if !defined __ARM_ARCH_5__ && !defined __ARM_ARCH_5T__ && \
-    !defined __ARM_ARCH_5TE__ && !defined __ARM_ARCH_5TEJ__ && \
-    !defined __ARM_ARCH_4T__ && !defined __ARM_ARCH_4__
-
 /* According to RealView Assembler User's Guide, REV and REV16 are available
  * in Thumb code and 16-bit instructions when used in Thumb-2 code.
  *
@@ -55,13 +46,13 @@
  */
 #define __swap16md(x) ({                                        \
     register u_int16_t _x = (x);                                \
-    __asm volatile ("rev16 %0, %0" : "+l" (_x));                \
+    __asm__ __volatile__("rev16 %0, %0" : "+l" (_x));           \
     _x;                                                         \
 })
 
 #define __swap32md(x) ({                                        \
     register u_int32_t _x = (x);                                \
-    __asm volatile ("rev %0, %0" : "+l" (_x));                  \
+    __asm__ __volatile__("rev %0, %0" : "+l" (_x));             \
     _x;                                                         \
 })
 
@@ -74,7 +65,6 @@
 /* Tell sys/endian.h we have MD variants of the swap macros.  */
 #define MD_SWAP
 
-#endif  /* __ARM_ARCH__ */
 #endif  /* __GNUC__ */
 
 #if defined(__ARMEB__)
diff --git a/libc/arch-common/bionic/__dso_handle.h b/libc/arch-common/bionic/__dso_handle.h
index e67ce7c..d4bff77 100644
--- a/libc/arch-common/bionic/__dso_handle.h
+++ b/libc/arch-common/bionic/__dso_handle.h
@@ -30,5 +30,9 @@
 #ifndef CRT_LEGACY_WORKAROUND
 __attribute__ ((visibility ("hidden")))
 #endif
+#ifdef __aarch64__
+__attribute__ ((section (".data")))
+#else
 __attribute__ ((section (".bss")))
+#endif
 void *__dso_handle = (void *) 0;
diff --git a/libc/arch-common/bionic/__dso_handle_so.h b/libc/arch-common/bionic/__dso_handle_so.h
index 732799b..fab328a 100644
--- a/libc/arch-common/bionic/__dso_handle_so.h
+++ b/libc/arch-common/bionic/__dso_handle_so.h
@@ -29,4 +29,8 @@
 
 __attribute__ ((visibility ("hidden")))
 __attribute__ ((section (".data")))
+#ifdef __aarch64__
+void *__dso_handle = (void *) 0;
+#else
 void *__dso_handle = &__dso_handle;
+#endif
diff --git a/libc/arch-mips/include/machine/cdefs.h b/libc/arch-mips/include/machine/cdefs.h
deleted file mode 100644
index d52376a..0000000
--- a/libc/arch-mips/include/machine/cdefs.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*	$OpenBSD: cdefs.h,v 1.4 2006/01/10 00:04:04 millert Exp $	*/
-
-/*
- * Copyright (c) 2002-2003 Opsycon AB  (www.opsycon.se / www.opsycon.com)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- */
-
-
-#ifndef _MIPS_CDEFS_H_
-#define	_MIPS_CDEFS_H_
-
-#if defined(lint)
-#define __indr_reference(sym,alias)	__lint_equal__(sym,alias)
-#define __warn_references(sym,msg)
-#define __weak_alias(alias,sym)		__lint_equal__(sym,alias)
-#elif defined(__GNUC__) && defined(__STDC__)
-#define __weak_alias(alias,sym)				\
-	__asm__(".weak " __STRING(alias) " ; "		\
-	    __STRING(alias) " = " __STRING(sym))
-#define	__warn_references(sym,msg)			\
-	__asm__(".section .gnu.warning." __STRING(sym)	\
-	    " ; .ascii \"" msg "\" ; .text")
-#define	__indr_references(sym,msg)	/* nothing */
-#endif
-
-#endif /* !_MIPS_CDEFS_H_ */
diff --git a/libc/arch-x86/include/machine/cdefs.h b/libc/arch-x86/include/machine/cdefs.h
deleted file mode 100644
index 6efee6a..0000000
--- a/libc/arch-x86/include/machine/cdefs.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*	$OpenBSD: cdefs.h,v 1.9 2005/11/24 20:46:45 deraadt Exp $	*/
-
-/*
- * Written by J.T. Conklin <jtc@wimsey.com> 01/17/95.
- * Public domain.
- */
-
-#ifndef	_MACHINE_CDEFS_H_
-#define	_MACHINE_CDEFS_H_
-
-#if defined(lint)
-#define __indr_reference(sym,alias)	__lint_equal__(sym,alias)
-#define __warn_references(sym,msg)
-#define __weak_alias(alias,sym)		__lint_equal__(sym,alias)
-#elif defined(__GNUC__) && defined(__STDC__)
-#define __weak_alias(alias,sym)				\
-	__asm__(".weak " __STRING(alias) " ; "		\
-	    __STRING(alias) " = " __STRING(sym));
-#define __warn_references(sym,msg)			\
-	__asm__(".section .gnu.warning." __STRING(sym)	\
-	    " ; .ascii \"" msg "\" ; .text");
-#endif
-
-#endif /* !_MACHINE_CDEFS_H_ */
diff --git a/libc/arch-x86_64/include/machine/cdefs.h b/libc/arch-x86_64/include/machine/cdefs.h
deleted file mode 100644
index eb243a3..0000000
--- a/libc/arch-x86_64/include/machine/cdefs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*	$OpenBSD: cdefs.h,v 1.3 2013/03/28 17:30:45 martynas Exp $	*/
-
-/*
- * Written by J.T. Conklin <jtc@wimsey.com> 01/17/95.
- * Public domain.
- */
-
-#ifndef	_MACHINE_CDEFS_H_
-#define	_MACHINE_CDEFS_H_
-
-#define __strong_alias(alias,sym)					\
-	__asm__(".global " __STRING(alias) " ; " __STRING(alias)	\
-	    " = " __STRING(sym))
-#define __weak_alias(alias,sym)						\
-	__asm__(".weak " __STRING(alias) " ; " __STRING(alias)		\
-	    " = " __STRING(sym))
-#define __warn_references(sym,msg)					\
-	__asm__(".section .gnu.warning." __STRING(sym)			\
-	    " ; .ascii \"" msg "\" ; .text")
-
-#endif /* !_MACHINE_CDEFS_H_ */
diff --git a/libc/arch-x86_64/include/machine/exec.h b/libc/arch-x86_64/include/machine/exec.h
index 6d16439..829351c 100644
--- a/libc/arch-x86_64/include/machine/exec.h
+++ b/libc/arch-x86_64/include/machine/exec.h
@@ -12,7 +12,7 @@
 
 #define ELF_TARG_CLASS		ELFCLASS64
 #define ELF_TARG_DATA		ELFDATA2LSB
-#define ELF_TARG_MACH		EM_AMD64
+#define ELF_TARG_MACH		EM_X86_64
 
 #define _NLIST_DO_ELF
 #define _KERN_DO_ELF64
diff --git a/libc/arch-x86_64/syscalls/mmap.S b/libc/arch-x86_64/syscalls/mmap.S
index d28cc42..d6f9687 100644
--- a/libc/arch-x86_64/syscalls/mmap.S
+++ b/libc/arch-x86_64/syscalls/mmap.S
@@ -15,3 +15,6 @@
 1:
     ret
 END(mmap)
+
+    .globl _C_LABEL(mmap64)
+    .equ _C_LABEL(mmap64), _C_LABEL(mmap)
diff --git a/libc/bionic/pthread_debug.cpp b/libc/bionic/pthread_debug.cpp
index 79a193d..f01f040 100644
--- a/libc/bionic/pthread_debug.cpp
+++ b/libc/bionic/pthread_debug.cpp
@@ -91,6 +91,8 @@
 the lock has been acquired.
 */
 
+#if PTHREAD_DEBUG_ENABLED
+
 // =============================================================================
 // log functions
 // =============================================================================
@@ -658,23 +660,6 @@
 
 /****************************************************************************/
 
-/* pthread_debug_init() is called from libc_init_dynamic() just
- * after system properties have been initialized
- */
-
-extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
-    char env[PROP_VALUE_MAX];
-    if (__system_property_get("debug.libc.pthread", env)) {
-        int level = atoi(env);
-        if (level) {
-            LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
-                    level, getpid(), __progname);
-            hashmap_init(&sMutexMap);
-            sPthreadDebugLevel = level;
-        }
-    }
-}
-
 /*
  * See if we were allowed to grab the lock at this time.  We do it
  * *after* acquiring the lock, rather than before, so that we can
@@ -712,3 +697,21 @@
     remove_most_recently_locked(object);
     mutex_unlock_checked(object);
 }
+
+#endif // PTHREAD_DEBUG_ENABLED
+
+// Called from libc_init_dynamic() just after system properties have been initialized.
+extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
+#if PTHREAD_DEBUG_ENABLED
+    char env[PROP_VALUE_MAX];
+    if (__system_property_get("debug.libc.pthread", env)) {
+        int level = atoi(env);
+        if (level) {
+            LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
+                    level, getpid(), __progname);
+            hashmap_init(&sMutexMap);
+            sPthreadDebugLevel = level;
+        }
+    }
+#endif
+}
diff --git a/libc/bionic/pthread_exit.cpp b/libc/bionic/pthread_exit.cpp
index 22c2c3c..e6e636f 100644
--- a/libc/bionic/pthread_exit.cpp
+++ b/libc/bionic/pthread_exit.cpp
@@ -36,6 +36,7 @@
 
 extern "C" void _exit_with_stack_teardown(void*, size_t, int);
 extern "C" void __exit(int);
+extern "C" int __set_tid_address(int*);
 
 /* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
  *         and thread cancelation
@@ -93,7 +94,10 @@
 
   pthread_mutex_lock(&gThreadListLock);
   if ((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) != 0) {
-    // The thread is detached, so we can destroy the pthread_internal_t.
+    // The thread is detached, so we can free the pthread_internal_t.
+    // First make sure that the kernel does not try to clear the tid field
+    // because we'll have freed the memory before the thread actually exits.
+    __set_tid_address(NULL);
     _pthread_internal_remove_locked(thread);
   } else {
     // Make sure that the pthread_internal_t doesn't have stale pointers to a stack that
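For background on the __set_tid_address(NULL) call added above: the kernel clears and futex-wakes the registered child-tid address when the thread exits, so it must be de-registered before the backing memory is freed. A hedged sketch of the raw system call (illustrative only; bionic goes through its own __set_tid_address stub declared earlier in the file):

    #include <stddef.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* De-register the child-tid address so the kernel will not write into
     * memory that is about to be freed. */
    static void clear_child_tid_address(void) {
      syscall(SYS_set_tid_address, NULL);
    }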
diff --git a/libc/bionic/pthread_mutex.cpp b/libc/bionic/pthread_mutex.cpp
index 4a11747..11a66ab 100644
--- a/libc/bionic/pthread_mutex.cpp
+++ b/libc/bionic/pthread_mutex.cpp
@@ -282,21 +282,16 @@
     return 0;
 }
 
-int pthread_mutex_init(pthread_mutex_t *mutex,
-                       const pthread_mutexattr_t *attr)
-{
-    int value = 0;
-
-    if (mutex == NULL)
-        return EINVAL;
-
+int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
     if (__predict_true(attr == NULL)) {
         mutex->value = MUTEX_TYPE_BITS_NORMAL;
         return 0;
     }
 
-    if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
+    int value = 0;
+    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
         value |= MUTEX_SHARED_MASK;
+    }
 
     switch (*attr & MUTEXATTR_TYPE_MASK) {
     case PTHREAD_MUTEX_NORMAL:
@@ -473,9 +468,6 @@
 {
     int mvalue, mtype, tid, shared;
 
-    if (__predict_false(mutex == NULL))
-        return EINVAL;
-
     mvalue = mutex->value;
     mtype = (mvalue & MUTEX_TYPE_MASK);
     shared = (mvalue & MUTEX_SHARED_MASK);
@@ -565,9 +557,6 @@
 {
     int mvalue, mtype, tid, shared;
 
-    if (__predict_false(mutex == NULL))
-        return EINVAL;
-
     mvalue = mutex->value;
     mtype  = (mvalue & MUTEX_TYPE_MASK);
     shared = (mvalue & MUTEX_SHARED_MASK);
@@ -630,9 +619,6 @@
 {
     int mvalue, mtype, tid, shared;
 
-    if (__predict_false(mutex == NULL))
-        return EINVAL;
-
     mvalue = mutex->value;
     mtype  = (mvalue & MUTEX_TYPE_MASK);
     shared = (mvalue & MUTEX_SHARED_MASK);
@@ -705,9 +691,6 @@
     /* compute absolute expiration time */
     __timespec_to_relative_msec(&abstime, msecs, clock);
 
-    if (__predict_false(mutex == NULL))
-        return EINVAL;
-
     mvalue = mutex->value;
     mtype  = (mvalue & MUTEX_TYPE_MASK);
     shared = (mvalue & MUTEX_SHARED_MASK);
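With the NULL checks removed, callers are expected to pass a valid mutex. Typical call sites for the rewritten pthread_mutex_init() look like this (ordinary POSIX usage, not a new bionic API):

    #include <pthread.h>

    static void init_examples(pthread_mutex_t* fast, pthread_mutex_t* rec) {
      pthread_mutex_init(fast, NULL);        /* common case: the attr == NULL fast path */

      pthread_mutexattr_t attr;
      pthread_mutexattr_init(&attr);
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
      pthread_mutex_init(rec, &attr);        /* goes through the MUTEXATTR_TYPE_MASK switch */
      pthread_mutexattr_destroy(&attr);
    }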
diff --git a/libc/include/stdio.h b/libc/include/stdio.h
index 409afea..260a4e7 100644
--- a/libc/include/stdio.h
+++ b/libc/include/stdio.h
@@ -222,13 +222,13 @@
 ssize_t	 getdelim(char ** __restrict, size_t * __restrict, int,
 	    FILE * __restrict);
 ssize_t	 getline(char ** __restrict, size_t * __restrict, FILE * __restrict);
-char	*gets(char *);
+
 #if __BSD_VISIBLE && !defined(__SYS_ERRLIST)
 #define __SYS_ERRLIST
-
 extern int sys_nerr;			/* perror(3) external variables */
 extern char *sys_errlist[];
 #endif
+
 void	 perror(const char *);
 int	 printf(const char * __restrict, ...)
 		__printflike(1, 2);
@@ -251,13 +251,16 @@
 		__printflike(1, 0);
 
 #ifndef __AUDIT__
-char	*gets(char *);
-int	 sprintf(char * __restrict, const char * __restrict, ...)
-		__printflike(2, 3);
-char	*tmpnam(char *);
-int	 vsprintf(char * __restrict, const char * __restrict,
-    __va_list)
-		__printflike(2, 0);
+char* gets(char*) __warnattr("gets is very unsafe; consider using fgets");
+int sprintf(char* __restrict, const char* __restrict, ...)
+    __printflike(2, 3) __warnattr("sprintf is often misused; please use snprintf");
+char* tmpnam(char*) __warnattr("tmpnam possibly used unsafely; consider using mkstemp");
+int vsprintf(char* __restrict, const char* __restrict, __va_list)
+    __printflike(2, 0) __warnattr("vsprintf is often misused; please use vsnprintf");
+#if __XPG_VISIBLE
+char* tempnam(const char*, const char*)
+    __warnattr("tempnam possibly used unsafely; consider using mkstemp");
+#endif
 #endif
 
 extern int rename(const char*, const char*);
@@ -320,9 +323,6 @@
 int	 putchar_unlocked(int);
 #endif /* __POSIX_VISIBLE >= 199506 */
 
-#if __XPG_VISIBLE
-char	*tempnam(const char *, const char *);
-#endif
 __END_DECLS
 
 #endif /* __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE */
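The intent of the new __warnattr annotations is to push callers toward the bounded replacements. A minimal before/after sketch of the suggested usage:

    #include <stdio.h>

    int main(void) {
      char line[128], msg[160];
      if (fgets(line, sizeof(line), stdin) != NULL) {   /* instead of gets(line) */
        snprintf(msg, sizeof(msg), "echo: %s", line);   /* instead of sprintf(msg, ...) */
        fputs(msg, stdout);
      }
      return 0;
    }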
diff --git a/libc/include/stdlib.h b/libc/include/stdlib.h
index 9fa84c1..72b554f 100644
--- a/libc/include/stdlib.h
+++ b/libc/include/stdlib.h
@@ -51,9 +51,9 @@
 extern int unsetenv(const char *);
 extern int clearenv(void);
 
-extern char *mkdtemp(char *);
-extern char *mktemp(char *);
-extern int mkstemp(char *);
+extern char* mkdtemp(char*);
+extern char* mktemp(char*) __warnattr("mktemp possibly used unsafely; consider using mkstemp");
+extern int mkstemp(char*);
 
 extern long strtol(const char *, char **, int);
 extern long long strtoll(const char *, char **, int);
diff --git a/libc/include/sys/cdefs.h b/libc/include/sys/cdefs.h
index c7f2ac7..b4dad74 100644
--- a/libc/include/sys/cdefs.h
+++ b/libc/include/sys/cdefs.h
@@ -334,8 +334,10 @@
 
 #if __GNUC_PREREQ__(4, 3)
 #define __errordecl(name, msg) extern void name(void) __attribute__((__error__(msg)))
+#define __warnattr(msg) __attribute__((__warning__(msg)))
 #else
 #define __errordecl(name, msg) extern void name(void)
+#define __warnattr(msg)
 #endif
 
 /*
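A sketch of what the new __warnattr macro does in practice (the function name below is made up and the diagnostic text is paraphrased): with GCC >= 4.3, any call to an annotated function that survives optimization produces a compile-time warning while the program still builds.

    #include <sys/cdefs.h>

    char* gets_like(char*) __warnattr("gets_like is unsafe; use a bounded read");

    void caller(char* buf) {
      gets_like(buf);   /* warning: call to 'gets_like' declared with attribute warning: ... */
    }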
diff --git a/libc/include/sys/cdefs_elf.h b/libc/include/sys/cdefs_elf.h
index 0887fa5..bb846b7 100644
--- a/libc/include/sys/cdefs_elf.h
+++ b/libc/include/sys/cdefs_elf.h
@@ -38,20 +38,10 @@
 #define _C_LABEL_STRING(x)	x
 #endif
 
-#if __STDC__
 #define	___RENAME(x)	__asm__(___STRING(_C_LABEL(x)))
-#else
-#ifdef __LEADING_UNDERSCORE
-#define	___RENAME(x)	____RENAME(_/**/x)
-#define	____RENAME(x)	__asm__(___STRING(x))
-#else
-#define	___RENAME(x)	__asm__(___STRING(x))
-#endif
-#endif
 
 #define	__indr_reference(sym,alias)	/* nada, since we do weak refs */
 
-#if __STDC__
 #define	__strong_alias(alias,sym)	       				\
     __asm__(".global " _C_LABEL_STRING(#alias) "\n"			\
 	    _C_LABEL_STRING(#alias) " = " _C_LABEL_STRING(#sym));
@@ -61,39 +51,15 @@
 	    _C_LABEL_STRING(#alias) " = " _C_LABEL_STRING(#sym));
 #define	__weak_extern(sym)						\
     __asm__(".weak " _C_LABEL_STRING(#sym));
+
+/* We use __warnattr instead of __warn_references.
+ * TODO: remove this and put an empty definition in one of the upstream-* compatibility headers.
+ */
 #define	__warn_references(sym,msg)					\
-    __asm__(".section .gnu.warning." #sym "\n\t.ascii \"" msg "\"\n\t.text");
+    /*__asm__(".section .gnu.warning." #sym "\n\t.ascii \"" msg "\"\n\t.text");*/
 
-#else /* !__STDC__ */
-
-#ifdef __LEADING_UNDERSCORE
-#define __weak_alias(alias,sym) ___weak_alias(_/**/alias,_/**/sym)
-#define	___weak_alias(alias,sym)					\
-    __asm__(".weak alias\nalias = sym");
-#else
-#define	__weak_alias(alias,sym)						\
-    __asm__(".weak alias\nalias = sym");
-#endif
-#ifdef __LEADING_UNDERSCORE
-#define __weak_extern(sym) ___weak_extern(_/**/sym)
-#define	___weak_extern(sym)						\
-    __asm__(".weak sym");
-#else
-#define	__weak_extern(sym)						\
-    __asm__(".weak sym");
-#endif
-#define	__warn_references(sym,msg)					\
-    __asm__(".section .gnu.warning.sym\n\t.ascii msg ; .text");
-
-#endif /* !__STDC__ */
-
-#if __STDC__
 #define	__SECTIONSTRING(_sec, _str)					\
 	__asm__(".section " #_sec "\n\t.asciz \"" _str "\"\n\t.previous")
-#else
-#define	__SECTIONSTRING(_sec, _str)					\
-	__asm__(".section _sec\n\t.asciz _str\n\t.previous")
-#endif
 
 /* GCC visibility helper macro */
 /* This must be used to tag non-static functions that are private, i.e.
diff --git a/libc/include/sys/param.h b/libc/include/sys/param.h
index 27acd70..37c6427 100644
--- a/libc/include/sys/param.h
+++ b/libc/include/sys/param.h
@@ -40,8 +40,19 @@
 #define ALIGNBYTES 3
 #endif
 
-#define ALIGN(p)    (((unsigned long)(p) + ALIGNBYTES) &~ ALIGNBYTES)
+#ifndef ALIGN
+#define ALIGN(p) (((uintptr_t)(p) + ALIGNBYTES) &~ ALIGNBYTES)
+#endif
 
-#define powerof2(x) ((((x)-1)&(x))==0)
+/* Macros for counting and rounding. */
+#ifndef howmany
+#define howmany(x, y)   (((x)+((y)-1))/(y))
+#endif
+#define roundup(x, y)   ((((x)+((y)-1))/(y))*(y))
+#define powerof2(x)     ((((x)-1)&(x))==0)
+
+/* Macros for min/max. */
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MAX(a,b) (((a)>(b))?(a):(b))
 
 #endif /* _SYS_PARAM_H_ */
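The new counting/rounding macros are small enough that a couple of worked values document them; a throwaway C check:

    #include <stdio.h>
    #include <sys/param.h>

    int main(void) {
      printf("%d\n", (int) howmany(1300, 512));           /* 3 sectors for 1300 bytes */
      printf("%d\n", (int) roundup(1300, 512));           /* 1536 */
      printf("%d %d\n", powerof2(4096), powerof2(1300));  /* 1 0 */
      printf("%d %d\n", MIN(3, 7), MAX(3, 7));            /* 3 7 */
      return 0;
    }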
diff --git a/libc/kernel/uapi/linux/ioprio.h b/libc/kernel/uapi/linux/ioprio.h
new file mode 100644
index 0000000..ccdb3b0
--- /dev/null
+++ b/libc/kernel/uapi/linux/ioprio.h
@@ -0,0 +1,46 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef _UAPI_LINUX_IOPRIO_H
+#define _UAPI_LINUX_IOPRIO_H
+#define IOPRIO_BITS (16)
+#define IOPRIO_CLASS_SHIFT (13)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
+#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
+#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+enum {
+ IOPRIO_CLASS_NONE,
+ IOPRIO_CLASS_RT,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+ IOPRIO_CLASS_BE,
+ IOPRIO_CLASS_IDLE,
+};
+#define IOPRIO_BE_NR (8)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+enum {
+ IOPRIO_WHO_PROCESS = 1,
+ IOPRIO_WHO_PGRP,
+ IOPRIO_WHO_USER,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+#define IOPRIO_NORM (4)
+#endif
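These constants are consumed through the ioprio_get/ioprio_set system calls. A hedged sketch, assuming the libc provides no ioprio_set() wrapper (so the raw syscall is used) and that <sys/syscall.h> defines SYS_ioprio_set:

    #include <linux/ioprio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Put the calling process in the best-effort class at the default level. */
    static long set_default_io_priority(void) {
      return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0 /* self */,
                     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM));
    }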
diff --git a/libc/netbsd/resolv/res_query.c b/libc/netbsd/resolv/res_query.c
index 8e1321e..d31e83c 100644
--- a/libc/netbsd/resolv/res_query.c
+++ b/libc/netbsd/resolv/res_query.c
@@ -272,6 +272,15 @@
 	    (dots && !trailing_dot && (statp->options & RES_DNSRCH) != 0U)) {
 		int done = 0;
 
+		/* Unfortunately we need to load the interface info (DNS servers,
+		 * search domains) before the search domains are tried. A better
+		 * fix will be possible once thread pools are used, since the info
+		 * can then be loaded once per thread instead of on every query.
+		 */
+		_resolv_populate_res_for_iface(statp);
+
 		for (domain = (const char * const *)statp->dnsrch;
 		     *domain && !done;
 		     domain++) {
diff --git a/libc/private/__get_tls.h b/libc/private/__get_tls.h
index 5f9451d..04c5fdb 100644
--- a/libc/private/__get_tls.h
+++ b/libc/private/__get_tls.h
@@ -29,11 +29,10 @@
 #ifndef __BIONIC_PRIVATE_GET_TLS_H_
 #define __BIONIC_PRIVATE_GET_TLS_H_
 
-#if defined(__arm__)
-# define __get_tls() \
-    ({ void** __val; \
-       __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__val)); \
-       __val; })
+#if defined(__aarch64__)
+# define __get_tls() ({ void** __val; __asm__("mrs %0, tpidr_el0" : "=r"(__val)); __val; })
+#elif defined(__arm__)
+# define __get_tls() ({ void** __val; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__val)); __val; })
 #elif defined(__mips__)
 # define __get_tls() \
     /* On mips32r1, this goes via a kernel illegal instruction trap that's optimized for v1. */ \
@@ -44,15 +43,9 @@
                ".set    pop\n" : "=r"(__val)); \
        __val; })
 #elif defined(__i386__)
-# define __get_tls() \
-    ({ void** __val; \
-       __asm__("movl %%gs:0, %0" : "=r"(__val)); \
-       __val; })
+# define __get_tls() ({ void** __val; __asm__("movl %%gs:0, %0" : "=r"(__val)); __val; })
 #elif defined(__x86_64__)
-# define __get_tls() \
-    ({ void** __val; \
-       __asm__("mov %%fs:0, %0" : "=r"(__val)); \
-       __val; })
+# define __get_tls() ({ void** __val; __asm__("mov %%fs:0, %0" : "=r"(__val)); __val; })
 #else
 #error unsupported architecture
 #endif
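
A minimal sketch (not part of the patch) of how bionic-internal code consumes __get_tls(); TLS_SLOT_ERRNO is assumed here to be one of the well-known slots from bionic_tls.h, and the helper name is hypothetical:

    // Illustrative only: __get_tls() yields the per-thread slot array,
    // which internal code indexes directly.
    #include "private/__get_tls.h"
    #include "private/bionic_tls.h"

    static inline int* illustrative_errno_ptr() {
      return reinterpret_cast<int*>(&__get_tls()[TLS_SLOT_ERRNO]);
    }
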
diff --git a/libc/private/bionic_atomic_aarch64.h b/libc/private/bionic_atomic_aarch64.h
new file mode 100644
index 0000000..c5a9e2e
--- /dev/null
+++ b/libc/private/bionic_atomic_aarch64.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef BIONIC_ATOMIC_AARCH64_H
+#define BIONIC_ATOMIC_AARCH64_H
+
+/* For ARMv8, we can use the 'dmb' instruction directly */
+__ATOMIC_INLINE__ void __bionic_memory_barrier(void) {
+  __asm__ __volatile__ ( "dmb ish" : : : "memory" );
+}
+
+/* Compare-and-swap, without any explicit barriers. Note that this function
+ * returns 0 on success, and 1 on failure. The opposite convention is typically
+ * used on other platforms.
+ */
+__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
+  int32_t tmp, oldval;
+  __asm__ __volatile__ (
+      "// atomic_cmpxchg\n"
+      "1:  ldaxr %w1, [%3]\n"
+      "    cmp %w1, %w4\n"
+      "    b.ne 2f\n"
+      "    stlxr %w0, %w5, [%3]\n"
+      "    cbnz  %w0, 1b\n"
+      "2:"
+      : "=&r" (tmp), "=&r" (oldval), "+o"(*ptr)
+      : "r" (ptr), "Ir" (old_value), "r" (new_value)
+      : "cc");
+  return oldval != old_value;
+}
+
+/* Swap, without any explicit barriers.  */
+__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t* ptr) {
+  int32_t prev, status;
+  __asm__ __volatile__ (
+      "// atomic_swap\n"
+      "1:  ldxr %w0, [%3]\n"
+      "    stxr %w1, %w4, [%3]\n"
+      "    cbnz %w1, 1b\n"
+      : "=&r" (prev), "=&r" (status), "+o" (*ptr)
+      : "r" (ptr), "r" (new_value)
+      : "cc");
+  return prev;
+}
+
+/* Atomic decrement, without explicit barriers.  */
+__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
+  int32_t prev, tmp, status;
+  __asm__ __volatile__ (
+      "1:  ldxr %w0, [%4]\n"
+      "    sub %w1, %w0, #1\n"
+      "    stxr %w2, %w1, [%4]\n"
+      "    cbnz %w2, 1b"
+      : "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
+      : "r" (ptr)
+      : "cc");
+  return prev;
+}
+
+#endif /* BIONIC_ATOMIC_AARCH64_H */
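
A minimal sketch (not part of the patch) of the 0-on-success convention these helpers use; since the new AArch64 routines are explicitly barrier-free, the fences are added by the caller:

    // Illustrative only: a trivial spinlock over __bionic_cmpxchg().
    static void illustrative_lock(volatile int32_t* lock) {
      while (__bionic_cmpxchg(0, 1, lock) != 0) {
        // 0 means the swap from 0 to 1 succeeded; keep spinning otherwise.
      }
      __bionic_memory_barrier();  // acquire
    }

    static void illustrative_unlock(volatile int32_t* lock) {
      __bionic_memory_barrier();  // release
      *lock = 0;
    }
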
diff --git a/libc/private/bionic_atomic_arm.h b/libc/private/bionic_atomic_arm.h
index 3bb639e..023d662 100644
--- a/libc/private/bionic_atomic_arm.h
+++ b/libc/private/bionic_atomic_arm.h
@@ -16,198 +16,64 @@
 #ifndef BIONIC_ATOMIC_ARM_H
 #define BIONIC_ATOMIC_ARM_H
 
-#include <machine/cpu-features.h>
-
-/* Some of the harware instructions used below are not available in Thumb-1
- * mode (they are if you build in ARM or Thumb-2 mode though). To solve this
- * problem, we're going to use the same technique than libatomics_ops,
- * which is to temporarily switch to ARM, do the operation, then switch
- * back to Thumb-1.
- *
- * This results in two 'bx' jumps, just like a normal function call, but
- * everything is kept inlined, avoids loading or computing the function's
- * address, and prevents a little I-cache trashing too.
- *
- * However, it is highly recommended to avoid compiling any C library source
- * file that use these functions in Thumb-1 mode.
- *
- * Define three helper macros to implement this:
- */
-#if defined(__thumb__) && !defined(__thumb2__)
-#  define  __ATOMIC_SWITCH_TO_ARM \
-            "adr r3, 5f\n" \
-            "bx  r3\n" \
-            ".align\n" \
-            ".arm\n" \
-        "5:\n"
-/* note: the leading \n below is intentional */
-#  define __ATOMIC_SWITCH_TO_THUMB \
-            "\n" \
-            "adr r3, 6f\n" \
-            "bx  r3\n" \
-            ".thumb" \
-        "6:\n"
-
-#  define __ATOMIC_CLOBBERS   "r3"  /* list of clobbered registers */
-
-/* Warn the user that ARM mode should really be preferred! */
-#  warning Rebuilding this source file in ARM mode is highly recommended for performance!!
-
-#else
-#  define  __ATOMIC_SWITCH_TO_ARM   /* nothing */
-#  define  __ATOMIC_SWITCH_TO_THUMB /* nothing */
-#  define  __ATOMIC_CLOBBERS        /* nothing */
-#endif
-
-
-/* Define a full memory barrier, this is only needed if we build the
- * platform for a multi-core device. For the record, using a 'dmb'
- * instruction on a Nexus One device can take up to 180 ns even if
- * it is completely un-necessary on this device.
- *
- * NOTE: This is where the platform and NDK headers atomic headers are
- *        going to diverge. With the NDK, we don't know if the generated
- *        code is going to run on a single or multi-core device, so we
- *        need to be cautious.
- *
- *        I.e. on single-core devices, the helper immediately returns,
- *        on multi-core devices, it uses "dmb" or any other means to
- *        perform a full-memory barrier.
- *
- * There are three cases to consider for the platform:
- *
- *    - multi-core ARMv7-A       => use the 'dmb' hardware instruction
- *    - multi-core ARMv6         => use the coprocessor
- *    - single core ARMv6+       => do not use any hardware barrier
- */
+__ATOMIC_INLINE__ void __bionic_memory_barrier(void) {
 #if defined(ANDROID_SMP) && ANDROID_SMP == 1
-
-/* Sanity check, multi-core is only supported starting from ARMv6 */
-#  if __ARM_ARCH__ < 6
-#    error ANDROID_SMP should not be set to 1 for an ARM architecture less than 6
-#  endif
-
-#  ifdef __ARM_HAVE_DMB
-/* For ARMv7-A, we can use the 'dmb' instruction directly */
-__ATOMIC_INLINE__ void
-__bionic_memory_barrier(void)
-{
-    /* Note: we always build in ARM or Thumb-2 on ARMv7-A, so don't
-     * bother with __ATOMIC_SWITCH_TO_ARM */
-    __asm__ __volatile__ ( "dmb" : : : "memory" );
-}
-#  else /* !__ARM_HAVE_DMB */
-/* Otherwise, i.e. for multi-core ARMv6, we need to use the coprocessor,
- * which requires the use of a general-purpose register, which is slightly
- * less efficient.
- */
-__ATOMIC_INLINE__ void
-__bionic_memory_barrier(void)
-{
-    __asm__ __volatile__ (
-        __SWITCH_TO_ARM
-        "mcr p15, 0, %0, c7, c10, 5"
-        __SWITCH_TO_THUMB
-        : : "r" (0) : __ATOMIC_CLOBBERS "memory");
-}
-#  endif /* !__ARM_HAVE_DMB */
-#else /* !ANDROID_SMP */
-__ATOMIC_INLINE__ void
-__bionic_memory_barrier(void)
-{
-    /* A simple compiler barrier */
-    __asm__ __volatile__ ( "" : : : "memory" );
-}
-#endif /* !ANDROID_SMP */
-
-#ifndef __ARM_HAVE_LDREX_STREX
-#error Only ARM devices which have LDREX / STREX are supported
+  __asm__ __volatile__ ( "dmb" : : : "memory" );
+#else
+  /* A simple compiler barrier. */
+  __asm__ __volatile__ ( "" : : : "memory" );
 #endif
+}
 
-/* Compare-and-swap, without any explicit barriers. Note that this functions
+/* Compare-and-swap, without any explicit barriers. Note that this function
  * returns 0 on success, and 1 on failure. The opposite convention is typically
  * used on other platforms.
  */
-__ATOMIC_INLINE__ int
-__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
-{
-    int32_t prev, status;
-    do {
-        __asm__ __volatile__ (
-            __ATOMIC_SWITCH_TO_ARM
-            "ldrex %0, [%3]\n"
-            "mov %1, #0\n"
-            "teq %0, %4\n"
+__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
+  int32_t prev, status;
+  do {
+    __asm__ __volatile__ (
+          "ldrex %0, [%3]\n"
+          "mov %1, #0\n"
+          "teq %0, %4\n"
 #ifdef __thumb2__
-            "it eq\n"
+          "it eq\n"
 #endif
-            "strexeq %1, %5, [%3]"
-            __ATOMIC_SWITCH_TO_THUMB
-            : "=&r" (prev), "=&r" (status), "+m"(*ptr)
-            : "r" (ptr), "Ir" (old_value), "r" (new_value)
-            : __ATOMIC_CLOBBERS "cc");
-    } while (__builtin_expect(status != 0, 0));
-    return prev != old_value;
+          "strexeq %1, %5, [%3]"
+          : "=&r" (prev), "=&r" (status), "+m"(*ptr)
+          : "r" (ptr), "Ir" (old_value), "r" (new_value)
+          : "cc");
+  } while (__builtin_expect(status != 0, 0));
+  return prev != old_value;
 }
 
-/* Swap operation, without any explicit barriers. */
-__ATOMIC_INLINE__ int32_t
-__bionic_swap(int32_t new_value, volatile int32_t* ptr)
-{
-    int32_t prev, status;
-    do {
-        __asm__ __volatile__ (
-            __ATOMIC_SWITCH_TO_ARM
-            "ldrex %0, [%3]\n"
-            "strex %1, %4, [%3]"
-            __ATOMIC_SWITCH_TO_THUMB
-            : "=&r" (prev), "=&r" (status), "+m" (*ptr)
-            : "r" (ptr), "r" (new_value)
-            : __ATOMIC_CLOBBERS "cc");
-    } while (__builtin_expect(status != 0, 0));
-    return prev;
+/* Swap, without any explicit barriers. */
+__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t* ptr) {
+  int32_t prev, status;
+  do {
+    __asm__ __volatile__ (
+          "ldrex %0, [%3]\n"
+          "strex %1, %4, [%3]"
+          : "=&r" (prev), "=&r" (status), "+m" (*ptr)
+          : "r" (ptr), "r" (new_value)
+          : "cc");
+  } while (__builtin_expect(status != 0, 0));
+  return prev;
 }
 
-/* Atomic increment - without any barriers
- * This returns the old value
- */
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_inc(volatile int32_t* ptr)
-{
-    int32_t prev, tmp, status;
-    do {
-        __asm__ __volatile__ (
-            __ATOMIC_SWITCH_TO_ARM
-            "ldrex %0, [%4]\n"
-            "add %1, %0, #1\n"
-            "strex %2, %1, [%4]"
-            __ATOMIC_SWITCH_TO_THUMB
-            : "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
-            : "r" (ptr)
-            : __ATOMIC_CLOBBERS "cc");
-    } while (__builtin_expect(status != 0, 0));
-    return prev;
-}
-
-/* Atomic decrement - without any barriers
- * This returns the old value.
- */
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_dec(volatile int32_t* ptr)
-{
-    int32_t prev, tmp, status;
-    do {
-        __asm__ __volatile__ (
-            __ATOMIC_SWITCH_TO_ARM
-            "ldrex %0, [%4]\n"
-            "sub %1, %0, #1\n"
-            "strex %2, %1, [%4]"
-            __ATOMIC_SWITCH_TO_THUMB
-            : "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
-            : "r" (ptr)
-            : __ATOMIC_CLOBBERS "cc");
-    } while (__builtin_expect(status != 0, 0));
-    return prev;
+/* Atomic decrement, without explicit barriers. */
+__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
+  int32_t prev, tmp, status;
+  do {
+    __asm__ __volatile__ (
+          "ldrex %0, [%4]\n"
+          "sub %1, %0, #1\n"
+          "strex %2, %1, [%4]"
+          : "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
+          : "r" (ptr)
+          : "cc");
+  } while (__builtin_expect(status != 0, 0));
+  return prev;
 }
 
 #endif /* SYS_ATOMICS_ARM_H */
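
A minimal sketch (not part of the patch, hypothetical helper name) of why __bionic_atomic_dec() returning the old value matters to callers:

    // Illustrative only: the reference count hits zero exactly when the
    // decrement returns 1 (the previous value).
    static void illustrative_unref(volatile int32_t* refcount) {
      if (__bionic_atomic_dec(refcount) == 1) {
        __bionic_memory_barrier();
        // Last reference dropped: safe to tear the object down here.
      }
    }
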
diff --git a/libc/private/bionic_atomic_gcc_builtin.h b/libc/private/bionic_atomic_gcc_builtin.h
index 2919f7f..9e5e5aa 100644
--- a/libc/private/bionic_atomic_gcc_builtin.h
+++ b/libc/private/bionic_atomic_gcc_builtin.h
@@ -16,46 +16,35 @@
 #ifndef BIONIC_ATOMIC_GCC_BUILTIN_H
 #define BIONIC_ATOMIC_GCC_BUILTIN_H
 
-/* This header file is used by default if we don't have optimized atomic
+/*
+ * This header file is used by default if we don't have optimized atomic
  * routines for a given platform. See bionic_atomic_arm.h and
  * bionic_atomic_x86.h for examples.
+ *
+ * Note that the GCC builtins include barriers that aren't present in
+ * the architecture-specific assembler versions.
  */
 
-__ATOMIC_INLINE__ void
-__bionic_memory_barrier(void)
-{
-    __sync_synchronize();
+__ATOMIC_INLINE__ void __bionic_memory_barrier(void) {
+  __sync_synchronize();
 }
 
-__ATOMIC_INLINE__ int
-__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
-{
-    /* We must return 0 on success */
-    return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
+__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
+  /* We must return 0 on success. */
+  return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
 }
 
-__ATOMIC_INLINE__ int32_t
-__bionic_swap(int32_t new_value, volatile int32_t* ptr)
-{
-    int32_t old_value;
-    do {
-        old_value = *ptr;
-    } while (__sync_val_compare_and_swap(ptr, old_value, new_value) != old_value);
-    return old_value;
+__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t* ptr) {
+  int32_t old_value;
+  do {
+    old_value = *ptr;
+  } while (__sync_val_compare_and_swap(ptr, old_value, new_value) != old_value);
+  return old_value;
 }
 
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_inc(volatile int32_t* ptr)
-{
-    /* We must return the old value */
-    return __sync_fetch_and_add(ptr, 1);
-}
-
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_dec(volatile int32_t* ptr)
-{
-    /* We must return the old value */
-    return __sync_fetch_and_add(ptr, -1);
+__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
+  /* We must return the old value. */
+  return __sync_fetch_and_add(ptr, -1);
 }
 
 #endif /* BIONIC_ATOMIC_GCC_BUILTIN_H */
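
A minimal sketch (not part of the patch) of the barrier difference called out in the comment above: with the __sync fallback each helper is already a full barrier, so the explicit fence from the ARM sketch is unnecessary here:

    // Illustrative only: fully fenced by the __sync builtin itself.
    static int32_t illustrative_unref_generic(volatile int32_t* refcount) {
      return __bionic_atomic_dec(refcount);  // returns the old value
    }
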
diff --git a/libc/private/bionic_atomic_inline.h b/libc/private/bionic_atomic_inline.h
index 6819af6..2bb1128 100644
--- a/libc/private/bionic_atomic_inline.h
+++ b/libc/private/bionic_atomic_inline.h
@@ -23,11 +23,6 @@
  * memory barrier needs to be issued inline rather than as a function
  * call.
  *
- * Most code should not use these.
- *
- * Anything that does include this file must set ANDROID_SMP to either
- * 0 or 1, indicating compilation for UP or SMP, respectively.
- *
  * Macros defined in this header:
  *
  * void ANDROID_MEMBAR_FULL(void)
@@ -49,7 +44,9 @@
  */
 #define  __ATOMIC_INLINE__  static __inline__ __attribute__((always_inline))
 
-#ifdef __arm__
+#if defined(__aarch64__)
+#  include "bionic_atomic_aarch64.h"
+#elif defined(__arm__)
 #  include "bionic_atomic_arm.h"
 #elif defined(__i386__)
 #  include "bionic_atomic_x86.h"
diff --git a/libc/private/bionic_atomic_mips.h b/libc/private/bionic_atomic_mips.h
index 28fe88d..5e08116 100644
--- a/libc/private/bionic_atomic_mips.h
+++ b/libc/private/bionic_atomic_mips.h
@@ -19,84 +19,58 @@
 /* Define a full memory barrier, this is only needed if we build the
  * platform for a multi-core device.
  */
+
+__ATOMIC_INLINE__ void __bionic_memory_barrier() {
 #if defined(ANDROID_SMP) && ANDROID_SMP == 1
-__ATOMIC_INLINE__ void
-__bionic_memory_barrier()
-{
-    __asm__ __volatile__ ( "sync" : : : "memory" );
-}
+  __asm__ __volatile__ ( "sync" : : : "memory" );
 #else
-__ATOMIC_INLINE__ void
-__bionic_memory_barrier()
-{
-    /* A simple compiler barrier */
-    __asm__ __volatile__ ( "" : : : "memory" );
-}
+  /* A simple compiler barrier. */
+  __asm__ __volatile__ ( "" : : : "memory" );
 #endif
+}
 
 /* Compare-and-swap, without any explicit barriers. Note that this function
  * returns 0 on success, and 1 on failure. The opposite convention is typically
  * used on other platforms.
  */
-__ATOMIC_INLINE__ int
-__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
-{
-    int32_t prev, status;
-    __asm__ __volatile__ ("1: move %[status], %[new_value]  \n"
-                          "   ll %[prev], 0(%[ptr])         \n"
-                          "   bne %[old_value], %[prev], 2f \n"
-                          "   sc   %[status], 0(%[ptr])     \n"
-                          "   beqz %[status], 1b            \n"
-                          "2:                               \n"
-                          : [prev]"=&r"(prev), [status]"=&r"(status), "+m"(*ptr)
-                          : [new_value]"r"(new_value), [old_value]"r"(old_value), [ptr]"r"(ptr)
-                          : "memory");
-    return prev != old_value;
+__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
+  int32_t prev, status;
+  __asm__ __volatile__ ("1: move %[status], %[new_value]  \n"
+                        "   ll %[prev], 0(%[ptr])         \n"
+                        "   bne %[old_value], %[prev], 2f \n"
+                        "   sc   %[status], 0(%[ptr])     \n"
+                        "   beqz %[status], 1b            \n"
+                        "2:                               \n"
+                        : [prev]"=&r"(prev), [status]"=&r"(status), "+m"(*ptr)
+                        : [new_value]"r"(new_value), [old_value]"r"(old_value), [ptr]"r"(ptr)
+                        : "memory");
+  return prev != old_value;
 }
 
-
-/* Swap, without any explicit barriers */
-__ATOMIC_INLINE__ int32_t
-__bionic_swap(int32_t new_value, volatile int32_t *ptr)
-{
-   int32_t prev, status;
-    __asm__ __volatile__ ("1:  move %[status], %[new_value] \n"
-                          "    ll %[prev], 0(%[ptr])        \n"
-                          "    sc %[status], 0(%[ptr])      \n"
-                          "    beqz %[status], 1b           \n"
-                          : [prev]"=&r"(prev), [status]"=&r"(status), "+m"(*ptr)
-                          : [ptr]"r"(ptr), [new_value]"r"(new_value)
-                          : "memory");
-    return prev;
+/* Swap, without any explicit barriers. */
+__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t* ptr) {
+  int32_t prev, status;
+  __asm__ __volatile__ ("1:  move %[status], %[new_value] \n"
+                        "    ll %[prev], 0(%[ptr])        \n"
+                        "    sc %[status], 0(%[ptr])      \n"
+                        "    beqz %[status], 1b           \n"
+                        : [prev]"=&r"(prev), [status]"=&r"(status), "+m"(*ptr)
+                        : [ptr]"r"(ptr), [new_value]"r"(new_value)
+                        : "memory");
+  return prev;
 }
 
-/* Atomic increment, without explicit barriers */
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_inc(volatile int32_t *ptr)
-{
-    int32_t prev, status;
-    __asm__ __volatile__ ("1:  ll %[prev], 0(%[ptr])        \n"
-                          "    addiu %[status], %[prev], 1  \n"
-                          "    sc   %[status], 0(%[ptr])    \n"
-                          "    beqz %[status], 1b           \n"
-                          : [prev]"=&r" (prev), [status]"=&r"(status), "+m" (*ptr)
-                          : [ptr]"r"(ptr)
-                          : "memory");
-    return prev;
+/* Atomic decrement, without explicit barriers. */
+__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
+  int32_t prev, status;
+  __asm__ __volatile__ ("1:  ll %[prev], 0(%[ptr])        \n"
+                        "    addiu %[status], %[prev], -1 \n"
+                        "    sc   %[status], 0(%[ptr])    \n"
+                        "    beqz %[status], 1b           \n"
+                        : [prev]"=&r" (prev), [status]"=&r"(status), "+m" (*ptr)
+                        : [ptr]"r"(ptr)
+                        : "memory");
+  return prev;
 }
 
-/* Atomic decrement, without explicit barriers */
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_dec(volatile int32_t *ptr)
-{
-    int32_t prev, status;
-    __asm__ __volatile__ ("1:  ll %[prev], 0(%[ptr])        \n"
-                          "    addiu %[status], %[prev], -1 \n"
-                          "    sc   %[status], 0(%[ptr])    \n"
-                          "    beqz %[status], 1b           \n"
-                          : [prev]"=&r" (prev), [status]"=&r"(status), "+m" (*ptr)
-                          : [ptr]"r"(ptr)
-                          : "memory");
-    return prev;
-}
 #endif /* BIONIC_ATOMIC_MIPS_H */
diff --git a/libc/private/bionic_atomic_x86.h b/libc/private/bionic_atomic_x86.h
index aca0c4b..89639c8 100644
--- a/libc/private/bionic_atomic_x86.h
+++ b/libc/private/bionic_atomic_x86.h
@@ -19,28 +19,20 @@
 /* Define a full memory barrier, this is only needed if we build the
  * platform for a multi-core device.
  */
+__ATOMIC_INLINE__ void __bionic_memory_barrier() {
 #if defined(ANDROID_SMP) && ANDROID_SMP == 1
-__ATOMIC_INLINE__ void
-__bionic_memory_barrier()
-{
-    __asm__ __volatile__ ( "mfence" : : : "memory" );
-}
+  __asm__ __volatile__ ( "mfence" : : : "memory" );
 #else
-__ATOMIC_INLINE__ void
-__bionic_memory_barrier()
-{
-    /* A simple compiler barrier */
-    __asm__ __volatile__ ( "" : : : "memory" );
-}
+  /* A simple compiler barrier. */
+  __asm__ __volatile__ ( "" : : : "memory" );
 #endif
+}
 
 /* Compare-and-swap, without any explicit barriers. Note that this function
  * returns 0 on success, and 1 on failure. The opposite convention is typically
  * used on other platforms.
  */
-__ATOMIC_INLINE__ int
-__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
-{
+__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
     int32_t prev;
     __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                           : "=a" (prev)
@@ -49,40 +41,23 @@
     return prev != old_value;
 }
 
-
-/* Swap, without any explicit barriers */
-__ATOMIC_INLINE__ int32_t
-__bionic_swap(int32_t new_value, volatile int32_t *ptr)
-{
-    __asm__ __volatile__ ("xchgl %1, %0"
-                          : "=r" (new_value)
-                          : "m" (*ptr), "0" (new_value)
-                          : "memory");
-    return new_value;
+/* Swap, without any explicit barriers. */
+__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t *ptr) {
+  __asm__ __volatile__ ("xchgl %1, %0"
+                        : "=r" (new_value)
+                        : "m" (*ptr), "0" (new_value)
+                        : "memory");
+  return new_value;
 }
 
-/* Atomic increment, without explicit barriers */
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_inc(volatile int32_t *ptr)
-{
-    int increment = 1;
-    __asm__ __volatile__ ("lock; xaddl %0, %1"
-                          : "+r" (increment), "+m" (*ptr)
-                          : : "memory");
-    /* increment now holds the old value of *ptr */
-    return increment;
-}
-
-/* Atomic decrement, without explicit barriers */
-__ATOMIC_INLINE__ int32_t
-__bionic_atomic_dec(volatile int32_t *ptr)
-{
-    int increment = -1;
-    __asm__ __volatile__ ("lock; xaddl %0, %1"
-                          : "+r" (increment), "+m" (*ptr)
-                          : : "memory");
-    /* increment now holds the old value of *ptr */
-    return increment;
+/* Atomic decrement, without explicit barriers. */
+__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
+  int increment = -1;
+  __asm__ __volatile__ ("lock; xaddl %0, %1"
+                        : "+r" (increment), "+m" (*ptr)
+                        : : "memory");
+  /* increment now holds the old value of *ptr */
+  return increment;
 }
 
 #endif /* BIONIC_ATOMIC_X86_H */
diff --git a/libc/private/bionic_tls.h b/libc/private/bionic_tls.h
index ff13fdb..a42a8ab 100644
--- a/libc/private/bionic_tls.h
+++ b/libc/private/bionic_tls.h
@@ -30,6 +30,7 @@
 #define __BIONIC_PRIVATE_BIONIC_TLS_H_
 
 #include <sys/cdefs.h>
+#include <sys/limits.h>
 #include "__get_tls.h"
 
 __BEGIN_DECLS
@@ -74,21 +75,21 @@
 };
 
 /*
- * Maximum number of elements in the TLS array.
- * POSIX says this must be at least 128, but Android has traditionally had only 64, minus those
- * ones used internally by bionic itself.
  * There are two kinds of slot used internally by bionic --- there are the well-known slots
  * enumerated above, and then there are those that are allocated during startup by calls to
  * pthread_key_create; grep for GLOBAL_INIT_THREAD_LOCAL_BUFFER to find those. We need to manually
  * maintain that second number, but pthread_test will fail if we forget.
  */
 #define GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT 4
-/*
- * This is PTHREAD_KEYS_MAX + TLS_SLOT_FIRST_USER_SLOT + GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT
- * rounded up to maintain stack alignment.
- */
+
 #define BIONIC_ALIGN(x, a) (((x) + (a - 1)) & ~(a - 1))
-#define BIONIC_TLS_SLOTS BIONIC_ALIGN(128 + TLS_SLOT_FIRST_USER_SLOT + GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT, 4)
+
+/*
+ * Maximum number of elements in the TLS array.
+ * This includes space for pthread keys and our own internal slots.
+ * We need to round up to maintain stack alignment.
+ */
+#define BIONIC_TLS_SLOTS BIONIC_ALIGN(PTHREAD_KEYS_MAX + TLS_SLOT_FIRST_USER_SLOT + GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT, 4)
 
 __END_DECLS
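
A minimal sketch (not part of the patch, using C++11 static_assert) checking the BIONIC_ALIGN arithmetic behind the new BIONIC_TLS_SLOTS definition:

    // Illustrative only: BIONIC_ALIGN rounds up to the requested alignment,
    // so the slot count stays a multiple of 4.
    #include "private/bionic_tls.h"

    static_assert(BIONIC_ALIGN(141, 4) == 144, "rounds up to the next multiple");
    static_assert(BIONIC_ALIGN(140, 4) == 140, "aligned values are unchanged");
    static_assert(BIONIC_TLS_SLOTS % 4 == 0, "slot count keeps stack alignment");
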
 
diff --git a/libm/Android.mk b/libm/Android.mk
index 39e5563..abe0722 100644
--- a/libm/Android.mk
+++ b/libm/Android.mk
@@ -224,6 +224,9 @@
 
 libm_common_includes := $(LOCAL_PATH)/upstream-freebsd/lib/msun/src/
 
+libm_aarch64_includes := $(LOCAL_PATH)/aarch64
+libm_aarch64_src_files := aarch64/fenv.c
+
 libm_arm_includes := $(LOCAL_PATH)/arm
 libm_arm_src_files := arm/fenv.c
 
diff --git a/libm/aarch64/_fpmath.h b/libm/aarch64/_fpmath.h
new file mode 100644
index 0000000..a24632a
--- /dev/null
+++ b/libm/aarch64/_fpmath.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2002, 2003 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/libc/aarch64/_fpmath.h $
+ */
+
+union IEEEl2bits {
+	long double	e;
+	struct {
+#ifndef __AARCH64EB__
+		unsigned int	manl	:32;
+		unsigned int	manh	:20;
+		unsigned int	exp	:11;
+		unsigned int	sign	:1;
+#else
+		unsigned int		sign	:1;
+		unsigned int		exp	:11;
+		unsigned int		manh	:20;
+		unsigned int		manl	:32;
+#endif
+	} bits;
+};
+
+#define	LDBL_NBIT	0
+#define	LDBL_IMPLICIT_NBIT
+#define	mask_nbit_l(u)	((void)0)
+
+#define	LDBL_MANH_SIZE	32
+#define	LDBL_MANL_SIZE	32
+
+#define	LDBL_TO_ARRAY32(u, a) do {			\
+	(a)[0] = (uint32_t)(u).bits.manl;		\
+	(a)[1] = (uint32_t)(u).bits.manh;		\
+} while(0)
diff --git a/libm/aarch64/fenv.c b/libm/aarch64/fenv.c
new file mode 100644
index 0000000..27c405f
--- /dev/null
+++ b/libm/aarch64/fenv.c
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2004 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: libm/aarch64/fenv.c $
+ */
+
+#include <fenv.h>
+
+/*
+ * Hopefully the system ID byte is immutable, so it's valid to use
+ * this as a default environment.
+ */
+const fenv_t __fe_dfl_env = 0;
diff --git a/libm/include/aarch64/fenv.h b/libm/include/aarch64/fenv.h
new file mode 100644
index 0000000..32c3b1d
--- /dev/null
+++ b/libm/include/aarch64/fenv.h
@@ -0,0 +1,232 @@
+/*-
+ * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/msun/arm/fenv.h,v 1.5 2005/03/16 19:03:45 das Exp $
+ */
+
+/*
+ * Rewritten for Android.
+ *
+ * The ARM FPSCR (Floating-point Status and Control Register) described here:
+ * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0344b/Chdfafia.html
+ * has been split into the FPCR (Floating-point Control Register) and FPSR
+ * (Floating-point Status Register) on the ARMv8. These are described briefly in
+ * "Procedure Call Standard for the ARM 64-bit Architecture"
+ * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055a/IHI0055A_aapcs64.pdf
+ * section 5.1.2 SIMD and Floating-Point Registers
+ */
+
+#ifndef _FENV_H_
+#define _FENV_H_
+
+#include <sys/types.h>
+
+__BEGIN_DECLS
+
+typedef __uint32_t fenv_t;
+typedef __uint32_t fexcept_t;
+
+/* Exception flags. */
+#define FE_INVALID    0x01
+#define FE_DIVBYZERO  0x02
+#define FE_OVERFLOW   0x04
+#define FE_UNDERFLOW  0x08
+#define FE_INEXACT    0x10
+#define FE_ALL_EXCEPT (FE_DIVBYZERO | FE_INEXACT | FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
+#define _FPSCR_ENABLE_SHIFT 8
+#define _FPSCR_ENABLE_MASK (FE_ALL_EXCEPT << _FPSCR_ENABLE_SHIFT)
+
+/* Rounding modes. */
+#define FE_TONEAREST  0x0
+#define FE_UPWARD     0x1
+#define FE_DOWNWARD   0x2
+#define FE_TOWARDZERO 0x3
+#define _FPSCR_RMODE_SHIFT 22
+
+#define FPCR_IOE    (1 << 8)
+#define FPCR_DZE    (1 << 9)
+#define FPCR_OFE    (1 << 10)
+#define FPCR_UFE    (1 << 11)
+#define FPCR_IXE    (1 << 12)
+#define FPCR_IDE    (1 << 15)
+#define FPCR_LEN    (7 << 16)
+#define FPCR_STRIDE (3 << 20)
+#define FPCR_RMODE  (3 << 22)
+#define FPCR_FZ     (1 << 24)
+#define FPCR_DN     (1 << 25)
+#define FPCR_AHP    (1 << 26)
+#define FPCR_MASK   (FPCR_IOE | \
+                     FPCR_DZE | \
+                     FPCR_OFE | \
+                     FPCR_UFE | \
+                     FPCR_IXE | \
+                     FPCR_IDE | \
+                     FPCR_LEN | \
+                     FPCR_STRIDE | \
+                     FPCR_RMODE | \
+                     FPCR_FZ | \
+                     FPCR_DN | \
+                     FPCR_AHP )
+
+#define FPSR_IOC    (1 << 0)
+#define FPSR_DZC    (1 << 1)
+#define FPSR_OFC    (1 << 2)
+#define FPSR_UFC    (1 << 3)
+#define FPSR_IXC    (1 << 4)
+#define FPSR_IDC    (1 << 7)
+#define FPSR_QC     (1 << 27)
+#define FPSR_V      (1 << 28)
+#define FPSR_C      (1 << 29)
+#define FPSR_Z      (1 << 30)
+#define FPSR_N      (1 << 31)
+#define FPSR_MASK   (FPSR_IOC | \
+                     FPSR_DZC | \
+                     FPSR_OFC | \
+                     FPSR_UFC | \
+                     FPSR_IXC | \
+                     FPSR_IDC | \
+                     FPSR_QC | \
+                     FPSR_V | \
+                     FPSR_C | \
+                     FPSR_Z | \
+                     FPSR_N )
+
+/* Default floating-point environment. */
+extern const fenv_t __fe_dfl_env;
+#define FE_DFL_ENV (&__fe_dfl_env)
+
+static __inline int fegetenv(fenv_t* __envp) {
+    fenv_t _fpcr, _fpsr;
+    __asm__ __volatile__("mrs %0,fpcr" : "=r" (_fpcr));
+    __asm__ __volatile__("mrs %0,fpsr" : "=r" (_fpsr));
+  *__envp = (_fpcr | _fpsr);
+  return 0;
+}
+
+static __inline int fesetenv(const fenv_t* __envp) {
+    fenv_t _fpcr = (*__envp & FPCR_MASK);
+    fenv_t _fpsr = (*__envp & FPSR_MASK);
+    __asm__ __volatile__("msr fpcr,%0" : :"ri" (_fpcr));
+    __asm__ __volatile__("msr fpsr,%0" : :"ri" (_fpsr));
+  return 0;
+}
+
+static __inline int feclearexcept(int __excepts) {
+  fexcept_t __fpscr;
+  fegetenv(&__fpscr);
+  __fpscr &= ~__excepts;
+  fesetenv(&__fpscr);
+  return 0;
+}
+
+static __inline int fegetexceptflag(fexcept_t* __flagp, int __excepts) {
+  fexcept_t __fpscr;
+  fegetenv(&__fpscr);
+  *__flagp = __fpscr & __excepts;
+  return 0;
+}
+
+static __inline int fesetexceptflag(const fexcept_t* __flagp, int __excepts) {
+  fexcept_t __fpscr;
+  fegetenv(&__fpscr);
+  __fpscr &= ~__excepts;
+  __fpscr |= *__flagp & __excepts;
+  fesetenv(&__fpscr);
+  return 0;
+}
+
+static __inline int feraiseexcept(int __excepts) {
+  fexcept_t __ex = __excepts;
+  fesetexceptflag(&__ex, __excepts);
+  return 0;
+}
+
+static __inline int fetestexcept(int __excepts) {
+  fexcept_t __fpscr;
+  fegetenv(&__fpscr);
+  return (__fpscr & __excepts);
+}
+
+static __inline int fegetround(void) {
+  fenv_t _fpscr;
+  fegetenv(&_fpscr);
+  return ((_fpscr >> _FPSCR_RMODE_SHIFT) & 0x3);
+}
+
+static __inline int fesetround(int __round) {
+  fenv_t _fpscr;
+  fegetenv(&_fpscr);
+  _fpscr &= ~(0x3 << _FPSCR_RMODE_SHIFT);
+  _fpscr |= (__round << _FPSCR_RMODE_SHIFT);
+  fesetenv(&_fpscr);
+  return 0;
+}
+
+static __inline int feholdexcept(fenv_t* __envp) {
+  fenv_t __env;
+  fegetenv(&__env);
+  *__envp = __env;
+  __env &= ~(FE_ALL_EXCEPT | _FPSCR_ENABLE_MASK);
+  fesetenv(&__env);
+  return 0;
+}
+
+static __inline int feupdateenv(const fenv_t* __envp) {
+  fexcept_t __fpscr;
+  fegetenv(&__fpscr);
+  fesetenv(__envp);
+  feraiseexcept(__fpscr & FE_ALL_EXCEPT);
+  return 0;
+}
+
+#if __BSD_VISIBLE
+
+static __inline int feenableexcept(int __mask) {
+  fenv_t __old_fpscr, __new_fpscr;
+  fegetenv(&__old_fpscr);
+  __new_fpscr = __old_fpscr | (__mask & FE_ALL_EXCEPT) << _FPSCR_ENABLE_SHIFT;
+  fesetenv(&__new_fpscr);
+  return ((__old_fpscr >> _FPSCR_ENABLE_SHIFT) & FE_ALL_EXCEPT);
+}
+
+static __inline int fedisableexcept(int __mask) {
+  fenv_t __old_fpscr, __new_fpscr;
+  fegetenv(&__old_fpscr);
+  __new_fpscr = __old_fpscr & ~((__mask & FE_ALL_EXCEPT) << _FPSCR_ENABLE_SHIFT);
+  fesetenv(&__new_fpscr);
+  return ((__old_fpscr >> _FPSCR_ENABLE_SHIFT) & FE_ALL_EXCEPT);
+}
+
+static __inline int fegetexcept(void) {
+  fenv_t __fpscr;
+  fegetenv(&__fpscr);
+  return ((__fpscr & _FPSCR_ENABLE_MASK) >> _FPSCR_ENABLE_SHIFT);
+}
+
+#endif /* __BSD_VISIBLE */
+
+__END_DECLS
+
+#endif /* !_FENV_H_ */
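
A minimal sketch (not part of the patch) of how the new inline helpers compose; whether the assertions hold at runtime depends on the FPU actually latching the written FPCR/FPSR bits:

    // Illustrative only: rounding mode and exception flags round-trip.
    #include <fenv.h>
    #include <assert.h>

    int main() {
      fesetround(FE_TOWARDZERO);
      assert(fegetround() == FE_TOWARDZERO);

      feclearexcept(FE_ALL_EXCEPT);
      feraiseexcept(FE_DIVBYZERO);             // sets the flag; no trap by default
      assert(fetestexcept(FE_DIVBYZERO) != 0);

      fesetenv(FE_DFL_ENV);                    // restore the default environment
      return 0;
    }
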
diff --git a/linker/Android.mk b/linker/Android.mk
index f73d8d6..1bf3e9d 100644
--- a/linker/Android.mk
+++ b/linker/Android.mk
@@ -33,7 +33,7 @@
 # We need to access Bionic private headers in the linker.
 LOCAL_CFLAGS += -I$(LOCAL_PATH)/../libc/
 
-ifeq ($(TARGET_ARCH),$(filter $(TARGET_ARCH),x86_64))
+ifeq ($(TARGET_IS_64_BIT),true)
     LOCAL_MODULE := linker64
 else
     LOCAL_MODULE := linker
diff --git a/linker/arch/aarch64/begin.S b/linker/arch/aarch64/begin.S
new file mode 100644
index 0000000..55618b7
--- /dev/null
+++ b/linker/arch/aarch64/begin.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+ENTRY(_start)
+  mov x0, sp
+  mov x1, xzr
+  bl  __linker_init
+
+  /* linker init returns the _entry address in the main image */
+  br x0
+END(_start)
diff --git a/linker/debugger.cpp b/linker/debugger.cpp
index 29afab1..92e9dac 100644
--- a/linker/debugger.cpp
+++ b/linker/debugger.cpp
@@ -92,7 +92,7 @@
         return -1;
     }
 
-    int err = TEMP_FAILURE_RETRY(connect(s, (sockaddr*) &addr, alen));
+    int err = TEMP_FAILURE_RETRY(connect(s, reinterpret_cast<sockaddr*>(&addr), alen));
     if (err == -1) {
         close(s);
         s = -1;
diff --git a/linker/dlfcn.cpp b/linker/dlfcn.cpp
index 09f3ddf..207e03c 100644
--- a/linker/dlfcn.cpp
+++ b/linker/dlfcn.cpp
@@ -148,13 +148,13 @@
 //   0123456 78901234 567890 12345678 9012345 6789012345678901234567890123456 7890123456789012 3456789
 #define ANDROID_LIBDL_STRTAB \
     "dlopen\0dlclose\0dlsym\0dlerror\0dladdr\0android_update_LD_LIBRARY_PATH\0dl_iterate_phdr\0dl_unwind_find_exidx\0"
-#elif defined(__i386__) || defined(__mips__) || defined(__x86_64__)
+#elif defined(__aarch64__) || defined(__i386__) || defined(__mips__) || defined(__x86_64__)
 //   0000000 00011111 111112 22222222 2333333 3333444444444455555555556666666 6667
 //   0123456 78901234 567890 12345678 9012345 6789012345678901234567890123456 7890
 #define ANDROID_LIBDL_STRTAB \
     "dlopen\0dlclose\0dlsym\0dlerror\0dladdr\0android_update_LD_LIBRARY_PATH\0dl_iterate_phdr\0"
 #else
-#error Unsupported architecture. Only ARM, MIPS, x86, and x86_64 are presently supported.
+#error Unsupported architecture. Only aarch64, arm, mips, x86, and x86_64 are presently supported.
 #endif
 
 // name_offset: starting index of the name in libdl_info.strtab
diff --git a/linker/linker.cpp b/linker/linker.cpp
index fe4d6c4..81ca2f5 100755
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -31,6 +31,7 @@
 #include <fcntl.h>
 #include <linux/auxvec.h>
 #include <pthread.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -138,10 +139,18 @@
 
 #if COUNT_PAGES
 static unsigned bitmask[4096];
+#if defined(__LP64__)
+#define MARK(offset) \
+    do { \
+        if ((((offset) >> 12) >> 5) < 4096) \
+            bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
+    } while(0)
+#else
 #define MARK(offset) \
     do { \
         bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
     } while(0)
+#endif
 #else
 #define MARK(x) do {} while (0)
 #endif
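
A minimal sketch (not part of the patch, hypothetical helper name) of the LP64 MARK() arithmetic: one bit per 4KiB page, 32 pages per bitmask word, with the new bounds check keeping the index inside bitmask[4096]:

    // Illustrative only.
    #include <assert.h>
    #include <stdint.h>

    static void illustrative_mark(unsigned* bitmask, uint64_t offset) {
      uint64_t page = offset >> 12;             // 4KiB pages
      if ((page >> 5) < 4096) {                 // stay inside bitmask[4096]
        bitmask[page >> 5] |= 1u << (page & 31);
      }
    }

    int main() {
      unsigned bitmask[4096] = {0};
      illustrative_mark(bitmask, 0x5000);       // page 5 -> word 0, bit 5
      assert(bitmask[0] == (1u << 5));
      return 0;
    }
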
@@ -889,7 +898,20 @@
          */
 
         switch (type) {
-#if defined(__x86_64__)
+#if defined(__aarch64__)
+        case R_AARCH64_JUMP_SLOT:
+        case R_AARCH64_GLOB_DAT:
+        case R_AARCH64_ABS64:
+        case R_AARCH64_ABS32:
+        case R_AARCH64_ABS16:
+        case R_AARCH64_RELATIVE:
+          /*
+           * The sym_addr was initialized to be zero above, or the relocation
+           * code below does not care about value of sym_addr.
+           * No need to do anything.
+           */
+          break;
+#elif defined(__x86_64__)
         case R_X86_64_JUMP_SLOT:
         case R_X86_64_GLOB_DAT:
         case R_X86_64_32:
@@ -900,7 +922,6 @@
           sym_addr = reloc;
           break;
 #endif
-
         default:
           DL_ERR("unknown weak reloc type %d @ %p (%d)", type, rela, (int) (rela - start));
           return -1;
@@ -915,7 +936,197 @@
     }
 
     switch (type) {
-#if defined(__x86_64__)
+#if defined(__aarch64__)
+    case R_AARCH64_JUMP_SLOT:
+        count_relocation(kRelocAbsolute);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO JMP_SLOT %16lx <- %16lx %s\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    sym_name);
+        *reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend);
+        break;
+    case R_AARCH64_GLOB_DAT:
+        count_relocation(kRelocAbsolute);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO GLOB_DAT %16lx <- %16lx %s\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    sym_name);
+        *reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend);
+        break;
+    case R_AARCH64_ABS64:
+        count_relocation(kRelocAbsolute);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO ABS64 %16lx <- %16lx %s\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    sym_name);
+        *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
+        break;
+    case R_AARCH64_ABS32:
+        count_relocation(kRelocAbsolute);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO ABS32 %16lx <- %16lx %s\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    sym_name);
+        if ((static_cast<Elf_Addr>(INT32_MIN) <=
+          (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) &&
+          ((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <=
+          static_cast<Elf_Addr>(UINT32_MAX))) {
+            *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
+        } else {
+            DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
+                    (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
+                    static_cast<Elf_Addr>(INT32_MIN),
+                    static_cast<Elf_Addr>(UINT32_MAX));
+            return -1;
+        }
+        break;
+    case R_AARCH64_ABS16:
+        count_relocation(kRelocAbsolute);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO ABS16 %16lx <- %16lx %s\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    sym_name);
+        if ((static_cast<Elf_Addr>(INT16_MIN) <=
+          (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) &&
+          ((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <=
+          static_cast<Elf_Addr>(UINT16_MAX))) {
+            *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
+        } else {
+            DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
+                    (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
+                    static_cast<Elf_Addr>(INT16_MIN),
+                    static_cast<Elf_Addr>(UINT16_MAX));
+            return -1;
+        }
+        break;
+    case R_AARCH64_PREL64:
+        count_relocation(kRelocRelative);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO REL64 %16lx <- %16lx - %16lx %s\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    rela->r_offset,
+                    sym_name);
+        *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
+        break;
+    case R_AARCH64_PREL32:
+        count_relocation(kRelocRelative);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO REL32 %16lx <- %16lx - %16lx %s\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    rela->r_offset, sym_name);
+        if ((static_cast<Elf_Addr>(INT32_MIN) <=
+          (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
+          ((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
+          static_cast<Elf_Addr>(UINT32_MAX))) {
+            *reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
+        } else {
+            DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
+                    (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
+                    static_cast<Elf_Addr>(INT32_MIN),
+                    static_cast<Elf_Addr>(UINT32_MAX));
+            return -1;
+        }
+        break;
+    case R_AARCH64_PREL16:
+        count_relocation(kRelocRelative);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO REL16 %16lx <- %16lx - %16lx %s\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    rela->r_offset, sym_name);
+        if ((static_cast<Elf_Addr>(INT16_MIN) <=
+          (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
+          ((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
+          static_cast<Elf_Addr>(UINT16_MAX))) {
+            *reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
+        } else {
+            DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
+                    (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
+                    static_cast<Elf_Addr>(INT16_MIN),
+                    static_cast<Elf_Addr>(UINT16_MAX));
+            return -1;
+        }
+        break;
+
+    case R_AARCH64_RELATIVE:
+        count_relocation(kRelocRelative);
+        MARK(rela->r_offset);
+        if (sym) {
+            DL_ERR("odd RELATIVE form...");
+            return -1;
+        }
+        TRACE_TYPE(RELO, "RELO RELATIVE %16lx <- %16lx\n",
+                    reloc,
+                    (si->base + rela->r_addend));
+        *reinterpret_cast<Elf_Addr*>(reloc) = (si->base + rela->r_addend);
+        break;
+
+    case R_AARCH64_COPY:
+        if ((si->flags & FLAG_EXE) == 0) {
+            /*
+              * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
+              *
+              * Section 4.7.1.10 "Dynamic relocations"
+              * R_AARCH64_COPY may only appear in executable objects where e_type is
+              * set to ET_EXEC.
+              *
+              * FLAG_EXE is set for both ET_DYN and ET_EXEC executables.
+              * We should explicitly disallow ET_DYN executables from having
+              * R_AARCH64_COPY relocations.
+              */
+            DL_ERR("%s R_AARCH64_COPY relocations only supported for ET_EXEC", si->name);
+            return -1;
+        }
+        count_relocation(kRelocCopy);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO COPY %16lx <- %ld @ %16lx %s\n",
+                    reloc,
+                    s->st_size,
+                    (sym_addr + rela->r_addend),
+                    sym_name);
+        if (reloc == (sym_addr + rela->r_addend)) {
+            Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
+
+            if (src == NULL) {
+                DL_ERR("%s R_AARCH64_COPY relocation source cannot be resolved", si->name);
+                return -1;
+            }
+            if (lsi->has_DT_SYMBOLIC) {
+                DL_ERR("%s invalid R_AARCH64_COPY relocation against DT_SYMBOLIC shared "
+                        "library %s (built with -Bsymbolic?)", si->name, lsi->name);
+                return -1;
+            }
+            if (s->st_size < src->st_size) {
+                DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%ld < %ld)",
+                        si->name, s->st_size, src->st_size);
+                return -1;
+            }
+            memcpy((void*)reloc, (void*)(src->st_value + lsi->load_bias), src->st_size);
+        } else {
+            DL_ERR("%s R_AARCH64_COPY relocation target cannot be resolved", si->name);
+            return -1;
+        }
+        break;
+    case R_AARCH64_TLS_TPREL64:
+        TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16lx <- %16lx - %16lx\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    rela->r_offset);
+        break;
+    case R_AARCH64_TLS_DTPREL32:
+        TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16lx <- %16lx - %16lx\n",
+                    reloc,
+                    (sym_addr + rela->r_addend),
+                    rela->r_offset);
+        break;
+#elif defined(__x86_64__)
     case R_X86_64_JUMP_SLOT:
       count_relocation(kRelocAbsolute);
       MARK(rela->r_offset);
@@ -964,6 +1175,7 @@
       *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend - reloc;
       break;
 #endif
+
     default:
       DL_ERR("unknown reloc type %d @ %p (%d)", type, rela, (int) (rela - start));
       return -1;
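
A minimal sketch (not part of the patch, ignoring the base/load_bias bookkeeping the real code tracks) of the most common new case, R_AARCH64_RELATIVE, which just rebases a pointer by the library's load address:

    // Illustrative only.
    #include <elf.h>
    #include <stdint.h>

    static void illustrative_apply_relative(uint8_t* load_base, const Elf64_Rela* rela) {
      uint64_t* target = reinterpret_cast<uint64_t*>(load_base + rela->r_offset);
      *target = reinterpret_cast<uint64_t>(load_base) + rela->r_addend;
    }
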
@@ -1750,6 +1962,9 @@
 static void add_vdso(KernelArgumentBlock& args UNUSED) {
 #if defined(AT_SYSINFO_EHDR)
     Elf_Ehdr* ehdr_vdso = reinterpret_cast<Elf_Ehdr*>(args.getauxval(AT_SYSINFO_EHDR));
+    if (ehdr_vdso == NULL) {
+        return;
+    }
 
     soinfo* si = soinfo_alloc("[vdso]");
 
@@ -1761,7 +1976,6 @@
     si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
 
     soinfo_link_image(si);
-    insert_soinfo_into_debug_map(si);
 #endif
 }
 
@@ -1932,7 +2146,11 @@
         for (n = 0; n < 4096; n++) {
             if (bitmask[n]) {
                 unsigned x = bitmask[n];
+#if defined(__LP64__)
+                for (i = 0; i < 32; i++) {
+#else
                 for (i = 0; i < 8; i++) {
+#endif
                     if (x & 1) {
                         count++;
                     }
@@ -1988,7 +2206,6 @@
   KernelArgumentBlock args(raw_args);
 
   Elf_Addr linker_addr = args.getauxval(AT_BASE);
-
   Elf_Ehdr* elf_hdr = reinterpret_cast<Elf_Ehdr*>(linker_addr);
   Elf_Phdr* phdr = (Elf_Phdr*)((unsigned char*) linker_addr + elf_hdr->e_phoff);
 
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 4884364..b4d72b2 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -29,6 +29,7 @@
 #include "linker_phdr.h"
 
 #include <errno.h>
+#include <machine/exec.h>
 #include <sys/mman.h>
 
 #include "linker.h"
@@ -201,17 +202,7 @@
     return false;
   }
 
-  if (header_.e_machine !=
-#if defined(__arm__)
-      EM_ARM
-#elif defined(__i386__)
-      EM_386
-#elif defined(__mips__)
-      EM_MIPS
-#elif defined(__x86_64__)
-      EM_X86_64
-#endif
-  ) {
+  if (header_.e_machine != ELF_TARG_MACH) {
     DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
     return false;
   }
diff --git a/tests/Android.mk b/tests/Android.mk
index 19b5447..0a4db91 100644
--- a/tests/Android.mk
+++ b/tests/Android.mk
@@ -14,39 +14,11 @@
 # limitations under the License.
 #
 
-ifneq ($(BUILD_TINY_ANDROID), true)
+ifneq ($(BUILD_TINY_ANDROID),true)
 
 LOCAL_PATH := $(call my-dir)
 
 # -----------------------------------------------------------------------------
-# Benchmarks.
-# -----------------------------------------------------------------------------
-
-benchmark_c_flags = \
-    -O2 \
-    -Wall -Wextra \
-    -Werror \
-    -fno-builtin \
-
-benchmark_src_files = \
-    benchmark_main.cpp \
-    math_benchmark.cpp \
-    property_benchmark.cpp \
-    string_benchmark.cpp \
-    time_benchmark.cpp \
-
-# Build benchmarks for the device (with bionic's .so). Run with:
-#   adb shell bionic-benchmarks
-include $(CLEAR_VARS)
-LOCAL_MODULE := bionic-benchmarks
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-LOCAL_CFLAGS += $(benchmark_c_flags)
-LOCAL_C_INCLUDES += external/stlport/stlport bionic/ bionic/libstdc++/include
-LOCAL_SHARED_LIBRARIES += libstlport
-LOCAL_SRC_FILES := $(benchmark_src_files)
-include $(BUILD_EXECUTABLE)
-
-# -----------------------------------------------------------------------------
 # Unit tests.
 # -----------------------------------------------------------------------------
 
@@ -57,6 +29,11 @@
     -Werror \
     -fno-builtin \
 
+ifeq ($(TARGET_ARCH),aarch64)
+  $(info TODO: $(LOCAL_PATH)/Android.mk -fstack-protector not yet available for the AArch64 toolchain)
+  test_c_flags += -fno-stack-protector
+endif # aarch64
+
 test_src_files = \
     buffer_tests.cpp \
     dirent_test.cpp \
diff --git a/tests/pthread_test.cpp b/tests/pthread_test.cpp
index 480e455..e7a952a 100644
--- a/tests/pthread_test.cpp
+++ b/tests/pthread_test.cpp
@@ -33,6 +33,11 @@
 
 #if !defined(__GLIBC__) // glibc uses keys internally that its sysconf value doesn't account for.
 TEST(pthread, pthread_key_create_lots) {
+  // POSIX says PTHREAD_KEYS_MAX should be at least 128.
+  ASSERT_GE(PTHREAD_KEYS_MAX, 128);
+  // sysconf shouldn't return a smaller value.
+  ASSERT_GE(sysconf(_SC_THREAD_KEYS_MAX), PTHREAD_KEYS_MAX);
+
   // We can allocate _SC_THREAD_KEYS_MAX keys.
   std::vector<pthread_key_t> keys;
   for (int i = 0; i < sysconf(_SC_THREAD_KEYS_MAX); ++i) {