Merge "add fortified readlink/readlinkat implementations"
diff --git a/libc/Android.mk b/libc/Android.mk
index 6256caf..2c33a8f 100644
--- a/libc/Android.mk
+++ b/libc/Android.mk
@@ -63,6 +63,7 @@
     stdio/sprintf.c \
     stdio/stdio.c \
     stdio/stdio_ext.cpp \
+    stdlib/atexit.c \
     stdlib/exit.c \
 
 # Fortify implementations of libc functions.
@@ -76,6 +77,8 @@
     bionic/__pread64_chk.cpp \
     bionic/__pread_chk.cpp \
     bionic/__read_chk.cpp \
+    bionic/__readlink_chk.cpp \
+    bionic/__readlinkat_chk.cpp \
     bionic/__recvfrom_chk.cpp \
     bionic/__stpcpy_chk.cpp \
     bionic/__stpncpy_chk.cpp \
@@ -109,6 +112,7 @@
     bionic/clock_getcpuclockid.cpp \
     bionic/clock_nanosleep.cpp \
     bionic/clone.cpp \
+    bionic/close.cpp \
     bionic/__cmsg_nxthdr.cpp \
     bionic/connect.cpp \
     bionic/ctype.cpp \
@@ -481,7 +485,6 @@
     upstream-openbsd/lib/libc/stdio/wprintf.c \
     upstream-openbsd/lib/libc/stdio/wscanf.c \
     upstream-openbsd/lib/libc/stdio/wsetup.c \
-    upstream-openbsd/lib/libc/stdlib/atexit.c \
     upstream-openbsd/lib/libc/stdlib/atoi.c \
     upstream-openbsd/lib/libc/stdlib/atol.c \
     upstream-openbsd/lib/libc/stdlib/atoll.c \
@@ -824,12 +827,7 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES := $(libc_upstream_openbsd_ndk_src_files)
-ifneq (,$(filter $(TARGET_ARCH),x86 x86_64))
-  # Clang has wrong long double size or LDBL_MANT_DIG, http://b/17163651.
-  LOCAL_CLANG := false
-else
-  LOCAL_CLANG := $(use_clang)
-endif
+LOCAL_CLANG := $(use_clang)
 
 LOCAL_CFLAGS := \
     $(libc_common_cflags) \
@@ -867,12 +865,7 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES := $(libc_upstream_openbsd_src_files)
-ifneq (,$(filter $(TARGET_ARCH),x86 x86_64))
-  # Clang has wrong long double size or LDBL_MANT_DIG, http://b/17163651.
-  LOCAL_CLANG := false
-else
-  LOCAL_CLANG := $(use_clang)
-endif
+LOCAL_CLANG := $(use_clang)
 
 LOCAL_CFLAGS := \
     $(libc_common_cflags) \
@@ -912,12 +905,7 @@
 
 LOCAL_SRC_FILES_32 := $(libc_upstream_openbsd_gdtoa_src_files_32)
 LOCAL_SRC_FILES_64 := $(libc_upstream_openbsd_gdtoa_src_files_64)
-ifneq (,$(filter $(TARGET_ARCH),x86 x86_64))
-  # Clang has wrong long double size or LDBL_MANT_DIG, http://b/17163651.
-  LOCAL_CLANG := false
-else
-  LOCAL_CLANG := $(use_clang)
-endif
+LOCAL_CLANG := $(use_clang)
 
 LOCAL_CFLAGS := \
     $(libc_common_cflags) \
@@ -1236,10 +1224,6 @@
 
 LOCAL_WHOLE_STATIC_LIBRARIES_arm := libc_aeabi
 
-ifneq ($(MALLOC_IMPL),dlmalloc)
-LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc
-endif
-
 LOCAL_CXX_STL := none
 LOCAL_SYSTEM_SHARED_LIBRARIES :=
 
@@ -1332,6 +1316,11 @@
 LOCAL_CLANG := $(use_clang)
 LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
 LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
+
+ifneq ($(MALLOC_IMPL),dlmalloc)
+LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc
+endif
+
 LOCAL_CXX_STL := none
 LOCAL_SYSTEM_SHARED_LIBRARIES :=
 LOCAL_ADDRESS_SANITIZER := false
@@ -1353,10 +1342,13 @@
 
 LOCAL_C_INCLUDES := $(libc_common_c_includes)
 LOCAL_SRC_FILES := \
+    arch-common/bionic/crtbegin_so.c \
+    arch-common/bionic/crtbrand.S \
     $(libc_arch_dynamic_src_files) \
     bionic/malloc_debug_common.cpp \
     bionic/libc_init_dynamic.cpp \
     bionic/NetdClient.cpp \
+    arch-common/bionic/crtend_so.S \
 
 LOCAL_MODULE := libc
 LOCAL_CLANG := $(use_clang)
@@ -1379,6 +1371,11 @@
 
 LOCAL_SHARED_LIBRARIES := libdl
 LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
+
+ifneq ($(MALLOC_IMPL),dlmalloc)
+LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc
+endif
+
 LOCAL_CXX_STL := none
 LOCAL_SYSTEM_SHARED_LIBRARIES :=
 
@@ -1396,15 +1393,15 @@
 
 $(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
 $(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_arch_dynamic_src_files))
+
+LOCAL_NO_CRT := true
+LOCAL_ASFLAGS += $(libc_crt_target_cflags)
+
 # special for arm
-LOCAL_NO_CRT_arm := true
 LOCAL_CFLAGS_arm += -DCRT_LEGACY_WORKAROUND
-LOCAL_ASFLAGS_arm += $(libc_crt_target_cflags)
 LOCAL_SRC_FILES_arm += \
-    arch-common/bionic/crtbegin_so.c \
-    arch-common/bionic/crtbrand.S \
-    arch-arm/bionic/atexit_legacy.c \
-    arch-common/bionic/crtend_so.S
+    arch-arm/bionic/atexit_legacy.c
+
 LOCAL_ADDRESS_SANITIZER := false
 LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
 
diff --git a/libc/SYSCALLS.TXT b/libc/SYSCALLS.TXT
index b91f5bf..33e30eb 100644
--- a/libc/SYSCALLS.TXT
+++ b/libc/SYSCALLS.TXT
@@ -95,7 +95,7 @@
 ssize_t     pread64|pread(int, void*, size_t, off_t) arm64,mips64,x86_64
 ssize_t     pwrite64(int, void*, size_t, off64_t) arm,mips,x86
 ssize_t     pwrite64|pwrite(int, void*, size_t, off_t) arm64,mips64,x86_64
-int         close(int)                      all
+int         ___close:close(int)  all
 pid_t       __getpid:getpid()  all
 int         munmap(void*, size_t)  all
 void*       mremap(void*, size_t, size_t, unsigned long)  all
diff --git a/libc/arch-arm/syscalls/close.S b/libc/arch-arm/syscalls/___close.S
similarity index 84%
rename from libc/arch-arm/syscalls/close.S
rename to libc/arch-arm/syscalls/___close.S
index ec05445..db8a230 100644
--- a/libc/arch-arm/syscalls/close.S
+++ b/libc/arch-arm/syscalls/___close.S
@@ -2,7 +2,7 @@
 
 #include <private/bionic_asm.h>
 
-ENTRY(close)
+ENTRY(___close)
     mov     ip, r7
     ldr     r7, =__NR_close
     swi     #0
@@ -11,4 +11,5 @@
     bxls    lr
     neg     r0, r0
     b       __set_errno_internal
-END(close)
+END(___close)
+.hidden ___close
diff --git a/libc/arch-arm64/bionic/crtbegin.c b/libc/arch-arm64/bionic/crtbegin.c
index fec0b11..7e2c5d7 100644
--- a/libc/arch-arm64/bionic/crtbegin.c
+++ b/libc/arch-arm64/bionic/crtbegin.c
@@ -67,3 +67,4 @@
 
 #include "../../arch-common/bionic/__dso_handle.h"
 #include "../../arch-common/bionic/atexit.h"
+#include "../../arch-common/bionic/pthread_atfork.h"
diff --git a/libc/arch-arm64/syscalls/close.S b/libc/arch-arm64/syscalls/___close.S
similarity index 82%
rename from libc/arch-arm64/syscalls/close.S
rename to libc/arch-arm64/syscalls/___close.S
index 3624581..8fb8361 100644
--- a/libc/arch-arm64/syscalls/close.S
+++ b/libc/arch-arm64/syscalls/___close.S
@@ -2,7 +2,7 @@
 
 #include <private/bionic_asm.h>
 
-ENTRY(close)
+ENTRY(___close)
     mov     x8, __NR_close
     svc     #0
 
@@ -11,4 +11,5 @@
     b.hi    __set_errno_internal
 
     ret
-END(close)
+END(___close)
+.hidden ___close
diff --git a/libc/arch-common/bionic/crtbegin.c b/libc/arch-common/bionic/crtbegin.c
index fa9f3f3..c46405c 100644
--- a/libc/arch-common/bionic/crtbegin.c
+++ b/libc/arch-common/bionic/crtbegin.c
@@ -59,6 +59,7 @@
 
 #include "__dso_handle.h"
 #include "atexit.h"
+#include "pthread_atfork.h"
 #ifdef __i386__
 # include "../../arch-x86/bionic/__stack_chk_fail_local.h"
 #endif
diff --git a/libc/arch-common/bionic/crtbegin_so.c b/libc/arch-common/bionic/crtbegin_so.c
index 641e45a..3754363 100644
--- a/libc/arch-common/bionic/crtbegin_so.c
+++ b/libc/arch-common/bionic/crtbegin_so.c
@@ -56,6 +56,7 @@
 # include "__dso_handle_so.h"
 # include "atexit.h"
 #endif
+#include "pthread_atfork.h"
 #ifdef __i386__
 # include "../../arch-x86/bionic/__stack_chk_fail_local.h"
 #endif
diff --git a/libc/arch-common/bionic/pthread_atfork.h b/libc/arch-common/bionic/pthread_atfork.h
new file mode 100644
index 0000000..0c48a12
--- /dev/null
+++ b/libc/arch-common/bionic/pthread_atfork.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+extern void* __dso_handle;
+
+extern int __register_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void), void* dso);
+
+#ifndef _LIBC
+// Libc used to export this in previous versions, therefore it needs
+// to remain global for binary compatibility.
+__attribute__ ((visibility ("hidden")))
+#endif
+int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void)) {
+  return __register_atfork(prepare, parent, child, &__dso_handle);
+}
+
diff --git a/libc/arch-mips/bionic/crtbegin.c b/libc/arch-mips/bionic/crtbegin.c
index 50e9eeb..d72ec7b 100644
--- a/libc/arch-mips/bionic/crtbegin.c
+++ b/libc/arch-mips/bionic/crtbegin.c
@@ -92,3 +92,4 @@
 
 #include "../../arch-common/bionic/__dso_handle.h"
 #include "../../arch-common/bionic/atexit.h"
+#include "../../arch-common/bionic/pthread_atfork.h"
diff --git a/libc/arch-mips/syscalls/close.S b/libc/arch-mips/syscalls/___close.S
similarity index 85%
rename from libc/arch-mips/syscalls/close.S
rename to libc/arch-mips/syscalls/___close.S
index 231f497..356cfd6 100644
--- a/libc/arch-mips/syscalls/close.S
+++ b/libc/arch-mips/syscalls/___close.S
@@ -2,7 +2,7 @@
 
 #include <private/bionic_asm.h>
 
-ENTRY(close)
+ENTRY(___close)
     .set noreorder
     .cpload t9
     li v0, __NR_close
@@ -16,4 +16,5 @@
     j t9
     nop
     .set reorder
-END(close)
+END(___close)
+.hidden ___close
diff --git a/libc/arch-mips64/bionic/crtbegin.c b/libc/arch-mips64/bionic/crtbegin.c
index 1374fea..bdd423b 100644
--- a/libc/arch-mips64/bionic/crtbegin.c
+++ b/libc/arch-mips64/bionic/crtbegin.c
@@ -92,3 +92,4 @@
 
 #include "../../arch-common/bionic/__dso_handle.h"
 #include "../../arch-common/bionic/atexit.h"
+#include "../../arch-common/bionic/pthread_atfork.h"
diff --git a/libc/arch-mips64/syscalls/close.S b/libc/arch-mips64/syscalls/___close.S
similarity index 88%
rename from libc/arch-mips64/syscalls/close.S
rename to libc/arch-mips64/syscalls/___close.S
index 5e237dd..f1ce708 100644
--- a/libc/arch-mips64/syscalls/close.S
+++ b/libc/arch-mips64/syscalls/___close.S
@@ -2,7 +2,7 @@
 
 #include <private/bionic_asm.h>
 
-ENTRY(close)
+ENTRY(___close)
     .set push
     .set noreorder
     li v0, __NR_close
@@ -22,4 +22,5 @@
     j t9
     move ra, t0
     .set pop
-END(close)
+END(___close)
+.hidden ___close
diff --git a/libc/arch-x86/syscalls/close.S b/libc/arch-x86/syscalls/___close.S
similarity index 89%
rename from libc/arch-x86/syscalls/close.S
rename to libc/arch-x86/syscalls/___close.S
index f6cce62..796944b 100644
--- a/libc/arch-x86/syscalls/close.S
+++ b/libc/arch-x86/syscalls/___close.S
@@ -2,7 +2,7 @@
 
 #include <private/bionic_asm.h>
 
-ENTRY(close)
+ENTRY(___close)
     pushl   %ebx
     .cfi_def_cfa_offset 8
     .cfi_rel_offset ebx, 0
@@ -18,4 +18,5 @@
 1:
     popl    %ebx
     ret
-END(close)
+END(___close)
+.hidden ___close
diff --git a/libc/arch-x86_64/syscalls/close.S b/libc/arch-x86_64/syscalls/___close.S
similarity index 84%
rename from libc/arch-x86_64/syscalls/close.S
rename to libc/arch-x86_64/syscalls/___close.S
index 8a7ada1..8607f05 100644
--- a/libc/arch-x86_64/syscalls/close.S
+++ b/libc/arch-x86_64/syscalls/___close.S
@@ -2,7 +2,7 @@
 
 #include <private/bionic_asm.h>
 
-ENTRY(close)
+ENTRY(___close)
     movl    $__NR_close, %eax
     syscall
     cmpq    $-MAX_ERRNO, %rax
@@ -12,4 +12,5 @@
     call    __set_errno_internal
 1:
     ret
-END(close)
+END(___close)
+.hidden ___close
diff --git a/libc/bionic/__readlink_chk.cpp b/libc/bionic/__readlink_chk.cpp
new file mode 100644
index 0000000..f19f917
--- /dev/null
+++ b/libc/bionic/__readlink_chk.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#undef _FORTIFY_SOURCE
+#include <unistd.h>
+#include "private/libc_logging.h"
+
+extern "C" ssize_t __readlink_chk(const char* path, char* buf, size_t size, size_t buf_size) {
+  if (__predict_false(size > buf_size)) {
+    __fortify_chk_fail("readlink: prevented write past end of buffer", 0);
+  }
+
+  if (__predict_false(size > SSIZE_MAX)) {
+    __fortify_chk_fail("readlink: size > SSIZE_MAX", 0);
+  }
+
+  return readlink(path, buf, size);
+}
diff --git a/libc/bionic/__readlinkat_chk.cpp b/libc/bionic/__readlinkat_chk.cpp
new file mode 100644
index 0000000..a11db8e
--- /dev/null
+++ b/libc/bionic/__readlinkat_chk.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#undef _FORTIFY_SOURCE
+#include <unistd.h>
+#include "private/libc_logging.h"
+
+extern "C" ssize_t __readlinkat_chk(int dirfd, const char* path, char* buf, size_t size, size_t buf_size) {
+  if (__predict_false(size > buf_size)) {
+    __fortify_chk_fail("readlinkat: prevented write past end of buffer", 0);
+  }
+
+  if (__predict_false(size > SSIZE_MAX)) {
+    __fortify_chk_fail("readlinkat: size > SSIZE_MAX", 0);
+  }
+
+  return readlinkat(dirfd, path, buf, size);
+}
diff --git a/libc/bionic/close.cpp b/libc/bionic/close.cpp
new file mode 100644
index 0000000..18225f0
--- /dev/null
+++ b/libc/bionic/close.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <unistd.h>
+
+extern "C" int ___close(int);
+
+int close(int fd) {
+  int rc = ___close(fd);
+  if (rc == -1 && errno == EINTR) {
+    // POSIX says that if close returns with EINTR, the fd must not be closed.
+    // Linus disagrees: http://lkml.indiana.edu/hypermail/linux/kernel/0509.1/0877.html
+    // The future POSIX solution is posix_close (http://austingroupbugs.net/view.php?id=529),
+    // with the state after EINTR being undefined, and EINPROGRESS for the case where close
+    // was interrupted by a signal but the file descriptor was actually closed.
+    // My concern with that future behavior is that it breaks existing code that assumes
+    // that close only returns -1 if it failed. Unlike other system calls, I have real
+    // difficulty even imagining a caller that would need to know that close was interrupted
+    // but succeeded. So returning EINTR is wrong (because Linux always closes) and EINPROGRESS
+    // is harmful because callers need to be rewritten to understand that EINPROGRESS isn't
+    // actually a failure, but will be reported as one.
+
+    // We don't restore errno because that would incur a cost (the TLS read) for every caller.
+    // Since callers don't know ahead of time whether close will legitimately fail, they need
+    // to have stashed the old errno value anyway if they plan on using it afterwards, so
+    // us clobbering errno here doesn't change anything in that respect.
+    return 0;
+  }
+  return rc;
+}
diff --git a/libc/bionic/posix_timers.cpp b/libc/bionic/posix_timers.cpp
index bc3aeb2..c8f71c8 100644
--- a/libc/bionic/posix_timers.cpp
+++ b/libc/bionic/posix_timers.cpp
@@ -174,10 +174,10 @@
     return -1;
   }
 
-  // Give the thread a meaningful name.
+  // Give the thread a specific meaningful name.
   // It can't do this itself because the kernel timer isn't created until after it's running.
-  char name[32];
-  snprintf(name, sizeof(name), "POSIX interval timer %d", to_kernel_timer_id(timer));
+  char name[16]; // 16 is the kernel-imposed limit.
+  snprintf(name, sizeof(name), "POSIX timer %d", to_kernel_timer_id(timer));
   pthread_setname_np(timer->callback_thread, name);
 
   *timer_id = timer;
diff --git a/libc/bionic/pthread_atfork.cpp b/libc/bionic/pthread_atfork.cpp
index d1c4ad0..093ffd2 100644
--- a/libc/bionic/pthread_atfork.cpp
+++ b/libc/bionic/pthread_atfork.cpp
@@ -30,6 +30,8 @@
 #include <pthread.h>
 #include <stdlib.h>
 
+#include "private/bionic_macros.h"
+
 struct atfork_t {
   atfork_t* next;
   atfork_t* prev;
@@ -37,79 +39,143 @@
   void (*prepare)(void);
   void (*child)(void);
   void (*parent)(void);
+
+  void* dso_handle;
 };
 
-struct atfork_list_t {
-  atfork_t* first;
-  atfork_t* last;
+class atfork_list_t {
+ public:
+  atfork_list_t() : first_(nullptr), last_(nullptr) {}
+
+  template<typename F>
+  void walk_forward(F f) {
+    for (atfork_t* it = first_; it != nullptr; it = it->next) {
+      f(it);
+    }
+  }
+
+  template<typename F>
+  void walk_backwards(F f) {
+    for (atfork_t* it = last_; it != nullptr; it = it->prev) {
+      f(it);
+    }
+  }
+
+  void push_back(atfork_t* entry) {
+    entry->next = nullptr;
+    entry->prev = last_;
+    if (entry->prev != nullptr) {
+      entry->prev->next = entry;
+    }
+    if (first_ == nullptr) {
+      first_ = entry;
+    }
+    last_ = entry;
+  }
+
+  template<typename F>
+  void remove_if(F predicate) {
+    atfork_t* it = first_;
+    while (it != nullptr) {
+      if (predicate(it)) {
+        atfork_t* entry = it;
+        it = it->next;
+        remove(entry);
+      } else {
+        it = it->next;
+      }
+    }
+  }
+
+ private:
+  void remove(atfork_t* entry) {
+    if (entry->prev != nullptr) {
+      entry->prev->next = entry->next;
+    } else {
+      first_ = entry->next;
+    }
+
+    if (entry->next != nullptr) {
+      entry->next->prev = entry->prev;
+    } else {
+      last_ = entry->prev;
+    }
+
+    free(entry);
+  }
+
+  atfork_t* first_;
+  atfork_t* last_;
+
+  DISALLOW_COPY_AND_ASSIGN(atfork_list_t);
 };
 
 static pthread_mutex_t g_atfork_list_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
-static atfork_list_t g_atfork_list = { NULL, NULL };
+static atfork_list_t g_atfork_list;
 
 void __bionic_atfork_run_prepare() {
   // We lock the atfork list here, unlock it in the parent, and reset it in the child.
   // This ensures that nobody can modify the handler array between the calls
   // to the prepare and parent/child handlers.
-  //
-  // TODO: If a handler tries to mutate the list, they'll block. We should probably copy
-  // the list before forking, and have prepare, parent, and child all work on the consistent copy.
   pthread_mutex_lock(&g_atfork_list_mutex);
 
   // Call pthread_atfork() prepare handlers. POSIX states that the prepare
   // handlers should be called in the reverse order of the parent/child
   // handlers, so we iterate backwards.
-  for (atfork_t* it = g_atfork_list.last; it != NULL; it = it->prev) {
-    if (it->prepare != NULL) {
+  g_atfork_list.walk_backwards([](atfork_t* it) {
+    if (it->prepare != nullptr) {
       it->prepare();
     }
-  }
+  });
 }
 
 void __bionic_atfork_run_child() {
-  for (atfork_t* it = g_atfork_list.first; it != NULL; it = it->next) {
-    if (it->child != NULL) {
+  g_atfork_list.walk_forward([](atfork_t* it) {
+    if (it->child != nullptr) {
       it->child();
     }
-  }
+  });
 
   g_atfork_list_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 }
 
 void __bionic_atfork_run_parent() {
-  for (atfork_t* it = g_atfork_list.first; it != NULL; it = it->next) {
-    if (it->parent != NULL) {
+  g_atfork_list.walk_forward([](atfork_t* it) {
+    if (it->parent != nullptr) {
       it->parent();
     }
-  }
+  });
 
   pthread_mutex_unlock(&g_atfork_list_mutex);
 }
 
-int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void)) {
+// __register_atfork is the name used by glibc
+extern "C" int __register_atfork(void (*prepare)(void), void (*parent)(void),
+                                 void(*child)(void), void* dso) {
   atfork_t* entry = reinterpret_cast<atfork_t*>(malloc(sizeof(atfork_t)));
-  if (entry == NULL) {
+  if (entry == nullptr) {
     return ENOMEM;
   }
 
   entry->prepare = prepare;
   entry->parent = parent;
   entry->child = child;
+  entry->dso_handle = dso;
 
   pthread_mutex_lock(&g_atfork_list_mutex);
 
-  // Append 'entry' to the list.
-  entry->next = NULL;
-  entry->prev = g_atfork_list.last;
-  if (entry->prev != NULL) {
-    entry->prev->next = entry;
-  }
-  if (g_atfork_list.first == NULL) {
-    g_atfork_list.first = entry;
-  }
-  g_atfork_list.last = entry;
+  g_atfork_list.push_back(entry);
 
   pthread_mutex_unlock(&g_atfork_list_mutex);
 
   return 0;
 }
+
+extern "C" __LIBC_HIDDEN__ void __unregister_atfork(void* dso) {
+  pthread_mutex_lock(&g_atfork_list_mutex);
+  g_atfork_list.remove_if([&](const atfork_t* entry) {
+    return entry->dso_handle == dso;
+  });
+  pthread_mutex_unlock(&g_atfork_list_mutex);
+}
+
diff --git a/libc/bionic/readlink.cpp b/libc/bionic/readlink.cpp
index 3bb7bc1..a53f933 100644
--- a/libc/bionic/readlink.cpp
+++ b/libc/bionic/readlink.cpp
@@ -26,6 +26,8 @@
  * SUCH DAMAGE.
  */
 
+#undef _FORTIFY_SOURCE
+
 #include <fcntl.h>
 #include <sys/stat.h>
 #include <sys/types.h>
diff --git a/libc/bionic/stubs.cpp b/libc/bionic/stubs.cpp
index c971d1b..b57aeda 100644
--- a/libc/bionic/stubs.cpp
+++ b/libc/bionic/stubs.cpp
@@ -245,6 +245,7 @@
         appid = android_ids[n].aid;
         // Move the end pointer to the null terminator.
         end += strlen(android_ids[n].name) + 1;
+        break;
       }
     }
   }
diff --git a/libc/include/unistd.h b/libc/include/unistd.h
index a601cb7..f0de29e 100644
--- a/libc/include/unistd.h
+++ b/libc/include/unistd.h
@@ -239,6 +239,16 @@
 __errordecl(__read_count_toobig_error, "read called with count > SSIZE_MAX");
 extern ssize_t __read_real(int, void*, size_t) __RENAME(read);
 
+extern ssize_t __readlink_chk(const char*, char*, size_t, size_t);
+__errordecl(__readlink_dest_size_error, "readlink called with size bigger than destination");
+__errordecl(__readlink_size_toobig_error, "readlink called with size > SSIZE_MAX");
+extern ssize_t __readlink_real(const char*, char*, size_t) __RENAME(readlink);
+
+extern ssize_t __readlinkat_chk(int dirfd, const char*, char*, size_t, size_t);
+__errordecl(__readlinkat_dest_size_error, "readlinkat called with size bigger than destination");
+__errordecl(__readlinkat_size_toobig_error, "readlinkat called with size > SSIZE_MAX");
+extern ssize_t __readlinkat_real(int dirfd, const char*, char*, size_t) __RENAME(readlinkat);
+
 #if defined(__BIONIC_FORTIFY)
 
 #if defined(__USE_FILE_OFFSET64)
@@ -322,6 +332,56 @@
     return __read_chk(fd, buf, count, bos);
 }
 
+__BIONIC_FORTIFY_INLINE
+ssize_t readlink(const char* path, char* buf, size_t size) {
+    size_t bos = __bos(buf);
+
+#if !defined(__clang__)
+    if (__builtin_constant_p(size) && (size > SSIZE_MAX)) {
+        __readlink_size_toobig_error();
+    }
+
+    if (bos == __BIONIC_FORTIFY_UNKNOWN_SIZE) {
+        return __readlink_real(path, buf, size);
+    }
+
+    if (__builtin_constant_p(size) && (size > bos)) {
+        __readlink_dest_size_error();
+    }
+
+    if (__builtin_constant_p(size) && (size <= bos)) {
+        return __readlink_real(path, buf, size);
+    }
+#endif
+
+    return __readlink_chk(path, buf, size, bos);
+}
+
+__BIONIC_FORTIFY_INLINE
+ssize_t readlinkat(int dirfd, const char* path, char* buf, size_t size) {
+    size_t bos = __bos(buf);
+
+#if !defined(__clang__)
+    if (__builtin_constant_p(size) && (size > SSIZE_MAX)) {
+        __readlinkat_size_toobig_error();
+    }
+
+    if (bos == __BIONIC_FORTIFY_UNKNOWN_SIZE) {
+        return __readlinkat_real(dirfd, path, buf, size);
+    }
+
+    if (__builtin_constant_p(size) && (size > bos)) {
+        __readlinkat_dest_size_error();
+    }
+
+    if (__builtin_constant_p(size) && (size <= bos)) {
+        return __readlinkat_real(dirfd, path, buf, size);
+    }
+#endif
+
+    return __readlinkat_chk(dirfd, path, buf, size, bos);
+}
+
 #endif /* defined(__BIONIC_FORTIFY) */
 
 __END_DECLS
diff --git a/libc/kernel/tools/cpp.py b/libc/kernel/tools/cpp.py
index ff5136e..10ce290 100644
--- a/libc/kernel/tools/cpp.py
+++ b/libc/kernel/tools/cpp.py
@@ -14,7 +14,6 @@
 
 # Set up the env vars for libclang.
 site.addsitedir(os.path.join(top, 'external/clang/bindings/python'))
-os.putenv('LD_LIBRARY_PATH', os.path.join(top, 'prebuilts/sdk/tools/linux'))
 
 import clang.cindex
 from clang.cindex import conf
@@ -26,6 +25,10 @@
 from clang.cindex import TokenKind
 from clang.cindex import TranslationUnit
 
+# Set up LD_LIBRARY_PATH to include libclang.so, libLLVM.so, etc.
+# Note that setting LD_LIBRARY_PATH with os.putenv() sometimes doesn't help.
+clang.cindex.Config.set_library_path(os.path.join(top, 'prebuilts/sdk/tools/linux/lib64'))
+
 from defaults import kCppUndefinedMacro
 from defaults import kernel_remove_config_macros
 from defaults import kernel_token_replacements
diff --git a/libc/upstream-openbsd/lib/libc/stdlib/atexit.c b/libc/stdlib/atexit.c
similarity index 92%
rename from libc/upstream-openbsd/lib/libc/stdlib/atexit.c
rename to libc/stdlib/atexit.c
index 6532b38..df2b1b5 100644
--- a/libc/upstream-openbsd/lib/libc/stdlib/atexit.c
+++ b/libc/stdlib/atexit.c
@@ -35,11 +35,15 @@
 #include <string.h>
 #include <unistd.h>
 #include "atexit.h"
-#include "thread_private.h"
+#include "private/thread_private.h"
 
 struct atexit *__atexit;
 static int restartloop;
 
+/* BEGIN android-changed: __unregister_atfork is used by __cxa_finalize */
+extern void __unregister_atfork(void* dso);
+/* END android-changed */
+
 /*
  * Function pointers are stored in a linked list of pages. The list
  * is initially empty, and pages are allocated on demand. The first
@@ -62,7 +66,7 @@
 {
 	struct atexit *p = __atexit;
 	struct atexit_fn *fnp;
-	int pgsize = getpagesize();
+	size_t pgsize = getpagesize();
 	int ret = -1;
 
 	if (pgsize < sizeof(*p))
@@ -161,6 +165,12 @@
 		__atexit = NULL;
 	}
 	_ATEXIT_UNLOCK();
+
+	/* BEGIN android-changed: call __unregister_atfork if dso is not null */
+	if (dso != NULL) {
+		__unregister_atfork(dso);
+	}
+	/* END android-changed */
 }
 
 /*
@@ -170,7 +180,7 @@
 __atexit_register_cleanup(void (*func)(void))
 {
 	struct atexit *p;
-	int pgsize = getpagesize();
+	size_t pgsize = getpagesize();
 
 	if (pgsize < sizeof(*p))
 		return;
diff --git a/libc/upstream-openbsd/lib/libc/stdlib/atexit.h b/libc/stdlib/atexit.h
similarity index 100%
rename from libc/upstream-openbsd/lib/libc/stdlib/atexit.h
rename to libc/stdlib/atexit.h
diff --git a/linker/linker.cpp b/linker/linker.cpp
index e029dbd..be7b10c 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -55,7 +55,7 @@
 #include "linker_block_allocator.h"
 #include "linker_debug.h"
 #include "linker_environ.h"
-#include "linker_leb128.h"
+#include "linker_sleb128.h"
 #include "linker_phdr.h"
 #include "linker_relocs.h"
 #include "linker_reloc_iterators.h"
@@ -442,7 +442,8 @@
     offset += verdef->vd_next;
 
     if (verdef->vd_version != 1) {
-      DL_ERR("unsupported verdef[%zd] vd_version: %d (expected 1)", i, verdef->vd_version);
+      DL_ERR("unsupported verdef[%zd] vd_version: %d (expected 1) library: %s",
+          i, verdef->vd_version, si->get_soname());
       return false;
     }
 
@@ -2874,7 +2875,7 @@
     if (android_relocs_size_ > 3 &&
         android_relocs_[0] == 'A' &&
         android_relocs_[1] == 'P' &&
-        (android_relocs_[2] == 'U' || android_relocs_[2] == 'S') &&
+        android_relocs_[2] == 'S' &&
         android_relocs_[3] == '2') {
       DEBUG("[ android relocating %s ]", get_soname());
 
@@ -2882,17 +2883,10 @@
       const uint8_t* packed_relocs = android_relocs_ + 4;
       const size_t packed_relocs_size = android_relocs_size_ - 4;
 
-      if (android_relocs_[2] == 'U') {
-        relocated = relocate(
-            packed_reloc_iterator<leb128_decoder>(
-              leb128_decoder(packed_relocs, packed_relocs_size)),
-            global_group, local_group);
-      } else { // android_relocs_[2] == 'S'
-        relocated = relocate(
-            packed_reloc_iterator<sleb128_decoder>(
-              sleb128_decoder(packed_relocs, packed_relocs_size)),
-            global_group, local_group);
-      }
+      relocated = relocate(
+          packed_reloc_iterator<sleb128_decoder>(
+            sleb128_decoder(packed_relocs, packed_relocs_size)),
+          global_group, local_group);
 
       if (!relocated) {
         return false;
diff --git a/linker/linker_mips.cpp b/linker/linker_mips.cpp
index c162111..0769f82 100644
--- a/linker/linker_mips.cpp
+++ b/linker/linker_mips.cpp
@@ -30,7 +30,7 @@
 #include "linker_debug.h"
 #include "linker_relocs.h"
 #include "linker_reloc_iterators.h"
-#include "linker_leb128.h"
+#include "linker_sleb128.h"
 
 template bool soinfo::relocate<plain_reloc_iterator>(plain_reloc_iterator&& rel_iterator,
                                                      const soinfo_list_t& global_group,
@@ -41,11 +41,6 @@
     const soinfo_list_t& global_group,
     const soinfo_list_t& local_group);
 
-template bool soinfo::relocate<packed_reloc_iterator<leb128_decoder>>(
-    packed_reloc_iterator<leb128_decoder>&& rel_iterator,
-    const soinfo_list_t& global_group,
-    const soinfo_list_t& local_group);
-
 template <typename ElfRelIteratorT>
 bool soinfo::relocate(ElfRelIteratorT&& rel_iterator,
                       const soinfo_list_t& global_group,
diff --git a/linker/linker_leb128.h b/linker/linker_sleb128.h
similarity index 67%
rename from linker/linker_leb128.h
rename to linker/linker_sleb128.h
index d5c6488..a34916f 100644
--- a/linker/linker_leb128.h
+++ b/linker/linker_sleb128.h
@@ -14,42 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef _LINKER_LEB128_H
-#define _LINKER_LEB128_H
+#ifndef _LINKER_SLEB128_H
+#define _LINKER_SLEB128_H
 
 #include <stdint.h>
 
 // Helper classes for decoding LEB128, used in packed relocation data.
 // http://en.wikipedia.org/wiki/LEB128
 
-class leb128_decoder {
- public:
-  leb128_decoder(const uint8_t* buffer, size_t count)
-      : current_(buffer), end_(buffer + count) { }
-
-  size_t pop_front() {
-    size_t value = 0;
-
-    size_t shift = 0;
-    uint8_t byte;
-
-    do {
-      if (current_ >= end_) {
-        __libc_fatal("leb128_decoder ran out of bounds");
-      }
-      byte = *current_++;
-      value |= static_cast<size_t>(byte & 127) << shift;
-      shift += 7;
-    } while (byte & 128);
-
-    return value;
-  }
-
- private:
-  const uint8_t* current_;
-  const uint8_t* const end_;
-};
-
 class sleb128_decoder {
  public:
   sleb128_decoder(const uint8_t* buffer, size_t count)
@@ -64,7 +36,7 @@
 
     do {
       if (current_ >= end_) {
-        __libc_fatal("leb128_decoder ran out of bounds");
+        __libc_fatal("sleb128_decoder ran out of bounds");
       }
       byte = *current_++;
       value |= (static_cast<size_t>(byte & 127) << shift);
@@ -83,5 +55,4 @@
   const uint8_t* const end_;
 };
 
-#endif // __LINKER_LEB128_H
-
+#endif // _LINKER_SLEB128_H
diff --git a/tests/Android.mk b/tests/Android.mk
index c942375..cd65c10 100644
--- a/tests/Android.mk
+++ b/tests/Android.mk
@@ -295,9 +295,7 @@
 # which bionic does not support. Reenable this once this question is resolved.
 bionic-unit-tests_clang_target := false
 
-ifneq ($(filter $(TARGET_ARCH),arm arm64),$(TARGET_ARCH))
 bionic-unit-tests_shared_libraries_target += libdl_test_df_1_global
-endif
 
 module := bionic-unit-tests
 module_tag := optional
diff --git a/tests/dlfcn_test.cpp b/tests/dlfcn_test.cpp
index 1023644..6b1f109 100644
--- a/tests/dlfcn_test.cpp
+++ b/tests/dlfcn_test.cpp
@@ -626,7 +626,6 @@
 }
 
 TEST(dlfcn, dlsym_df_1_global) {
-#if !defined(__arm__) && !defined(__aarch64__)
   void* handle = dlopen("libtest_dlsym_df_1_global.so", RTLD_NOW);
   ASSERT_TRUE(handle != nullptr) << dlerror();
   int (*get_answer)();
@@ -634,9 +633,6 @@
   ASSERT_TRUE(get_answer != nullptr) << dlerror();
   ASSERT_EQ(42, get_answer());
   ASSERT_EQ(0, dlclose(handle));
-#else
-  GTEST_LOG_(INFO) << "This test does nothing on arm/arm64 (to be reenabled once b/18137520 or b/18130452 are fixed).\n";
-#endif
 }
 
 TEST(dlfcn, dlopen_failure) {
diff --git a/tests/fortify_test.cpp b/tests/fortify_test.cpp
index 70159de..4faccb4 100644
--- a/tests/fortify_test.cpp
+++ b/tests/fortify_test.cpp
@@ -647,6 +647,18 @@
   close(fd);
 }
 
+TEST_F(DEATHTEST, readlink_fortified) {
+  char buf[1];
+  size_t ct = atoi("2"); // prevent optimizations
+  ASSERT_FORTIFY(readlink("/dev/null", buf, ct));
+}
+
+TEST_F(DEATHTEST, readlinkat_fortified) {
+  char buf[1];
+  size_t ct = atoi("2"); // prevent optimizations
+  ASSERT_FORTIFY(readlinkat(AT_FDCWD, "/dev/null", buf, ct));
+}
+
 extern "C" char* __strncat_chk(char*, const char*, size_t, size_t);
 extern "C" char* __strcat_chk(char*, const char*, size_t);
 
diff --git a/tests/libs/Android.build.pthread_atfork.mk b/tests/libs/Android.build.pthread_atfork.mk
new file mode 100644
index 0000000..72ffec4
--- /dev/null
+++ b/tests/libs/Android.build.pthread_atfork.mk
@@ -0,0 +1,25 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -----------------------------------------------------------------------------
+# This library is used to test pthread_atfork handler behaviour
+# during/after dlclose.
+# -----------------------------------------------------------------------------
+libtest_pthread_atfork_src_files := pthread_atfork.cpp
+
+module := libtest_pthread_atfork
+include $(LOCAL_PATH)/Android.build.testlib.mk
+
diff --git a/tests/libs/Android.mk b/tests/libs/Android.mk
index 3d5b060..c432c2e 100644
--- a/tests/libs/Android.mk
+++ b/tests/libs/Android.mk
@@ -25,6 +25,7 @@
     $(LOCAL_PATH)/Android.build.dlopen_check_order_dlsym.mk \
     $(LOCAL_PATH)/Android.build.dlopen_check_order_reloc_siblings.mk \
     $(LOCAL_PATH)/Android.build.dlopen_check_order_reloc_main_executable.mk \
+    $(LOCAL_PATH)/Android.build.pthread_atfork.mk \
     $(LOCAL_PATH)/Android.build.testlib.mk \
     $(LOCAL_PATH)/Android.build.versioned_lib.mk \
     $(TEST_PATH)/Android.build.mk
@@ -204,6 +205,11 @@
 include $(LOCAL_PATH)/Android.build.versioned_lib.mk
 
 # -----------------------------------------------------------------------------
+# Build libraries needed by pthread_atfork tests
+# -----------------------------------------------------------------------------
+include $(LOCAL_PATH)/Android.build.pthread_atfork.mk
+
+# -----------------------------------------------------------------------------
 # Library with dependency loop used by dlfcn tests
 #
 # libtest_with_dependency_loop -> a -> b -> c -> a
@@ -348,17 +354,17 @@
 # Library with DF_1_GLOBAL
 # -----------------------------------------------------------------------------
 libdl_test_df_1_global_src_files := dl_df_1_global.cpp
-libdl_test_df_1_global_ldflags := -fuse-ld=bfd -Wl,-z,global
-module := libdl_test_df_1_global
-# TODO: re-enable arm once b/18137520 or b/18130452 are fixed
-ifeq ($(filter $(TARGET_ARCH),arm arm64),)
-include $(LOCAL_PATH)/Android.build.testlib.mk
-else
-# build it for host only
-build_target := SHARED_LIBRARY
-build_type := host
-include $(TEST_PATH)/Android.build.mk
+libdl_test_df_1_global_ldflags := -Wl,-z,global
+# TODO (dimitry): x86* toolchain does not support -z global - switch to bfd
+ifeq ($(filter $(TARGET_ARCH),x86 x86_64),$(TARGET_ARCH))
+libdl_test_df_1_global_ldflags_target := -fuse-ld=bfd
 endif
+# TODO (dimitry): host ld.gold does not yet support -z global
+# remove this line once it is updated.
+libdl_test_df_1_global_ldflags_host := -fuse-ld=bfd
+
+module := libdl_test_df_1_global
+include $(LOCAL_PATH)/Android.build.testlib.mk
 
 # -----------------------------------------------------------------------------
 # Library using symbol from libdl_test_df_1_global
diff --git a/tests/libs/pthread_atfork.cpp b/tests/libs/pthread_atfork.cpp
new file mode 100644
index 0000000..3a5aa4f
--- /dev/null
+++ b/tests/libs/pthread_atfork.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <pthread.h>
+
+extern "C" int proxy_pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void)) {
+  return pthread_atfork(prepare, parent, child);
+}
diff --git a/tests/pthread_test.cpp b/tests/pthread_test.cpp
index a299f02..cb5e818 100644
--- a/tests/pthread_test.cpp
+++ b/tests/pthread_test.cpp
@@ -16,6 +16,7 @@
 
 #include <gtest/gtest.h>
 
+#include <dlfcn.h>
 #include <errno.h>
 #include <inttypes.h>
 #include <limits.h>
@@ -403,7 +404,9 @@
 }
 
 TEST(pthread, pthread_setname_np__too_long) {
-  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "this name is far too long for linux"));
+  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
+  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "123456789012345"));
+  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "1234567890123456"));
 }
 
 TEST(pthread, pthread_setname_np__self) {
@@ -987,14 +990,14 @@
 }
 
 static int g_atfork_prepare_calls = 0;
-static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 1; }
-static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 2; }
+static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
+static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
 static int g_atfork_parent_calls = 0;
-static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 1; }
-static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 2; }
+static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
+static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
 static int g_atfork_child_calls = 0;
-static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 1; }
-static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 2; }
+static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
+static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }
 
 TEST(pthread, pthread_atfork_smoke) {
   ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
@@ -1005,13 +1008,71 @@
 
   // Child and parent calls are made in the order they were registered.
   if (pid == 0) {
-    ASSERT_EQ(0x12, g_atfork_child_calls);
+    ASSERT_EQ(12, g_atfork_child_calls);
     _exit(0);
   }
-  ASSERT_EQ(0x12, g_atfork_parent_calls);
+  ASSERT_EQ(12, g_atfork_parent_calls);
 
   // Prepare calls are made in the reverse order.
-  ASSERT_EQ(0x21, g_atfork_prepare_calls);
+  ASSERT_EQ(21, g_atfork_prepare_calls);
+  int status;
+  ASSERT_EQ(pid, waitpid(pid, &status, 0));
+}
+
+static void AtForkPrepare3() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 3; }
+static void AtForkPrepare4() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 4; }
+
+static void AtForkParent3() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 3; }
+static void AtForkParent4() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 4; }
+
+static void AtForkChild3() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 3; }
+static void AtForkChild4() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 4; }
+
+TEST(pthread, pthread_atfork_with_dlclose) {
+  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
+
+  void* handle = dlopen("libtest_pthread_atfork.so", RTLD_NOW | RTLD_LOCAL);
+  ASSERT_TRUE(handle != nullptr) << dlerror();
+  typedef int (*fn_t)(void (*)(void), void (*)(void), void (*)(void));
+  fn_t fn = reinterpret_cast<fn_t>(dlsym(handle, "proxy_pthread_atfork"));
+  ASSERT_TRUE(fn != nullptr) << dlerror();
+  // the library registers 2 additional atfork handlers in a constructor
+  ASSERT_EQ(0, fn(AtForkPrepare2, AtForkParent2, AtForkChild2));
+  ASSERT_EQ(0, fn(AtForkPrepare3, AtForkParent3, AtForkChild3));
+
+  ASSERT_EQ(0, pthread_atfork(AtForkPrepare4, AtForkParent4, AtForkChild4));
+
+  int pid = fork();
+
+  ASSERT_NE(-1, pid) << strerror(errno);
+
+  if (pid == 0) {
+    ASSERT_EQ(1234, g_atfork_child_calls);
+    _exit(0);
+  }
+
+  ASSERT_EQ(1234, g_atfork_parent_calls);
+  ASSERT_EQ(4321, g_atfork_prepare_calls);
+
+  EXPECT_EQ(0, dlclose(handle));
+  g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
+
+  int status;
+  ASSERT_EQ(pid, waitpid(pid, &status, 0));
+
+  pid = fork();
+
+  ASSERT_NE(-1, pid) << strerror(errno);
+
+  if (pid == 0) {
+    ASSERT_EQ(14, g_atfork_child_calls);
+    _exit(0);
+  }
+
+  ASSERT_EQ(14, g_atfork_parent_calls);
+  ASSERT_EQ(41, g_atfork_prepare_calls);
+
+  ASSERT_EQ(pid, waitpid(pid, &status, 0));
 }
 
 TEST(pthread, pthread_attr_getscope) {
diff --git a/tools/bionicbb/README.md b/tools/bionicbb/README.md
index 4d3291f..a285984 100644
--- a/tools/bionicbb/README.md
+++ b/tools/bionicbb/README.md
@@ -8,6 +8,7 @@
 ------------
 
  * Python 2.7
+ * [Advanced Python Scheduler](https://apscheduler.readthedocs.org/en/latest/)
  * [Flask](http://flask.pocoo.org/)
  * [Google API Client Library](https://developers.google.com/api-client-library/python/start/installation)
  * [jenkinsapi](https://pypi.python.org/pypi/jenkinsapi)
diff --git a/tools/bionicbb/build_listener.py b/tools/bionicbb/bionicbb.py
similarity index 88%
rename from tools/bionicbb/build_listener.py
rename to tools/bionicbb/bionicbb.py
index fa55d37..a786b27 100644
--- a/tools/bionicbb/build_listener.py
+++ b/tools/bionicbb/bionicbb.py
@@ -16,11 +16,15 @@
 #
 import json
 import logging
+import os
+
+from apscheduler.schedulers.background import BackgroundScheduler
+from flask import Flask, request
 import requests
 
 import gerrit
+import tasks
 
-from flask import Flask, request
 app = Flask(__name__)
 
 
@@ -115,4 +119,16 @@
 
 
 if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    logger = logging.getLogger()
+    fh = logging.FileHandler('bionicbb.log')
+    fh.setLevel(logging.INFO)
+    logger.addHandler(fh)
+
+    # Prevent the job from being rescheduled by the reloader.
+    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
+        scheduler = BackgroundScheduler()
+        scheduler.start()
+        scheduler.add_job(tasks.get_and_process_jobs, 'interval', minutes=5)
+
     app.run(host='0.0.0.0', debug=True)
diff --git a/tools/bionicbb/gmail.py b/tools/bionicbb/gmail.py
new file mode 100644
index 0000000..f088ad6
--- /dev/null
+++ b/tools/bionicbb/gmail.py
@@ -0,0 +1,71 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import base64
+import httplib2
+
+import config
+
+
+def get_body(msg):
+    if 'attachmentId' in msg['payload']['body']:
+        raise NotImplementedError('Handling of messages contained in '
+                                  'attachments not yet implemented.')
+    b64_body = msg['payload']['body']['data']
+    return base64.urlsafe_b64decode(b64_body.encode('ASCII'))
+
+
+def build_service():
+    from apiclient.discovery import build
+    from oauth2client.client import flow_from_clientsecrets
+    from oauth2client.file import Storage
+    from oauth2client.tools import run
+
+    OAUTH_SCOPE = 'https://www.googleapis.com/auth/gmail.modify'
+    STORAGE = Storage('oauth.storage')
+
+    # Start the OAuth flow to retrieve credentials
+    flow = flow_from_clientsecrets(config.client_secret_file,
+                                   scope=OAUTH_SCOPE)
+    http = httplib2.Http()
+
+    # Try to retrieve credentials from storage or run the flow to generate them
+    credentials = STORAGE.get()
+    if credentials is None or credentials.invalid:
+        credentials = run(flow, STORAGE, http=http)
+
+    http = credentials.authorize(http)
+    return build('gmail', 'v1', http=http)
+
+
+def get_gerrit_label(labels):
+    for label in labels:
+        if label['name'] == 'gerrit':
+            return label['id']
+    return None
+
+
+def get_all_messages(service, label):
+    msgs = []
+    response = service.users().messages().list(
+        userId='me', labelIds=label).execute()
+    if 'messages' in response:
+        msgs.extend(response['messages'])
+    while 'nextPageToken' in response:
+        page_token = response['nextPageToken']
+        response = service.users().messages().list(
+            userId='me', pageToken=page_token).execute()
+        msgs.extend(response['messages'])
+    return msgs
diff --git a/tools/bionicbb/gmail_listener.py b/tools/bionicbb/gmail_listener.py
deleted file mode 100644
index 134258a..0000000
--- a/tools/bionicbb/gmail_listener.py
+++ /dev/null
@@ -1,354 +0,0 @@
-#!/usr/bin/env python2
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import base64
-import httplib
-import httplib2
-import jenkinsapi
-import json
-import logging
-import os
-import re
-import requests
-import socket
-import sys
-import time
-
-import apiclient.errors
-
-import config
-import gerrit
-
-
-class GmailError(RuntimeError):
-    def __init__(self, message):
-        super(GmailError, self).__init__(message)
-
-
-def get_gerrit_label(labels):
-    for label in labels:
-        if label['name'] == 'gerrit':
-            return label['id']
-    return None
-
-
-def get_headers(msg):
-    headers = {}
-    for hdr in msg['payload']['headers']:
-        headers[hdr['name']] = hdr['value']
-    return headers
-
-
-def is_untrusted_committer(change_id, patch_set):
-    # TODO(danalbert): Needs to be based on the account that made the comment.
-    commit = gerrit.get_commit(change_id, patch_set)
-    committer = commit['committer']['email']
-    return not committer.endswith('@google.com')
-
-
-def contains_cleanspec(change_id, patch_set):
-    files = gerrit.get_files_for_revision(change_id, patch_set)
-    return 'CleanSpec.mk' in [os.path.basename(f) for f in files]
-
-
-def contains_bionicbb(change_id, patch_set):
-    files = gerrit.get_files_for_revision(change_id, patch_set)
-    return any('tools/bionicbb' in f for f in files)
-
-
-def should_skip_build(info):
-    if info['MessageType'] not in ('newchange', 'newpatchset', 'comment'):
-        raise ValueError('should_skip_build() is only valid for new '
-                         'changes, patch sets, and commits.')
-
-    change_id = info['Change-Id']
-    patch_set = info['PatchSet']
-
-    checks = [
-        is_untrusted_committer,
-        contains_cleanspec,
-        contains_bionicbb,
-    ]
-    for check in checks:
-        if check(change_id, patch_set):
-            return True
-    return False
-
-
-def build_service():
-    from apiclient.discovery import build
-    from oauth2client.client import flow_from_clientsecrets
-    from oauth2client.file import Storage
-    from oauth2client.tools import run
-
-    OAUTH_SCOPE = 'https://www.googleapis.com/auth/gmail.modify'
-    STORAGE = Storage('oauth.storage')
-
-    # Start the OAuth flow to retrieve credentials
-    flow = flow_from_clientsecrets(config.client_secret_file,
-                                   scope=OAUTH_SCOPE)
-    http = httplib2.Http()
-
-    # Try to retrieve credentials from storage or run the flow to generate them
-    credentials = STORAGE.get()
-    if credentials is None or credentials.invalid:
-        credentials = run(flow, STORAGE, http=http)
-
-    http = credentials.authorize(http)
-    return build('gmail', 'v1', http=http)
-
-
-def get_all_messages(service, label):
-    msgs = []
-    response = service.users().messages().list(
-        userId='me', labelIds=label).execute()
-    if 'messages' in response:
-        msgs.extend(response['messages'])
-    while 'nextPageToken' in response:
-        page_token = response['nextPageToken']
-        response = service.users().messages().list(
-            userId='me', pageToken=page_token).execute()
-        msgs.extend(response['messages'])
-    return msgs
-
-
-def get_body(msg):
-    if 'attachmentId' in msg['payload']['body']:
-        raise NotImplementedError('Handling of messages contained in '
-                                  'attachments not yet implemented.')
-    b64_body = msg['payload']['body']['data']
-    return base64.urlsafe_b64decode(b64_body.encode('ASCII'))
-
-
-def get_gerrit_info(body):
-    info = {}
-    gerrit_pattern = r'^Gerrit-(\S+): (.+)$'
-    for match in re.finditer(gerrit_pattern, body, flags=re.MULTILINE):
-        info[match.group(1)] = match.group(2).strip()
-    return info
-
-
-def clean_project(dry_run):
-    username = config.jenkins_credentials['username']
-    password = config.jenkins_credentials['password']
-    jenkins_url = config.jenkins_url
-    jenkins = jenkinsapi.api.Jenkins(jenkins_url, username, password)
-
-    build = 'clean-bionic-presubmit'
-    if build in jenkins:
-        if not dry_run:
-            job = jenkins[build].invoke()
-            url = job.get_build().baseurl
-        else:
-            url = 'DRY_RUN_URL'
-        logging.info('Cleaning: %s %s', build, url)
-    else:
-        logging.error('Failed to clean: could not find project %s', build)
-    return True
-
-
-def build_project(gerrit_info, dry_run, lunch_target=None):
-    project_to_jenkins_map = {
-        'platform/bionic': 'bionic-presubmit',
-        'platform/build': 'bionic-presubmit',
-        'platform/external/jemalloc': 'bionic-presubmit',
-        'platform/external/libcxx': 'bionic-presubmit',
-        'platform/external/libcxxabi': 'bionic-presubmit',
-        'platform/external/compiler-rt': 'bionic-presubmit',
-    }
-
-    username = config.jenkins_credentials['username']
-    password = config.jenkins_credentials['password']
-    jenkins_url = config.jenkins_url
-    jenkins = jenkinsapi.api.Jenkins(jenkins_url, username, password)
-
-    project = gerrit_info['Project']
-    change_id = gerrit_info['Change-Id']
-    if project in project_to_jenkins_map:
-        build = project_to_jenkins_map[project]
-    else:
-        build = 'bionic-presubmit'
-
-    if build in jenkins:
-        project_path = '/'.join(project.split('/')[1:])
-        if not project_path:
-            raise RuntimeError('bogus project: {}'.format(project))
-        if project_path.startswith('platform/'):
-            raise RuntimeError('Bad project mapping: {} => {}'.format(
-                project, project_path))
-        ref = gerrit.ref_for_change(change_id)
-        params = {
-            'REF': ref,
-            'CHANGE_ID': change_id,
-            'PROJECT': project_path
-        }
-        if lunch_target is not None:
-            params['LUNCH_TARGET'] = lunch_target
-        if not dry_run:
-            _ = jenkins[build].invoke(build_params=params)
-            # https://issues.jenkins-ci.org/browse/JENKINS-27256
-            # url = job.get_build().baseurl
-            url = 'URL UNAVAILABLE'
-        else:
-            url = 'DRY_RUN_URL'
-        logging.info('Building: %s => %s %s %s', project, build, url,
-                     change_id)
-    else:
-        logging.error('Unknown build: %s => %s %s', project, build, change_id)
-    return True
-
-
-def handle_change(gerrit_info, _, dry_run):
-    if should_skip_build(gerrit_info):
-        return True
-    return build_project(gerrit_info, dry_run)
-handle_newchange = handle_change
-handle_newpatchset = handle_change
-
-
-def drop_rejection(gerrit_info, dry_run):
-    request_data = {
-        'changeid': gerrit_info['Change-Id'],
-        'patchset': gerrit_info['PatchSet']
-    }
-    url = '{}/{}'.format(config.build_listener_url, 'drop-rejection')
-    headers = {'Content-Type': 'application/json;charset=UTF-8'}
-    if not dry_run:
-        try:
-            requests.post(url, headers=headers, data=json.dumps(request_data))
-        except requests.exceptions.ConnectionError as ex:
-            logging.error('Failed to drop rejection: %s', ex)
-            return False
-    logging.info('Dropped rejection: %s', gerrit_info['Change-Id'])
-    return True
-
-
-def handle_comment(gerrit_info, body, dry_run):
-    if 'Verified+1' in body:
-        drop_rejection(gerrit_info, dry_run)
-
-    if should_skip_build(gerrit_info):
-        return True
-
-    command_map = {
-        'clean': lambda: clean_project(dry_run),
-        'retry': lambda: build_project(gerrit_info, dry_run),
-
-        'arm': lambda: build_project(gerrit_info, dry_run,
-                                     lunch_target='aosp_arm-eng'),
-        'aarch64': lambda: build_project(gerrit_info, dry_run,
-                                         lunch_target='aosp_arm64-eng'),
-        'mips': lambda: build_project(gerrit_info, dry_run,
-                                      lunch_target='aosp_mips-eng'),
-        'mips64': lambda: build_project(gerrit_info, dry_run,
-                                        lunch_target='aosp_mips64-eng'),
-        'x86': lambda: build_project(gerrit_info, dry_run,
-                                     lunch_target='aosp_x86-eng'),
-        'x86_64': lambda: build_project(gerrit_info, dry_run,
-                                        lunch_target='aosp_x86_64-eng'),
-    }
-
-    def handle_unknown_command():
-        pass    # TODO(danalbert): should complain to the commenter.
-
-    commands = [match.group(1).strip() for match in
-                re.finditer(r'^bionicbb:\s*(.+)$', body, flags=re.MULTILINE)]
-
-    for command in commands:
-        if command in command_map:
-            command_map[command]()
-        else:
-            handle_unknown_command()
-
-    return True
-
-
-def skip_handler(gerrit_info, _, __):
-    logging.info('Skipping %s: %s', gerrit_info['MessageType'],
-                 gerrit_info['Change-Id'])
-    return True
-
-
-handle_abandon = skip_handler
-handle_merge_failed = skip_handler
-handle_merged = skip_handler
-handle_restore = skip_handler
-handle_revert = skip_handler
-
-
-def process_message(msg, dry_run):
-    try:
-        body = get_body(msg)
-        gerrit_info = get_gerrit_info(body)
-        if not gerrit_info:
-            logging.fatal('No Gerrit info found: %s', msg.subject)
-        msg_type = gerrit_info['MessageType']
-        handler = 'handle_{}'.format(
-            gerrit_info['MessageType'].replace('-', '_'))
-        if handler in globals():
-            return globals()[handler](gerrit_info, body, dry_run)
-        else:
-            logging.warning('MessageType %s unhandled.', msg_type)
-        return False
-    except NotImplementedError as ex:
-        logging.error("%s", ex)
-        return False
-    except gerrit.GerritError as ex:
-        change_id = gerrit_info['Change-Id']
-        logging.error('Gerrit error (%d): %s %s', ex.code, change_id, ex.url)
-        return ex.code == 404
-
-
-def main(argc, argv):
-    dry_run = False
-    if argc == 2 and argv[1] == '--dry-run':
-        dry_run = True
-    elif argc > 2:
-        sys.exit('usage: python {} [--dry-run]'.format(argv[0]))
-
-    gmail_service = build_service()
-    msg_service = gmail_service.users().messages()
-
-    while True:
-        try:
-            labels = gmail_service.users().labels().list(userId='me').execute()
-            if not labels['labels']:
-                raise GmailError('Could not retrieve Gmail labels')
-            label_id = get_gerrit_label(labels['labels'])
-            if not label_id:
-                raise GmailError('Could not find gerrit label')
-
-            for msg in get_all_messages(gmail_service, label_id):
-                msg = msg_service.get(userId='me', id=msg['id']).execute()
-                if process_message(msg, dry_run) and not dry_run:
-                    msg_service.trash(userId='me', id=msg['id']).execute()
-            time.sleep(60 * 5)
-        except GmailError as ex:
-            logging.error('Gmail error: %s', ex)
-            time.sleep(60 * 5)
-        except apiclient.errors.HttpError as ex:
-            logging.error('API Client HTTP error: %s', ex)
-            time.sleep(60 * 5)
-        except httplib.BadStatusLine:
-            pass
-        except httplib2.ServerNotFoundError:
-            pass
-        except socket.error:
-            pass
-
-
-if __name__ == '__main__':
-    main(len(sys.argv), sys.argv)
diff --git a/tools/bionicbb/presubmit.py b/tools/bionicbb/presubmit.py
new file mode 100644
index 0000000..cc6f3cc
--- /dev/null
+++ b/tools/bionicbb/presubmit.py
@@ -0,0 +1,203 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import absolute_import
+
+import json
+import logging
+import os.path
+import re
+import requests
+
+import jenkinsapi
+
+import gerrit
+
+import config
+
+
+def is_untrusted_committer(change_id, patch_set):
+    # TODO(danalbert): Needs to be based on the account that made the comment.
+    commit = gerrit.get_commit(change_id, patch_set)
+    committer = commit['committer']['email']
+    return not committer.endswith('@google.com')
+
+
+def contains_cleanspec(change_id, patch_set):
+    files = gerrit.get_files_for_revision(change_id, patch_set)
+    return 'CleanSpec.mk' in [os.path.basename(f) for f in files]
+
+
+def contains_bionicbb(change_id, patch_set):
+    files = gerrit.get_files_for_revision(change_id, patch_set)
+    return any('tools/bionicbb' in f for f in files)
+
+
+def should_skip_build(info):
+    if info['MessageType'] not in ('newchange', 'newpatchset', 'comment'):
+        raise ValueError('should_skip_build() is only valid for new '
+                         'changes, patch sets, and commits.')
+
+    change_id = info['Change-Id']
+    patch_set = info['PatchSet']
+
+    checks = [
+        is_untrusted_committer,
+        contains_cleanspec,
+        contains_bionicbb,
+    ]
+    for check in checks:
+        if check(change_id, patch_set):
+            return True
+    return False
+
+
+def clean_project(dry_run):
+    username = config.jenkins_credentials['username']
+    password = config.jenkins_credentials['password']
+    jenkins_url = config.jenkins_url
+    jenkins = jenkinsapi.api.Jenkins(jenkins_url, username, password)
+
+    build = 'clean-bionic-presubmit'
+    if build in jenkins:
+        if not dry_run:
+            job = jenkins[build].invoke()
+            url = job.get_build().baseurl
+        else:
+            url = 'DRY_RUN_URL'
+        logging.info('Cleaning: %s %s', build, url)
+    else:
+        logging.error('Failed to clean: could not find project %s', build)
+    return True
+
+
+def build_project(gerrit_info, dry_run, lunch_target=None):
+    project_to_jenkins_map = {
+        'platform/bionic': 'bionic-presubmit',
+        'platform/build': 'bionic-presubmit',
+        'platform/external/jemalloc': 'bionic-presubmit',
+        'platform/external/libcxx': 'bionic-presubmit',
+        'platform/external/libcxxabi': 'bionic-presubmit',
+        'platform/external/compiler-rt': 'bionic-presubmit',
+    }
+
+    username = config.jenkins_credentials['username']
+    password = config.jenkins_credentials['password']
+    jenkins_url = config.jenkins_url
+    jenkins = jenkinsapi.api.Jenkins(jenkins_url, username, password)
+
+    project = gerrit_info['Project']
+    change_id = gerrit_info['Change-Id']
+    if project in project_to_jenkins_map:
+        build = project_to_jenkins_map[project]
+    else:
+        build = 'bionic-presubmit'
+
+    if build in jenkins:
+        project_path = '/'.join(project.split('/')[1:])
+        if not project_path:
+            raise RuntimeError('bogus project: {}'.format(project))
+        if project_path.startswith('platform/'):
+            raise RuntimeError('Bad project mapping: {} => {}'.format(
+                project, project_path))
+        ref = gerrit.ref_for_change(change_id)
+        params = {
+            'REF': ref,
+            'CHANGE_ID': change_id,
+            'PROJECT': project_path
+        }
+        if lunch_target is not None:
+            params['LUNCH_TARGET'] = lunch_target
+        if not dry_run:
+            _ = jenkins[build].invoke(build_params=params)
+            # https://issues.jenkins-ci.org/browse/JENKINS-27256
+            # url = job.get_build().baseurl
+            url = 'URL UNAVAILABLE'
+        else:
+            url = 'DRY_RUN_URL'
+        logging.info('Building: %s => %s %s %s', project, build, url,
+                     change_id)
+    else:
+        logging.error('Unknown build: %s => %s %s', project, build, change_id)
+    return True
+
+
+def handle_change(gerrit_info, _, dry_run):
+    if should_skip_build(gerrit_info):
+        return True
+    return build_project(gerrit_info, dry_run)
+
+
+def drop_rejection(gerrit_info, dry_run):
+    request_data = {
+        'changeid': gerrit_info['Change-Id'],
+        'patchset': gerrit_info['PatchSet']
+    }
+    url = '{}/{}'.format(config.build_listener_url, 'drop-rejection')
+    headers = {'Content-Type': 'application/json;charset=UTF-8'}
+    if not dry_run:
+        try:
+            requests.post(url, headers=headers, data=json.dumps(request_data))
+        except requests.exceptions.ConnectionError as ex:
+            logging.error('Failed to drop rejection: %s', ex)
+            return False
+    logging.info('Dropped rejection: %s', gerrit_info['Change-Id'])
+    return True
+
+
+def handle_comment(gerrit_info, body, dry_run):
+    if 'Verified+1' in body:
+        drop_rejection(gerrit_info, dry_run)
+
+    if should_skip_build(gerrit_info):
+        return True
+
+    command_map = {
+        'clean': lambda: clean_project(dry_run),
+        'retry': lambda: build_project(gerrit_info, dry_run),
+
+        'arm': lambda: build_project(gerrit_info, dry_run,
+                                     lunch_target='aosp_arm-eng'),
+        'aarch64': lambda: build_project(gerrit_info, dry_run,
+                                         lunch_target='aosp_arm64-eng'),
+        'mips': lambda: build_project(gerrit_info, dry_run,
+                                      lunch_target='aosp_mips-eng'),
+        'mips64': lambda: build_project(gerrit_info, dry_run,
+                                        lunch_target='aosp_mips64-eng'),
+        'x86': lambda: build_project(gerrit_info, dry_run,
+                                     lunch_target='aosp_x86-eng'),
+        'x86_64': lambda: build_project(gerrit_info, dry_run,
+                                        lunch_target='aosp_x86_64-eng'),
+    }
+
+    def handle_unknown_command():
+        pass    # TODO(danalbert): should complain to the commenter.
+
+    commands = [match.group(1).strip() for match in
+                re.finditer(r'^bionicbb:\s*(.+)$', body, flags=re.MULTILINE)]
+
+    for command in commands:
+        if command in command_map:
+            command_map[command]()
+        else:
+            handle_unknown_command()
+
+    return True
+
+
+def skip_handler(gerrit_info, _, __):
+    logging.info('Skipping %s: %s', gerrit_info['MessageType'],
+                 gerrit_info['Change-Id'])
+    return True
diff --git a/tools/bionicbb/tasks.py b/tools/bionicbb/tasks.py
new file mode 100644
index 0000000..4c39a98
--- /dev/null
+++ b/tools/bionicbb/tasks.py
@@ -0,0 +1,108 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import httplib
+import httplib2
+import logging
+import re
+import socket
+
+import apiclient.errors
+
+import gerrit
+import gmail
+import presubmit
+
+
+def get_gerrit_info(body):
+    info = {}
+    gerrit_pattern = r'^Gerrit-(\S+): (.+)$'
+    for match in re.finditer(gerrit_pattern, body, flags=re.MULTILINE):
+        info[match.group(1)] = match.group(2).strip()
+    return info
+
+
+def process_message(msg, dry_run):
+    try:
+        body = gmail.get_body(msg)
+        gerrit_info = get_gerrit_info(body)
+        if not gerrit_info:
+            logging.fatal('No Gerrit info found: %s', msg.subject)
+        msg_type = gerrit_info['MessageType']
+        handlers = {
+            'comment': presubmit.handle_comment,
+            'newchange': presubmit.handle_change,
+            'newpatchset': presubmit.handle_change,
+
+            'abandon': presubmit.skip_handler,
+            'merge-failed': presubmit.skip_handler,
+            'merged': presubmit.skip_handler,
+            'restore': presubmit.skip_handler,
+            'revert': presubmit.skip_handler,
+        }
+
+        message_type = gerrit_info['MessageType']
+        if message_type in handlers:
+            return handlers[message_type](gerrit_info, body, dry_run)
+        else:
+            logging.warning('MessageType %s unhandled.', msg_type)
+        return False
+    except NotImplementedError as ex:
+        logging.error("%s", ex)
+        return False
+    except gerrit.GerritError as ex:
+        change_id = gerrit_info['Change-Id']
+        logging.error('Gerrit error (%d): %s %s', ex.code, change_id, ex.url)
+        return ex.code == 404
+
+
+def get_and_process_jobs():
+    dry_run = False
+
+    gmail_service = gmail.build_service()
+    msg_service = gmail_service.users().messages()
+
+    # We run in a loop because some of the exceptions thrown here mean we just
+    # need to retry. For errors where we should back off (typically any gmail
+    # API exceptions), process_changes catches the error and returns normally.
+    while True:
+        try:
+            process_changes(gmail_service, msg_service, dry_run)
+            return
+        except httplib.BadStatusLine:
+            pass
+        except httplib2.ServerNotFoundError:
+            pass
+        except socket.error:
+            pass
+
+
+def process_changes(gmail_service, msg_service, dry_run):
+    try:
+        labels = gmail_service.users().labels().list(userId='me').execute()
+        if not labels['labels']:
+            logging.error('Could not retrieve Gmail labels')
+            return
+        label_id = gmail.get_gerrit_label(labels['labels'])
+        if not label_id:
+            logging.error('Could not find gerrit label')
+            return
+
+        for msg in gmail.get_all_messages(gmail_service, label_id):
+            msg = msg_service.get(userId='me', id=msg['id']).execute()
+            if process_message(msg, dry_run) and not dry_run:
+                msg_service.trash(userId='me', id=msg['id']).execute()
+    except apiclient.errors.HttpError as ex:
+        logging.error('API Client HTTP error: %s', ex)
diff --git a/tools/bionicbb/test_gmail_listener.py b/tools/bionicbb/test_tasks.py
similarity index 74%
rename from tools/bionicbb/test_gmail_listener.py
rename to tools/bionicbb/test_tasks.py
index f8b9ab6..b36cbad 100644
--- a/tools/bionicbb/test_gmail_listener.py
+++ b/tools/bionicbb/test_tasks.py
@@ -1,11 +1,12 @@
-import gmail_listener
 import mock
 import unittest
 
+import presubmit
+
 
 class TestShouldSkipBuild(unittest.TestCase):
-    @mock.patch('gmail_listener.contains_bionicbb')
-    @mock.patch('gmail_listener.contains_cleanspec')
+    @mock.patch('presubmit.contains_bionicbb')
+    @mock.patch('presubmit.contains_cleanspec')
     @mock.patch('gerrit.get_commit')
     def test_accepts_googlers(self, mock_commit, *other_checks):
         mock_commit.return_value = {
@@ -16,14 +17,14 @@
             other_check.return_value = False
 
         for message_type in ('newchange', 'newpatchset', 'comment'):
-            self.assertFalse(gmail_listener.should_skip_build({
+            self.assertFalse(presubmit.should_skip_build({
                 'MessageType': message_type,
                 'Change-Id': '',
                 'PatchSet': '',
             }))
 
-    @mock.patch('gmail_listener.contains_bionicbb')
-    @mock.patch('gmail_listener.contains_cleanspec')
+    @mock.patch('presubmit.contains_bionicbb')
+    @mock.patch('presubmit.contains_cleanspec')
     @mock.patch('gerrit.get_commit')
     def test_rejects_googlish_domains(self, mock_commit, *other_checks):
         mock_commit.return_value = {
@@ -34,14 +35,14 @@
             other_check.return_value = False
 
         for message_type in ('newchange', 'newpatchset', 'comment'):
-            self.assertTrue(gmail_listener.should_skip_build({
+            self.assertTrue(presubmit.should_skip_build({
                 'MessageType': message_type,
                 'Change-Id': '',
                 'PatchSet': '',
             }))
 
-    @mock.patch('gmail_listener.contains_bionicbb')
-    @mock.patch('gmail_listener.contains_cleanspec')
+    @mock.patch('presubmit.contains_bionicbb')
+    @mock.patch('presubmit.contains_cleanspec')
     @mock.patch('gerrit.get_commit')
     def test_rejects_non_googlers(self, mock_commit, *other_checks):
         mock_commit.return_value = {
@@ -52,14 +53,14 @@
             other_check.return_value = False
 
         for message_type in ('newchange', 'newpatchset', 'comment'):
-            self.assertTrue(gmail_listener.should_skip_build({
+            self.assertTrue(presubmit.should_skip_build({
                 'MessageType': message_type,
                 'Change-Id': '',
                 'PatchSet': '',
             }))
 
-    @mock.patch('gmail_listener.contains_bionicbb')
-    @mock.patch('gmail_listener.is_untrusted_committer')
+    @mock.patch('presubmit.contains_bionicbb')
+    @mock.patch('presubmit.is_untrusted_committer')
     @mock.patch('gerrit.get_files_for_revision')
     def test_skips_cleanspecs(self, mock_files, *other_checks):
         mock_files.return_value = ['foo/CleanSpec.mk']
@@ -67,14 +68,14 @@
             other_check.return_value = False
 
         for message_type in ('newchange', 'newpatchset', 'comment'):
-            self.assertTrue(gmail_listener.should_skip_build({
+            self.assertTrue(presubmit.should_skip_build({
                 'MessageType': message_type,
                 'Change-Id': '',
                 'PatchSet': '',
             }))
 
-    @mock.patch('gmail_listener.contains_cleanspec')
-    @mock.patch('gmail_listener.is_untrusted_committer')
+    @mock.patch('presubmit.contains_cleanspec')
+    @mock.patch('presubmit.is_untrusted_committer')
     @mock.patch('gerrit.get_files_for_revision')
     def test_skips_bionicbb(self, mock_files, *other_checks):
         mock_files.return_value = ['tools/bionicbb/common.sh']
@@ -82,7 +83,7 @@
             other_check.return_value = False
 
         for message_type in ('newchange', 'newpatchset', 'comment'):
-            self.assertTrue(gmail_listener.should_skip_build({
+            self.assertTrue(presubmit.should_skip_build({
                 'MessageType': message_type,
                 'Change-Id': '',
                 'PatchSet': '',
diff --git a/tools/relocation_packer/Android.mk b/tools/relocation_packer/Android.mk
index 99a39c0..75dba71 100644
--- a/tools/relocation_packer/Android.mk
+++ b/tools/relocation_packer/Android.mk
@@ -26,7 +26,6 @@
   src/debug.cc \
   src/delta_encoder.cc \
   src/elf_file.cc \
-  src/leb128.cc \
   src/packer.cc \
   src/sleb128.cc \
 
@@ -46,6 +45,9 @@
 
 LOCAL_SRC_FILES := src/main.cc
 LOCAL_STATIC_LIBRARIES := lib_relocation_packer libelf
+
+# Statically linking libc++ to make it work from prebuilts
+LOCAL_CXX_STL := libc++_static
 LOCAL_C_INCLUDES := external/elfutils/src/libelf libnativehelper/include
 
 LOCAL_MODULE := relocation_packer
@@ -64,7 +66,6 @@
   src/debug_unittest.cc \
   src/delta_encoder_unittest.cc \
   src/elf_file_unittest.cc \
-  src/leb128_unittest.cc \
   src/sleb128_unittest.cc \
   src/packer_unittest.cc \
 
diff --git a/tools/relocation_packer/src/elf_file.cc b/tools/relocation_packer/src/elf_file.cc
index 20b25ef..c8ddde6 100644
--- a/tools/relocation_packer/src/elf_file.cc
+++ b/tools/relocation_packer/src/elf_file.cc
@@ -190,6 +190,7 @@
   // these; both is unsupported.
   bool has_rel_relocations = false;
   bool has_rela_relocations = false;
+  bool has_android_relocations = false;
 
   Elf_Scn* section = NULL;
   while ((section = elf_nextscn(elf, section)) != nullptr) {
@@ -209,6 +210,11 @@
     if ((name == ".rel.dyn" || name == ".rela.dyn") &&
         section_header->sh_size > 0) {
       found_relocations_section = section;
+
+      // Note whether the relocation section is already packed.
+      has_android_relocations =
+          section_header->sh_type == SHT_ANDROID_REL ||
+          section_header->sh_type == SHT_ANDROID_RELA;
     }
 
     if (section_header->sh_offset == dynamic_program_header->p_offset) {
@@ -250,6 +256,7 @@
   relocations_section_ = found_relocations_section;
   dynamic_section_ = found_dynamic_section;
   relocations_type_ = has_rel_relocations ? REL : RELA;
+  has_android_relocations_ = has_android_relocations;
   return true;
 }
 
@@ -439,6 +446,9 @@
                                 tag == DT_JMPREL ||
                                 tag == DT_INIT_ARRAY ||
                                 tag == DT_FINI_ARRAY ||
+                                tag == DT_VERSYM ||
+                                tag == DT_VERNEED ||
+                                tag == DT_VERDEF ||
                                 tag == DT_ANDROID_REL||
                                 tag == DT_ANDROID_RELA);
 
@@ -586,7 +596,7 @@
     const typename ELF::Rel* relocations_base = reinterpret_cast<typename ELF::Rel*>(data->d_buf);
     ConvertRelArrayToRelaVector(relocations_base,
         data->d_size / sizeof(typename ELF::Rel), &relocations);
-    LOG(INFO) << "Relocations   : REL";
+    VLOG(1) << "Relocations   : REL";
   } else if (relocations_type_ == RELA) {
     // Convert data to a vector of relocations with addends.
     const typename ELF::Rela* relocations_base = reinterpret_cast<typename ELF::Rela*>(data->d_buf);
@@ -594,7 +604,7 @@
         relocations_base,
         relocations_base + data->d_size / sizeof(relocations[0]));
 
-    LOG(INFO) << "Relocations   : RELA";
+    VLOG(1) << "Relocations   : RELA";
   } else {
     NOTREACHED();
   }
@@ -607,10 +617,15 @@
 bool ElfFile<ELF>::PackTypedRelocations(std::vector<typename ELF::Rela>* relocations) {
   typedef typename ELF::Rela Rela;
 
+  if (has_android_relocations_) {
+    LOG(INFO) << "Relocation table is already packed";
+    return true;
+  }
+
   // If no relocations then we have nothing packable.  Perhaps
   // the shared object has already been packed?
   if (relocations->empty()) {
-    LOG(ERROR) << "No relocations found (already packed?)";
+    LOG(ERROR) << "No relocations found";
     return false;
   }
 
@@ -618,18 +633,18 @@
       relocations_type_ == RELA ? sizeof(typename ELF::Rela) : sizeof(typename ELF::Rel);
   const size_t initial_bytes = relocations->size() * rel_size;
 
-  LOG(INFO) << "Unpacked                   : " << initial_bytes << " bytes";
+  VLOG(1) << "Unpacked                   : " << initial_bytes << " bytes";
   std::vector<uint8_t> packed;
   RelocationPacker<ELF> packer;
 
   // Pack relocations: dry run to estimate memory savings.
   packer.PackRelocations(*relocations, &packed);
   const size_t packed_bytes_estimate = packed.size() * sizeof(packed[0]);
-  LOG(INFO) << "Packed         (no padding): " << packed_bytes_estimate << " bytes";
+  VLOG(1) << "Packed         (no padding): " << packed_bytes_estimate << " bytes";
 
   if (packed.empty()) {
     LOG(INFO) << "Too few relocations to pack";
-    return false;
+    return true;
   }
 
   // Pre-calculate the size of the hole we will close up when we rewrite
@@ -646,12 +661,12 @@
   // Adjusting for alignment may have removed any packing benefit.
   if (hole_size == 0) {
     LOG(INFO) << "Too few relocations to pack after alignment";
-    return false;
+    return true;
   }
 
   if (hole_size <= 0) {
     LOG(INFO) << "Packing relocations saves no space";
-    return false;
+    return true;
   }
 
   size_t data_padding_bytes = is_padding_relocations_ ?
@@ -734,7 +749,7 @@
       packed.size() > 3 &&
       packed[0] == 'A' &&
       packed[1] == 'P' &&
-      (packed[2] == 'U' || packed[2] == 'S') &&
+      packed[2] == 'S' &&
       packed[3] == '2') {
     LOG(INFO) << "Relocations   : " << (relocations_type_ == REL ? "REL" : "RELA");
   } else {
diff --git a/tools/relocation_packer/src/elf_file.h b/tools/relocation_packer/src/elf_file.h
index a749d50..d6acc76 100644
--- a/tools/relocation_packer/src/elf_file.h
+++ b/tools/relocation_packer/src/elf_file.h
@@ -36,7 +36,7 @@
   explicit ElfFile(int fd)
       : fd_(fd), is_padding_relocations_(false), elf_(NULL),
         relocations_section_(NULL), dynamic_section_(NULL),
-        relocations_type_(NONE) {}
+        relocations_type_(NONE), has_android_relocations_(false) {}
   ~ElfFile() {}
 
   // Set padding mode.  When padding, PackRelocations() will not shrink
@@ -111,6 +111,9 @@
 
   // Relocation type found, assigned by Load().
   relocations_type_t relocations_type_;
+
+  // True if the ELF file already has an Android packed relocations section.
+  bool has_android_relocations_;
 };
 
 }  // namespace relocation_packer
diff --git a/tools/relocation_packer/src/elf_file_unittest.cc b/tools/relocation_packer/src/elf_file_unittest.cc
index 434f101..32f7968 100644
--- a/tools/relocation_packer/src/elf_file_unittest.cc
+++ b/tools/relocation_packer/src/elf_file_unittest.cc
@@ -103,8 +103,8 @@
 static void ProcessUnpack(FILE* relocs_so, FILE* packed_relocs_so) {
   relocation_packer::ElfFile<ELF> elf_file(fileno(packed_relocs_so));
 
-  // Ensure packing fails (already packed).
-  EXPECT_FALSE(elf_file.PackRelocations());
+  // Ensure that packing an already-packed ELF file does not fail.
+  EXPECT_TRUE(elf_file.PackRelocations());
 
   // Unpack golden relocations, and check files are now identical.
   EXPECT_TRUE(elf_file.UnpackRelocations());
@@ -175,13 +175,19 @@
 
 namespace relocation_packer {
 
-TEST(ElfFile, PackRelocations) {
+TEST(ElfFile, PackRelocationsArm32) {
   RunPackRelocationsTestFor("arm32");
+}
+
+TEST(ElfFile, PackRelocationsArm64) {
   RunPackRelocationsTestFor("arm64");
 }
 
-TEST(ElfFile, UnpackRelocations) {
+TEST(ElfFile, UnpackRelocationsArm32) {
   RunUnpackRelocationsTestFor("arm32");
+}
+
+TEST(ElfFile, UnpackRelocationsArm64) {
   RunUnpackRelocationsTestFor("arm64");
 }
 
diff --git a/tools/relocation_packer/src/leb128.cc b/tools/relocation_packer/src/leb128.cc
deleted file mode 100644
index 101c557..0000000
--- a/tools/relocation_packer/src/leb128.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "leb128.h"
-
-#include <stdint.h>
-#include <vector>
-
-#include "elf_traits.h"
-
-namespace relocation_packer {
-
-// Empty constructor and destructor to silence chromium-style.
-template <typename uint_t>
-Leb128Encoder<uint_t>::Leb128Encoder() { }
-
-template <typename uint_t>
-Leb128Encoder<uint_t>::~Leb128Encoder() { }
-
-// Add a single value to the encoding.  Values are encoded with variable
-// length.  The least significant 7 bits of each byte hold 7 bits of data,
-// and the most significant bit is set on each byte except the last.
-template <typename uint_t>
-void Leb128Encoder<uint_t>::Enqueue(uint_t value) {
-  uint_t uvalue = static_cast<uint_t>(value);
-  do {
-    const uint8_t byte = uvalue & 127;
-    uvalue >>= 7;
-    encoding_.push_back((uvalue ? 128 : 0) | byte);
-  } while (uvalue);
-}
-
-// Add a vector of values to the encoding.
-template <typename uint_t>
-void Leb128Encoder<uint_t>::EnqueueAll(const std::vector<uint_t>& values) {
-  for (size_t i = 0; i < values.size(); ++i) {
-    Enqueue(values[i]);
-  }
-}
-
-// Create a new decoder for the given encoded stream.
-template <typename uint_t>
-Leb128Decoder<uint_t>::Leb128Decoder(const std::vector<uint8_t>& encoding, size_t start_with) {
-  encoding_ = encoding;
-  cursor_ = start_with;
-}
-
-// Empty destructor to silence chromium-style.
-template <typename uint_t>
-Leb128Decoder<uint_t>::~Leb128Decoder() { }
-
-// Decode and retrieve a single value from the encoding.  Read forwards until
-// a byte without its most significant bit is found, then read the 7 bit
-// fields of the bytes spanned to re-form the value.
-template <typename uint_t>
-uint_t Leb128Decoder<uint_t>::Dequeue() {
-  uint_t value = 0;
-
-  size_t shift = 0;
-  uint8_t byte;
-
-  // Loop until we reach a byte with its high order bit clear.
-  do {
-    byte = encoding_[cursor_++];
-    value |= static_cast<uint_t>(byte & 127) << shift;
-    shift += 7;
-  } while (byte & 128);
-
-  return value;
-}
-
-// Decode and retrieve all remaining values from the encoding.
-template <typename uint_t>
-void Leb128Decoder<uint_t>::DequeueAll(std::vector<uint_t>* values) {
-  while (cursor_ < encoding_.size()) {
-    values->push_back(Dequeue());
-  }
-}
-
-template class Leb128Encoder<uint32_t>;
-template class Leb128Encoder<uint64_t>;
-
-template class Leb128Decoder<uint32_t>;
-template class Leb128Decoder<uint64_t>;
-
-}  // namespace relocation_packer
diff --git a/tools/relocation_packer/src/leb128.h b/tools/relocation_packer/src/leb128.h
deleted file mode 100644
index 67fc4b8..0000000
--- a/tools/relocation_packer/src/leb128.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// LEB128 encoder and decoder for packed relative relocations.
-//
-// Packed relocations consist of a large number of relatively small
-// integer values.  Encoding these as LEB128 saves space.
-//
-// For more on LEB128 see http://en.wikipedia.org/wiki/LEB128.
-
-#ifndef TOOLS_RELOCATION_PACKER_SRC_LEB128_H_
-#define TOOLS_RELOCATION_PACKER_SRC_LEB128_H_
-
-#include <stdint.h>
-#include <vector>
-
-#include "elf_traits.h"
-
-namespace relocation_packer {
-
-// Encode packed words as a LEB128 byte stream.
-template <typename uint_t>
-class Leb128Encoder {
- public:
-  // Explicit (but empty) constructor and destructor, for chromium-style.
-  Leb128Encoder();
-  ~Leb128Encoder();
-
-  // Add a value to the encoding stream.
-  // |value| is the unsigned int to add.
-  void Enqueue(uint_t value);
-
-  // Add a vector of values to the encoding stream.
-  // |values| is the vector of unsigned ints to add.
-  void EnqueueAll(const std::vector<uint_t>& values);
-
-  // Retrieve the encoded representation of the values.
-  // |encoding| is the returned vector of encoded data.
-  void GetEncoding(std::vector<uint8_t>* encoding) { *encoding = encoding_; }
-
- private:
-  // Growable vector holding the encoded LEB128 stream.
-  std::vector<uint8_t> encoding_;
-};
-
-// Decode a LEB128 byte stream to produce packed words.
-template <typename uint_t>
-class Leb128Decoder {
- public:
-  // Create a new decoder for the given encoded stream.
-  // |encoding| is the vector of encoded data.
-  explicit Leb128Decoder(const std::vector<uint8_t>& encoding, size_t start_with);
-
-  // Explicit (but empty) destructor, for chromium-style.
-  ~Leb128Decoder();
-
-  // Retrieve the next value from the encoded stream.
-  uint_t Dequeue();
-
-  // Retrieve all remaining values from the encoded stream.
-  // |values| is the vector of decoded data.
-  void DequeueAll(std::vector<uint_t>* values);
-
- private:
-  // Encoded LEB128 stream.
-  std::vector<uint8_t> encoding_;
-
-  // Cursor indicating the current stream retrieval point.
-  size_t cursor_;
-};
-
-}  // namespace relocation_packer
-
-#endif  // TOOLS_RELOCATION_PACKER_SRC_LEB128_H_
diff --git a/tools/relocation_packer/src/leb128_unittest.cc b/tools/relocation_packer/src/leb128_unittest.cc
deleted file mode 100644
index 8a7028c..0000000
--- a/tools/relocation_packer/src/leb128_unittest.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "leb128.h"
-
-#include <vector>
-#include "gtest/gtest.h"
-
-namespace relocation_packer {
-
-TEST(Leb128, Encoder64) {
-  std::vector<uint64_t> values;
-  values.push_back(624485);
-  values.push_back(0);
-  values.push_back(1);
-  values.push_back(127);
-  values.push_back(128);
-
-  Leb128Encoder<uint64_t> encoder;
-  encoder.EnqueueAll(values);
-
-  encoder.Enqueue(4294967295);
-  encoder.Enqueue(18446744073709551615ul);
-
-  std::vector<uint8_t> encoding;
-  encoder.GetEncoding(&encoding);
-
-  EXPECT_EQ(23U, encoding.size());
-  // 624485
-  EXPECT_EQ(0xe5, encoding[0]);
-  EXPECT_EQ(0x8e, encoding[1]);
-  EXPECT_EQ(0x26, encoding[2]);
-  // 0
-  EXPECT_EQ(0x00, encoding[3]);
-  // 1
-  EXPECT_EQ(0x01, encoding[4]);
-  // 127
-  EXPECT_EQ(0x7f, encoding[5]);
-  // 128
-  EXPECT_EQ(0x80, encoding[6]);
-  EXPECT_EQ(0x01, encoding[7]);
-  // 4294967295
-  EXPECT_EQ(0xff, encoding[8]);
-  EXPECT_EQ(0xff, encoding[9]);
-  EXPECT_EQ(0xff, encoding[10]);
-  EXPECT_EQ(0xff, encoding[11]);
-  EXPECT_EQ(0x0f, encoding[12]);
-  // 18446744073709551615
-  EXPECT_EQ(0xff, encoding[13]);
-  EXPECT_EQ(0xff, encoding[14]);
-  EXPECT_EQ(0xff, encoding[15]);
-  EXPECT_EQ(0xff, encoding[16]);
-  EXPECT_EQ(0xff, encoding[17]);
-  EXPECT_EQ(0xff, encoding[18]);
-  EXPECT_EQ(0xff, encoding[19]);
-  EXPECT_EQ(0xff, encoding[20]);
-  EXPECT_EQ(0xff, encoding[21]);
-  EXPECT_EQ(0x01, encoding[22]);
-}
-
-TEST(Leb128, Decoder64) {
-  std::vector<uint8_t> encoding;
-  // 624485
-  encoding.push_back(0xe5);
-  encoding.push_back(0x8e);
-  encoding.push_back(0x26);
-  // 0
-  encoding.push_back(0x00);
-  // 1
-  encoding.push_back(0x01);
-  // 127
-  encoding.push_back(0x7f);
-  // 128
-  encoding.push_back(0x80);
-  encoding.push_back(0x01);
-  // 4294967295
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0x0f);
-  // 18446744073709551615
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0xff);
-  encoding.push_back(0x01);
-
-  Leb128Decoder<uint64_t> decoder(encoding, 0);
-
-  EXPECT_EQ(624485U, decoder.Dequeue());
-
-  std::vector<uint64_t> dequeued;
-  decoder.DequeueAll(&dequeued);
-
-  EXPECT_EQ(6U, dequeued.size());
-  EXPECT_EQ(0U, dequeued[0]);
-  EXPECT_EQ(1U, dequeued[1]);
-  EXPECT_EQ(127U, dequeued[2]);
-  EXPECT_EQ(128U, dequeued[3]);
-  EXPECT_EQ(4294967295U, dequeued[4]);
-  EXPECT_EQ(18446744073709551615UL, dequeued[5]);
-}
-
-}  // namespace relocation_packer
diff --git a/tools/relocation_packer/src/packer.cc b/tools/relocation_packer/src/packer.cc
index 8e30612..433611f 100644
--- a/tools/relocation_packer/src/packer.cc
+++ b/tools/relocation_packer/src/packer.cc
@@ -9,7 +9,6 @@
 #include "debug.h"
 #include "delta_encoder.h"
 #include "elf_traits.h"
-#include "leb128.h"
 #include "sleb128.h"
 
 namespace relocation_packer {
@@ -28,32 +27,17 @@
     return;
 
   Sleb128Encoder<typename ELF::Addr> sleb128_encoder;
-  Leb128Encoder<typename ELF::Addr> leb128_encoder;
 
-  std::vector<uint8_t> leb128_packed;
   std::vector<uint8_t> sleb128_packed;
 
-  leb128_encoder.EnqueueAll(packed_words);
-  leb128_encoder.GetEncoding(&leb128_packed);
-
   sleb128_encoder.EnqueueAll(packed_words);
   sleb128_encoder.GetEncoding(&sleb128_packed);
 
-  // TODO (simonb): Estimate savings on current android system image and consider using
-  // one encoder for all packed relocations to reduce complexity.
-  if (leb128_packed.size() <= sleb128_packed.size()) {
-    packed->push_back('A');
-    packed->push_back('P');
-    packed->push_back('U');
-    packed->push_back('2');
-    packed->insert(packed->end(), leb128_packed.begin(), leb128_packed.end());
-  } else {
-    packed->push_back('A');
-    packed->push_back('P');
-    packed->push_back('S');
-    packed->push_back('2');
-    packed->insert(packed->end(), sleb128_packed.begin(), sleb128_packed.end());
-  }
+  packed->push_back('A');
+  packed->push_back('P');
+  packed->push_back('S');
+  packed->push_back('2');
+  packed->insert(packed->end(), sleb128_packed.begin(), sleb128_packed.end());
 }
 
 // Unpack relative relocations from a run-length encoded packed
@@ -67,16 +51,11 @@
   CHECK(packed.size() > 4 &&
         packed[0] == 'A' &&
         packed[1] == 'P' &&
-        (packed[2] == 'U' || packed[2] == 'S') &&
+        packed[2] == 'S' &&
         packed[3] == '2');
 
-  if (packed[2] == 'U') {
-    Leb128Decoder<typename ELF::Addr> decoder(packed, 4);
-    decoder.DequeueAll(&packed_words);
-  } else {
-    Sleb128Decoder<typename ELF::Addr> decoder(packed, 4);
-    decoder.DequeueAll(&packed_words);
-  }
+  Sleb128Decoder<typename ELF::Addr> decoder(packed, 4);
+  decoder.DequeueAll(&packed_words);
 
   RelocationDeltaCodec<ELF> codec;
   codec.Decode(packed_words, relocations);
diff --git a/tools/relocation_packer/src/packer_unittest.cc b/tools/relocation_packer/src/packer_unittest.cc
index 8dddd8b..424b92c 100644
--- a/tools/relocation_packer/src/packer_unittest.cc
+++ b/tools/relocation_packer/src/packer_unittest.cc
@@ -39,6 +39,7 @@
 static void DoPackNoAddend() {
   std::vector<typename ELF::Rela> relocations;
   std::vector<uint8_t> packed;
+  bool is_32 = sizeof(typename ELF::Addr) == 4;
   // Initial relocation.
   AddRelocation<ELF>(0xd1ce0000, 0x11, 0, &relocations);
   // Two more relocations, 4 byte deltas.
@@ -59,16 +60,16 @@
   size_t ndx = 0;
   EXPECT_EQ('A', packed[ndx++]);
   EXPECT_EQ('P', packed[ndx++]);
-  EXPECT_EQ('U', packed[ndx++]);
+  EXPECT_EQ('S', packed[ndx++]);
   EXPECT_EQ('2', packed[ndx++]);
   // relocation count
   EXPECT_EQ(6, packed[ndx++]);
-  // base relocation = 0xd1cdfffc -> fc, ff, b7, 8e, 0d
+  // base relocation = 0xd1cdfffc -> fc, ff, b7, 8e, 7d/0d (32/64bit)
   EXPECT_EQ(0xfc, packed[ndx++]);
   EXPECT_EQ(0xff, packed[ndx++]);
   EXPECT_EQ(0xb7, packed[ndx++]);
   EXPECT_EQ(0x8e, packed[ndx++]);
-  EXPECT_EQ(0x0d, packed[ndx++]);
+  EXPECT_EQ(is_32 ? 0x7d : 0x0d, packed[ndx++]);
   // first group
   EXPECT_EQ(3, packed[ndx++]);  // size
   EXPECT_EQ(3, packed[ndx++]); // flags
@@ -83,8 +84,11 @@
   EXPECT_EQ(ndx, packed.size());
 }
 
-TEST(Packer, PackNoAddend) {
+TEST(Packer, PackNoAddend32) {
   DoPackNoAddend<ELF32_traits>();
+}
+
+TEST(Packer, PackNoAddend64) {
   DoPackNoAddend<ELF64_traits>();
 }
 
@@ -92,18 +96,19 @@
 static void DoUnpackNoAddend() {
   std::vector<typename ELF::Rela> relocations;
   std::vector<uint8_t> packed;
+  bool is_32 = sizeof(typename ELF::Addr) == 4;
   packed.push_back('A');
   packed.push_back('P');
-  packed.push_back('U');
+  packed.push_back('S');
   packed.push_back('2');
   // relocation count
   packed.push_back(6);
-  // base relocation = 0xd1cdfffc -> fc, ff, b7, 8e, 0d
+  // base relocation = 0xd1cdfffc -> fc, ff, b7, 8e, 7d/0d (32/64bit)
   packed.push_back(0xfc);
   packed.push_back(0xff);
   packed.push_back(0xb7);
   packed.push_back(0x8e);
-  packed.push_back(0x0d);
+  packed.push_back(is_32 ? 0x7d : 0x0d);
   // first group
   packed.push_back(3);  // size
   packed.push_back(3); // flags
@@ -131,8 +136,11 @@
   EXPECT_EQ(ndx, relocations.size());
 }
 
-TEST(Packer, UnpackNoAddend) {
+TEST(Packer, UnpackNoAddend32) {
   DoUnpackNoAddend<ELF32_traits>();
+}
+
+TEST(Packer, UnpackNoAddend64) {
   DoUnpackNoAddend<ELF64_traits>();
 }
 
diff --git a/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm32_packed.so b/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm32_packed.so
index d97ef82..6ac2eef 100755
--- a/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm32_packed.so
+++ b/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm32_packed.so
Binary files differ
diff --git a/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm64_packed.so b/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm64_packed.so
index e44e459..a2b0039 100755
--- a/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm64_packed.so
+++ b/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm64_packed.so
Binary files differ