Merge "Switch sem_t from bionic atomics to stdatomic.h."
diff --git a/CPPLINT.cfg b/CPPLINT.cfg
new file mode 100644
index 0000000..560d791
--- /dev/null
+++ b/CPPLINT.cfg
@@ -0,0 +1,2 @@
+set noparent
+filter=-build/header_guard,-runtime/int,-readability/function
diff --git a/libc/Android.mk b/libc/Android.mk
index 691017a..fe7b116 100644
--- a/libc/Android.mk
+++ b/libc/Android.mk
@@ -69,6 +69,7 @@
bionic/__FD_chk.cpp \
bionic/__fgets_chk.cpp \
bionic/__memmove_chk.cpp \
+ bionic/__poll_chk.cpp \
bionic/__read_chk.cpp \
bionic/__recvfrom_chk.cpp \
bionic/__stpcpy_chk.cpp \
@@ -115,6 +116,8 @@
bionic/error.cpp \
bionic/eventfd_read.cpp \
bionic/eventfd_write.cpp \
+ bionic/fchmod.cpp \
+ bionic/fchmodat.cpp \
bionic/ffs.cpp \
bionic/flockfile.cpp \
bionic/fork.cpp \
diff --git a/libc/SYSCALLS.TXT b/libc/SYSCALLS.TXT
index 0fa2a1e..aae7de7 100644
--- a/libc/SYSCALLS.TXT
+++ b/libc/SYSCALLS.TXT
@@ -113,7 +113,7 @@
int __fcntl64:fcntl64(int, int, void*) arm,mips,x86
int fcntl(int, int, void*) arm64,mips64,x86_64
int flock(int, int) all
-int fchmod(int, mode_t) all
+int ___fchmod:fchmod(int, mode_t) all
int dup(int) all
int pipe2(int*, int) all
int dup3(int, int, int) all
@@ -131,7 +131,7 @@
int __openat:openat(int, const char*, int, mode_t) all
int faccessat(int, const char*, int, int) all
-int fchmodat(int, const char*, mode_t, int) all
+int ___fchmodat:fchmodat(int, const char*, mode_t) all
int fchownat(int, const char*, uid_t, gid_t, int) all
int fstatat64|fstatat:fstatat64(int, const char*, struct stat*, int) arm,mips,x86
int fstatat64|fstatat:newfstatat(int, const char*, struct stat*, int) arm64,x86_64
diff --git a/libc/arch-arm/syscalls/fchmod.S b/libc/arch-arm/syscalls/___fchmod.S
similarity index 84%
rename from libc/arch-arm/syscalls/fchmod.S
rename to libc/arch-arm/syscalls/___fchmod.S
index 5675f0a..c6da4f8 100644
--- a/libc/arch-arm/syscalls/fchmod.S
+++ b/libc/arch-arm/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmod)
+ENTRY(___fchmod)
mov ip, r7
ldr r7, =__NR_fchmod
swi #0
@@ -11,4 +11,5 @@
bxls lr
neg r0, r0
b __set_errno_internal
-END(fchmod)
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-arm/syscalls/fchmodat.S b/libc/arch-arm/syscalls/___fchmodat.S
similarity index 82%
rename from libc/arch-arm/syscalls/fchmodat.S
rename to libc/arch-arm/syscalls/___fchmodat.S
index 3f7e0ee..91bbda5 100644
--- a/libc/arch-arm/syscalls/fchmodat.S
+++ b/libc/arch-arm/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmodat)
+ENTRY(___fchmodat)
mov ip, r7
ldr r7, =__NR_fchmodat
swi #0
@@ -11,4 +11,5 @@
bxls lr
neg r0, r0
b __set_errno_internal
-END(fchmodat)
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-arm64/syscalls/fchmod.S b/libc/arch-arm64/syscalls/___fchmod.S
similarity index 81%
rename from libc/arch-arm64/syscalls/fchmod.S
rename to libc/arch-arm64/syscalls/___fchmod.S
index 83a8060..a143c65 100644
--- a/libc/arch-arm64/syscalls/fchmod.S
+++ b/libc/arch-arm64/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmod)
+ENTRY(___fchmod)
mov x8, __NR_fchmod
svc #0
@@ -11,4 +11,5 @@
b.hi __set_errno_internal
ret
-END(fchmod)
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-arm64/syscalls/fchmodat.S b/libc/arch-arm64/syscalls/___fchmodat.S
similarity index 80%
rename from libc/arch-arm64/syscalls/fchmodat.S
rename to libc/arch-arm64/syscalls/___fchmodat.S
index 8c5bb0e..1ab3736 100644
--- a/libc/arch-arm64/syscalls/fchmodat.S
+++ b/libc/arch-arm64/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmodat)
+ENTRY(___fchmodat)
mov x8, __NR_fchmodat
svc #0
@@ -11,4 +11,5 @@
b.hi __set_errno_internal
ret
-END(fchmodat)
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-mips/syscalls/fchmod.S b/libc/arch-mips/syscalls/___fchmod.S
similarity index 84%
rename from libc/arch-mips/syscalls/fchmod.S
rename to libc/arch-mips/syscalls/___fchmod.S
index 2a95cc3..ac102ec 100644
--- a/libc/arch-mips/syscalls/fchmod.S
+++ b/libc/arch-mips/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmod)
+ENTRY(___fchmod)
.set noreorder
.cpload t9
li v0, __NR_fchmod
@@ -16,4 +16,5 @@
j t9
nop
.set reorder
-END(fchmod)
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-mips/syscalls/fchmodat.S b/libc/arch-mips/syscalls/___fchmodat.S
similarity index 82%
rename from libc/arch-mips/syscalls/fchmodat.S
rename to libc/arch-mips/syscalls/___fchmodat.S
index d9de036..d581efa 100644
--- a/libc/arch-mips/syscalls/fchmodat.S
+++ b/libc/arch-mips/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmodat)
+ENTRY(___fchmodat)
.set noreorder
.cpload t9
li v0, __NR_fchmodat
@@ -16,4 +16,5 @@
j t9
nop
.set reorder
-END(fchmodat)
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-mips64/syscalls/fchmod.S b/libc/arch-mips64/syscalls/___fchmod.S
similarity index 87%
rename from libc/arch-mips64/syscalls/fchmod.S
rename to libc/arch-mips64/syscalls/___fchmod.S
index a877b78..7c16c54 100644
--- a/libc/arch-mips64/syscalls/fchmod.S
+++ b/libc/arch-mips64/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmod)
+ENTRY(___fchmod)
.set push
.set noreorder
li v0, __NR_fchmod
@@ -22,4 +22,5 @@
j t9
move ra, t0
.set pop
-END(fchmod)
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-mips64/syscalls/fchmodat.S b/libc/arch-mips64/syscalls/___fchmodat.S
similarity index 86%
rename from libc/arch-mips64/syscalls/fchmodat.S
rename to libc/arch-mips64/syscalls/___fchmodat.S
index 151492a..50f108e 100644
--- a/libc/arch-mips64/syscalls/fchmodat.S
+++ b/libc/arch-mips64/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmodat)
+ENTRY(___fchmodat)
.set push
.set noreorder
li v0, __NR_fchmodat
@@ -22,4 +22,5 @@
j t9
move ra, t0
.set pop
-END(fchmodat)
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-x86/syscalls/fchmod.S b/libc/arch-x86/syscalls/___fchmod.S
similarity index 90%
rename from libc/arch-x86/syscalls/fchmod.S
rename to libc/arch-x86/syscalls/___fchmod.S
index 37851ff..119a695 100644
--- a/libc/arch-x86/syscalls/fchmod.S
+++ b/libc/arch-x86/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmod)
+ENTRY(___fchmod)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -23,4 +23,5 @@
popl %ecx
popl %ebx
ret
-END(fchmod)
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-x86/syscalls/fchmodat.S b/libc/arch-x86/syscalls/___fchmodat.S
similarity index 70%
rename from libc/arch-x86/syscalls/fchmodat.S
rename to libc/arch-x86/syscalls/___fchmodat.S
index f515512..b15bb64 100644
--- a/libc/arch-x86/syscalls/fchmodat.S
+++ b/libc/arch-x86/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmodat)
+ENTRY(___fchmodat)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -12,13 +12,9 @@
pushl %edx
.cfi_adjust_cfa_offset 4
.cfi_rel_offset edx, 0
- pushl %esi
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset esi, 0
- mov 20(%esp), %ebx
- mov 24(%esp), %ecx
- mov 28(%esp), %edx
- mov 32(%esp), %esi
+ mov 16(%esp), %ebx
+ mov 20(%esp), %ecx
+ mov 24(%esp), %edx
movl $__NR_fchmodat, %eax
int $0x80
cmpl $-MAX_ERRNO, %eax
@@ -28,9 +24,9 @@
call __set_errno_internal
addl $4, %esp
1:
- popl %esi
popl %edx
popl %ecx
popl %ebx
ret
-END(fchmodat)
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-x86_64/syscalls/fchmod.S b/libc/arch-x86_64/syscalls/___fchmod.S
similarity index 83%
rename from libc/arch-x86_64/syscalls/fchmod.S
rename to libc/arch-x86_64/syscalls/___fchmod.S
index b35bd21..7bccbef 100644
--- a/libc/arch-x86_64/syscalls/fchmod.S
+++ b/libc/arch-x86_64/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmod)
+ENTRY(___fchmod)
movl $__NR_fchmod, %eax
syscall
cmpq $-MAX_ERRNO, %rax
@@ -12,4 +12,5 @@
call __set_errno_internal
1:
ret
-END(fchmod)
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-x86_64/syscalls/fchmodat.S b/libc/arch-x86_64/syscalls/___fchmodat.S
similarity index 82%
rename from libc/arch-x86_64/syscalls/fchmodat.S
rename to libc/arch-x86_64/syscalls/___fchmodat.S
index 2d78d8e..483ec7d 100644
--- a/libc/arch-x86_64/syscalls/fchmodat.S
+++ b/libc/arch-x86_64/syscalls/___fchmodat.S
@@ -2,8 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(fchmodat)
- movq %rcx, %r10
+ENTRY(___fchmodat)
movl $__NR_fchmodat, %eax
syscall
cmpq $-MAX_ERRNO, %rax
@@ -13,4 +12,5 @@
call __set_errno_internal
1:
ret
-END(fchmodat)
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/bionic/__cxa_guard.cpp b/libc/bionic/__cxa_guard.cpp
index 5b0d57d..5b34b58 100644
--- a/libc/bionic/__cxa_guard.cpp
+++ b/libc/bionic/__cxa_guard.cpp
@@ -14,10 +14,13 @@
* limitations under the License.
*/
-#include <stddef.h>
#include <endian.h>
+#include <limits.h>
+#undef _USING_LIBCXX // Prevent the use of <atomic>.
+#include <stdatomic.h>
-#include "private/bionic_atomic_inline.h"
+#include <stddef.h>
+
#include "private/bionic_futex.h"
// This file contains C++ ABI support functions for one time
@@ -49,66 +52,82 @@
// values. The LSB is tested by the compiler-generated code before calling
// __cxa_guard_acquire.
union _guard_t {
- int volatile state;
- int32_t aligner;
+ atomic_int state;
+ int32_t aligner;
};
-const static int ready = 0x1;
-const static int pending = 0x2;
-const static int waiting = 0x6;
-
#else
// The Itanium/x86 C++ ABI (used by all other architectures) mandates that
// guard variables are 64-bit aligned, 64-bit values. The LSB is tested by
// the compiler-generated code before calling __cxa_guard_acquire.
union _guard_t {
- int volatile state;
- int64_t aligner;
+ atomic_int state;
+ int64_t aligner;
};
-const static int ready = letoh32(0x1);
-const static int pending = letoh32(0x100);
-const static int waiting = letoh32(0x10000);
#endif
+// Set construction state values according to reference documentation.
+// 0 is the initialization value.
+// Arm requires ((*gv & 1) == 1) after __cxa_guard_release, ((*gv & 3) == 0) after __cxa_guard_abort.
+// X86 requires that the first byte is not modified by __cxa_guard_acquire and is non-zero after
+// __cxa_guard_release.
+
+#define CONSTRUCTION_NOT_YET_STARTED 0
+#define CONSTRUCTION_COMPLETE 1
+#define CONSTRUCTION_UNDERWAY_WITHOUT_WAITER 0x100
+#define CONSTRUCTION_UNDERWAY_WITH_WAITER 0x200
+
extern "C" int __cxa_guard_acquire(_guard_t* gv) {
- // 0 -> pending, return 1
- // pending -> waiting, wait and return 0
- // waiting: untouched, wait and return 0
- // ready: untouched, return 0
+ int old_value = atomic_load_explicit(&gv->state, memory_order_relaxed);
-retry:
- if (__bionic_cmpxchg(0, pending, &gv->state) == 0) {
- ANDROID_MEMBAR_FULL();
- return 1;
- }
- __bionic_cmpxchg(pending, waiting, &gv->state); // Indicate there is a waiter
- __futex_wait(&gv->state, waiting, NULL);
-
- if (gv->state != ready) {
- // __cxa_guard_abort was called, let every thread try since there is no return code for this condition
- goto retry;
+ while (true) {
+ if (old_value == CONSTRUCTION_COMPLETE) {
+    // A load_acquire operation is needed before exiting with COMPLETE state, as we have to ensure
+ // that all the stores performed by the construction function are observable on this CPU
+ // after we exit.
+ atomic_thread_fence(memory_order_acquire);
+ return 0;
+ } else if (old_value == CONSTRUCTION_NOT_YET_STARTED) {
+ if (!atomic_compare_exchange_weak_explicit(&gv->state, &old_value,
+ CONSTRUCTION_UNDERWAY_WITHOUT_WAITER,
+ memory_order_relaxed,
+ memory_order_relaxed)) {
+ continue;
+ }
+ // The acquire fence may not be needed. But as described in section 3.3.2 of
+ // the Itanium C++ ABI specification, it probably has to behave like the
+ // acquisition of a mutex, which needs an acquire fence.
+ atomic_thread_fence(memory_order_acquire);
+ return 1;
+ } else if (old_value == CONSTRUCTION_UNDERWAY_WITHOUT_WAITER) {
+ if (!atomic_compare_exchange_weak_explicit(&gv->state, &old_value,
+ CONSTRUCTION_UNDERWAY_WITH_WAITER,
+ memory_order_relaxed,
+ memory_order_relaxed)) {
+ continue;
+ }
}
- ANDROID_MEMBAR_FULL();
- return 0;
+ __futex_wait_ex(&gv->state, false, CONSTRUCTION_UNDERWAY_WITH_WAITER, NULL);
+ old_value = atomic_load_explicit(&gv->state, memory_order_relaxed);
+ }
}
extern "C" void __cxa_guard_release(_guard_t* gv) {
- // pending -> ready
- // waiting -> ready, and wake
-
- ANDROID_MEMBAR_FULL();
- if (__bionic_cmpxchg(pending, ready, &gv->state) == 0) {
- return;
- }
-
- gv->state = ready;
- __futex_wake(&gv->state, 0x7fffffff);
+ // Release fence is used to make all stores performed by the construction function
+ // visible in other threads.
+ int old_value = atomic_exchange_explicit(&gv->state, CONSTRUCTION_COMPLETE, memory_order_release);
+ if (old_value == CONSTRUCTION_UNDERWAY_WITH_WAITER) {
+ __futex_wake_ex(&gv->state, false, INT_MAX);
+ }
}
extern "C" void __cxa_guard_abort(_guard_t* gv) {
- ANDROID_MEMBAR_FULL();
- gv->state= 0;
- __futex_wake(&gv->state, 0x7fffffff);
+ // Release fence is used to make all stores performed by the construction function
+ // visible in other threads.
+ int old_value = atomic_exchange_explicit(&gv->state, CONSTRUCTION_NOT_YET_STARTED, memory_order_release);
+ if (old_value == CONSTRUCTION_UNDERWAY_WITH_WAITER) {
+ __futex_wake_ex(&gv->state, false, INT_MAX);
+ }
}
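
For reference, here is a rough, illustrative sketch of the compiler-generated code that drives the guard functions above for a function-local static, following the Itanium C++ ABI sequence (acquire, construct, release, or abort on exception). The names, guard type, and layout below are illustrative only; real codegen differs per architecture.

#include <new>

struct Widget { Widget() {} };

extern "C" {
  int __cxa_guard_acquire(long long* guard);   // 64-bit guard assumed here (32-bit on ARM)
  void __cxa_guard_release(long long* guard);
  void __cxa_guard_abort(long long* guard);
}

Widget& instance() {
  alignas(Widget) static char storage[sizeof(Widget)];
  static long long guard;                        // zero-initialized: not yet constructed
  if (*reinterpret_cast<char*>(&guard) == 0) {   // fast path: only the low byte is tested
    if (__cxa_guard_acquire(&guard) == 1) {      // 1 means this thread must run the constructor
      try {
        new (storage) Widget();
        __cxa_guard_release(&guard);             // publishes the constructor's stores
      } catch (...) {
        __cxa_guard_abort(&guard);               // lets another thread retry
        throw;
      }
    }
  }
  return *reinterpret_cast<Widget*>(storage);
}
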
diff --git a/libc/bionic/__poll_chk.cpp b/libc/bionic/__poll_chk.cpp
new file mode 100644
index 0000000..3acac4e
--- /dev/null
+++ b/libc/bionic/__poll_chk.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#undef _FORTIFY_SOURCE
+#include <poll.h>
+#include "private/libc_logging.h"
+
+extern "C" int __poll_chk(struct pollfd* fds, nfds_t fd_count, int timeout, size_t fds_size) {
+fprintf(stderr, "__poll_chk %p %i %i %i\n", fds, (int)fd_count, timeout, (int) fds_size);
+ if (__predict_false(fds_size / sizeof(*fds) < fd_count)) {
+ __fortify_chk_fail("poll: pollfd array smaller than fd count", 0);
+ }
+ return poll(fds, fd_count, timeout);
+}
+
+extern "C" int __ppoll_chk(struct pollfd* fds, nfds_t fd_count, const struct timespec* timeout, const sigset_t* mask, size_t fds_size) {
+fprintf(stderr, "__ppoll_chk %p %i %p %p %i\n", fds, (int)fd_count, timeout, mask, (int) fds_size);
+ if (__predict_false(fds_size / sizeof(*fds) < fd_count)) {
+ __fortify_chk_fail("ppoll: pollfd array smaller than fd count", 0);
+ }
+ return ppoll(fds, fd_count, timeout, mask);
+}
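
For context, a minimal sketch of the kind of _FORTIFY_SOURCE wrapper in <poll.h> that routes calls into __poll_chk(); the shipped header uses bionic's own fortify macros, so the names poll_fortified and __poll_real below are illustrative only.

#include <poll.h>
#include <stddef.h>

int __poll_chk(struct pollfd*, nfds_t, int, size_t);
int __poll_real(struct pollfd*, nfds_t, int) __asm__("poll");  /* bind to the real symbol */

static inline __attribute__((__always_inline__))
int poll_fortified(struct pollfd* fds, nfds_t fd_count, int timeout) {
  size_t bos = __builtin_object_size(fds, 0);
  if (bos == (size_t) -1) {
    /* Size of the array is unknown at compile time; nothing to check. */
    return __poll_real(fds, fd_count, timeout);
  }
  return __poll_chk(fds, fd_count, timeout, bos);
}
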
diff --git a/libc/bionic/fchmod.cpp b/libc/bionic/fchmod.cpp
new file mode 100644
index 0000000..ace8c6b
--- /dev/null
+++ b/libc/bionic/fchmod.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdio.h>
+
+extern "C" int ___fchmod(int, mode_t);
+
+int fchmod(int fd, mode_t mode) {
+ int saved_errno = errno;
+ int result = ___fchmod(fd, mode);
+
+ if ((result == 0) || (errno != EBADF)) {
+ return result;
+ }
+
+ // fd could be an O_PATH file descriptor, and the kernel
+ // may not directly support fchmod() on such a file descriptor.
+ // Use /proc/self/fd instead to emulate this support.
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=14578
+ //
+ // As of February 2015, there are no kernels which support fchmod
+ // on an O_PATH file descriptor, and "man open" documents fchmod
+ // on O_PATH file descriptors as returning EBADF.
+ int fd_flag = fcntl(fd, F_GETFL);
+ if ((fd_flag == -1) || ((fd_flag & O_PATH) == 0)) {
+ errno = EBADF;
+ return -1;
+ }
+
+ char buf[40];
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", fd);
+ errno = saved_errno;
+ result = chmod(buf, mode);
+ if ((result == -1) && (errno == ELOOP)) {
+ // Linux does not support changing the mode of a symlink.
+ // For fchmodat(AT_SYMLINK_NOFOLLOW), POSIX requires a return
+ // value of ENOTSUP. Assume that's true here too.
+ errno = ENOTSUP;
+ }
+
+ return result;
+}
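
A quick usage sketch of the O_PATH fallback implemented above; the file path is hypothetical and only used for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int demo_fchmod_opath(void) {
  int fd = open("/data/local/tmp/example", O_PATH | O_CLOEXEC);  // assumed to exist
  if (fd == -1) return -1;
  // The raw ___fchmod syscall would fail with EBADF on an O_PATH descriptor;
  // the wrapper retries through /proc/self/fd and is expected to succeed.
  int rc = fchmod(fd, 0640);
  if (rc == -1) perror("fchmod");
  close(fd);
  return rc;
}
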
diff --git a/libc/bionic/fchmodat.cpp b/libc/bionic/fchmodat.cpp
new file mode 100644
index 0000000..1f83c4b
--- /dev/null
+++ b/libc/bionic/fchmodat.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include "private/ErrnoRestorer.h"
+
+extern "C" int ___fchmodat(int, const char*, mode_t);
+
+int fchmodat(int dirfd, const char* pathname, mode_t mode, int flags) {
+ if ((flags & ~AT_SYMLINK_NOFOLLOW) != 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (flags & AT_SYMLINK_NOFOLLOW) {
+ // Emulate AT_SYMLINK_NOFOLLOW using the mechanism described
+ // at https://sourceware.org/bugzilla/show_bug.cgi?id=14578
+ // comment #10
+
+ int fd = openat(dirfd, pathname, O_PATH | O_NOFOLLOW | O_CLOEXEC);
+ if (fd == -1) {
+ return -1; // returns errno from openat
+ }
+
+ // POSIX requires that ENOTSUP be returned when the system
+ // doesn't support setting the mode of a symbolic link.
+ // This is true for all Linux kernels.
+ // We rely on the O_PATH compatibility layer added in the
+ // fchmod() function to get errno correct.
+ int result = fchmod(fd, mode);
+ ErrnoRestorer errno_restorer; // don't let close() clobber errno
+ close(fd);
+ return result;
+ }
+
+ return ___fchmodat(dirfd, pathname, mode);
+}
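
A companion sketch for the AT_SYMLINK_NOFOLLOW emulation: on Linux the mode of a symlink itself cannot be changed, so ENOTSUP is the expected result. Paths are hypothetical.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int demo_fchmodat_nofollow(void) {
  symlink("/data/local/tmp/example", "/data/local/tmp/example_link");
  int rc = fchmodat(AT_FDCWD, "/data/local/tmp/example_link", 0640, AT_SYMLINK_NOFOLLOW);
  if (rc == -1 && errno == ENOTSUP) {
    fprintf(stderr, "symlink modes are unsupported, as expected\n");
  }
  unlink("/data/local/tmp/example_link");
  return rc;
}
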
diff --git a/libc/bionic/jemalloc.h b/libc/bionic/jemalloc.h
index feb1f43..98ea0ee 100644
--- a/libc/bionic/jemalloc.h
+++ b/libc/bionic/jemalloc.h
@@ -18,6 +18,7 @@
#define LIBC_BIONIC_JEMALLOC_H_
#include <jemalloc/jemalloc.h>
+#include <malloc.h> // For struct mallinfo.
// Need to wrap memalign since je_memalign fails on non-power of 2 alignments.
#define je_memalign je_memalign_round_up_boundary
diff --git a/libc/bionic/libc_logging.cpp b/libc/bionic/libc_logging.cpp
index 49a3762..76bc46d 100644
--- a/libc/bionic/libc_logging.cpp
+++ b/libc/bionic/libc_logging.cpp
@@ -438,7 +438,7 @@
vec[1].iov_base = const_cast<char*>(": ");
vec[1].iov_len = 2;
vec[2].iov_base = const_cast<char*>(msg);
- vec[2].iov_len = strlen(msg) + 1;
+ vec[2].iov_len = strlen(msg);
vec[3].iov_base = const_cast<char*>("\n");
vec[3].iov_len = 1;
@@ -448,8 +448,7 @@
}
#ifdef TARGET_USES_LOGD
-static int __libc_open_log_socket()
-{
+static int __libc_open_log_socket() {
// ToDo: Ideally we want this to fail if the gid of the current
// process is AID_LOGD, but will have to wait until we have
// registered this in private/android_filesystem_config.h. We have
@@ -491,7 +490,6 @@
static int __libc_write_log(int priority, const char* tag, const char* msg) {
#ifdef TARGET_USES_LOGD
int main_log_fd = __libc_open_log_socket();
-
if (main_log_fd == -1) {
// Try stderr instead.
return __libc_write_stderr(tag, msg);
@@ -515,9 +513,9 @@
vec[3].iov_base = &priority;
vec[3].iov_len = 1;
vec[4].iov_base = const_cast<char*>(tag);
- vec[4].iov_len = strlen(tag) + 1;
+ vec[4].iov_len = strlen(tag);
vec[5].iov_base = const_cast<char*>(msg);
- vec[5].iov_len = strlen(msg) + 1;
+ vec[5].iov_len = strlen(msg);
#else
int main_log_fd = TEMP_FAILURE_RETRY(open("/dev/log/main", O_CLOEXEC | O_WRONLY));
if (main_log_fd == -1) {
@@ -532,9 +530,9 @@
vec[0].iov_base = &priority;
vec[0].iov_len = 1;
vec[1].iov_base = const_cast<char*>(tag);
- vec[1].iov_len = strlen(tag) + 1;
+ vec[1].iov_len = strlen(tag);
vec[2].iov_base = const_cast<char*>(msg);
- vec[2].iov_len = strlen(msg) + 1;
+ vec[2].iov_len = strlen(msg);
#endif
int result = TEMP_FAILURE_RETRY(writev(main_log_fd, vec, sizeof(vec) / sizeof(vec[0])));
@@ -614,7 +612,7 @@
if (tag != 0) {
__libc_android_log_event_uid(tag);
}
- __libc_fatal("FORTIFY_SOURCE: %s. Calling abort().", msg);
+ __libc_fatal("FORTIFY: %s", msg);
}
static void __libc_fatal(const char* format, va_list args) {
@@ -622,12 +620,12 @@
BufferOutputStream os(msg, sizeof(msg));
out_vformat(os, format, args);
- // log to stderr for the benefit of "adb shell" users.
+ // Log to stderr for the benefit of "adb shell" users.
struct iovec iov[2] = {
- {msg, strlen(msg)},
- {const_cast<void*>(static_cast<const void*>("\n")), 1},
+ { msg, os.total },
+ { const_cast<char*>("\n"), 1 },
};
- writev(2, iov, 2);
+ TEMP_FAILURE_RETRY(writev(2, iov, 2));
// Log to the log for the benefit of regular app developers (whose stdout and stderr are closed).
__libc_write_log(ANDROID_LOG_FATAL, "libc", msg);
diff --git a/libc/bionic/poll.cpp b/libc/bionic/poll.cpp
index d267229..23ef90a 100644
--- a/libc/bionic/poll.cpp
+++ b/libc/bionic/poll.cpp
@@ -26,6 +26,7 @@
* SUCH DAMAGE.
*/
+#undef _FORTIFY_SOURCE
#include <errno.h>
#include <sys/poll.h>
#include <sys/select.h>
diff --git a/libc/bionic/pthread_mutex.cpp b/libc/bionic/pthread_mutex.cpp
index 40f1ed2..83d6b54 100644
--- a/libc/bionic/pthread_mutex.cpp
+++ b/libc/bionic/pthread_mutex.cpp
@@ -30,22 +30,19 @@
#include <errno.h>
#include <limits.h>
+#include <stdatomic.h>
+#include <sys/cdefs.h>
#include <sys/mman.h>
#include <unistd.h>
#include "pthread_internal.h"
-#include "private/bionic_atomic_inline.h"
#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
+#include "private/bionic_systrace.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"
-#include "private/bionic_systrace.h"
-
-extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex);
-extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex);
-
/* a mutex is implemented as a 32-bit integer holding the following fields
*
* bits: name description
@@ -87,9 +84,6 @@
#define MUTEX_STATE_LOCKED_UNCONTENDED 1 /* must be 1 due to atomic dec in unlock operation */
#define MUTEX_STATE_LOCKED_CONTENDED 2 /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */
-#define MUTEX_STATE_FROM_BITS(v) FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
-#define MUTEX_STATE_TO_BITS(v) FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
-
#define MUTEX_STATE_BITS_UNLOCKED MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define MUTEX_STATE_BITS_LOCKED_UNCONTENDED MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define MUTEX_STATE_BITS_LOCKED_CONTENDED MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)
@@ -116,10 +110,7 @@
#define MUTEX_COUNTER_BITS_IS_ZERO(v) (((v) & MUTEX_COUNTER_MASK) == 0)
/* Used to increment the counter directly after overflow has been checked */
-#define MUTEX_COUNTER_BITS_ONE FIELD_TO_BITS(1,MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)
-
-/* Returns true iff the counter is 0 */
-#define MUTEX_COUNTER_BITS_ARE_ZERO(v) (((v) & MUTEX_COUNTER_MASK) == 0)
+#define MUTEX_COUNTER_BITS_ONE FIELD_TO_BITS(1, MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)
/* Mutex shared bit flag
*
@@ -159,30 +150,9 @@
/* Mutex owner field:
*
* This is only used for recursive and errorcheck mutexes. It holds the
- * tid of the owning thread. Note that this works because the Linux
- * kernel _only_ uses 16-bit values for tids.
- *
- * More specifically, it will wrap to 10000 when it reaches over 32768 for
- * application processes. You can check this by running the following inside
- * an adb shell session:
- *
- OLDPID=$$;
- while true; do
- NEWPID=$(sh -c 'echo $$')
- if [ "$NEWPID" -gt 32768 ]; then
- echo "AARGH: new PID $NEWPID is too high!"
- exit 1
- fi
- if [ "$NEWPID" -lt "$OLDPID" ]; then
- echo "****** Wrapping from PID $OLDPID to $NEWPID. *******"
- else
- echo -n "$NEWPID!"
- fi
- OLDPID=$NEWPID
- done
-
- * Note that you can run the same example on a desktop Linux system,
- * the wrapping will also happen at 32768, but will go back to 300 instead.
+ * tid of the owning thread. We use 16 bits to represent the tid here,
+ * so the highest tid is 65535. There is a test that checks /proc/sys/kernel/pid_max
+ * to make sure it will not exceed this limit.
*/
#define MUTEX_OWNER_SHIFT 16
#define MUTEX_OWNER_LEN 16
@@ -267,9 +237,20 @@
return 0;
}
+static inline atomic_int* MUTEX_TO_ATOMIC_POINTER(pthread_mutex_t* mutex) {
+ static_assert(sizeof(atomic_int) == sizeof(mutex->value),
+ "mutex->value should actually be atomic_int in implementation.");
+
+  // We prefer casting to atomic_int instead of declaring mutex->value to be atomic_int directly,
+  // because the latter would pollute pthread.h and cause an error when compiling libcxx.
+ return reinterpret_cast<atomic_int*>(&mutex->value);
+}
+
int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+
if (__predict_true(attr == NULL)) {
- mutex->value = MUTEX_TYPE_BITS_NORMAL;
+ atomic_init(mutex_value_ptr, MUTEX_TYPE_BITS_NORMAL);
return 0;
}
@@ -292,13 +273,13 @@
return EINVAL;
}
- mutex->value = value;
+ atomic_init(mutex_value_ptr, value);
return 0;
}
/*
- * Lock a non-recursive mutex.
+ * Lock a mutex of type NORMAL.
*
* As noted above, there are three states:
* 0 (unlocked, no contention)
@@ -309,96 +290,75 @@
* "type" value is zero, so the only bits that will be set are the ones in
* the lock state field.
*/
-static inline void _normal_lock(pthread_mutex_t* mutex, int shared) {
+static inline void _normal_mutex_lock(atomic_int* mutex_value_ptr, int shared) {
/* convenience shortcuts */
const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- /*
- * The common case is an unlocked mutex, so we begin by trying to
- * change the lock's state from 0 (UNLOCKED) to 1 (LOCKED).
- * __bionic_cmpxchg() returns 0 if it made the swap successfully.
- * If the result is nonzero, this lock is already held by another thread.
- */
- if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) != 0) {
- const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
- /*
- * We want to go to sleep until the mutex is available, which
- * requires promoting it to state 2 (CONTENDED). We need to
- * swap in the new state value and then wait until somebody wakes us up.
- *
- * __bionic_swap() returns the previous value. We swap 2 in and
- * see if we got zero back; if so, we have acquired the lock. If
- * not, another thread still holds the lock and we wait again.
- *
- * The second argument to the __futex_wait() call is compared
- * against the current value. If it doesn't match, __futex_wait()
- * returns immediately (otherwise, it sleeps for a time specified
- * by the third argument; 0 means sleep forever). This ensures
- * that the mutex is in state 2 when we go to sleep on it, which
- * guarantees a wake-up call.
- */
- ScopedTrace trace("Contending for pthread mutex");
-
-
- while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
- __futex_wait_ex(&mutex->value, shared, locked_contended, NULL);
- }
+ // The common case is an unlocked mutex, so we begin by trying to
+ // change the lock's state from unlocked to locked_uncontended.
+  // If the exchange succeeds, an acquire fence is required to make
+  // all memory accesses made by other threads visible to the current CPU.
+ int mvalue = unlocked;
+ if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
+ locked_uncontended,
+ memory_order_acquire,
+ memory_order_relaxed))) {
+ return;
}
- ANDROID_MEMBAR_FULL();
+
+ ScopedTrace trace("Contending for pthread mutex");
+
+ // We want to go to sleep until the mutex is available, which requires
+ // promoting it to locked_contended. We need to swap in the new state
+ // value and then wait until somebody wakes us up.
+  // An atomic_exchange is used to compete with other threads for the lock.
+  // If it returns unlocked, we have acquired the lock; otherwise another
+  // thread still holds the lock and we should wait again.
+  // If the lock is acquired, an acquire fence is needed to make all memory accesses
+  // made by other threads visible to the current CPU.
+ const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+ while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
+ memory_order_acquire) != unlocked) {
+
+ __futex_wait_ex(mutex_value_ptr, shared, locked_contended, NULL);
+ }
}
/*
- * Release a non-recursive mutex. The caller is responsible for determining
+ * Release a mutex of type NORMAL. The caller is responsible for determining
* that we are in fact the owner of this lock.
*/
-static inline void _normal_unlock(pthread_mutex_t* mutex, int shared) {
- ANDROID_MEMBAR_FULL();
+static inline void _normal_mutex_unlock(atomic_int* mutex_value_ptr, int shared) {
+ const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
+ const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
- /*
- * The mutex state will be 1 or (rarely) 2. We use an atomic decrement
- * to release the lock. __bionic_atomic_dec() returns the previous value;
- * if it wasn't 1 we have to do some additional work.
- */
- if (__bionic_atomic_dec(&mutex->value) != (shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED)) {
- /*
- * Start by releasing the lock. The decrement changed it from
- * "contended lock" to "uncontended lock", which means we still
- * hold it, and anybody who tries to sneak in will push it back
- * to state 2.
- *
- * Once we set it to zero the lock is up for grabs. We follow
- * this with a __futex_wake() to ensure that one of the waiting
- * threads has a chance to grab it.
- *
- * This doesn't cause a race with the swap/wait pair in
- * _normal_lock(), because the __futex_wait() call there will
- * return immediately if the mutex value isn't 2.
- */
- mutex->value = shared;
-
- /*
- * Wake up one waiting thread. We don't know which thread will be
- * woken or when it'll start executing -- futexes make no guarantees
- * here. There may not even be a thread waiting.
- *
- * The newly-woken thread will replace the 0 we just set above
- * with 2, which means that when it eventually releases the mutex
- * it will also call FUTEX_WAKE. This results in one extra wake
- * call whenever a lock is contended, but lets us avoid forgetting
- * anyone without requiring us to track the number of sleepers.
- *
- * It's possible for another thread to sneak in and grab the lock
- * between the zero assignment above and the wake call below. If
- * the new thread is "slow" and holds the lock for a while, we'll
- * wake up a sleeper, which will swap in a 2 and then go back to
- * sleep since the lock is still held. If the new thread is "fast",
- * running to completion before we call wake, the thread we
- * eventually wake will find an unlocked mutex and will execute.
- * Either way we have correct behavior and nobody is orphaned on
- * the wait queue.
- */
- __futex_wake_ex(&mutex->value, shared, 1);
+  // We use an atomic_exchange to release the lock. If the locked_contended state
+  // is returned, some thread is waiting for the lock and we need to wake up
+  // one of them.
+  // A release fence is required to make previous stores visible to the next
+  // lock owner thread.
+ if (atomic_exchange_explicit(mutex_value_ptr, unlocked,
+ memory_order_release) == locked_contended) {
+ // Wake up one waiting thread. We don't know which thread will be
+ // woken or when it'll start executing -- futexes make no guarantees
+ // here. There may not even be a thread waiting.
+ //
+ // The newly-woken thread will replace the unlocked state we just set above
+ // with locked_contended state, which means that when it eventually releases
+ // the mutex it will also call FUTEX_WAKE. This results in one extra wake
+  // call whenever a lock is contended, but lets us avoid forgetting anyone
+ // without requiring us to track the number of sleepers.
+ //
+ // It's possible for another thread to sneak in and grab the lock between
+ // the exchange above and the wake call below. If the new thread is "slow"
+ // and holds the lock for a while, we'll wake up a sleeper, which will swap
+ // in locked_uncontended state and then go back to sleep since the lock is
+ // still held. If the new thread is "fast", running to completion before
+ // we call wake, the thread we eventually wake will find an unlocked mutex
+ // and will execute. Either way we have correct behavior and nobody is
+ // orphaned on the wait queue.
+ __futex_wake_ex(mutex_value_ptr, shared, 1);
}
}
@@ -414,183 +374,175 @@
* mvalue is the current mutex value (already loaded)
* mutex pointers to the mutex.
*/
-static inline __always_inline int _recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype) {
+static inline __always_inline
+int _recursive_increment(atomic_int* mutex_value_ptr, int mvalue, int mtype) {
if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
- /* trying to re-lock a mutex we already acquired */
+ // Trying to re-lock a mutex we already acquired.
return EDEADLK;
}
- /* Detect recursive lock overflow and return EAGAIN.
- * This is safe because only the owner thread can modify the
- * counter bits in the mutex value.
- */
+ // Detect recursive lock overflow and return EAGAIN.
+ // This is safe because only the owner thread can modify the
+ // counter bits in the mutex value.
if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
return EAGAIN;
}
- /* We own the mutex, but other threads are able to change
- * the lower bits (e.g. promoting it to "contended"), so we
- * need to use an atomic cmpxchg loop to update the counter.
- */
- for (;;) {
- /* increment counter, overflow was already checked */
- int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
- if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
- /* mutex is still locked, not need for a memory barrier */
- return 0;
- }
- /* the value was changed, this happens when another thread changes
- * the lower state bits from 1 to 2 to indicate contention. This
- * cannot change the counter, so simply reload and try again.
- */
- mvalue = mutex->value;
- }
+ // We own the mutex, but other threads are able to change the lower bits
+ // (e.g. promoting it to "contended"), so we need to use an atomic exchange
+ // loop to update the counter. The counter will not overflow in the loop,
+ // as only the owner thread can change it.
+ // The mutex is still locked, so we don't need a release fence.
+ while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
+ mvalue + MUTEX_COUNTER_BITS_ONE,
+ memory_order_relaxed,
+ memory_order_relaxed)) { }
+ return 0;
}
int pthread_mutex_lock(pthread_mutex_t* mutex) {
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+
int mvalue, mtype, tid, shared;
- mvalue = mutex->value;
+ mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
mtype = (mvalue & MUTEX_TYPE_MASK);
shared = (mvalue & MUTEX_SHARED_MASK);
- /* Handle non-recursive case first */
+ // Handle common case first.
if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
- _normal_lock(mutex, shared);
+ _normal_mutex_lock(mutex_value_ptr, shared);
return 0;
}
- /* Do we already own this recursive or error-check mutex ? */
+ // Do we already own this recursive or error-check mutex?
tid = __get_thread()->tid;
if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
- return _recursive_increment(mutex, mvalue, mtype);
+ return _recursive_increment(mutex_value_ptr, mvalue, mtype);
- /* Add in shared state to avoid extra 'or' operations below */
+ // Add in shared state to avoid extra 'or' operations below.
mtype |= shared;
- /* First, if the mutex is unlocked, try to quickly acquire it.
- * In the optimistic case where this works, set the state to 1 to
- * indicate locked with no contention */
+ // First, if the mutex is unlocked, try to quickly acquire it.
+ // In the optimistic case where this works, set the state to locked_uncontended.
if (mvalue == mtype) {
int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- if (__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0) {
- ANDROID_MEMBAR_FULL();
+    // If the exchange succeeds, an acquire fence is required to make
+    // all memory accesses made by other threads visible to the current CPU.
+ if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
+ newval, memory_order_acquire, memory_order_relaxed))) {
return 0;
}
- /* argh, the value changed, reload before entering the loop */
- mvalue = mutex->value;
}
ScopedTrace trace("Contending for pthread mutex");
- for (;;) {
- int newval;
-
- /* if the mutex is unlocked, its value should be 'mtype' and
- * we try to acquire it by setting its owner and state atomically.
- * NOTE: We put the state to 2 since we _know_ there is contention
- * when we are in this loop. This ensures all waiters will be
- * unlocked.
- */
+ while (true) {
if (mvalue == mtype) {
- newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
- /* TODO: Change this to __bionic_cmpxchg_acquire when we
- * implement it to get rid of the explicit memory
- * barrier below.
- */
- if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
- mvalue = mutex->value;
- continue;
- }
- ANDROID_MEMBAR_FULL();
- return 0;
- }
+ // If the mutex is unlocked, its value should be 'mtype' and
+ // we try to acquire it by setting its owner and state atomically.
+ // NOTE: We put the state to locked_contended since we _know_ there
+ // is contention when we are in this loop. This ensures all waiters
+ // will be unlocked.
- /* the mutex is already locked by another thread, if its state is 1
- * we will change it to 2 to indicate contention. */
- if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
- newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
- if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
- mvalue = mutex->value;
+ int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+      // If the exchange succeeds, an acquire fence is required to make
+      // all memory accesses made by other threads visible to the current CPU.
+ if (__predict_true(atomic_compare_exchange_weak_explicit(mutex_value_ptr,
+ &mvalue, newval,
+ memory_order_acquire,
+ memory_order_relaxed))) {
+ return 0;
+ }
+ continue;
+ } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
+      // The mutex is already locked by another thread. If the state is locked_uncontended,
+      // we should set it to locked_contended before going to sleep. This makes
+      // sure waiters will be woken up eventually.
+
+ int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
+ if (__predict_false(!atomic_compare_exchange_weak_explicit(mutex_value_ptr,
+ &mvalue, newval,
+ memory_order_relaxed,
+ memory_order_relaxed))) {
continue;
}
mvalue = newval;
}
- /* wait until the mutex is unlocked */
- __futex_wait_ex(&mutex->value, shared, mvalue, NULL);
-
- mvalue = mutex->value;
+    // We are in the locked_contended state; sleep until someone wakes us up.
+ __futex_wait_ex(mutex_value_ptr, shared, mvalue, NULL);
+ mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
}
- /* NOTREACHED */
}
int pthread_mutex_unlock(pthread_mutex_t* mutex) {
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+
int mvalue, mtype, tid, shared;
- mvalue = mutex->value;
+ mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
mtype = (mvalue & MUTEX_TYPE_MASK);
shared = (mvalue & MUTEX_SHARED_MASK);
- /* Handle common case first */
+ // Handle common case first.
if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
- _normal_unlock(mutex, shared);
+ _normal_mutex_unlock(mutex_value_ptr, shared);
return 0;
}
- /* Do we already own this recursive or error-check mutex ? */
+ // Do we already own this recursive or error-check mutex?
tid = __get_thread()->tid;
if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
return EPERM;
- /* If the counter is > 0, we can simply decrement it atomically.
- * Since other threads can mutate the lower state bits (and only the
- * lower state bits), use a cmpxchg to do it.
- */
+ // If the counter is > 0, we can simply decrement it atomically.
+ // Since other threads can mutate the lower state bits (and only the
+ // lower state bits), use a compare_exchange loop to do it.
if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
- for (;;) {
- int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
- if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
- /* success: we still own the mutex, so no memory barrier */
- return 0;
- }
- /* the value changed, so reload and loop */
- mvalue = mutex->value;
- }
+ // We still own the mutex, so a release fence is not needed.
+ while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
+ mvalue - MUTEX_COUNTER_BITS_ONE,
+ memory_order_relaxed,
+ memory_order_relaxed)) { }
+ return 0;
}
- /* the counter is 0, so we're going to unlock the mutex by resetting
- * its value to 'unlocked'. We need to perform a swap in order
- * to read the current state, which will be 2 if there are waiters
- * to awake.
- *
- * TODO: Change this to __bionic_swap_release when we implement it
- * to get rid of the explicit memory barrier below.
- */
- ANDROID_MEMBAR_FULL(); /* RELEASE BARRIER */
- mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);
-
- /* Wake one waiting thread, if any */
+  // The counter is 0, so we're going to unlock the mutex by resetting its
+  // state to unlocked. We need to perform an atomic_exchange in order to read
+  // the current state, which will be locked_contended if there are waiters
+  // to wake.
+  // A release fence is required to make previous stores visible to the next
+  // lock owner thread.
+ mvalue = atomic_exchange_explicit(mutex_value_ptr,
+ mtype | shared | MUTEX_STATE_BITS_UNLOCKED,
+ memory_order_release);
if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
- __futex_wake_ex(&mutex->value, shared, 1);
+ __futex_wake_ex(mutex_value_ptr, shared, 1);
}
+
return 0;
}
int pthread_mutex_trylock(pthread_mutex_t* mutex) {
- int mvalue = mutex->value;
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+
+ int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
int mtype = (mvalue & MUTEX_TYPE_MASK);
int shared = (mvalue & MUTEX_SHARED_MASK);
// Handle common case first.
if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
- if (__bionic_cmpxchg(shared|MUTEX_STATE_BITS_UNLOCKED,
- shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
- &mutex->value) == 0) {
- ANDROID_MEMBAR_FULL();
+ mvalue = shared | MUTEX_STATE_BITS_UNLOCKED;
+    // If the exchange succeeds, an acquire fence is required to make
+    // all memory accesses made by other threads visible to the current CPU.
+ if (atomic_compare_exchange_strong_explicit(mutex_value_ptr,
+ &mvalue,
+ shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
+ memory_order_acquire,
+ memory_order_relaxed)) {
return 0;
}
-
return EBUSY;
}
@@ -600,158 +552,163 @@
if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
return EBUSY;
}
- return _recursive_increment(mutex, mvalue, mtype);
+ return _recursive_increment(mutex_value_ptr, mvalue, mtype);
}
- /* Same as pthread_mutex_lock, except that we don't want to wait, and
- * the only operation that can succeed is a single cmpxchg to acquire the
- * lock if it is released / not owned by anyone. No need for a complex loop.
- */
+ // Same as pthread_mutex_lock, except that we don't want to wait, and
+ // the only operation that can succeed is a single compare_exchange to acquire the
+ // lock if it is released / not owned by anyone. No need for a complex loop.
+  // If the exchange succeeds, an acquire fence is required to make
+  // all memory accesses made by other threads visible to the current CPU.
mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
- ANDROID_MEMBAR_FULL();
+ if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
+ &mtype, mvalue,
+ memory_order_acquire,
+ memory_order_relaxed))) {
return 0;
}
-
return EBUSY;
}
static int __pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_ts, clockid_t clock) {
- timespec ts;
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
- int mvalue = mutex->value;
- int mtype = (mvalue & MUTEX_TYPE_MASK);
- int shared = (mvalue & MUTEX_SHARED_MASK);
+ timespec ts;
- // Handle common case first.
- if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
- const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
- const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+ int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
+ int mtype = (mvalue & MUTEX_TYPE_MASK);
+ int shared = (mvalue & MUTEX_SHARED_MASK);
- // Fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0.
- if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
- ANDROID_MEMBAR_FULL();
- return 0;
+ // Handle common case first.
+ if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
+ const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
+ const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
+ const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+
+    // If the exchange succeeds, an acquire fence is required to make
+    // all memory accesses made by other threads visible to the current CPU.
+ mvalue = unlocked;
+ if (atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, locked_uncontended,
+ memory_order_acquire, memory_order_relaxed)) {
+ return 0;
+ }
+
+ ScopedTrace trace("Contending for timed pthread mutex");
+
+ // Same as pthread_mutex_lock, except that we can only wait for a specified
+    // time interval. If the lock is acquired, an acquire fence is needed to make
+    // all memory accesses made by other threads visible to the current CPU.
+ while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
+ memory_order_acquire) != unlocked) {
+ if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
+ return ETIMEDOUT;
+ }
+ __futex_wait_ex(mutex_value_ptr, shared, locked_contended, &ts);
+ }
+
+ return 0;
+ }
+
+ // Do we already own this recursive or error-check mutex?
+ pid_t tid = __get_thread()->tid;
+ if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
+ return _recursive_increment(mutex_value_ptr, mvalue, mtype);
+ }
+
+ mtype |= shared;
+
+ // First try a quick lock.
+ if (mvalue == mtype) {
+ int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
+    // If the exchange succeeds, an acquire fence is required to make
+    // all memory accesses made by other threads visible to the current CPU.
+ if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
+ &mvalue, newval,
+ memory_order_acquire,
+ memory_order_relaxed))) {
+ return 0;
+ }
}
ScopedTrace trace("Contending for timed pthread mutex");
- // Loop while needed.
- while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
- if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
- return ETIMEDOUT;
- }
- __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
+ // The following implements the same loop as pthread_mutex_lock,
+ // but adds checks to ensure that the operation never exceeds the
+ // absolute expiration time.
+ while (true) {
+ if (mvalue == mtype) { // Unlocked.
+ int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+      // An acquire fence is needed for a successful exchange.
+ if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
+ memory_order_acquire,
+ memory_order_relaxed)) {
+ goto check_time;
+ }
+
+ return 0;
+ } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
+ // The value is locked. If the state is locked_uncontended, we need to switch
+      // it to locked_contended before sleeping, so we can get woken up later.
+ int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
+ if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
+ memory_order_relaxed,
+ memory_order_relaxed)) {
+ goto check_time;
+ }
+ mvalue = newval;
+ }
+
+ if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
+ return ETIMEDOUT;
+ }
+
+ if (__futex_wait_ex(mutex_value_ptr, shared, mvalue, &ts) == -ETIMEDOUT) {
+ return ETIMEDOUT;
+ }
+
+check_time:
+ if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
+ return ETIMEDOUT;
+ }
+      // After futex_wait or the time-costly timespec_from_absolute_timespec,
+      // we'd better read mvalue again in case it has changed.
+ mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
}
- ANDROID_MEMBAR_FULL();
- return 0;
- }
-
- // Do we already own this recursive or error-check mutex?
- pid_t tid = __get_thread()->tid;
- if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
- return _recursive_increment(mutex, mvalue, mtype);
- }
-
- // The following implements the same loop as pthread_mutex_lock_impl
- // but adds checks to ensure that the operation never exceeds the
- // absolute expiration time.
- mtype |= shared;
-
- // First try a quick lock.
- if (mvalue == mtype) {
- mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
- ANDROID_MEMBAR_FULL();
- return 0;
- }
- mvalue = mutex->value;
- }
-
- ScopedTrace trace("Contending for timed pthread mutex");
-
- while (true) {
- // If the value is 'unlocked', try to acquire it directly.
- // NOTE: put state to 2 since we know there is contention.
- if (mvalue == mtype) { // Unlocked.
- mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
- if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
- ANDROID_MEMBAR_FULL();
- return 0;
- }
- // The value changed before we could lock it. We need to check
- // the time to avoid livelocks, reload the value, then loop again.
- if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
- return ETIMEDOUT;
- }
-
- mvalue = mutex->value;
- continue;
- }
-
- // The value is locked. If 'uncontended', try to switch its state
- // to 'contented' to ensure we get woken up later.
- if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
- int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
- if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
- // This failed because the value changed, reload it.
- mvalue = mutex->value;
- } else {
- // This succeeded, update mvalue.
- mvalue = newval;
- }
- }
-
- // Check time and update 'ts'.
-      if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
- return ETIMEDOUT;
- }
-
- // Only wait to be woken up if the state is '2', otherwise we'll
- // simply loop right now. This can happen when the second cmpxchg
- // in our loop failed because the mutex was unlocked by another thread.
- if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
- if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == -ETIMEDOUT) {
- return ETIMEDOUT;
- }
- mvalue = mutex->value;
- }
- }
- /* NOTREACHED */
}
#if !defined(__LP64__)
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms) {
- timespec abs_timeout;
- clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
- abs_timeout.tv_sec += ms / 1000;
- abs_timeout.tv_nsec += (ms % 1000) * 1000000;
- if (abs_timeout.tv_nsec >= NS_PER_S) {
- abs_timeout.tv_sec++;
- abs_timeout.tv_nsec -= NS_PER_S;
- }
+ timespec abs_timeout;
+ clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
+ abs_timeout.tv_sec += ms / 1000;
+ abs_timeout.tv_nsec += (ms % 1000) * 1000000;
+ if (abs_timeout.tv_nsec >= NS_PER_S) {
+ abs_timeout.tv_sec++;
+ abs_timeout.tv_nsec -= NS_PER_S;
+ }
- int error = __pthread_mutex_timedlock(mutex, &abs_timeout, CLOCK_MONOTONIC);
- if (error == ETIMEDOUT) {
- error = EBUSY;
- }
- return error;
+ int error = __pthread_mutex_timedlock(mutex, &abs_timeout, CLOCK_MONOTONIC);
+ if (error == ETIMEDOUT) {
+ error = EBUSY;
+ }
+ return error;
}
#endif
int pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout) {
- return __pthread_mutex_timedlock(mutex, abs_timeout, CLOCK_REALTIME);
+ return __pthread_mutex_timedlock(mutex, abs_timeout, CLOCK_REALTIME);
}
int pthread_mutex_destroy(pthread_mutex_t* mutex) {
- // Use trylock to ensure that the mutex is valid and not already locked.
- int error = pthread_mutex_trylock(mutex);
- if (error != 0) {
- return error;
- }
- mutex->value = 0xdead10cc;
- return 0;
+ // Use trylock to ensure that the mutex is valid and not already locked.
+ int error = pthread_mutex_trylock(mutex);
+ if (error != 0) {
+ return error;
+ }
+
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+ atomic_store_explicit(mutex_value_ptr, 0xdead10cc, memory_order_relaxed);
+ return 0;
}
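
To summarize the ordering contract the comments above repeat (acquire on every successful lock, release on unlock, futex only under contention), here is a stripped-down model of the NORMAL-mutex path. It omits the type/shared/owner bit fields and the actual futex calls, so it is illustrative rather than a drop-in equivalent of _normal_mutex_lock/_normal_mutex_unlock.

#include <stdatomic.h>

enum { UNLOCKED = 0, LOCKED = 1, CONTENDED = 2 };

static void toy_lock(atomic_int* m) {
  int expected = UNLOCKED;
  // Fast path: acquire ordering makes the previous owner's writes visible to us.
  if (atomic_compare_exchange_strong_explicit(m, &expected, LOCKED,
                                              memory_order_acquire,
                                              memory_order_relaxed)) {
    return;
  }
  // Slow path: advertise contention, then wait until the word leaves the locked states.
  while (atomic_exchange_explicit(m, CONTENDED, memory_order_acquire) != UNLOCKED) {
    // __futex_wait_ex(m, shared, CONTENDED, NULL) would go here; omitted in this sketch.
  }
}

static void toy_unlock(atomic_int* m) {
  // Release ordering publishes everything written inside the critical section.
  if (atomic_exchange_explicit(m, UNLOCKED, memory_order_release) == CONTENDED) {
    // __futex_wake_ex(m, shared, 1) would go here; omitted in this sketch.
  }
}
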
diff --git a/libc/dns/resolv/res_cache.c b/libc/dns/resolv/res_cache.c
index 573fcbe..5a78450 100644
--- a/libc/dns/resolv/res_cache.c
+++ b/libc/dns/resolv/res_cache.c
@@ -104,10 +104,6 @@
*/
#define CONFIG_ENV "BIONIC_DNSCACHE"
-/* entries older than CONFIG_SECONDS seconds are always discarded.
- */
-#define CONFIG_SECONDS (60*10) /* 10 minutes */
-
/* default number of entries kept in the cache. This value has been
* determined by browsing through various sites and counting the number
* of corresponding requests. Keep in mind that our framework is currently
diff --git a/libc/include/android/legacy_errno_inlines.h b/libc/include/android/legacy_errno_inlines.h
new file mode 100644
index 0000000..71096fc
--- /dev/null
+++ b/libc/include/android/legacy_errno_inlines.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ANDROID_LEGACY_ERRNO_INLINES_H
+#define _ANDROID_LEGACY_ERRNO_INLINES_H
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+static __inline int __attribute__((deprecated)) __set_errno(int n) {
+ errno = n;
+ return -1;
+}
+
+__END_DECLS
+
+#endif /* _ANDROID_LEGACY_ERRNO_INLINES_H */
diff --git a/libc/include/android/legacy_signal_inlines.h b/libc/include/android/legacy_signal_inlines.h
new file mode 100644
index 0000000..1b6e687
--- /dev/null
+++ b/libc/include/android/legacy_signal_inlines.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ANDROID_LEGACY_SIGNAL_INLINES_H_
+#define _ANDROID_LEGACY_SIGNAL_INLINES_H_
+
+#include <string.h>
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+extern sighandler_t bsd_signal(int signum, sighandler_t handler);
+
+static __inline int sigismember(sigset_t *set, int signum) {
+ /* Signal numbers start at 1, but bit positions start at 0. */
+ int bit = signum - 1;
+ const unsigned long *local_set = (const unsigned long *)set;
+ if (set == NULL || bit < 0 || bit >= (int)(8 * sizeof(sigset_t))) {
+ errno = EINVAL;
+ return -1;
+ }
+ return (int)((local_set[bit / LONG_BIT] >> (bit % LONG_BIT)) & 1);
+}
+
+static __inline int sigaddset(sigset_t *set, int signum) {
+ /* Signal numbers start at 1, but bit positions start at 0. */
+ int bit = signum - 1;
+ unsigned long *local_set = (unsigned long *)set;
+ if (set == NULL || bit < 0 || bit >= (int)(8 * sizeof(sigset_t))) {
+ errno = EINVAL;
+ return -1;
+ }
+ local_set[bit / LONG_BIT] |= 1UL << (bit % LONG_BIT);
+ return 0;
+}
+
+static __inline int sigdelset(sigset_t *set, int signum) {
+ /* Signal numbers start at 1, but bit positions start at 0. */
+ int bit = signum - 1;
+ unsigned long *local_set = (unsigned long *)set;
+ if (set == NULL || bit < 0 || bit >= (int)(8 * sizeof(sigset_t))) {
+ errno = EINVAL;
+ return -1;
+ }
+ local_set[bit / LONG_BIT] &= ~(1UL << (bit % LONG_BIT));
+ return 0;
+}
+
+static __inline int sigemptyset(sigset_t *set) {
+ if (set == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+ memset(set, 0, sizeof(sigset_t));
+ return 0;
+}
+
+static __inline int sigfillset(sigset_t *set) {
+ if (set == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+ memset(set, ~0, sizeof(sigset_t));
+ return 0;
+}
+
+static __inline sighandler_t signal(int s, sighandler_t f) {
+ return bsd_signal(s, f);
+}
+
+__END_DECLS
+
+#endif /* _ANDROID_LEGACY_SIGNAL_INLINES_H_ */
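As a rough usage sketch (not part of the patch, and only relevant for pre-21 targets
where these inlines stand in for the libc functions), the sigset helpers above behave
like their out-of-line counterparts:

    #include <signal.h>
    #include <stdio.h>

    int main(void) {
      sigset_t mask;
      if (sigemptyset(&mask) == -1 || sigaddset(&mask, SIGUSR1) == -1) {
        perror("building signal mask");
        return 1;
      }
      /* sigismember() reports membership the same way the libc version does. */
      printf("SIGUSR1 in mask: %d\n", sigismember(&mask, SIGUSR1));
      return 0;
    }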
diff --git a/libc/include/android/legacy_stdlib_inlines.h b/libc/include/android/legacy_stdlib_inlines.h
new file mode 100644
index 0000000..58a2a9e
--- /dev/null
+++ b/libc/include/android/legacy_stdlib_inlines.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ANDROID_LEGACY_STDLIB_INLINES_H_
+#define _ANDROID_LEGACY_STDLIB_INLINES_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+static __inline float strtof(const char *nptr, char **endptr) {
+ return (float)strtod(nptr, endptr);
+}
+
+static __inline double atof(const char *nptr) { return (strtod(nptr, NULL)); }
+
+static __inline int abs(int __n) { return (__n < 0) ? -__n : __n; }
+
+static __inline long labs(long __n) { return (__n < 0L) ? -__n : __n; }
+
+static __inline long long llabs(long long __n) {
+ return (__n < 0LL) ? -__n : __n;
+}
+
+static __inline int rand(void) { return (int)lrand48(); }
+
+static __inline void srand(unsigned int __s) { srand48(__s); }
+
+static __inline long random(void) { return lrand48(); }
+
+static __inline void srandom(unsigned int __s) { srand48(__s); }
+
+static __inline int grantpt(int __fd __attribute((unused))) {
+ return 0; /* devpts does this all for us! */
+}
+
+__END_DECLS
+
+#endif /* _ANDROID_LEGACY_STDLIB_INLINES_H_ */
diff --git a/libc/include/android/legacy_sys_atomics_inlines.h b/libc/include/android/legacy_sys_atomics_inlines.h
new file mode 100644
index 0000000..85cbade
--- /dev/null
+++ b/libc/include/android/legacy_sys_atomics_inlines.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ANDROID_LEGACY_SYS_ATOMICS_INLINES_H_
+#define _ANDROID_LEGACY_SYS_ATOMICS_INLINES_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+/* Note: atomic operations that were exported by the C library didn't
+ * provide any memory barriers, which created potential issues on
+ * multi-core devices. We now define them as inlined calls to
+ * GCC sync builtins, which always provide a full barrier.
+ *
+ * NOTE: The C library still exports atomic functions by the same
+ * name to ensure ABI stability for existing NDK machine code.
+ *
+ * If you are an NDK developer, we encourage you to rebuild your
+ * unmodified sources against this header as soon as possible.
+ */
+#define __ATOMIC_INLINE__ static __inline __attribute__((always_inline))
+
+__ATOMIC_INLINE__ int __atomic_cmpxchg(int old, int _new, volatile int *ptr) {
+ /* We must return 0 on success */
+ return __sync_val_compare_and_swap(ptr, old, _new) != old;
+}
+
+__ATOMIC_INLINE__ int __atomic_swap(int _new, volatile int *ptr) {
+ int prev;
+ do {
+ prev = *ptr;
+ } while (__sync_val_compare_and_swap(ptr, prev, _new) != prev);
+ return prev;
+}
+
+__ATOMIC_INLINE__ int __atomic_dec(volatile int *ptr) {
+ return __sync_fetch_and_sub(ptr, 1);
+}
+
+__ATOMIC_INLINE__ int __atomic_inc(volatile int *ptr) {
+ return __sync_fetch_and_add(ptr, 1);
+}
+
+__END_DECLS
+
+#endif /* _ANDROID_LEGACY_SYS_ATOMICS_INLINES_H_ */
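A small usage sketch (illustrative, assuming a pre-21 target so the inlines above are
in effect) of the return-value conventions these wrappers keep from the old
out-of-line functions:

    #include <assert.h>
    #include <sys/atomics.h>

    int main(void) {
      volatile int value = 1;
      assert(__atomic_cmpxchg(1, 2, &value) == 0);  /* swapped: returns 0 on success */
      assert(value == 2);
      assert(__atomic_cmpxchg(1, 3, &value) != 0);  /* old value is no longer 1 */
      assert(__atomic_swap(7, &value) == 2);        /* returns the previous value */
      assert(__atomic_inc(&value) == 7);            /* fetch-then-add, returns old value */
      assert(value == 8);
      return 0;
    }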
diff --git a/libc/include/android/legacy_sys_stat_inlines.h b/libc/include/android/legacy_sys_stat_inlines.h
new file mode 100644
index 0000000..f6d3c0f
--- /dev/null
+++ b/libc/include/android/legacy_sys_stat_inlines.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ANDROID_LEGACY_SYS_STAT_INLINES_H_
+#define _ANDROID_LEGACY_SYS_STAT_INLINES_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+static __inline int mkfifo(const char *__p, mode_t __m) {
+ return mknod(__p, (__m & ~S_IFMT) | S_IFIFO, (dev_t)0);
+}
+
+__END_DECLS
+
+#endif /* _ANDROID_LEGACY_SYS_STAT_INLINES_H_ */
diff --git a/libc/include/android/legacy_termios_inlines.h b/libc/include/android/legacy_termios_inlines.h
new file mode 100644
index 0000000..fb61f27
--- /dev/null
+++ b/libc/include/android/legacy_termios_inlines.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ANDROID_LEGACY_TERMIOS_INLINES_H_
+#define _ANDROID_LEGACY_TERMIOS_INLINES_H_
+
+#include <linux/termios.h>
+#include <sys/cdefs.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+
+__BEGIN_DECLS
+
+static __inline int tcgetattr(int fd, struct termios *s) {
+ return ioctl(fd, TCGETS, s);
+}
+
+static __inline int tcsetattr(int fd, int __opt, const struct termios *s) {
+ return ioctl(fd, __opt, (void *)s);
+}
+
+static __inline int tcflow(int fd, int action) {
+ return ioctl(fd, TCXONC, (void *)(intptr_t)action);
+}
+
+static __inline int tcflush(int fd, int __queue) {
+ return ioctl(fd, TCFLSH, (void *)(intptr_t)__queue);
+}
+
+static __inline pid_t tcgetsid(int fd) {
+ pid_t _pid;
+ return ioctl(fd, TIOCGSID, &_pid) ? (pid_t)-1 : _pid;
+}
+
+static __inline int tcsendbreak(int fd, int __duration) {
+ return ioctl(fd, TCSBRKP, (void *)(uintptr_t)__duration);
+}
+
+static __inline speed_t cfgetospeed(const struct termios *s) {
+ return (speed_t)(s->c_cflag & CBAUD);
+}
+
+static __inline int cfsetospeed(struct termios *s, speed_t speed) {
+ s->c_cflag = (s->c_cflag & ~CBAUD) | (speed & CBAUD);
+ return 0;
+}
+
+static __inline speed_t cfgetispeed(const struct termios *s) {
+ return (speed_t)(s->c_cflag & CBAUD);
+}
+
+static __inline int cfsetispeed(struct termios *s, speed_t speed) {
+ s->c_cflag = (s->c_cflag & ~CBAUD) | (speed & CBAUD);
+ return 0;
+}
+
+static __inline void cfmakeraw(struct termios *s) {
+ s->c_iflag &=
+ ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON);
+ s->c_oflag &= ~OPOST;
+ s->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
+ s->c_cflag &= ~(CSIZE | PARENB);
+ s->c_cflag |= CS8;
+}
+
+__END_DECLS
+
+#endif /* _ANDROID_LEGACY_TERMIOS_INLINES_H_ */
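As a quick illustration (not part of the patch, and only meaningful on pre-21 targets
where these inlines replace the libc functions), the wrappers compose like the real
termios API:

    #include <stdio.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void) {
      struct termios t;
      if (tcgetattr(STDIN_FILENO, &t) == -1) {  /* inline expands to ioctl(fd, TCGETS, &t) */
        perror("tcgetattr");
        return 1;
      }
      cfmakeraw(&t);  /* clears ICANON/ECHO/ISIG/IEXTEN and OPOST, forces CS8 */
      printf("output speed bits: %lu\n", (unsigned long) cfgetospeed(&t));
      return 0;
    }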
diff --git a/libc/include/ctype.h b/libc/include/ctype.h
index d05a952..83b5ba7 100644
--- a/libc/include/ctype.h
+++ b/libc/include/ctype.h
@@ -43,36 +43,37 @@
#include <sys/cdefs.h>
#include <xlocale.h>
-#define _CTYPE_U 0x01
-#define _CTYPE_L 0x02
-#define _CTYPE_D 0x04
-#define _CTYPE_S 0x08
-#define _CTYPE_P 0x10
-#define _CTYPE_C 0x20
-#define _CTYPE_X 0x40
-#define _CTYPE_B 0x80
-#define _CTYPE_R (_CTYPE_P|_CTYPE_U|_CTYPE_L|_CTYPE_D|_CTYPE_B)
-#define _CTYPE_A (_CTYPE_L|_CTYPE_U)
+#define _CTYPE_U 0x01
+#define _CTYPE_L 0x02
+#define _CTYPE_D 0x04
+#define _CTYPE_S 0x08
+#define _CTYPE_P 0x10
+#define _CTYPE_C 0x20
+#define _CTYPE_X 0x40
+#define _CTYPE_B 0x80
+#define _CTYPE_R (_CTYPE_P|_CTYPE_U|_CTYPE_L|_CTYPE_D|_CTYPE_B)
+#define _CTYPE_A (_CTYPE_L|_CTYPE_U)
__BEGIN_DECLS
extern const char *_ctype_;
#if defined(__GNUC__) || defined(_ANSI_LIBRARY) || defined(lint)
-int isalnum(int);
-int isalpha(int);
-int iscntrl(int);
-int isdigit(int);
-int isgraph(int);
-int islower(int);
-int isprint(int);
-int ispunct(int);
-int isspace(int);
-int isupper(int);
-int isxdigit(int);
-int tolower(int);
-int toupper(int);
+int isalnum(int);
+int isalpha(int);
+int iscntrl(int);
+int isdigit(int);
+int isgraph(int);
+int islower(int);
+int isprint(int);
+int ispunct(int);
+int isspace(int);
+int isupper(int);
+int isxdigit(int);
+int tolower(int);
+int toupper(int);
+#if __ANDROID_API__ >= 21
int isalnum_l(int, locale_t);
int isalpha_l(int, locale_t);
int isblank_l(int, locale_t);
@@ -87,17 +88,18 @@
int isxdigit_l(int, locale_t);
int tolower_l(int, locale_t);
int toupper_l(int, locale_t);
+#endif /* __ANDROID_API__ >= 21 */
#if __BSD_VISIBLE || __ISO_C_VISIBLE >= 1999 || __POSIX_VISIBLE > 200112 \
|| __XPG_VISIBLE > 600
-int isblank(int);
+int isblank(int);
#endif
#if __BSD_VISIBLE || __XPG_VISIBLE
-int isascii(int);
-int toascii(int);
-int _tolower(int);
-int _toupper(int);
+int isascii(int);
+int toascii(int);
+int _tolower(int);
+int _toupper(int);
#endif /* __BSD_VISIBLE || __XPG_VISIBLE */
#endif /* __GNUC__ || _ANSI_LIBRARY || lint */
diff --git a/libc/include/errno.h b/libc/include/errno.h
index 1a36b7a..82f4b42 100644
--- a/libc/include/errno.h
+++ b/libc/include/errno.h
@@ -46,6 +46,10 @@
/* a macro expanding to the errno l-value */
#define errno (*__errno())
+#if __ANDROID_API__ < 21
+#include <android/legacy_errno_inlines.h>
+#endif
+
__END_DECLS
#endif /* _ERRNO_H */
diff --git a/libc/include/poll.h b/libc/include/poll.h
index 0199cab..7c16d81 100644
--- a/libc/include/poll.h
+++ b/libc/include/poll.h
@@ -38,8 +38,52 @@
typedef unsigned int nfds_t;
-extern int poll(struct pollfd*, nfds_t, int);
-extern int ppoll(struct pollfd*, nfds_t, const struct timespec*, const sigset_t*);
+int poll(struct pollfd*, nfds_t, int);
+int ppoll(struct pollfd*, nfds_t, const struct timespec*, const sigset_t*);
+
+int __poll_chk(struct pollfd*, nfds_t, int, size_t);
+int __poll_real(struct pollfd*, nfds_t, int) __RENAME(poll);
+__errordecl(__poll_too_small_error, "poll: pollfd array smaller than fd count");
+
+int __ppoll_chk(struct pollfd*, nfds_t, const struct timespec*, const sigset_t*, size_t);
+int __ppoll_real(struct pollfd*, nfds_t, const struct timespec*, const sigset_t*) __RENAME(ppoll);
+__errordecl(__ppoll_too_small_error, "ppoll: pollfd array smaller than fd count");
+
+#if defined(__BIONIC_FORTIFY)
+
+__BIONIC_FORTIFY_INLINE
+int poll(struct pollfd* fds, nfds_t fd_count, int timeout) {
+#if defined(__clang__)
+ return __poll_chk(fds, fd_count, timeout, __bos(fds));
+#else
+ if (__bos(fds) != __BIONIC_FORTIFY_UNKNOWN_SIZE) {
+ if (!__builtin_constant_p(fd_count)) {
+ return __poll_chk(fds, fd_count, timeout, __bos(fds));
+ } else if (__bos(fds) / sizeof(*fds) < fd_count) {
+ __poll_too_small_error();
+ }
+ }
+ return __poll_real(fds, fd_count, timeout);
+#endif
+}
+
+__BIONIC_FORTIFY_INLINE
+int ppoll(struct pollfd* fds, nfds_t fd_count, const struct timespec* timeout, const sigset_t* mask) {
+#if defined(__clang__)
+ return __ppoll_chk(fds, fd_count, timeout, mask, __bos(fds));
+#else
+ if (__bos(fds) != __BIONIC_FORTIFY_UNKNOWN_SIZE) {
+ if (!__builtin_constant_p(fd_count)) {
+ return __ppoll_chk(fds, fd_count, timeout, mask, __bos(fds));
+ } else if (__bos(fds) / sizeof(*fds) < fd_count) {
+ __ppoll_too_small_error();
+ }
+ }
+ return __ppoll_real(fds, fd_count, timeout, mask);
+#endif
+}
+
+#endif
__END_DECLS
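For reference, a minimal sketch (mirroring the poll_fortified death test added further
down) of the overflow the new __poll_chk()/__ppoll_chk() paths are meant to catch:

    #include <poll.h>
    #include <stdlib.h>

    int main(void) {
      struct pollfd fds[1] = {{0, POLLIN, 0}};
      nfds_t fd_count = atoi("2");  /* not a compile-time constant, so the run-time check is kept */
      /* Built with -D_FORTIFY_SOURCE=2 against bionic, this goes through __poll_chk()
       * and aborts because fd_count exceeds the visible size of fds. */
      return poll(fds, fd_count, 0);
    }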
diff --git a/libc/include/pthread.h b/libc/include/pthread.h
index 4281132..8d053ae 100644
--- a/libc/include/pthread.h
+++ b/libc/include/pthread.h
@@ -43,7 +43,7 @@
#endif
typedef struct {
- int volatile value;
+ int value;
#ifdef __LP64__
char __reserved[36];
#endif
diff --git a/libc/include/signal.h b/libc/include/signal.h
index 39d37e9..554e0ac 100644
--- a/libc/include/signal.h
+++ b/libc/include/signal.h
@@ -105,15 +105,15 @@
extern int sigaction(int, const struct sigaction*, struct sigaction*);
-extern sighandler_t signal(int, sighandler_t);
+_BIONIC_NOT_BEFORE_21(extern sighandler_t signal(int, sighandler_t);)
extern int siginterrupt(int, int);
-extern int sigaddset(sigset_t*, int);
-extern int sigdelset(sigset_t*, int);
-extern int sigemptyset(sigset_t*);
-extern int sigfillset(sigset_t*);
-extern int sigismember(const sigset_t*, int);
+_BIONIC_NOT_BEFORE_21(extern int sigaddset(sigset_t*, int);)
+_BIONIC_NOT_BEFORE_21(extern int sigdelset(sigset_t*, int);)
+_BIONIC_NOT_BEFORE_21(extern int sigemptyset(sigset_t*);)
+_BIONIC_NOT_BEFORE_21(extern int sigfillset(sigset_t*);)
+_BIONIC_NOT_BEFORE_21(extern int sigismember(const sigset_t*, int);)
extern int sigpending(sigset_t*) __nonnull((1));
extern int sigprocmask(int, const sigset_t*, sigset_t*);
@@ -136,6 +136,10 @@
extern int sigtimedwait(const sigset_t*, siginfo_t*, const struct timespec*);
extern int sigwaitinfo(const sigset_t*, siginfo_t*);
+#if __ANDROID_API__ < 21
+#include <android/legacy_signal_inlines.h>
+#endif
+
__END_DECLS
#endif /* _SIGNAL_H_ */
diff --git a/libc/include/stdio.h b/libc/include/stdio.h
index c0dac1a..b04aa24 100644
--- a/libc/include/stdio.h
+++ b/libc/include/stdio.h
@@ -207,16 +207,9 @@
#define L_tmpnam 1024 /* XXX must be == PATH_MAX */
#define TMP_MAX 308915776
-/* Always ensure that these are consistent with <fcntl.h> and <unistd.h>! */
-#ifndef SEEK_SET
-#define SEEK_SET 0 /* set file offset to offset */
-#endif
-#ifndef SEEK_CUR
-#define SEEK_CUR 1 /* set file offset to current plus offset */
-#endif
-#ifndef SEEK_END
-#define SEEK_END 2 /* set file offset to EOF plus offset */
-#endif
+#define SEEK_SET 0
+#define SEEK_CUR 1
+#define SEEK_END 2
/*
* Functions defined in ANSI C standard.
diff --git a/libc/include/stdlib.h b/libc/include/stdlib.h
index 4cb288d..cbd7aeb 100644
--- a/libc/include/stdlib.h
+++ b/libc/include/stdlib.h
@@ -76,10 +76,10 @@
extern int posix_memalign(void **memptr, size_t alignment, size_t size);
-extern double atof(const char*);
+_BIONIC_NOT_BEFORE_21(extern double atof(const char*);)
extern double strtod(const char*, char**) __LIBC_ABI_PUBLIC__;
-extern float strtof(const char*, char**) __LIBC_ABI_PUBLIC__;
+_BIONIC_NOT_BEFORE_21(extern float strtof(const char*, char**) __LIBC_ABI_PUBLIC__;)
extern long double strtold(const char*, char**) __LIBC_ABI_PUBLIC__;
extern long double strtold_l(const char *, char **, locale_t) __LIBC_ABI_PUBLIC__;
@@ -90,12 +90,12 @@
extern long atol(const char*) __purefunc;
extern long long atoll(const char*) __purefunc;
-extern int abs(int) __pure2;
-extern long labs(long) __pure2;
-extern long long llabs(long long) __pure2;
+_BIONIC_NOT_BEFORE_21(extern int abs(int) __pure2;)
+_BIONIC_NOT_BEFORE_21(extern long labs(long) __pure2;)
+_BIONIC_NOT_BEFORE_21(extern long long llabs(long long) __pure2;)
extern char * realpath(const char *path, char *resolved);
-extern int system(const char * string);
+extern int system(const char *string);
extern void * bsearch(const void *key, const void *base0,
size_t nmemb, size_t size,
@@ -109,9 +109,9 @@
#define RAND_MAX 0x7fffffff
-int rand(void);
+_BIONIC_NOT_BEFORE_21(int rand(void);)
int rand_r(unsigned int*);
-void srand(unsigned int);
+_BIONIC_NOT_BEFORE_21(void srand(unsigned int);)
double drand48(void);
double erand48(unsigned short[3]);
@@ -124,12 +124,12 @@
void srand48(long);
char* initstate(unsigned int, char*, size_t);
-long random(void);
+_BIONIC_NOT_BEFORE_21(long random(void);)
char* setstate(char*);
-void srandom(unsigned int);
+_BIONIC_NOT_BEFORE_21(void srandom(unsigned int);)
int getpt(void);
-int grantpt(int);
+_BIONIC_NOT_BEFORE_21(int grantpt(int);)
int posix_openpt(int);
char* ptsname(int);
int ptsname_r(int, char*, size_t);
@@ -172,6 +172,10 @@
extern size_t __ctype_get_mb_cur_max(void);
#define MB_CUR_MAX __ctype_get_mb_cur_max()
+#if __ANDROID_API__ < 21
+#include <android/legacy_stdlib_inlines.h>
+#endif
+
__END_DECLS
#endif /* _STDLIB_H */
diff --git a/libc/include/string.h b/libc/include/string.h
index 4ca77ae..d67928c 100644
--- a/libc/include/string.h
+++ b/libc/include/string.h
@@ -31,7 +31,6 @@
#include <sys/cdefs.h>
#include <stddef.h>
-#include <malloc.h>
#include <xlocale.h>
__BEGIN_DECLS
diff --git a/libc/include/sys/atomics.h b/libc/include/sys/atomics.h
new file mode 100644
index 0000000..38ab366
--- /dev/null
+++ b/libc/include/sys/atomics.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_ATOMICS_H_
+#define _SYS_ATOMICS_H_
+
+/*
+ * These got proper out-of-line definitions in L. Putting the inline definitions
+ * back for old targets brings us closer to being able to use one set of headers
+ * for all API levels.
+ *
+ * The other inlines we put back went into their appropriate headers, but the
+ * sys/atomics.h header was removed, so we'll just add these somewhere we can be
+ * sure they will be included.
+ */
+#if __ANDROID_API__ < 21
+#include <android/legacy_sys_atomics_inlines.h>
+#endif
+
+#endif /* _SYS_ATOMICS_H_ */
diff --git a/libc/include/sys/cdefs.h b/libc/include/sys/cdefs.h
index 21d59fa..48763d7 100644
--- a/libc/include/sys/cdefs.h
+++ b/libc/include/sys/cdefs.h
@@ -556,4 +556,10 @@
/* Used to rename functions so that the compiler emits a call to 'x' rather than the function this was applied to. */
#define __RENAME(x) __asm__(#x)
+#if __ANDROID_API__ >= 21
+#define _BIONIC_NOT_BEFORE_21(x) x
+#else
+#define _BIONIC_NOT_BEFORE_21(x)
+#endif /* __ANDROID_API__ >= 21 */
+
#endif /* !_SYS_CDEFS_H_ */
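A short sketch (hypothetical header snippet, not from the patch) of how the new macro
reads in practice: the wrapped declaration survives only for API 21 and later, leaving
room for the legacy inline headers to supply a definition on older targets.

    #include <sys/cdefs.h>
    #include <sys/types.h>

    _BIONIC_NOT_BEFORE_21(extern int mkfifo(const char*, mode_t);)
    /* __ANDROID_API__ >= 21: expands to the extern declaration above.             */
    /* __ANDROID_API__ <  21: expands to nothing; the static __inline mkfifo() in  */
    /*                        android/legacy_sys_stat_inlines.h is used instead.   */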
diff --git a/libc/include/sys/select.h b/libc/include/sys/select.h
index 553050b..0c4a823 100644
--- a/libc/include/sys/select.h
+++ b/libc/include/sys/select.h
@@ -31,7 +31,6 @@
#include <linux/time.h>
#include <signal.h>
-#include <string.h>
#include <sys/cdefs.h>
#include <sys/types.h>
@@ -49,7 +48,14 @@
#define __FDMASK(fd) (1UL << ((fd) % NFDBITS))
#define __FDS_BITS(set) (((fd_set*)(set))->fds_bits)
-#define FD_ZERO(set) (memset(set, 0, sizeof(*(fd_set*)(set))))
+/* Inline loop so we don't have to declare memset. */
+#define FD_ZERO(set) \
+ do { \
+ size_t __i; \
+ for (__i = 0; __i < __FDSET_LONGS; ++__i) { \
+ (set)->fds_bits[__i] = 0; \
+ } \
+ } while (0)
extern void __FD_CLR_chk(int, fd_set*, size_t);
extern void __FD_SET_chk(int, fd_set*, size_t);
diff --git a/libc/include/sys/stat.h b/libc/include/sys/stat.h
index eb9bf2e..c22516f 100644
--- a/libc/include/sys/stat.h
+++ b/libc/include/sys/stat.h
@@ -171,7 +171,7 @@
}
#endif /* defined(__BIONIC_FORTIFY) */
-extern int mkfifo(const char*, mode_t);
+_BIONIC_NOT_BEFORE_21(extern int mkfifo(const char*, mode_t);)
extern int mkfifoat(int, const char*, mode_t);
extern int fchmodat(int, const char*, mode_t, int);
@@ -183,6 +183,10 @@
extern int utimensat(int fd, const char *path, const struct timespec times[2], int flags);
extern int futimens(int fd, const struct timespec times[2]);
+#if __ANDROID_API__ < 21
+#include <android/legacy_sys_stat_inlines.h>
+#endif
+
__END_DECLS
#endif /* _SYS_STAT_H_ */
diff --git a/libc/include/termios.h b/libc/include/termios.h
index b9685ca..683fde2 100644
--- a/libc/include/termios.h
+++ b/libc/include/termios.h
@@ -35,6 +35,7 @@
__BEGIN_DECLS
+#if __ANDROID_API__ >= 21
speed_t cfgetispeed(const struct termios*);
speed_t cfgetospeed(const struct termios*);
void cfmakeraw(struct termios*);
@@ -48,6 +49,9 @@
pid_t tcgetsid(int);
int tcsendbreak(int, int);
int tcsetattr(int, int, const struct termios*);
+#else
+#include <android/legacy_termios_inlines.h>
+#endif
__END_DECLS
diff --git a/libc/include/unistd.h b/libc/include/unistd.h
index c755715..6403d4a 100644
--- a/libc/include/unistd.h
+++ b/libc/include/unistd.h
@@ -35,14 +35,19 @@
#include <sys/select.h>
#include <sys/sysconf.h>
+#include <machine/posix_limits.h>
+
__BEGIN_DECLS
-/* Standard file descriptor numbers. */
#define STDIN_FILENO 0
#define STDOUT_FILENO 1
#define STDERR_FILENO 2
-/* Values for whence in fseek and lseek */
+#define F_OK 0
+#define X_OK 1
+#define W_OK 2
+#define R_OK 4
+
#define SEEK_SET 0
#define SEEK_CUR 1
#define SEEK_END 2
@@ -68,8 +73,6 @@
#define _PC_PRIO_IO 18
#define _PC_SYNC_IO 19
-#include <machine/posix_limits.h>
-
extern char** environ;
extern __noreturn void _exit(int);
@@ -121,13 +124,6 @@
extern long fpathconf(int, int);
extern long pathconf(const char*, int);
-
-/* Macros for access() */
-#define R_OK 4 /* Read */
-#define W_OK 2 /* Write */
-#define X_OK 1 /* Execute */
-#define F_OK 0 /* Existence */
-
extern int access(const char*, int);
extern int faccessat(int, const char*, int, int);
extern int link(const char*, const char*);
diff --git a/libc/tools/gensyscalls.py b/libc/tools/gensyscalls.py
index 4e24077..7e11418 100755
--- a/libc/tools/gensyscalls.py
+++ b/libc/tools/gensyscalls.py
@@ -286,8 +286,9 @@
for alias in aliases:
stub += function_alias % { "func" : syscall["func"], "alias" : alias }
- # Use hidden visibility for any functions beginning with underscores.
- if pointer_length == 64 and syscall["func"].startswith("__"):
+ # Use hidden visibility on LP64 for any functions beginning with underscores.
+ # Force hidden visibility for any functions which begin with 3 underscores
+ if (pointer_length == 64 and syscall["func"].startswith("__")) or syscall["func"].startswith("___"):
stub += '.hidden ' + syscall["func"] + '\n'
return stub
diff --git a/libc/tools/zoneinfo/update-tzdata.py b/libc/tools/zoneinfo/update-tzdata.py
index 330f166..4847356 100755
--- a/libc/tools/zoneinfo/update-tzdata.py
+++ b/libc/tools/zoneinfo/update-tzdata.py
@@ -117,13 +117,20 @@
# Build the ICU tools.
print 'Configuring ICU tools...'
subprocess.check_call(['%s/runConfigureICU' % icu_dir, 'Linux'])
- print 'Making ICU tools...'
- subprocess.check_call(['make', '-j32'])
# Run the ICU tools.
os.chdir('tools/tzcode')
+
+ # The tz2icu tool only picks up icuregions and icuzones if they are in the CWD.
+ for icu_data_file in [ 'icuregions', 'icuzones']:
+ icu_data_file_source = '%s/tools/tzcode/%s' % (icu_dir, icu_data_file)
+ icu_data_file_symlink = './%s' % icu_data_file
+ os.symlink(icu_data_file_source, icu_data_file_symlink)
+
shutil.copyfile('%s/%s' % (original_working_dir, data_filename), data_filename)
print 'Making ICU data...'
+ # The Makefile assumes the existence of the bin directory.
+ os.mkdir('%s/bin' % icu_working_dir)
subprocess.check_call(['make'])
# Copy the source file to its ultimate destination.
diff --git a/tests/Android.mk b/tests/Android.mk
index 82a92f0..bd4695f 100644
--- a/tests/Android.mk
+++ b/tests/Android.mk
@@ -329,6 +329,7 @@
bionic-unit-tests-glibc_whole_static_libraries := \
libBionicStandardTests \
libBionicGtestMain \
+ $(fortify_libs) \
bionic-unit-tests-glibc_ldlibs := \
-lrt -ldl -lutil \
diff --git a/tests/fortify_test.cpp b/tests/fortify_test.cpp
index 48764aa..6cbc695 100644
--- a/tests/fortify_test.cpp
+++ b/tests/fortify_test.cpp
@@ -19,6 +19,7 @@
#include <fcntl.h>
#include <malloc.h>
+#include <poll.h>
#include <signal.h>
#include <stdarg.h>
#include <string.h>
@@ -26,6 +27,12 @@
#include <sys/stat.h>
#include <sys/types.h>
+#if defined(__BIONIC__)
+#define ASSERT_FORTIFY(expr) ASSERT_EXIT(expr, testing::KilledBySignal(SIGABRT), "FORTIFY")
+#else
+#define ASSERT_FORTIFY(expr) ASSERT_EXIT(expr, testing::KilledBySignal(SIGABRT), "")
+#endif
+
// Fortify test code needs to run multiple times, so TEST_NAME macro is used to
// distinguish different tests. TEST_NAME is defined in compilation command.
#define DEATHTEST_PASTER(name) name##_DeathTest
@@ -48,8 +55,7 @@
TEST_F(DEATHTEST, stpncpy_fortified2) {
foo myfoo;
int copy_amt = atoi("11");
- ASSERT_EXIT(stpncpy(myfoo.a, "01234567890", copy_amt),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(stpncpy(myfoo.a, "01234567890", copy_amt));
}
#endif
@@ -60,8 +66,7 @@
foo myfoo;
memset(&myfoo, 0, sizeof(myfoo));
myfoo.one[0] = 'A'; // not null terminated string
- ASSERT_EXIT(stpncpy(myfoo.b, myfoo.one, sizeof(myfoo.b)),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(stpncpy(myfoo.b, myfoo.one, sizeof(myfoo.b)));
}
#endif
@@ -71,8 +76,7 @@
TEST_F(DEATHTEST, strncpy_fortified2) {
foo myfoo;
int copy_amt = atoi("11");
- ASSERT_EXIT(strncpy(myfoo.a, "01234567890", copy_amt),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strncpy(myfoo.a, "01234567890", copy_amt));
}
#endif
@@ -83,8 +87,7 @@
foo myfoo;
memset(&myfoo, 0, sizeof(myfoo));
myfoo.one[0] = 'A'; // not null terminated string
- ASSERT_EXIT(strncpy(myfoo.b, myfoo.one, sizeof(myfoo.b)),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strncpy(myfoo.b, myfoo.one, sizeof(myfoo.b)));
}
#endif
@@ -95,8 +98,7 @@
foo myfoo;
char source_buf[15];
memcpy(source_buf, "12345678901234", 15);
- ASSERT_EXIT(sprintf(myfoo.a, "%s", source_buf),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(sprintf(myfoo.a, "%s", source_buf));
}
#endif
@@ -105,8 +107,7 @@
// this buffer overflow. TODO: Fix clang.
TEST_F(DEATHTEST, sprintf2_fortified2) {
foo myfoo;
- ASSERT_EXIT(sprintf(myfoo.a, "0123456789"),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(sprintf(myfoo.a, "0123456789"));
}
#endif
@@ -125,11 +126,11 @@
}
TEST_F(DEATHTEST, vsprintf_fortified2) {
- ASSERT_EXIT(vsprintf_helper2("%s", "0123456789"), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(vsprintf_helper2("%s", "0123456789"));
}
TEST_F(DEATHTEST, vsprintf2_fortified2) {
- ASSERT_EXIT(vsprintf_helper2("0123456789"), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(vsprintf_helper2("0123456789"));
}
#endif
@@ -149,11 +150,11 @@
}
TEST_F(DEATHTEST, vsnprintf_fortified2) {
- ASSERT_EXIT(vsnprintf_helper2("%s", "0123456789"), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(vsnprintf_helper2("%s", "0123456789"));
}
TEST_F(DEATHTEST, vsnprintf2_fortified2) {
- ASSERT_EXIT(vsnprintf_helper2("0123456789"), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(vsnprintf_helper2("0123456789"));
}
#endif
@@ -165,8 +166,7 @@
#if defined(__BIONIC__)
foo myfoo;
char* src = strdup("");
- ASSERT_EXIT(stpcpy(myfoo.empty, src),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(stpcpy(myfoo.empty, src));
free(src);
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
@@ -182,8 +182,7 @@
#if defined(__BIONIC__)
foo myfoo;
char* src = strdup("");
- ASSERT_EXIT(strcpy(myfoo.empty, src),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcpy(myfoo.empty, src));
free(src);
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
@@ -199,8 +198,7 @@
#if defined(__BIONIC__)
foo myfoo;
char* src = strdup("1");
- ASSERT_EXIT(strcpy(myfoo.empty, src),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcpy(myfoo.empty, src));
free(src);
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
@@ -216,8 +214,7 @@
#if defined(__BIONIC__)
foo myfoo;
char* src = strdup("12");
- ASSERT_EXIT(strcpy(myfoo.one, src),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcpy(myfoo.one, src));
free(src);
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
@@ -233,8 +230,7 @@
foo myfoo;
memcpy(myfoo.a, "0123456789", sizeof(myfoo.a));
myfoo.b[0] = '\0';
- ASSERT_EXIT(printf("%s", strchr(myfoo.a, 'a')),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(printf("%s", strchr(myfoo.a, 'a')));
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
@@ -249,8 +245,7 @@
foo myfoo;
memcpy(myfoo.a, "0123456789", 10);
memcpy(myfoo.b, "01234", 6);
- ASSERT_EXIT(printf("%s", strrchr(myfoo.a, 'a')),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(printf("%s", strrchr(myfoo.a, 'a')));
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
@@ -265,8 +260,7 @@
foo myfoo;
strcpy(myfoo.a, "01");
size_t n = strlen(myfoo.a);
- ASSERT_EXIT(strlcpy(myfoo.one, myfoo.a, n),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strlcpy(myfoo.one, myfoo.a, n));
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
@@ -282,8 +276,7 @@
strcpy(myfoo.a, "01");
myfoo.one[0] = '\0';
size_t n = strlen(myfoo.a);
- ASSERT_EXIT(strlcat(myfoo.one, myfoo.a, n),
- testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strlcat(myfoo.one, myfoo.a, n));
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
@@ -297,7 +290,7 @@
foo myfoo;
size_t n = atoi("10"); // avoid compiler optimizations
strncpy(myfoo.a, "012345678", n);
- ASSERT_EXIT(strncat(myfoo.a, "9", n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strncat(myfoo.a, "9", n));
}
#endif
@@ -308,7 +301,7 @@
foo myfoo;
myfoo.a[0] = '\0';
size_t n = atoi("10"); // avoid compiler optimizations
- ASSERT_EXIT(strncat(myfoo.a, "0123456789", n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strncat(myfoo.a, "0123456789", n));
}
#endif
@@ -317,7 +310,7 @@
memcpy(myfoo.a, "0123456789", sizeof(myfoo.a)); // unterminated string
myfoo.b[0] = '\0';
size_t n = atoi("10"); // avoid compiler optimizations
- ASSERT_EXIT(strncat(myfoo.b, myfoo.a, n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strncat(myfoo.b, myfoo.a, n));
}
#ifndef __clang__
@@ -328,7 +321,7 @@
strcpy(src, "0123456789");
foo myfoo;
myfoo.a[0] = '\0';
- ASSERT_EXIT(strcat(myfoo.a, src), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcat(myfoo.a, src));
}
#endif
@@ -336,21 +329,21 @@
foo myfoo;
memcpy(myfoo.a, "0123456789", sizeof(myfoo.a)); // unterminated string
myfoo.b[0] = '\0';
- ASSERT_EXIT(strcat(myfoo.b, myfoo.a), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcat(myfoo.b, myfoo.a));
}
TEST_F(DEATHTEST, snprintf_fortified2) {
foo myfoo;
strcpy(myfoo.a, "012345678");
size_t n = strlen(myfoo.a) + 2;
- ASSERT_EXIT(snprintf(myfoo.b, n, "a%s", myfoo.a), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(snprintf(myfoo.b, n, "a%s", myfoo.a));
}
TEST_F(DEATHTEST, bzero_fortified2) {
foo myfoo;
memcpy(myfoo.b, "0123456789", sizeof(myfoo.b));
size_t n = atoi("11");
- ASSERT_EXIT(bzero(myfoo.b, n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(bzero(myfoo.b, n));
}
#endif /* defined(_FORTIFY_SOURCE) && _FORTIFY_SOURCE=2 */
@@ -360,7 +353,7 @@
#if defined(__BIONIC__)
char buf[10];
char *orig = strdup("0123456789");
- ASSERT_EXIT(strcpy(buf, orig), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcpy(buf, orig));
free(orig);
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
@@ -372,7 +365,7 @@
#if defined(__BIONIC__)
char buf[0];
char *orig = strdup("");
- ASSERT_EXIT(strcpy(buf, orig), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcpy(buf, orig));
free(orig);
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
@@ -384,7 +377,7 @@
#if defined(__BIONIC__)
char buf[0];
char *orig = strdup("1");
- ASSERT_EXIT(strcpy(buf, orig), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcpy(buf, orig));
free(orig);
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
@@ -396,7 +389,7 @@
#if defined(__BIONIC__)
char buf[1];
char *orig = strdup("12");
- ASSERT_EXIT(strcpy(buf, orig), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcpy(buf, orig));
free(orig);
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
@@ -407,7 +400,7 @@
#if defined(__BIONIC__)
char buf[10];
memcpy(buf, "0123456789", sizeof(buf));
- ASSERT_EXIT(printf("%zd", strlen(buf)), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(printf("%zd", strlen(buf)));
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
@@ -417,7 +410,7 @@
#if defined(__BIONIC__)
char buf[10];
memcpy(buf, "0123456789", sizeof(buf));
- ASSERT_EXIT(printf("%s", strchr(buf, 'a')), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(printf("%s", strchr(buf, 'a')));
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
@@ -427,7 +420,7 @@
#if defined(__BIONIC__)
char buf[10];
memcpy(buf, "0123456789", sizeof(buf));
- ASSERT_EXIT(printf("%s", strrchr(buf, 'a')), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(printf("%s", strrchr(buf, 'a')));
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
@@ -439,7 +432,7 @@
char bufb[10];
strcpy(bufa, "01234567890123");
size_t n = strlen(bufa);
- ASSERT_EXIT(strlcpy(bufb, bufa, n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strlcpy(bufb, bufa, n));
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
@@ -452,7 +445,7 @@
bufb[0] = '\0';
strcpy(bufa, "01234567890123");
size_t n = strlen(bufa);
- ASSERT_EXIT(strlcat(bufb, bufa, n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strlcat(bufb, bufa, n));
#else // __BIONIC__
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
@@ -462,7 +455,7 @@
char buf[10];
char source_buf[15];
memcpy(source_buf, "12345678901234", 15);
- ASSERT_EXIT(sprintf(buf, "%s", source_buf), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(sprintf(buf, "%s", source_buf));
}
#ifndef __clang__
@@ -472,14 +465,14 @@
char* buf = (char *) malloc(10);
char source_buf[11];
memcpy(source_buf, "1234567890", 11);
- ASSERT_EXIT(sprintf(buf, "%s", source_buf), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(sprintf(buf, "%s", source_buf));
free(buf);
}
#endif
TEST_F(DEATHTEST, sprintf2_fortified) {
char buf[5];
- ASSERT_EXIT(sprintf(buf, "aaaaa"), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(sprintf(buf, "aaaaa"));
}
static int vsprintf_helper(const char *fmt, ...) {
@@ -494,11 +487,11 @@
}
TEST_F(DEATHTEST, vsprintf_fortified) {
- ASSERT_EXIT(vsprintf_helper("%s", "0123456789"), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(vsprintf_helper("%s", "0123456789"));
}
TEST_F(DEATHTEST, vsprintf2_fortified) {
- ASSERT_EXIT(vsprintf_helper("0123456789"), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(vsprintf_helper("0123456789"));
}
static int vsnprintf_helper(const char *fmt, ...) {
@@ -514,25 +507,25 @@
}
TEST_F(DEATHTEST, vsnprintf_fortified) {
- ASSERT_EXIT(vsnprintf_helper("%s", "0123456789"), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(vsnprintf_helper("%s", "0123456789"));
}
TEST_F(DEATHTEST, vsnprintf2_fortified) {
- ASSERT_EXIT(vsnprintf_helper("0123456789"), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(vsnprintf_helper("0123456789"));
}
TEST_F(DEATHTEST, strncat_fortified) {
char buf[10];
size_t n = atoi("10"); // avoid compiler optimizations
strncpy(buf, "012345678", n);
- ASSERT_EXIT(strncat(buf, "9", n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strncat(buf, "9", n));
}
TEST_F(DEATHTEST, strncat2_fortified) {
char buf[10];
buf[0] = '\0';
size_t n = atoi("10"); // avoid compiler optimizations
- ASSERT_EXIT(strncat(buf, "0123456789", n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strncat(buf, "0123456789", n));
}
TEST_F(DEATHTEST, strcat_fortified) {
@@ -540,14 +533,14 @@
strcpy(src, "0123456789");
char buf[10];
buf[0] = '\0';
- ASSERT_EXIT(strcat(buf, src), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strcat(buf, src));
}
TEST_F(DEATHTEST, memmove_fortified) {
char buf[20];
strcpy(buf, "0123456789");
size_t n = atoi("10");
- ASSERT_EXIT(memmove(buf + 11, buf, n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(memmove(buf + 11, buf, n));
}
TEST_F(DEATHTEST, memcpy_fortified) {
@@ -555,7 +548,7 @@
char bufb[10];
strcpy(bufa, "012345678");
size_t n = atoi("11");
- ASSERT_EXIT(memcpy(bufb, bufa, n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(memcpy(bufb, bufa, n));
}
TEST_F(DEATHTEST, stpncpy_fortified) {
@@ -563,14 +556,14 @@
char bufb[10];
strcpy(bufa, "01234567890123");
size_t n = strlen(bufa);
- ASSERT_EXIT(stpncpy(bufb, bufa, n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(stpncpy(bufb, bufa, n));
}
TEST_F(DEATHTEST, stpncpy2_fortified) {
char dest[11];
char src[10];
memcpy(src, "0123456789", sizeof(src)); // src is not null terminated
- ASSERT_EXIT(stpncpy(dest, src, sizeof(dest)), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(stpncpy(dest, src, sizeof(dest)));
}
TEST_F(DEATHTEST, strncpy_fortified) {
@@ -578,7 +571,7 @@
char bufb[10];
strcpy(bufa, "01234567890123");
size_t n = strlen(bufa);
- ASSERT_EXIT(strncpy(bufb, bufa, n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strncpy(bufb, bufa, n));
}
@@ -586,7 +579,7 @@
char dest[11];
char src[10];
memcpy(src, "0123456789", sizeof(src)); // src is not null terminated
- ASSERT_EXIT(strncpy(dest, src, sizeof(dest)), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(strncpy(dest, src, sizeof(dest)));
}
TEST_F(DEATHTEST, snprintf_fortified) {
@@ -594,55 +587,46 @@
char bufb[10];
strcpy(bufa, "0123456789");
size_t n = strlen(bufa) + 1;
- ASSERT_EXIT(snprintf(bufb, n, "%s", bufa), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(snprintf(bufb, n, "%s", bufa));
}
TEST_F(DEATHTEST, bzero_fortified) {
char buf[10];
memcpy(buf, "0123456789", sizeof(buf));
size_t n = atoi("11");
- ASSERT_EXIT(bzero(buf, n), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(bzero(buf, n));
}
TEST_F(DEATHTEST, umask_fortified) {
mode_t mask = atoi("1023"); // 01777 in octal
- ASSERT_EXIT(umask(mask), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(umask(mask));
}
TEST_F(DEATHTEST, recv_fortified) {
size_t data_len = atoi("11"); // suppress compiler optimizations
char buf[10];
- ASSERT_EXIT(recv(0, buf, data_len, 0), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(recv(0, buf, data_len, 0));
}
TEST_F(DEATHTEST, FD_ISSET_fortified) {
#if defined(__BIONIC__) // glibc catches this at compile-time.
fd_set set;
memset(&set, 0, sizeof(set));
- ASSERT_EXIT(FD_ISSET(-1, &set), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(FD_ISSET(-1, &set));
#endif
}
TEST_F(DEATHTEST, FD_ISSET_2_fortified) {
char buf[1];
fd_set* set = (fd_set*) buf;
- ASSERT_EXIT(FD_ISSET(0, set), testing::KilledBySignal(SIGABRT), "");
-}
-
-// gtest's ASSERT_EXIT needs a valid expression, but glibc has a do-while macro.
-static void FD_ZERO_function(fd_set* s) { FD_ZERO(s); }
-
-TEST_F(DEATHTEST, FD_ZERO_fortified) {
- char buf[1];
- fd_set* set = (fd_set*) buf;
- ASSERT_EXIT(FD_ZERO_function(set), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(FD_ISSET(0, set));
}
TEST_F(DEATHTEST, read_fortified) {
char buf[1];
size_t ct = atoi("2"); // prevent optimizations
int fd = open("/dev/null", O_RDONLY);
- ASSERT_EXIT(read(fd, buf, ct), testing::KilledBySignal(SIGABRT), "");
+ ASSERT_FORTIFY(read(fd, buf, ct));
close(fd);
}
@@ -950,3 +934,15 @@
sprintf(BUF_AND_CONTENTS(buf));
EXPECT_STREQ(CONTENTS, buf);
}
+
+TEST_F(DEATHTEST, poll_fortified) {
+ nfds_t fd_count = atoi("2"); // suppress compiler optimizations
+ pollfd buf[1] = {{0, POLLIN, 0}};
+ ASSERT_FORTIFY(poll(buf, fd_count, -1));
+}
+
+TEST_F(DEATHTEST, ppoll_fortified) {
+ nfds_t fd_count = atoi("2"); // suppress compiler optimizations
+ pollfd buf[1] = {{0, POLLIN, 0}};
+ ASSERT_FORTIFY(ppoll(buf, fd_count, NULL, NULL));
+}
diff --git a/tests/pthread_test.cpp b/tests/pthread_test.cpp
index cb32079..5dc60ee 100644
--- a/tests/pthread_test.cpp
+++ b/tests/pthread_test.cpp
@@ -27,6 +27,7 @@
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
+#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
@@ -1092,3 +1093,14 @@
ASSERT_EQ(EPERM, pthread_mutex_unlock(&lock));
ASSERT_EQ(0, pthread_mutex_destroy(&lock));
}
+
+TEST(pthread, pthread_mutex_owner_tid_limit) {
+ FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
+ ASSERT_TRUE(fp != NULL);
+ long pid_max;
+ ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
+ fclose(fp);
+ // The current pthread_mutex implementation uses 16 bits to represent the owner tid.
+ // Change the implementation if we ever need to support tids above 65535.
+ ASSERT_LE(pid_max, 65536);
+}
diff --git a/tests/sys_stat_test.cpp b/tests/sys_stat_test.cpp
index e465774..7bbb7c6 100644
--- a/tests/sys_stat_test.cpp
+++ b/tests/sys_stat_test.cpp
@@ -95,3 +95,127 @@
ASSERT_EQ(0, fstat64(fd, &sb));
close(fd);
}
+
+TEST(sys_stat, fchmodat_EFAULT_file) {
+ ASSERT_EQ(-1, fchmodat(AT_FDCWD, (char *) 0x1, 0751, 0));
+ ASSERT_EQ(EFAULT, errno);
+}
+
+TEST(sys_stat, fchmodat_AT_SYMLINK_NOFOLLOW_EFAULT_file) {
+ ASSERT_EQ(-1, fchmodat(AT_FDCWD, (char *) 0x1, 0751, AT_SYMLINK_NOFOLLOW));
+#if defined(__BIONIC__)
+ ASSERT_EQ(EFAULT, errno);
+#else
+ // glibc 2.19 does not implement AT_SYMLINK_NOFOLLOW and always
+ // returns ENOTSUP
+ ASSERT_EQ(ENOTSUP, errno);
+#endif
+}
+
+TEST(sys_stat, fchmodat_bad_flags) {
+ ASSERT_EQ(-1, fchmodat(AT_FDCWD, "/blah", 0751, ~AT_SYMLINK_NOFOLLOW));
+ ASSERT_EQ(EINVAL, errno);
+}
+
+TEST(sys_stat, fchmodat_bad_flags_ALL) {
+ ASSERT_EQ(-1, fchmodat(AT_FDCWD, "/blah", 0751, ~0));
+ ASSERT_EQ(EINVAL, errno);
+}
+
+TEST(sys_stat, fchmodat_nonexistant_file) {
+ ASSERT_EQ(-1, fchmodat(AT_FDCWD, "/blah", 0751, 0));
+ ASSERT_EQ(ENOENT, errno);
+}
+
+TEST(sys_stat, fchmodat_AT_SYMLINK_NOFOLLOW_nonexistant_file) {
+ ASSERT_EQ(-1, fchmodat(AT_FDCWD, "/blah", 0751, AT_SYMLINK_NOFOLLOW));
+#if defined(__BIONIC__)
+ ASSERT_EQ(ENOENT, errno);
+#else
+ // glibc 2.19 does not implement AT_SYMLINK_NOFOLLOW and always
+ // returns ENOTSUP
+ ASSERT_EQ(ENOTSUP, errno);
+#endif
+}
+
+TEST(sys_stat, fchmodat_file) {
+ TemporaryFile tf;
+ struct stat sb;
+
+ ASSERT_EQ(0, fchmodat(AT_FDCWD, tf.filename, 0751, 0));
+ ASSERT_EQ(0, fstat(tf.fd, &sb));
+ ASSERT_TRUE(0751 == (sb.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)));
+}
+
+TEST(sys_stat, fchmodat_AT_SYMLINK_NOFOLLOW_file) {
+ TemporaryFile tf;
+ errno = 0;
+ int result = fchmodat(AT_FDCWD, tf.filename, 0751, AT_SYMLINK_NOFOLLOW);
+
+#if defined(__BIONIC__)
+ struct stat sb;
+ ASSERT_EQ(0, result);
+ ASSERT_EQ(0, errno);
+ ASSERT_EQ(0, fstat(tf.fd, &sb));
+ ASSERT_TRUE(0751 == (sb.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)));
+#else
+ // glibc 2.19 does not implement AT_SYMLINK_NOFOLLOW and always
+ // returns ENOTSUP
+ ASSERT_EQ(-1, result);
+ ASSERT_EQ(ENOTSUP, errno);
+#endif
+}
+
+TEST(sys_stat, fchmodat_symlink) {
+ TemporaryFile tf;
+ char linkname[255];
+ struct stat sb;
+
+ snprintf(linkname, sizeof(linkname), "%s.link", tf.filename);
+
+ ASSERT_EQ(0, symlink(tf.filename, linkname));
+ ASSERT_EQ(0, fchmodat(AT_FDCWD, linkname, 0751, 0));
+ ASSERT_EQ(0, fstat(tf.fd, &sb));
+ ASSERT_TRUE(0751 == (sb.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)));
+ unlink(linkname);
+}
+
+TEST(sys_stat, fchmodat_dangling_symlink) {
+ TemporaryFile tf;
+ char linkname[255];
+ char target[255];
+
+ snprintf(linkname, sizeof(linkname), "%s.link", tf.filename);
+ snprintf(target, sizeof(target), "%s.doesnotexist", tf.filename);
+
+ ASSERT_EQ(0, symlink(target, linkname));
+ ASSERT_EQ(-1, fchmodat(AT_FDCWD, linkname, 0751, 0));
+ ASSERT_EQ(ENOENT, errno);
+ unlink(linkname);
+}
+
+TEST(sys_stat, fchmodat_AT_SYMLINK_NOFOLLOW_with_symlink) {
+ TemporaryFile tf;
+ char linkname[255];
+
+ snprintf(linkname, sizeof(linkname), "%s.link", tf.filename);
+
+ ASSERT_EQ(0, symlink(tf.filename, linkname));
+ ASSERT_EQ(-1, fchmodat(AT_FDCWD, linkname, 0751, AT_SYMLINK_NOFOLLOW));
+ ASSERT_EQ(ENOTSUP, errno);
+ unlink(linkname);
+}
+
+TEST(sys_stat, fchmodat_AT_SYMLINK_NOFOLLOW_with_dangling_symlink) {
+ TemporaryFile tf;
+ char linkname[255];
+ char target[255];
+
+ snprintf(linkname, sizeof(linkname), "%s.link", tf.filename);
+ snprintf(target, sizeof(target), "%s.doesnotexist", tf.filename);
+
+ ASSERT_EQ(0, symlink(target, linkname));
+ ASSERT_EQ(-1, fchmodat(AT_FDCWD, linkname, 0751, AT_SYMLINK_NOFOLLOW));
+ ASSERT_EQ(ENOTSUP, errno);
+ unlink(linkname);
+}