Merge "Add signal handling in bionic gtest main."
diff --git a/libc/Android.mk b/libc/Android.mk
index fe7b116..fc4940a 100644
--- a/libc/Android.mk
+++ b/libc/Android.mk
@@ -516,6 +516,23 @@
libc_arch_static_src_files := \
bionic/dl_iterate_phdr_static.cpp \
+# Various kinds of LP32 cruft.
+# ========================================================
+libc_bionic_src_files_32 += \
+ bionic/mmap.cpp \
+
+libc_common_src_files_32 += \
+ bionic/legacy_32_bit_support.cpp \
+ bionic/ndk_cruft.cpp \
+ bionic/time64.c \
+
+libc_netbsd_src_files_32 += \
+ upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
+
+libc_openbsd_src_files_32 += \
+ upstream-openbsd/lib/libc/stdio/putw.c \
+
+
# Define some common cflags
# ========================================================
libc_common_cflags := \
@@ -572,12 +589,13 @@
$(LOCAL_PATH)/stdio \
# ========================================================
-# Add in the arch-specific flags.
+# Add in the arch-specific or 32-bit-specific flags.
# Must be called with $(eval).
# $(1): the LOCAL_ variable name
# $(2): the bionic variable name to pull in
define patch-up-arch-specific-flags
$(1)_$(TARGET_ARCH) += $($(2)_$(TARGET_ARCH))
+$(1)_32 += $($(2)_32)
ifdef TARGET_2ND_ARCH
$(1)_$(TARGET_2ND_ARCH) += $($(2)_$(TARGET_2ND_ARCH))
endif
@@ -1035,7 +1053,6 @@
LOCAL_SRC_FILES := \
$(libc_arch_static_src_files) \
- $(libc_static_common_src_files) \
bionic/libc_init_static.cpp
LOCAL_C_INCLUDES := $(libc_common_c_includes)
@@ -1087,7 +1104,6 @@
LOCAL_SRC_FILES := \
$(libc_arch_static_src_files) \
- $(libc_static_common_src_files) \
bionic/malloc_debug_common.cpp \
bionic/libc_init_static.cpp \
@@ -1122,7 +1138,6 @@
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_SRC_FILES := \
$(libc_arch_dynamic_src_files) \
- $(libc_static_common_src_files) \
bionic/malloc_debug_common.cpp \
bionic/libc_init_dynamic.cpp \
bionic/NetdClient.cpp \
@@ -1152,13 +1167,10 @@
# We'd really like to do this for all architectures, but since this wasn't done
# before, these symbols must continue to be exported on LP32 for binary
# compatibility.
-LOCAL_LDFLAGS_arm64 := -Wl,--exclude-libs,libgcc.a
-LOCAL_LDFLAGS_mips64 := -Wl,--exclude-libs,libgcc.a
-LOCAL_LDFLAGS_x86_64 := -Wl,--exclude-libs,libgcc.a
+LOCAL_LDFLAGS_64 := -Wl,--exclude-libs,libgcc.a
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_arch_dynamic_src_files))
-$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_static_common_src_files))
# special for arm
LOCAL_NO_CRT_arm := true
LOCAL_CFLAGS_arm += -DCRT_LEGACY_WORKAROUND
diff --git a/libc/SYSCALLS.TXT b/libc/SYSCALLS.TXT
index d68a00f..aae7de7 100644
--- a/libc/SYSCALLS.TXT
+++ b/libc/SYSCALLS.TXT
@@ -113,7 +113,7 @@
int __fcntl64:fcntl64(int, int, void*) arm,mips,x86
int fcntl(int, int, void*) arm64,mips64,x86_64
int flock(int, int) all
-int __fchmod:fchmod(int, mode_t) all
+int ___fchmod:fchmod(int, mode_t) all
int dup(int) all
int pipe2(int*, int) all
int dup3(int, int, int) all
@@ -131,7 +131,7 @@
int __openat:openat(int, const char*, int, mode_t) all
int faccessat(int, const char*, int, int) all
-int __fchmodat:fchmodat(int, const char*, mode_t) all
+int ___fchmodat:fchmodat(int, const char*, mode_t) all
int fchownat(int, const char*, uid_t, gid_t, int) all
int fstatat64|fstatat:fstatat64(int, const char*, struct stat*, int) arm,mips,x86
int fstatat64|fstatat:newfstatat(int, const char*, struct stat*, int) arm64,x86_64
diff --git a/libc/arch-arm/arm.mk b/libc/arch-arm/arm.mk
index f712c4c..60600e5 100644
--- a/libc/arch-arm/arm.mk
+++ b/libc/arch-arm/arm.mk
@@ -1,24 +1,6 @@
# 32-bit arm.
#
-# Various kinds of LP32 cruft.
-#
-
-libc_bionic_src_files_arm += \
- bionic/mmap.cpp \
-
-libc_common_src_files_arm += \
- bionic/legacy_32_bit_support.cpp \
- bionic/ndk_cruft.cpp \
- bionic/time64.c \
-
-libc_netbsd_src_files_arm += \
- upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
-
-libc_openbsd_src_files_arm += \
- upstream-openbsd/lib/libc/stdio/putw.c \
-
-#
# Default implementations of functions that are commonly optimized.
#
diff --git a/libc/arch-arm/syscalls/__fchmod.S b/libc/arch-arm/syscalls/___fchmod.S
similarity index 84%
rename from libc/arch-arm/syscalls/__fchmod.S
rename to libc/arch-arm/syscalls/___fchmod.S
index ff888a1..c6da4f8 100644
--- a/libc/arch-arm/syscalls/__fchmod.S
+++ b/libc/arch-arm/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmod)
+ENTRY(___fchmod)
mov ip, r7
ldr r7, =__NR_fchmod
swi #0
@@ -11,4 +11,5 @@
bxls lr
neg r0, r0
b __set_errno_internal
-END(__fchmod)
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-arm/syscalls/__fchmodat.S b/libc/arch-arm/syscalls/___fchmodat.S
similarity index 82%
rename from libc/arch-arm/syscalls/__fchmodat.S
rename to libc/arch-arm/syscalls/___fchmodat.S
index 4d10f00..91bbda5 100644
--- a/libc/arch-arm/syscalls/__fchmodat.S
+++ b/libc/arch-arm/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmodat)
+ENTRY(___fchmodat)
mov ip, r7
ldr r7, =__NR_fchmodat
swi #0
@@ -11,4 +11,5 @@
bxls lr
neg r0, r0
b __set_errno_internal
-END(__fchmodat)
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-arm64/arm64.mk b/libc/arch-arm64/arm64.mk
index ba78871..8418993 100644
--- a/libc/arch-arm64/arm64.mk
+++ b/libc/arch-arm64/arm64.mk
@@ -59,6 +59,6 @@
$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, denver64. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
endif
include $(cpu_variant_mk)
-libc_common_additional_dependencies += $(cpu_variank_mk)
+libc_common_additional_dependencies += $(cpu_variant_mk)
cpu_variant_mk :=
diff --git a/libc/arch-arm64/syscalls/__fchmod.S b/libc/arch-arm64/syscalls/___fchmod.S
similarity index 81%
rename from libc/arch-arm64/syscalls/__fchmod.S
rename to libc/arch-arm64/syscalls/___fchmod.S
index 05c67fc..a143c65 100644
--- a/libc/arch-arm64/syscalls/__fchmod.S
+++ b/libc/arch-arm64/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmod)
+ENTRY(___fchmod)
mov x8, __NR_fchmod
svc #0
@@ -11,5 +11,5 @@
b.hi __set_errno_internal
ret
-END(__fchmod)
-.hidden __fchmod
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-arm64/syscalls/__fchmodat.S b/libc/arch-arm64/syscalls/___fchmodat.S
similarity index 80%
rename from libc/arch-arm64/syscalls/__fchmodat.S
rename to libc/arch-arm64/syscalls/___fchmodat.S
index 2406ea8..1ab3736 100644
--- a/libc/arch-arm64/syscalls/__fchmodat.S
+++ b/libc/arch-arm64/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmodat)
+ENTRY(___fchmodat)
mov x8, __NR_fchmodat
svc #0
@@ -11,5 +11,5 @@
b.hi __set_errno_internal
ret
-END(__fchmodat)
-.hidden __fchmodat
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-mips/mips.mk b/libc/arch-mips/mips.mk
index 7f36635..7e3fe25 100644
--- a/libc/arch-mips/mips.mk
+++ b/libc/arch-mips/mips.mk
@@ -1,24 +1,6 @@
# 32-bit mips.
#
-# Various kinds of LP32 cruft.
-#
-
-libc_bionic_src_files_mips += \
- bionic/mmap.cpp \
-
-libc_common_src_files_mips += \
- bionic/legacy_32_bit_support.cpp \
- bionic/ndk_cruft.cpp \
- bionic/time64.c \
-
-libc_netbsd_src_files_mips += \
- upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
-
-libc_openbsd_src_files_mips += \
- upstream-openbsd/lib/libc/stdio/putw.c \
-
-#
# Default implementations of functions that are commonly optimized.
#
diff --git a/libc/arch-mips/syscalls/__fchmod.S b/libc/arch-mips/syscalls/___fchmod.S
similarity index 84%
rename from libc/arch-mips/syscalls/__fchmod.S
rename to libc/arch-mips/syscalls/___fchmod.S
index 9bc491c..ac102ec 100644
--- a/libc/arch-mips/syscalls/__fchmod.S
+++ b/libc/arch-mips/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmod)
+ENTRY(___fchmod)
.set noreorder
.cpload t9
li v0, __NR_fchmod
@@ -16,4 +16,5 @@
j t9
nop
.set reorder
-END(__fchmod)
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-mips/syscalls/__fchmodat.S b/libc/arch-mips/syscalls/___fchmodat.S
similarity index 82%
rename from libc/arch-mips/syscalls/__fchmodat.S
rename to libc/arch-mips/syscalls/___fchmodat.S
index 07ea8f8..d581efa 100644
--- a/libc/arch-mips/syscalls/__fchmodat.S
+++ b/libc/arch-mips/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmodat)
+ENTRY(___fchmodat)
.set noreorder
.cpload t9
li v0, __NR_fchmodat
@@ -16,4 +16,5 @@
j t9
nop
.set reorder
-END(__fchmodat)
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-mips64/syscalls/__fchmod.S b/libc/arch-mips64/syscalls/___fchmod.S
similarity index 87%
rename from libc/arch-mips64/syscalls/__fchmod.S
rename to libc/arch-mips64/syscalls/___fchmod.S
index 94dd0a1..7c16c54 100644
--- a/libc/arch-mips64/syscalls/__fchmod.S
+++ b/libc/arch-mips64/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmod)
+ENTRY(___fchmod)
.set push
.set noreorder
li v0, __NR_fchmod
@@ -22,5 +22,5 @@
j t9
move ra, t0
.set pop
-END(__fchmod)
-.hidden __fchmod
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-mips64/syscalls/__fchmodat.S b/libc/arch-mips64/syscalls/___fchmodat.S
similarity index 86%
rename from libc/arch-mips64/syscalls/__fchmodat.S
rename to libc/arch-mips64/syscalls/___fchmodat.S
index 79f453f..50f108e 100644
--- a/libc/arch-mips64/syscalls/__fchmodat.S
+++ b/libc/arch-mips64/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmodat)
+ENTRY(___fchmodat)
.set push
.set noreorder
li v0, __NR_fchmodat
@@ -22,5 +22,5 @@
j t9
move ra, t0
.set pop
-END(__fchmodat)
-.hidden __fchmodat
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-x86/syscalls/__fchmod.S b/libc/arch-x86/syscalls/___fchmod.S
similarity index 91%
rename from libc/arch-x86/syscalls/__fchmod.S
rename to libc/arch-x86/syscalls/___fchmod.S
index 7ad213e..119a695 100644
--- a/libc/arch-x86/syscalls/__fchmod.S
+++ b/libc/arch-x86/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmod)
+ENTRY(___fchmod)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -23,4 +23,5 @@
popl %ecx
popl %ebx
ret
-END(__fchmod)
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-x86/syscalls/__fchmodat.S b/libc/arch-x86/syscalls/___fchmodat.S
similarity index 91%
rename from libc/arch-x86/syscalls/__fchmodat.S
rename to libc/arch-x86/syscalls/___fchmodat.S
index f03c03f..b15bb64 100644
--- a/libc/arch-x86/syscalls/__fchmodat.S
+++ b/libc/arch-x86/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmodat)
+ENTRY(___fchmodat)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -28,4 +28,5 @@
popl %ecx
popl %ebx
ret
-END(__fchmodat)
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/arch-x86/x86.mk b/libc/arch-x86/x86.mk
index 989690c..e5d70a9 100644
--- a/libc/arch-x86/x86.mk
+++ b/libc/arch-x86/x86.mk
@@ -1,24 +1,6 @@
# 32-bit x86.
#
-# Various kinds of LP32 cruft.
-#
-
-libc_bionic_src_files_x86 += \
- bionic/mmap.cpp \
-
-libc_common_src_files_x86 += \
- bionic/legacy_32_bit_support.cpp \
- bionic/ndk_cruft.cpp \
- bionic/time64.c \
-
-libc_netbsd_src_files_x86 += \
- upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
-
-libc_openbsd_src_files_x86 += \
- upstream-openbsd/lib/libc/stdio/putw.c \
-
-#
# Default implementations of functions that are commonly optimized.
#
diff --git a/libc/arch-x86_64/syscalls/__fchmod.S b/libc/arch-x86_64/syscalls/___fchmod.S
similarity index 83%
rename from libc/arch-x86_64/syscalls/__fchmod.S
rename to libc/arch-x86_64/syscalls/___fchmod.S
index ba75f74..7bccbef 100644
--- a/libc/arch-x86_64/syscalls/__fchmod.S
+++ b/libc/arch-x86_64/syscalls/___fchmod.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmod)
+ENTRY(___fchmod)
movl $__NR_fchmod, %eax
syscall
cmpq $-MAX_ERRNO, %rax
@@ -12,5 +12,5 @@
call __set_errno_internal
1:
ret
-END(__fchmod)
-.hidden __fchmod
+END(___fchmod)
+.hidden ___fchmod
diff --git a/libc/arch-x86_64/syscalls/__fchmodat.S b/libc/arch-x86_64/syscalls/___fchmodat.S
similarity index 82%
rename from libc/arch-x86_64/syscalls/__fchmodat.S
rename to libc/arch-x86_64/syscalls/___fchmodat.S
index a8fae95..483ec7d 100644
--- a/libc/arch-x86_64/syscalls/__fchmodat.S
+++ b/libc/arch-x86_64/syscalls/___fchmodat.S
@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
-ENTRY(__fchmodat)
+ENTRY(___fchmodat)
movl $__NR_fchmodat, %eax
syscall
cmpq $-MAX_ERRNO, %rax
@@ -12,5 +12,5 @@
call __set_errno_internal
1:
ret
-END(__fchmodat)
-.hidden __fchmodat
+END(___fchmodat)
+.hidden ___fchmodat
diff --git a/libc/bionic/fchmod.cpp b/libc/bionic/fchmod.cpp
index 6e020b6..ace8c6b 100644
--- a/libc/bionic/fchmod.cpp
+++ b/libc/bionic/fchmod.cpp
@@ -33,11 +33,11 @@
#include <unistd.h>
#include <stdio.h>
-extern "C" int __fchmod(int, mode_t);
+extern "C" int ___fchmod(int, mode_t);
int fchmod(int fd, mode_t mode) {
int saved_errno = errno;
- int result = __fchmod(fd, mode);
+ int result = ___fchmod(fd, mode);
if ((result == 0) || (errno != EBADF)) {
return result;
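
The rename to three leading underscores pairs with the gensyscalls.py change later in this patch: stubs whose names begin with three underscores always get hidden visibility, so only the C++ wrapper here stays exported. A rough sketch of the wrapper-plus-fallback shape follows; it is illustrative only, fchmod_sketch is a hypothetical name, and the EBADF retry through /proc/self/fd is an assumption about code below this hunk rather than something shown in it.

    #include <errno.h>
    #include <stdio.h>
    #include <sys/stat.h>

    extern "C" int ___fchmod(int, mode_t);  // the hidden syscall stub renamed above

    // Illustrative sketch, not the actual bionic implementation.
    static int fchmod_sketch(int fd, mode_t mode) {
      int saved_errno = errno;
      int result = ___fchmod(fd, mode);
      if (result == 0 || errno != EBADF) {
        return result;
      }
      // Assumed fallback: the kernel rejects fchmod(2) on O_PATH descriptors with
      // EBADF, so retry through the descriptor's /proc/self/fd entry instead.
      errno = saved_errno;
      char path[40];
      snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
      return chmod(path, mode);
    }
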
diff --git a/libc/bionic/fchmodat.cpp b/libc/bionic/fchmodat.cpp
index c28e15a..1f83c4b 100644
--- a/libc/bionic/fchmodat.cpp
+++ b/libc/bionic/fchmodat.cpp
@@ -34,7 +34,7 @@
#include "private/ErrnoRestorer.h"
-extern "C" int __fchmodat(int, const char*, mode_t);
+extern "C" int ___fchmodat(int, const char*, mode_t);
int fchmodat(int dirfd, const char* pathname, mode_t mode, int flags) {
if ((flags & ~AT_SYMLINK_NOFOLLOW) != 0) {
@@ -63,5 +63,5 @@
return result;
}
- return __fchmodat(dirfd, pathname, mode);
+ return ___fchmodat(dirfd, pathname, mode);
}
diff --git a/libc/bionic/pthread_mutex.cpp b/libc/bionic/pthread_mutex.cpp
index 40f1ed2..83d6b54 100644
--- a/libc/bionic/pthread_mutex.cpp
+++ b/libc/bionic/pthread_mutex.cpp
@@ -30,22 +30,19 @@
#include <errno.h>
#include <limits.h>
+#include <stdatomic.h>
+#include <sys/cdefs.h>
#include <sys/mman.h>
#include <unistd.h>
#include "pthread_internal.h"
-#include "private/bionic_atomic_inline.h"
#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
+#include "private/bionic_systrace.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"
-#include "private/bionic_systrace.h"
-
-extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex);
-extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex);
-
/* a mutex is implemented as a 32-bit integer holding the following fields
*
* bits: name description
@@ -87,9 +84,6 @@
#define MUTEX_STATE_LOCKED_UNCONTENDED 1 /* must be 1 due to atomic dec in unlock operation */
#define MUTEX_STATE_LOCKED_CONTENDED 2 /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */
-#define MUTEX_STATE_FROM_BITS(v) FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
-#define MUTEX_STATE_TO_BITS(v) FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
-
#define MUTEX_STATE_BITS_UNLOCKED MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define MUTEX_STATE_BITS_LOCKED_UNCONTENDED MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define MUTEX_STATE_BITS_LOCKED_CONTENDED MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)
@@ -116,10 +110,7 @@
#define MUTEX_COUNTER_BITS_IS_ZERO(v) (((v) & MUTEX_COUNTER_MASK) == 0)
/* Used to increment the counter directly after overflow has been checked */
-#define MUTEX_COUNTER_BITS_ONE FIELD_TO_BITS(1,MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)
-
-/* Returns true iff the counter is 0 */
-#define MUTEX_COUNTER_BITS_ARE_ZERO(v) (((v) & MUTEX_COUNTER_MASK) == 0)
+#define MUTEX_COUNTER_BITS_ONE FIELD_TO_BITS(1, MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)
/* Mutex shared bit flag
*
@@ -159,30 +150,9 @@
/* Mutex owner field:
*
* This is only used for recursive and errorcheck mutexes. It holds the
- * tid of the owning thread. Note that this works because the Linux
- * kernel _only_ uses 16-bit values for tids.
- *
- * More specifically, it will wrap to 10000 when it reaches over 32768 for
- * application processes. You can check this by running the following inside
- * an adb shell session:
- *
- OLDPID=$$;
- while true; do
- NEWPID=$(sh -c 'echo $$')
- if [ "$NEWPID" -gt 32768 ]; then
- echo "AARGH: new PID $NEWPID is too high!"
- exit 1
- fi
- if [ "$NEWPID" -lt "$OLDPID" ]; then
- echo "****** Wrapping from PID $OLDPID to $NEWPID. *******"
- else
- echo -n "$NEWPID!"
- fi
- OLDPID=$NEWPID
- done
-
- * Note that you can run the same example on a desktop Linux system,
- * the wrapping will also happen at 32768, but will go back to 300 instead.
+ * tid of the owning thread. We use 16 bits to represent the tid here,
+ * so the highest supported tid is 65535. There is a test that checks
+ * /proc/sys/kernel/pid_max to make sure it does not exceed this limit.
*/
#define MUTEX_OWNER_SHIFT 16
#define MUTEX_OWNER_LEN 16
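
A standalone illustration of the 16-bit owner-tid packing described in the comment above; the helpers below are hypothetical stand-ins for the MUTEX_OWNER_TO_BITS/MUTEX_OWNER_FROM_BITS macros and only assume the shift and length constants shown here.

    #include <cstdint>

    constexpr int kOwnerShift = 16;   // mirrors MUTEX_OWNER_SHIFT
    constexpr int kOwnerLen = 16;     // mirrors MUTEX_OWNER_LEN
    constexpr uint32_t kOwnerMask = ((1u << kOwnerLen) - 1) << kOwnerShift;

    constexpr uint32_t owner_to_bits(uint32_t tid) { return (tid << kOwnerShift) & kOwnerMask; }
    constexpr uint32_t owner_from_bits(uint32_t v) { return (v & kOwnerMask) >> kOwnerShift; }

    static_assert(owner_from_bits(owner_to_bits(65535)) == 65535, "16-bit tids round-trip");
    static_assert(owner_from_bits(owner_to_bits(65536)) == 0, "tids above 65535 would be truncated");
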
@@ -267,9 +237,20 @@
return 0;
}
+static inline atomic_int* MUTEX_TO_ATOMIC_POINTER(pthread_mutex_t* mutex) {
+ static_assert(sizeof(atomic_int) == sizeof(mutex->value),
+ "mutex->value should actually be atomic_int in implementation.");
+
+  // We prefer casting to atomic_int instead of declaring mutex->value to be atomic_int directly,
+  // because the latter pollutes pthread.h and causes an error when compiling libcxx.
+ return reinterpret_cast<atomic_int*>(&mutex->value);
+}
+
int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+
if (__predict_true(attr == NULL)) {
- mutex->value = MUTEX_TYPE_BITS_NORMAL;
+ atomic_init(mutex_value_ptr, MUTEX_TYPE_BITS_NORMAL);
return 0;
}
@@ -292,13 +273,13 @@
return EINVAL;
}
- mutex->value = value;
+ atomic_init(mutex_value_ptr, value);
return 0;
}
/*
- * Lock a non-recursive mutex.
+ * Lock a mutex of type NORMAL.
*
* As noted above, there are three states:
* 0 (unlocked, no contention)
@@ -309,96 +290,75 @@
* "type" value is zero, so the only bits that will be set are the ones in
* the lock state field.
*/
-static inline void _normal_lock(pthread_mutex_t* mutex, int shared) {
+static inline void _normal_mutex_lock(atomic_int* mutex_value_ptr, int shared) {
/* convenience shortcuts */
const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- /*
- * The common case is an unlocked mutex, so we begin by trying to
- * change the lock's state from 0 (UNLOCKED) to 1 (LOCKED).
- * __bionic_cmpxchg() returns 0 if it made the swap successfully.
- * If the result is nonzero, this lock is already held by another thread.
- */
- if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) != 0) {
- const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
- /*
- * We want to go to sleep until the mutex is available, which
- * requires promoting it to state 2 (CONTENDED). We need to
- * swap in the new state value and then wait until somebody wakes us up.
- *
- * __bionic_swap() returns the previous value. We swap 2 in and
- * see if we got zero back; if so, we have acquired the lock. If
- * not, another thread still holds the lock and we wait again.
- *
- * The second argument to the __futex_wait() call is compared
- * against the current value. If it doesn't match, __futex_wait()
- * returns immediately (otherwise, it sleeps for a time specified
- * by the third argument; 0 means sleep forever). This ensures
- * that the mutex is in state 2 when we go to sleep on it, which
- * guarantees a wake-up call.
- */
- ScopedTrace trace("Contending for pthread mutex");
-
-
- while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
- __futex_wait_ex(&mutex->value, shared, locked_contended, NULL);
- }
+ // The common case is an unlocked mutex, so we begin by trying to
+ // change the lock's state from unlocked to locked_uncontended.
+  // If the exchange succeeds, an acquire fence is required to make
+  // all memory accesses made by other threads visible to the current CPU.
+ int mvalue = unlocked;
+ if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
+ locked_uncontended,
+ memory_order_acquire,
+ memory_order_relaxed))) {
+ return;
}
- ANDROID_MEMBAR_FULL();
+
+ ScopedTrace trace("Contending for pthread mutex");
+
+ // We want to go to sleep until the mutex is available, which requires
+ // promoting it to locked_contended. We need to swap in the new state
+ // value and then wait until somebody wakes us up.
+ // An atomic_exchange is used to compete with other threads for the lock.
+  // If it returns unlocked, we have acquired the lock; otherwise another
+  // thread still holds the lock and we should wait again.
+  // If the lock is acquired, an acquire fence is needed to make all memory
+  // accesses made by other threads visible to the current CPU.
+ const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+ while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
+ memory_order_acquire) != unlocked) {
+
+ __futex_wait_ex(mutex_value_ptr, shared, locked_contended, NULL);
+ }
}
/*
- * Release a non-recursive mutex. The caller is responsible for determining
+ * Release a mutex of type NORMAL. The caller is responsible for determining
* that we are in fact the owner of this lock.
*/
-static inline void _normal_unlock(pthread_mutex_t* mutex, int shared) {
- ANDROID_MEMBAR_FULL();
+static inline void _normal_mutex_unlock(atomic_int* mutex_value_ptr, int shared) {
+ const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
+ const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
- /*
- * The mutex state will be 1 or (rarely) 2. We use an atomic decrement
- * to release the lock. __bionic_atomic_dec() returns the previous value;
- * if it wasn't 1 we have to do some additional work.
- */
- if (__bionic_atomic_dec(&mutex->value) != (shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED)) {
- /*
- * Start by releasing the lock. The decrement changed it from
- * "contended lock" to "uncontended lock", which means we still
- * hold it, and anybody who tries to sneak in will push it back
- * to state 2.
- *
- * Once we set it to zero the lock is up for grabs. We follow
- * this with a __futex_wake() to ensure that one of the waiting
- * threads has a chance to grab it.
- *
- * This doesn't cause a race with the swap/wait pair in
- * _normal_lock(), because the __futex_wait() call there will
- * return immediately if the mutex value isn't 2.
- */
- mutex->value = shared;
-
- /*
- * Wake up one waiting thread. We don't know which thread will be
- * woken or when it'll start executing -- futexes make no guarantees
- * here. There may not even be a thread waiting.
- *
- * The newly-woken thread will replace the 0 we just set above
- * with 2, which means that when it eventually releases the mutex
- * it will also call FUTEX_WAKE. This results in one extra wake
- * call whenever a lock is contended, but lets us avoid forgetting
- * anyone without requiring us to track the number of sleepers.
- *
- * It's possible for another thread to sneak in and grab the lock
- * between the zero assignment above and the wake call below. If
- * the new thread is "slow" and holds the lock for a while, we'll
- * wake up a sleeper, which will swap in a 2 and then go back to
- * sleep since the lock is still held. If the new thread is "fast",
- * running to completion before we call wake, the thread we
- * eventually wake will find an unlocked mutex and will execute.
- * Either way we have correct behavior and nobody is orphaned on
- * the wait queue.
- */
- __futex_wake_ex(&mutex->value, shared, 1);
+  // We use an atomic_exchange to release the lock. If the locked_contended state
+  // is returned, some thread is waiting for the lock and we need to wake up
+  // one of them.
+  // A release fence is required to make previous stores visible to the next
+  // lock owner.
+ if (atomic_exchange_explicit(mutex_value_ptr, unlocked,
+ memory_order_release) == locked_contended) {
+ // Wake up one waiting thread. We don't know which thread will be
+ // woken or when it'll start executing -- futexes make no guarantees
+ // here. There may not even be a thread waiting.
+ //
+ // The newly-woken thread will replace the unlocked state we just set above
+ // with locked_contended state, which means that when it eventually releases
+ // the mutex it will also call FUTEX_WAKE. This results in one extra wake
+    // call whenever a lock is contended, but lets us avoid forgetting anyone
+ // without requiring us to track the number of sleepers.
+ //
+ // It's possible for another thread to sneak in and grab the lock between
+ // the exchange above and the wake call below. If the new thread is "slow"
+ // and holds the lock for a while, we'll wake up a sleeper, which will swap
+ // in locked_uncontended state and then go back to sleep since the lock is
+ // still held. If the new thread is "fast", running to completion before
+ // we call wake, the thread we eventually wake will find an unlocked mutex
+ // and will execute. Either way we have correct behavior and nobody is
+ // orphaned on the wait queue.
+ __futex_wake_ex(mutex_value_ptr, shared, 1);
}
}
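
The acquire/release pairing used throughout the converted code follows the usual C11 pattern: writes made by the unlocking thread before its release operation become visible to whichever thread's acquire CAS or exchange observes the unlocked value. A minimal sketch of that pattern in isolation (not bionic code; all names here are made up):

    #include <stdatomic.h>

    static atomic_int lock_word;    // 0 = unlocked, 1 = locked
    static int protected_data;

    static void sketch_lock(void) {
      int expected = 0;
      while (!atomic_compare_exchange_weak_explicit(&lock_word, &expected, 1,
                                                    memory_order_acquire,
                                                    memory_order_relaxed)) {
        expected = 0;  // the failed CAS stored the observed value here; reset and retry
      }
    }

    static void sketch_unlock(void) {
      atomic_store_explicit(&lock_word, 0, memory_order_release);  // publishes protected_data
    }

    static void sketch_increment(void) {
      sketch_lock();
      ++protected_data;   // made visible to the next locker by the release store above
      sketch_unlock();
    }
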
@@ -414,183 +374,175 @@
* mvalue is the current mutex value (already loaded)
 * mutex      points to the mutex.
*/
-static inline __always_inline int _recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype) {
+static inline __always_inline
+int _recursive_increment(atomic_int* mutex_value_ptr, int mvalue, int mtype) {
if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
- /* trying to re-lock a mutex we already acquired */
+ // Trying to re-lock a mutex we already acquired.
return EDEADLK;
}
- /* Detect recursive lock overflow and return EAGAIN.
- * This is safe because only the owner thread can modify the
- * counter bits in the mutex value.
- */
+ // Detect recursive lock overflow and return EAGAIN.
+ // This is safe because only the owner thread can modify the
+ // counter bits in the mutex value.
if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
return EAGAIN;
}
- /* We own the mutex, but other threads are able to change
- * the lower bits (e.g. promoting it to "contended"), so we
- * need to use an atomic cmpxchg loop to update the counter.
- */
- for (;;) {
- /* increment counter, overflow was already checked */
- int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
- if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
- /* mutex is still locked, not need for a memory barrier */
- return 0;
- }
- /* the value was changed, this happens when another thread changes
- * the lower state bits from 1 to 2 to indicate contention. This
- * cannot change the counter, so simply reload and try again.
- */
- mvalue = mutex->value;
- }
+ // We own the mutex, but other threads are able to change the lower bits
+  // (e.g. promoting it to "contended"), so we need to use a compare-exchange
+ // loop to update the counter. The counter will not overflow in the loop,
+ // as only the owner thread can change it.
+ // The mutex is still locked, so we don't need a release fence.
+ while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
+ mvalue + MUTEX_COUNTER_BITS_ONE,
+ memory_order_relaxed,
+ memory_order_relaxed)) { }
+ return 0;
}
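
The compare-exchange loops above also lean on a stdatomic detail worth calling out: on failure, atomic_compare_exchange_weak_explicit writes the currently stored value back into its 'expected' argument, so passing &mvalue makes the loop reload automatically. A tiny illustrative helper, not bionic code, with add_relaxed as a hypothetical name:

    #include <stdatomic.h>

    // Add 'delta' to an atomic word while other threads may concurrently change
    // other bits; no explicit reload is needed because a failed CAS refreshes
    // 'observed' with the current value.
    static void add_relaxed(atomic_int* word, int delta) {
      int observed = atomic_load_explicit(word, memory_order_relaxed);
      while (!atomic_compare_exchange_weak_explicit(word, &observed, observed + delta,
                                                    memory_order_relaxed,
                                                    memory_order_relaxed)) {
      }
    }
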
int pthread_mutex_lock(pthread_mutex_t* mutex) {
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+
int mvalue, mtype, tid, shared;
- mvalue = mutex->value;
+ mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
mtype = (mvalue & MUTEX_TYPE_MASK);
shared = (mvalue & MUTEX_SHARED_MASK);
- /* Handle non-recursive case first */
+ // Handle common case first.
if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
- _normal_lock(mutex, shared);
+ _normal_mutex_lock(mutex_value_ptr, shared);
return 0;
}
- /* Do we already own this recursive or error-check mutex ? */
+ // Do we already own this recursive or error-check mutex?
tid = __get_thread()->tid;
if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
- return _recursive_increment(mutex, mvalue, mtype);
+ return _recursive_increment(mutex_value_ptr, mvalue, mtype);
- /* Add in shared state to avoid extra 'or' operations below */
+ // Add in shared state to avoid extra 'or' operations below.
mtype |= shared;
- /* First, if the mutex is unlocked, try to quickly acquire it.
- * In the optimistic case where this works, set the state to 1 to
- * indicate locked with no contention */
+ // First, if the mutex is unlocked, try to quickly acquire it.
+ // In the optimistic case where this works, set the state to locked_uncontended.
if (mvalue == mtype) {
int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- if (__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0) {
- ANDROID_MEMBAR_FULL();
+      // If the exchange succeeds, an acquire fence is required to make
+      // all memory accesses made by other threads visible to the current CPU.
+ if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
+ newval, memory_order_acquire, memory_order_relaxed))) {
return 0;
}
- /* argh, the value changed, reload before entering the loop */
- mvalue = mutex->value;
}
ScopedTrace trace("Contending for pthread mutex");
- for (;;) {
- int newval;
-
- /* if the mutex is unlocked, its value should be 'mtype' and
- * we try to acquire it by setting its owner and state atomically.
- * NOTE: We put the state to 2 since we _know_ there is contention
- * when we are in this loop. This ensures all waiters will be
- * unlocked.
- */
+ while (true) {
if (mvalue == mtype) {
- newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
- /* TODO: Change this to __bionic_cmpxchg_acquire when we
- * implement it to get rid of the explicit memory
- * barrier below.
- */
- if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
- mvalue = mutex->value;
- continue;
- }
- ANDROID_MEMBAR_FULL();
- return 0;
- }
+ // If the mutex is unlocked, its value should be 'mtype' and
+ // we try to acquire it by setting its owner and state atomically.
+        // NOTE: We set the state to locked_contended since we _know_ there
+ // is contention when we are in this loop. This ensures all waiters
+ // will be unlocked.
- /* the mutex is already locked by another thread, if its state is 1
- * we will change it to 2 to indicate contention. */
- if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
- newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
- if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
- mvalue = mutex->value;
+ int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+          // If the exchange succeeds, an acquire fence is required to make
+          // all memory accesses made by other threads visible to the current CPU.
+ if (__predict_true(atomic_compare_exchange_weak_explicit(mutex_value_ptr,
+ &mvalue, newval,
+ memory_order_acquire,
+ memory_order_relaxed))) {
+ return 0;
+ }
+ continue;
+ } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
+        // The mutex is already locked by another thread. If the state is locked_uncontended,
+        // we should set it to locked_contended before going to sleep. This makes
+        // sure waiters will be woken up eventually.
+
+ int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
+ if (__predict_false(!atomic_compare_exchange_weak_explicit(mutex_value_ptr,
+ &mvalue, newval,
+ memory_order_relaxed,
+ memory_order_relaxed))) {
continue;
}
mvalue = newval;
}
- /* wait until the mutex is unlocked */
- __futex_wait_ex(&mutex->value, shared, mvalue, NULL);
-
- mvalue = mutex->value;
+      // We are in the locked_contended state, so sleep until someone wakes us up.
+ __futex_wait_ex(mutex_value_ptr, shared, mvalue, NULL);
+ mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
}
- /* NOTREACHED */
}
int pthread_mutex_unlock(pthread_mutex_t* mutex) {
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+
int mvalue, mtype, tid, shared;
- mvalue = mutex->value;
+ mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
mtype = (mvalue & MUTEX_TYPE_MASK);
shared = (mvalue & MUTEX_SHARED_MASK);
- /* Handle common case first */
+ // Handle common case first.
if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
- _normal_unlock(mutex, shared);
+ _normal_mutex_unlock(mutex_value_ptr, shared);
return 0;
}
- /* Do we already own this recursive or error-check mutex ? */
+ // Do we already own this recursive or error-check mutex?
tid = __get_thread()->tid;
if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
return EPERM;
- /* If the counter is > 0, we can simply decrement it atomically.
- * Since other threads can mutate the lower state bits (and only the
- * lower state bits), use a cmpxchg to do it.
- */
+ // If the counter is > 0, we can simply decrement it atomically.
+ // Since other threads can mutate the lower state bits (and only the
+ // lower state bits), use a compare_exchange loop to do it.
if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
- for (;;) {
- int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
- if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
- /* success: we still own the mutex, so no memory barrier */
- return 0;
- }
- /* the value changed, so reload and loop */
- mvalue = mutex->value;
- }
+ // We still own the mutex, so a release fence is not needed.
+ while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
+ mvalue - MUTEX_COUNTER_BITS_ONE,
+ memory_order_relaxed,
+ memory_order_relaxed)) { }
+ return 0;
}
- /* the counter is 0, so we're going to unlock the mutex by resetting
- * its value to 'unlocked'. We need to perform a swap in order
- * to read the current state, which will be 2 if there are waiters
- * to awake.
- *
- * TODO: Change this to __bionic_swap_release when we implement it
- * to get rid of the explicit memory barrier below.
- */
- ANDROID_MEMBAR_FULL(); /* RELEASE BARRIER */
- mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);
-
- /* Wake one waiting thread, if any */
+    // The counter is 0, so we are going to unlock the mutex by resetting its
+    // state to unlocked. We need to perform an atomic_exchange in order to read
+    // the current state, which will be locked_contended if there are waiters
+    // to wake.
+    // A release fence is required to make previous stores visible to the next
+    // lock owner.
+ mvalue = atomic_exchange_explicit(mutex_value_ptr,
+ mtype | shared | MUTEX_STATE_BITS_UNLOCKED,
+ memory_order_release);
if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
- __futex_wake_ex(&mutex->value, shared, 1);
+ __futex_wake_ex(mutex_value_ptr, shared, 1);
}
+
return 0;
}
int pthread_mutex_trylock(pthread_mutex_t* mutex) {
- int mvalue = mutex->value;
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+
+ int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
int mtype = (mvalue & MUTEX_TYPE_MASK);
int shared = (mvalue & MUTEX_SHARED_MASK);
// Handle common case first.
if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
- if (__bionic_cmpxchg(shared|MUTEX_STATE_BITS_UNLOCKED,
- shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
- &mutex->value) == 0) {
- ANDROID_MEMBAR_FULL();
+ mvalue = shared | MUTEX_STATE_BITS_UNLOCKED;
+    // If the exchange succeeds, an acquire fence is required to make
+    // all memory accesses made by other threads visible to the current CPU.
+ if (atomic_compare_exchange_strong_explicit(mutex_value_ptr,
+ &mvalue,
+ shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
+ memory_order_acquire,
+ memory_order_relaxed)) {
return 0;
}
-
return EBUSY;
}
@@ -600,158 +552,163 @@
if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
return EBUSY;
}
- return _recursive_increment(mutex, mvalue, mtype);
+ return _recursive_increment(mutex_value_ptr, mvalue, mtype);
}
- /* Same as pthread_mutex_lock, except that we don't want to wait, and
- * the only operation that can succeed is a single cmpxchg to acquire the
- * lock if it is released / not owned by anyone. No need for a complex loop.
- */
+ // Same as pthread_mutex_lock, except that we don't want to wait, and
+ // the only operation that can succeed is a single compare_exchange to acquire the
+ // lock if it is released / not owned by anyone. No need for a complex loop.
+  // If the exchange succeeds, an acquire fence is required to make
+  // all memory accesses made by other threads visible to the current CPU.
mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
- ANDROID_MEMBAR_FULL();
+ if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
+ &mtype, mvalue,
+ memory_order_acquire,
+ memory_order_relaxed))) {
return 0;
}
-
return EBUSY;
}
static int __pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_ts, clockid_t clock) {
- timespec ts;
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
- int mvalue = mutex->value;
- int mtype = (mvalue & MUTEX_TYPE_MASK);
- int shared = (mvalue & MUTEX_SHARED_MASK);
+ timespec ts;
- // Handle common case first.
- if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
- const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
- const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+ int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
+ int mtype = (mvalue & MUTEX_TYPE_MASK);
+ int shared = (mvalue & MUTEX_SHARED_MASK);
- // Fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0.
- if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
- ANDROID_MEMBAR_FULL();
- return 0;
+ // Handle common case first.
+ if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
+ const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
+ const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
+ const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+
+    // If the exchange succeeds, an acquire fence is required to make
+    // all memory accesses made by other threads visible to the current CPU.
+ mvalue = unlocked;
+ if (atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, locked_uncontended,
+ memory_order_acquire, memory_order_relaxed)) {
+ return 0;
+ }
+
+ ScopedTrace trace("Contending for timed pthread mutex");
+
+ // Same as pthread_mutex_lock, except that we can only wait for a specified
+    // time interval. If the lock is acquired, an acquire fence is needed to make
+    // all memory accesses made by other threads visible to the current CPU.
+ while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
+ memory_order_acquire) != unlocked) {
+ if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
+ return ETIMEDOUT;
+ }
+ __futex_wait_ex(mutex_value_ptr, shared, locked_contended, &ts);
+ }
+
+ return 0;
+ }
+
+ // Do we already own this recursive or error-check mutex?
+ pid_t tid = __get_thread()->tid;
+ if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
+ return _recursive_increment(mutex_value_ptr, mvalue, mtype);
+ }
+
+ mtype |= shared;
+
+ // First try a quick lock.
+ if (mvalue == mtype) {
+ int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
+    // If the exchange succeeds, an acquire fence is required to make
+    // all memory accesses made by other threads visible to the current CPU.
+ if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
+ &mvalue, newval,
+ memory_order_acquire,
+ memory_order_relaxed))) {
+ return 0;
+ }
}
ScopedTrace trace("Contending for timed pthread mutex");
- // Loop while needed.
- while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
- if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
- return ETIMEDOUT;
- }
- __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
+ // The following implements the same loop as pthread_mutex_lock,
+ // but adds checks to ensure that the operation never exceeds the
+ // absolute expiration time.
+ while (true) {
+ if (mvalue == mtype) { // Unlocked.
+ int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+      // An acquire fence is needed when the exchange succeeds.
+ if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
+ memory_order_acquire,
+ memory_order_relaxed)) {
+ goto check_time;
+ }
+
+ return 0;
+ } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
+ // The value is locked. If the state is locked_uncontended, we need to switch
+        // it to locked_contended before sleeping, so we can get woken up later.
+ int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
+ if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
+ memory_order_relaxed,
+ memory_order_relaxed)) {
+ goto check_time;
+ }
+ mvalue = newval;
+ }
+
+ if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
+ return ETIMEDOUT;
+ }
+
+ if (__futex_wait_ex(mutex_value_ptr, shared, mvalue, &ts) == -ETIMEDOUT) {
+ return ETIMEDOUT;
+ }
+
+check_time:
+ if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
+ return ETIMEDOUT;
+ }
+    // After futex_wait or the time-costly timespec_from_absolute_timespec,
+    // we'd better read mvalue again in case it has changed.
+ mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
}
- ANDROID_MEMBAR_FULL();
- return 0;
- }
-
- // Do we already own this recursive or error-check mutex?
- pid_t tid = __get_thread()->tid;
- if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
- return _recursive_increment(mutex, mvalue, mtype);
- }
-
- // The following implements the same loop as pthread_mutex_lock_impl
- // but adds checks to ensure that the operation never exceeds the
- // absolute expiration time.
- mtype |= shared;
-
- // First try a quick lock.
- if (mvalue == mtype) {
- mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
- if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
- ANDROID_MEMBAR_FULL();
- return 0;
- }
- mvalue = mutex->value;
- }
-
- ScopedTrace trace("Contending for timed pthread mutex");
-
- while (true) {
- // If the value is 'unlocked', try to acquire it directly.
- // NOTE: put state to 2 since we know there is contention.
- if (mvalue == mtype) { // Unlocked.
- mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
- if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
- ANDROID_MEMBAR_FULL();
- return 0;
- }
- // The value changed before we could lock it. We need to check
- // the time to avoid livelocks, reload the value, then loop again.
- if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
- return ETIMEDOUT;
- }
-
- mvalue = mutex->value;
- continue;
- }
-
- // The value is locked. If 'uncontended', try to switch its state
- // to 'contented' to ensure we get woken up later.
- if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
- int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
- if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
- // This failed because the value changed, reload it.
- mvalue = mutex->value;
- } else {
- // This succeeded, update mvalue.
- mvalue = newval;
- }
- }
-
- // Check time and update 'ts'.
- if (timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
- return ETIMEDOUT;
- }
-
- // Only wait to be woken up if the state is '2', otherwise we'll
- // simply loop right now. This can happen when the second cmpxchg
- // in our loop failed because the mutex was unlocked by another thread.
- if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
- if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == -ETIMEDOUT) {
- return ETIMEDOUT;
- }
- mvalue = mutex->value;
- }
- }
- /* NOTREACHED */
}
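
For readers without the private headers: timespec_from_absolute_timespec() (from private/bionic_time_conversions.h) is assumed here to convert an absolute deadline on the given clock into the remaining relative interval, returning false once the deadline has passed. A hypothetical re-implementation, for illustration only, under that assumption:

    #include <time.h>

    static bool remaining_from_absolute(timespec& out, const timespec& abs_ts, clockid_t clock) {
      timespec now;
      clock_gettime(clock, &now);
      out.tv_sec = abs_ts.tv_sec - now.tv_sec;
      out.tv_nsec = abs_ts.tv_nsec - now.tv_nsec;
      if (out.tv_nsec < 0) {
        out.tv_nsec += 1000000000;  // NS_PER_S
        --out.tv_sec;
      }
      return out.tv_sec >= 0;  // false means the deadline already expired (callers return ETIMEDOUT)
    }
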
#if !defined(__LP64__)
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms) {
- timespec abs_timeout;
- clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
- abs_timeout.tv_sec += ms / 1000;
- abs_timeout.tv_nsec += (ms % 1000) * 1000000;
- if (abs_timeout.tv_nsec >= NS_PER_S) {
- abs_timeout.tv_sec++;
- abs_timeout.tv_nsec -= NS_PER_S;
- }
+ timespec abs_timeout;
+ clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
+ abs_timeout.tv_sec += ms / 1000;
+ abs_timeout.tv_nsec += (ms % 1000) * 1000000;
+ if (abs_timeout.tv_nsec >= NS_PER_S) {
+ abs_timeout.tv_sec++;
+ abs_timeout.tv_nsec -= NS_PER_S;
+ }
- int error = __pthread_mutex_timedlock(mutex, &abs_timeout, CLOCK_MONOTONIC);
- if (error == ETIMEDOUT) {
- error = EBUSY;
- }
- return error;
+ int error = __pthread_mutex_timedlock(mutex, &abs_timeout, CLOCK_MONOTONIC);
+ if (error == ETIMEDOUT) {
+ error = EBUSY;
+ }
+ return error;
}
#endif
int pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout) {
- return __pthread_mutex_timedlock(mutex, abs_timeout, CLOCK_REALTIME);
+ return __pthread_mutex_timedlock(mutex, abs_timeout, CLOCK_REALTIME);
}
int pthread_mutex_destroy(pthread_mutex_t* mutex) {
- // Use trylock to ensure that the mutex is valid and not already locked.
- int error = pthread_mutex_trylock(mutex);
- if (error != 0) {
- return error;
- }
- mutex->value = 0xdead10cc;
- return 0;
+ // Use trylock to ensure that the mutex is valid and not already locked.
+ int error = pthread_mutex_trylock(mutex);
+ if (error != 0) {
+ return error;
+ }
+
+ atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+ atomic_store_explicit(mutex_value_ptr, 0xdead10cc, memory_order_relaxed);
+ return 0;
}
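
For context, a small caller-side example of the timed-lock path exercised above; purely illustrative, not taken from the tree, and lock_with_deadline is a made-up name. pthread_mutex_timedlock takes an absolute CLOCK_REALTIME deadline, matching the clock passed to __pthread_mutex_timedlock:

    #include <pthread.h>
    #include <time.h>

    // Try to take 'm' for at most two seconds.
    static int lock_with_deadline(pthread_mutex_t* m) {
      timespec deadline;
      clock_gettime(CLOCK_REALTIME, &deadline);
      deadline.tv_sec += 2;
      return pthread_mutex_timedlock(m, &deadline);  // 0 on success, ETIMEDOUT if the deadline passed
    }
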
diff --git a/libc/bionic/semaphore.cpp b/libc/bionic/semaphore.cpp
index dabfea0..0b04650 100644
--- a/libc/bionic/semaphore.cpp
+++ b/libc/bionic/semaphore.cpp
@@ -26,13 +26,19 @@
* SUCH DAMAGE.
*/
+// Memory order requirements for POSIX semaphores appear unclear and are
+// currently interpreted inconsistently.
+// We conservatively prefer sequentially consistent operations for now.
+// CAUTION: This is more conservative than some other major implementations,
+// and may change if and when the issue is resolved.
+
#include <semaphore.h>
#include <errno.h>
#include <limits.h>
+#include <stdatomic.h>
#include <sys/time.h>
#include <time.h>
-#include "private/bionic_atomic_inline.h"
#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
#include "private/bionic_time_conversions.h"
@@ -66,7 +72,7 @@
#define SEMCOUNT_FROM_VALUE(val) (((val) << SEMCOUNT_VALUE_SHIFT) & SEMCOUNT_VALUE_MASK)
// Convert a sem->count bit pattern into the corresponding signed value.
-static inline int SEMCOUNT_TO_VALUE(uint32_t sval) {
+static inline int SEMCOUNT_TO_VALUE(unsigned int sval) {
return (static_cast<int>(sval) >> SEMCOUNT_VALUE_SHIFT);
}
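
The cast-then-arithmetic-shift above recovers the signed counter stored in the upper bits of the word. A small self-contained sketch of the trick, with the shift and layout assumed rather than copied from the header (bit 0 as the SHARED flag, the signed value in the remaining bits); pack/unpack/kValueShift are hypothetical stand-ins for the SEMCOUNT_* macros:

    #include <cstdint>

    constexpr int kValueShift = 1;  // assumed stand-in for SEMCOUNT_VALUE_SHIFT

    constexpr unsigned int pack(int value) {
      return static_cast<unsigned int>(value) << kValueShift;
    }
    constexpr int unpack(unsigned int sval) {
      return static_cast<int>(sval) >> kValueShift;  // arithmetic shift keeps the sign
    }

    static_assert(unpack(pack(5)) == 5, "positive values round-trip");
    static_assert(unpack(pack(-1)) == -1, "negative values survive the arithmetic shift");
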
@@ -79,11 +85,20 @@
#define SEMCOUNT_DECREMENT(sval) (((sval) - (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
#define SEMCOUNT_INCREMENT(sval) (((sval) + (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
-// Return the shared bitflag from a semaphore.
-static inline uint32_t SEM_GET_SHARED(sem_t* sem) {
- return (sem->count & SEMCOUNT_SHARED_MASK);
+static inline atomic_uint* SEM_TO_ATOMIC_POINTER(sem_t* sem) {
+ static_assert(sizeof(atomic_uint) == sizeof(sem->count),
+ "sem->count should actually be atomic_uint in implementation.");
+
+  // We prefer casting to atomic_uint instead of declaring sem->count to be atomic_uint directly,
+  // because the latter pollutes semaphore.h.
+ return reinterpret_cast<atomic_uint*>(&sem->count);
}
+// Return the shared bitflag from a semaphore counter.
+static inline unsigned int SEM_GET_SHARED(atomic_uint* sem_count_ptr) {
+  // memory_order_relaxed is used because the SHARED flag will not change after init.
+ return (atomic_load_explicit(sem_count_ptr, memory_order_relaxed) & SEMCOUNT_SHARED_MASK);
+}
int sem_init(sem_t* sem, int pshared, unsigned int value) {
// Ensure that 'value' can be stored in the semaphore.
@@ -92,10 +107,13 @@
return -1;
}
- sem->count = SEMCOUNT_FROM_VALUE(value);
+ unsigned int count = SEMCOUNT_FROM_VALUE(value);
if (pshared != 0) {
- sem->count |= SEMCOUNT_SHARED_MASK;
+ count |= SEMCOUNT_SHARED_MASK;
}
+
+ atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
+ atomic_init(sem_count_ptr, count);
return 0;
}
@@ -122,98 +140,97 @@
// and return the old one. As a special case,
// this returns immediately if the value is
// negative (i.e. -1)
-static int __sem_dec(volatile uint32_t* sem) {
- volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(sem);
- uint32_t shared = (*sem & SEMCOUNT_SHARED_MASK);
- uint32_t old_value, new_value;
- int ret;
+static int __sem_dec(atomic_uint* sem_count_ptr) {
+ unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
+ unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;
+  // Use memory_order_seq_cst in the atomic_compare_exchange operation to ensure that all
+  // memory accesses made by other threads can be seen in the current thread.
+  // An acquire fence may be sufficient, but it is still under discussion whether
+  // POSIX semaphores should provide sequential consistency.
do {
- old_value = (*sem & SEMCOUNT_VALUE_MASK);
- ret = SEMCOUNT_TO_VALUE(old_value);
- if (ret < 0) {
+ if (SEMCOUNT_TO_VALUE(old_value) < 0) {
break;
}
+ } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
+ SEMCOUNT_DECREMENT(old_value) | shared));
- new_value = SEMCOUNT_DECREMENT(old_value);
- } while (__bionic_cmpxchg((old_value|shared), (new_value|shared), ptr) != 0);
-
- return ret;
+ return SEMCOUNT_TO_VALUE(old_value);
}
// Same as __sem_dec, but will not touch anything if the
// value is already negative *or* 0. Returns the old value.
-static int __sem_trydec(volatile uint32_t* sem) {
- volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(sem);
- uint32_t shared = (*sem & SEMCOUNT_SHARED_MASK);
- uint32_t old_value, new_value;
- int ret;
+static int __sem_trydec(atomic_uint* sem_count_ptr) {
+ unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
+ unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;
+  // Use memory_order_seq_cst in the atomic_compare_exchange operation to ensure that all
+  // memory accesses made by other threads can be seen in the current thread.
+  // An acquire fence may be sufficient, but it is still under discussion whether
+  // POSIX semaphores should provide sequential consistency.
do {
- old_value = (*sem & SEMCOUNT_VALUE_MASK);
- ret = SEMCOUNT_TO_VALUE(old_value);
- if (ret <= 0) {
+ if (SEMCOUNT_TO_VALUE(old_value) <= 0) {
break;
}
+ } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
+ SEMCOUNT_DECREMENT(old_value) | shared));
- new_value = SEMCOUNT_DECREMENT(old_value);
- } while (__bionic_cmpxchg((old_value|shared), (new_value|shared), ptr) != 0);
-
- return ret;
+ return SEMCOUNT_TO_VALUE(old_value);
}
-
// "Increment" the value of a semaphore atomically and
// return its old value. Note that this implements
// the special case of "incrementing" any negative
// value to +1 directly.
//
// NOTE: The value will _not_ wrap above SEM_VALUE_MAX
-static int __sem_inc(volatile uint32_t* sem) {
- volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(sem);
- uint32_t shared = (*sem & SEMCOUNT_SHARED_MASK);
- uint32_t old_value, new_value;
- int ret;
+static int __sem_inc(atomic_uint* sem_count_ptr) {
+ unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
+ unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;
+ unsigned int new_value;
+  // Use memory_order_seq_cst in the atomic_compare_exchange operation to ensure that all
+  // memory accesses made before it can be seen by other threads.
+  // A release fence may be sufficient, but it is still under discussion whether
+  // POSIX semaphores should provide sequential consistency.
do {
- old_value = (*sem & SEMCOUNT_VALUE_MASK);
- ret = SEMCOUNT_TO_VALUE(old_value);
-
// Can't go higher than SEM_VALUE_MAX.
- if (ret == SEM_VALUE_MAX) {
+ if (SEMCOUNT_TO_VALUE(old_value) == SEM_VALUE_MAX) {
break;
}
- // If the counter is negative, go directly to +1, otherwise just increment.
- if (ret < 0) {
- new_value = SEMCOUNT_ONE;
+ // If the counter is negative, go directly to one, otherwise just increment.
+ if (SEMCOUNT_TO_VALUE(old_value) < 0) {
+ new_value = SEMCOUNT_ONE | shared;
} else {
- new_value = SEMCOUNT_INCREMENT(old_value);
+ new_value = SEMCOUNT_INCREMENT(old_value) | shared;
}
- } while (__bionic_cmpxchg((old_value|shared), (new_value|shared), ptr) != 0);
+ } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
+ new_value));
- return ret;
+ return SEMCOUNT_TO_VALUE(old_value);
}
int sem_wait(sem_t* sem) {
- uint32_t shared = SEM_GET_SHARED(sem);
+ atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
+ unsigned int shared = SEM_GET_SHARED(sem_count_ptr);
while (true) {
- if (__sem_dec(&sem->count) > 0) {
- ANDROID_MEMBAR_FULL();
+ if (__sem_dec(sem_count_ptr) > 0) {
return 0;
}
- __futex_wait_ex(&sem->count, shared, shared|SEMCOUNT_MINUS_ONE, NULL);
+ __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, NULL);
}
}
int sem_timedwait(sem_t* sem, const timespec* abs_timeout) {
+ atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
+
// POSIX says we need to try to decrement the semaphore
// before checking the timeout value. Note that if the
// value is currently 0, __sem_trydec() does nothing.
- if (__sem_trydec(&sem->count) > 0) {
- ANDROID_MEMBAR_FULL();
+ if (__sem_trydec(sem_count_ptr) > 0) {
return 0;
}
@@ -223,7 +240,7 @@
return -1;
}
- uint32_t shared = SEM_GET_SHARED(sem);
+ unsigned int shared = SEM_GET_SHARED(sem_count_ptr);
while (true) {
// POSIX mandates CLOCK_REALTIME here.
@@ -234,13 +251,12 @@
}
// Try to grab the semaphore. If the value was 0, this will also change it to -1.
- if (__sem_dec(&sem->count) > 0) {
- ANDROID_MEMBAR_FULL();
+ if (__sem_dec(sem_count_ptr) > 0) {
break;
}
// Contention detected. Wait for a wakeup event.
- int ret = __futex_wait_ex(&sem->count, shared, shared|SEMCOUNT_MINUS_ONE, &ts);
+ int ret = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, &ts);
// Return in case of timeout or interrupt.
if (ret == -ETIMEDOUT || ret == -EINTR) {
@@ -252,13 +268,13 @@
}
int sem_post(sem_t* sem) {
- uint32_t shared = SEM_GET_SHARED(sem);
+ atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
+ unsigned int shared = SEM_GET_SHARED(sem_count_ptr);
- ANDROID_MEMBAR_FULL();
- int old_value = __sem_inc(&sem->count);
+ int old_value = __sem_inc(sem_count_ptr);
if (old_value < 0) {
// Contention on the semaphore. Wake up all waiters.
- __futex_wake_ex(&sem->count, shared, INT_MAX);
+ __futex_wake_ex(sem_count_ptr, shared, INT_MAX);
} else if (old_value == SEM_VALUE_MAX) {
// Overflow detected.
errno = EOVERFLOW;
@@ -269,8 +285,8 @@
}
int sem_trywait(sem_t* sem) {
- if (__sem_trydec(&sem->count) > 0) {
- ANDROID_MEMBAR_FULL();
+ atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
+ if (__sem_trydec(sem_count_ptr) > 0) {
return 0;
} else {
errno = EAGAIN;
@@ -279,7 +295,12 @@
}
int sem_getvalue(sem_t* sem, int* sval) {
- int val = SEMCOUNT_TO_VALUE(sem->count);
+ atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
+
+  // Use memory_order_seq_cst in the atomic_load operation.
+  // memory_order_relaxed may be fine here, but it is still under discussion
+  // whether POSIX semaphores should provide sequential consistency.
+ int val = SEMCOUNT_TO_VALUE(atomic_load(sem_count_ptr));
if (val < 0) {
val = 0;
}
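
A caller-side sketch of the sem_timedwait path touched above, illustrative only, with wait_with_deadline as a made-up name. As the comment in sem_timedwait notes, POSIX mandates an absolute CLOCK_REALTIME deadline:

    #include <errno.h>
    #include <semaphore.h>
    #include <time.h>

    // Wait on 'sem' for at most one second.
    static int wait_with_deadline(sem_t* sem) {
      timespec deadline;
      clock_gettime(CLOCK_REALTIME, &deadline);
      deadline.tv_sec += 1;
      if (sem_timedwait(sem, &deadline) == -1) {
        return errno;  // ETIMEDOUT, EINTR, ...
      }
      return 0;
    }
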
diff --git a/libc/include/pthread.h b/libc/include/pthread.h
index 4281132..8d053ae 100644
--- a/libc/include/pthread.h
+++ b/libc/include/pthread.h
@@ -43,7 +43,7 @@
#endif
typedef struct {
- int volatile value;
+ int value;
#ifdef __LP64__
char __reserved[36];
#endif
diff --git a/libc/include/semaphore.h b/libc/include/semaphore.h
index 5827870..4ef13af 100644
--- a/libc/include/semaphore.h
+++ b/libc/include/semaphore.h
@@ -36,7 +36,7 @@
struct timespec;
typedef struct {
- volatile unsigned int count;
+ unsigned int count;
#ifdef __LP64__
int __reserved[3];
#endif
diff --git a/libc/include/stdio.h b/libc/include/stdio.h
index c0dac1a..b04aa24 100644
--- a/libc/include/stdio.h
+++ b/libc/include/stdio.h
@@ -207,16 +207,9 @@
#define L_tmpnam 1024 /* XXX must be == PATH_MAX */
#define TMP_MAX 308915776
-/* Always ensure that these are consistent with <fcntl.h> and <unistd.h>! */
-#ifndef SEEK_SET
-#define SEEK_SET 0 /* set file offset to offset */
-#endif
-#ifndef SEEK_CUR
-#define SEEK_CUR 1 /* set file offset to current plus offset */
-#endif
-#ifndef SEEK_END
-#define SEEK_END 2 /* set file offset to EOF plus offset */
-#endif
+#define SEEK_SET 0
+#define SEEK_CUR 1
+#define SEEK_END 2
/*
* Functions defined in ANSI C standard.
diff --git a/libc/include/unistd.h b/libc/include/unistd.h
index c755715..6403d4a 100644
--- a/libc/include/unistd.h
+++ b/libc/include/unistd.h
@@ -35,14 +35,19 @@
#include <sys/select.h>
#include <sys/sysconf.h>
+#include <machine/posix_limits.h>
+
__BEGIN_DECLS
-/* Standard file descriptor numbers. */
#define STDIN_FILENO 0
#define STDOUT_FILENO 1
#define STDERR_FILENO 2
-/* Values for whence in fseek and lseek */
+#define F_OK 0
+#define X_OK 1
+#define W_OK 2
+#define R_OK 4
+
#define SEEK_SET 0
#define SEEK_CUR 1
#define SEEK_END 2
@@ -68,8 +73,6 @@
#define _PC_PRIO_IO 18
#define _PC_SYNC_IO 19
-#include <machine/posix_limits.h>
-
extern char** environ;
extern __noreturn void _exit(int);
@@ -121,13 +124,6 @@
extern long fpathconf(int, int);
extern long pathconf(const char*, int);
-
-/* Macros for access() */
-#define R_OK 4 /* Read */
-#define W_OK 2 /* Write */
-#define X_OK 1 /* Execute */
-#define F_OK 0 /* Existence */
-
extern int access(const char*, int);
extern int faccessat(int, const char*, int, int);
extern int link(const char*, const char*);
diff --git a/libc/tools/gensyscalls.py b/libc/tools/gensyscalls.py
index 4e24077..7e11418 100755
--- a/libc/tools/gensyscalls.py
+++ b/libc/tools/gensyscalls.py
@@ -286,8 +286,9 @@
for alias in aliases:
stub += function_alias % { "func" : syscall["func"], "alias" : alias }
- # Use hidden visibility for any functions beginning with underscores.
- if pointer_length == 64 and syscall["func"].startswith("__"):
+ # Use hidden visibility on LP64 for any functions beginning with underscores.
+    # Force hidden visibility for any functions that begin with three underscores.
+ if (pointer_length == 64 and syscall["func"].startswith("__")) or syscall["func"].startswith("___"):
stub += '.hidden ' + syscall["func"] + '\n'
return stub
diff --git a/libc/tools/zoneinfo/update-tzdata.py b/libc/tools/zoneinfo/update-tzdata.py
index 4847356..d5788af 100755
--- a/libc/tools/zoneinfo/update-tzdata.py
+++ b/libc/tools/zoneinfo/update-tzdata.py
@@ -140,7 +140,7 @@
# Regenerate the .dat file.
os.chdir(icu_working_dir)
- subprocess.check_call(['make', '-j32'])
+ subprocess.check_call(['make', 'INCLUDE_UNI_CORE_DATA=1', '-j32'])
# Copy the .dat file to its ultimate destination.
icu_dat_data_dir = '%s/stubdata' % icu_dir
diff --git a/linker/linker.cpp b/linker/linker.cpp
index f7bcd27..3934484 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -798,7 +798,7 @@
}
ElfW(Sym)* soinfo::gnu_addr_lookup(const void* addr) {
- ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base;
+ ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - load_bias;
for (size_t i = 0; i < nbucket_; ++i) {
uint32_t n = bucket_[i];
@@ -819,7 +819,7 @@
}
ElfW(Sym)* soinfo::elf_addr_lookup(const void* addr) {
- ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base;
+ ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - load_bias;
// Search the library's symbol table for any defined symbol which
// contains this address.
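
The switch from base to load_bias matters because symbol values (st_value) are link-time virtual addresses, while base is where the first PT_LOAD segment actually landed; the two differ whenever that segment has a non-zero p_vaddr. A sketch of the containment check such a lookup performs, written independently of the linker's code (symbol_contains is a hypothetical helper):

    #include <elf.h>
    #include <link.h>
    #include <stdint.h>

    // load_bias is defined as (runtime address - link-time p_vaddr), so subtracting
    // it maps a runtime address back into the symbol table's address space.
    static bool symbol_contains(const ElfW(Sym)* sym, uintptr_t addr, uintptr_t load_bias) {
      uintptr_t soaddr = addr - load_bias;
      return sym->st_shndx != SHN_UNDEF &&
             soaddr >= sym->st_value &&
             soaddr - sym->st_value < sym->st_size;
    }
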
diff --git a/tests/fortify_test.cpp b/tests/fortify_test.cpp
index 6cbc695..5cc728f 100644
--- a/tests/fortify_test.cpp
+++ b/tests/fortify_test.cpp
@@ -26,6 +26,7 @@
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <time.h>
#if __BIONIC__
#define ASSERT_FORTIFY(expr) ASSERT_EXIT(expr, testing::KilledBySignal(SIGABRT), "FORTIFY")
@@ -938,11 +939,15 @@
TEST_F(DEATHTEST, poll_fortified) {
nfds_t fd_count = atoi("2"); // suppress compiler optimizations
pollfd buf[1] = {{0, POLLIN, 0}};
- ASSERT_FORTIFY(poll(buf, fd_count, -1));
+  // Set the timeout to zero to prevent waiting in poll when the fortify test fails.
+ ASSERT_FORTIFY(poll(buf, fd_count, 0));
}
TEST_F(DEATHTEST, ppoll_fortified) {
nfds_t fd_count = atoi("2"); // suppress compiler optimizations
pollfd buf[1] = {{0, POLLIN, 0}};
- ASSERT_FORTIFY(ppoll(buf, fd_count, NULL, NULL));
+  // Set the timeout to zero to prevent waiting in ppoll when the fortify test fails.
+ timespec timeout;
+ timeout.tv_sec = timeout.tv_nsec = 0;
+ ASSERT_FORTIFY(ppoll(buf, fd_count, &timeout, NULL));
}
diff --git a/tests/pthread_test.cpp b/tests/pthread_test.cpp
index cb32079..5dc60ee 100644
--- a/tests/pthread_test.cpp
+++ b/tests/pthread_test.cpp
@@ -27,6 +27,7 @@
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
+#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
@@ -1092,3 +1093,14 @@
ASSERT_EQ(EPERM, pthread_mutex_unlock(&lock));
ASSERT_EQ(0, pthread_mutex_destroy(&lock));
}
+
+TEST(pthread, pthread_mutex_owner_tid_limit) {
+ FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
+ ASSERT_TRUE(fp != NULL);
+ long pid_max;
+ ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
+ fclose(fp);
+  // The current pthread_mutex implementation uses 16 bits to represent the owner tid.
+  // Change the implementation if we need to support tids greater than 65535.
+ ASSERT_LE(pid_max, 65536);
+}