Merge "Hide internal __system_property_* functions"
diff --git a/libc/SECCOMP_WHITELIST.TXT b/libc/SECCOMP_WHITELIST.TXT
index cdf252f..22e8987 100644
--- a/libc/SECCOMP_WHITELIST.TXT
+++ b/libc/SECCOMP_WHITELIST.TXT
@@ -86,3 +86,8 @@
 
 # b/35059702
 int	lstat64:lstat64(const char *restrict path, struct stat64 *restrict buf)	arm
+
+# b/35217603
+int	fcntl:fcntl(int fd, int cmd, ... /* arg */ )	arm
+pid_t	fork:fork()	arm
+int	poll:poll(struct pollfd *fds, nfds_t nfds, int timeout)	arm
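
The three entries above cover the raw arm syscalls reached by ordinary libc calls (b/35217603). A minimal sketch of the kind of caller they unblock under the arm seccomp policy (the helper name is illustrative, not part of this change):

#include <fcntl.h>
#include <poll.h>
#include <sys/wait.h>
#include <unistd.h>

// Illustrative only: touches exactly the newly whitelisted syscalls.
static int use_newly_whitelisted_syscalls(int fd) {
  if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) return -1;  // fcntl(2)

  pid_t pid = fork();                                    // fork(2)
  if (pid == 0) _exit(0);
  if (pid > 0) waitpid(pid, nullptr, 0);

  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;
  pfd.revents = 0;
  return poll(&pfd, 1, 0);                               // poll(2)
}
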
diff --git a/libc/bionic/__cxa_guard.cpp b/libc/bionic/__cxa_guard.cpp
index 97284d5..06926df 100644
--- a/libc/bionic/__cxa_guard.cpp
+++ b/libc/bionic/__cxa_guard.cpp
@@ -79,38 +79,33 @@
 #define CONSTRUCTION_UNDERWAY_WITH_WAITER       0x200
 
 extern "C" int __cxa_guard_acquire(_guard_t* gv) {
-  int old_value = atomic_load_explicit(&gv->state, memory_order_relaxed);
+  int old_value = atomic_load_explicit(&gv->state, memory_order_acquire);
+  // In the common CONSTRUCTION_COMPLETE case we have to ensure that all the stores performed by
+  // the construction function are observable on this CPU after we exit. A similar constraint may
+  // apply in the CONSTRUCTION_NOT_YET_STARTED case with a prior abort.
 
   while (true) {
     if (old_value == CONSTRUCTION_COMPLETE) {
-      // A load_acquire operation is need before exiting with COMPLETE state, as we have to ensure
-      // that all the stores performed by the construction function are observable on this CPU
-      // after we exit.
-      atomic_thread_fence(memory_order_acquire);
       return 0;
     } else if (old_value == CONSTRUCTION_NOT_YET_STARTED) {
       if (!atomic_compare_exchange_weak_explicit(&gv->state, &old_value,
                                                   CONSTRUCTION_UNDERWAY_WITHOUT_WAITER,
-                                                  memory_order_relaxed,
-                                                  memory_order_relaxed)) {
+                                                  memory_order_acquire /* or relaxed in C++17 */,
+                                                  memory_order_acquire)) {
         continue;
       }
-      // The acquire fence may not be needed. But as described in section 3.3.2 of
-      // the Itanium C++ ABI specification, it probably has to behave like the
-      // acquisition of a mutex, which needs an acquire fence.
-      atomic_thread_fence(memory_order_acquire);
       return 1;
     } else if (old_value == CONSTRUCTION_UNDERWAY_WITHOUT_WAITER) {
       if (!atomic_compare_exchange_weak_explicit(&gv->state, &old_value,
                                                  CONSTRUCTION_UNDERWAY_WITH_WAITER,
-                                                 memory_order_relaxed,
-                                                 memory_order_relaxed)) {
+                                                 memory_order_acquire /* or relaxed in C++17 */,
+                                                 memory_order_acquire)) {
         continue;
       }
     }
 
     __futex_wait_ex(&gv->state, false, CONSTRUCTION_UNDERWAY_WITH_WAITER, false, nullptr);
-    old_value = atomic_load_explicit(&gv->state, memory_order_relaxed);
+    old_value = atomic_load_explicit(&gv->state, memory_order_acquire);
   }
 }
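
The acquire orderings above replace the standalone fences. They matter because __cxa_guard_acquire is what the compiler calls before running a function-local static's constructor; a minimal sketch of that lowering, using a hypothetical Widget type (the comments approximate the Itanium C++ ABI protocol, not bionic's exact codegen):

struct Widget { int value = 42; };

Widget& instance() {
  static Widget w;  // roughly lowered to:
  //   if (__cxa_guard_acquire(&guard)) {   // acquire: observe the winner's stores
  //     construct w in place;               // a throw here calls __cxa_guard_abort
  //     __cxa_guard_release(&guard);        // release: publish the constructed object
  //   }
  return w;  // every thread returning here must see a fully constructed Widget
}
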
 
diff --git a/libc/include/android/legacy_termios_inlines.h b/libc/include/android/legacy_termios_inlines.h
index 4424bdb..02e9429 100644
--- a/libc/include/android/legacy_termios_inlines.h
+++ b/libc/include/android/legacy_termios_inlines.h
@@ -91,6 +91,18 @@
   s->c_cflag |= CS8;
 }
 
+static __inline int cfsetspeed(struct termios* s, speed_t speed) {
+  // TODO: check 'speed' is valid.
+  s->c_cflag = (s->c_cflag & ~CBAUD) | (speed & CBAUD);
+  return 0;
+}
+
+static __inline int tcdrain(int fd) {
+  // A non-zero argument to TCSBRK means "don't send a break".
+  // The drain is a side-effect of the ioctl!
+  return ioctl(fd, TCSBRK, __BIONIC_CAST(static_cast, unsigned long, 1));
+}
+
 __END_DECLS
 
 #endif
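
A minimal usage sketch of the two new inlines, assuming a caller-supplied serial device path (the helper and the chosen flags are illustrative, not part of bionic):

#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

static int open_raw_serial(const char* path) {
  int fd = open(path, O_RDWR | O_NOCTTY | O_CLOEXEC);
  if (fd == -1) return -1;

  struct termios t;
  if (tcgetattr(fd, &t) == -1) { close(fd); return -1; }
  cfmakeraw(&t);
  cfsetspeed(&t, B115200);   // new inline: stores the CBAUD bits into c_cflag
  if (tcsetattr(fd, TCSANOW, &t) == -1) { close(fd); return -1; }

  write(fd, "AT\r", 3);
  tcdrain(fd);               // new inline: TCSBRK ioctl with a non-zero argument
  return fd;
}
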
diff --git a/libc/include/sys/epoll.h b/libc/include/sys/epoll.h
index 4ec8969..b7fdd4d 100644
--- a/libc/include/sys/epoll.h
+++ b/libc/include/sys/epoll.h
@@ -31,11 +31,17 @@
 
 #include <sys/cdefs.h>
 #include <sys/types.h>
-#include <fcntl.h> /* For O_CLOEXEC. */
 #include <signal.h> /* For sigset_t. */
 
+#include <linux/eventpoll.h>
+/* TODO: https://lkml.org/lkml/2017/2/23/416 has a better fix. */
+#undef EPOLLWAKEUP
+#undef EPOLLONESHOT
+#undef EPOLLET
+
 __BEGIN_DECLS
 
+/* TODO: remove once https://lkml.org/lkml/2017/2/23/417 is upstream. */
 #define EPOLLIN          0x00000001
 #define EPOLLPRI         0x00000002
 #define EPOLLOUT         0x00000004
@@ -51,12 +57,6 @@
 #define EPOLLONESHOT     0x40000000
 #define EPOLLET          0x80000000
 
-#define EPOLL_CTL_ADD    1
-#define EPOLL_CTL_DEL    2
-#define EPOLL_CTL_MOD    3
-
-#define EPOLL_CLOEXEC O_CLOEXEC
-
 typedef union epoll_data {
   void* ptr;
   int fd;
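
Existing callers should see no difference: EPOLL_CTL_* and EPOLL_CLOEXEC now come from <linux/eventpoll.h>, while the event bits and the struct stay here. A minimal sketch of the usage this change has to keep compiling (the helper is illustrative, not part of bionic):

#include <sys/epoll.h>
#include <unistd.h>

static int wait_for_readable(int fd) {
  int epfd = epoll_create1(EPOLL_CLOEXEC);  // constant now provided by the uapi header
  if (epfd == -1) return -1;

  epoll_event ev = {};
  ev.events = EPOLLIN | EPOLLET;            // bits still defined here, pending the lkml fix
  ev.data.fd = fd;                          // epoll_data_t, not the kernel's bare __u64
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) == -1) { close(epfd); return -1; }

  epoll_event out;
  int n = epoll_wait(epfd, &out, 1, -1);
  close(epfd);
  return n;
}
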
diff --git a/libc/include/termios.h b/libc/include/termios.h
index 49ffcd2..66ae71c 100644
--- a/libc/include/termios.h
+++ b/libc/include/termios.h
@@ -40,8 +40,10 @@
 speed_t cfgetispeed(const struct termios*) __INTRODUCED_IN(21);
 speed_t cfgetospeed(const struct termios*) __INTRODUCED_IN(21);
 void cfmakeraw(struct termios*) __INTRODUCED_IN(21);
+int cfsetspeed(struct termios*, speed_t) __INTRODUCED_IN(21);
 int cfsetispeed(struct termios*, speed_t) __INTRODUCED_IN(21);
 int cfsetospeed(struct termios*, speed_t) __INTRODUCED_IN(21);
+int tcdrain(int) __INTRODUCED_IN(21);
 int tcflow(int, int) __INTRODUCED_IN(21);
 int tcflush(int, int) __INTRODUCED_IN(21);
 int tcgetattr(int, struct termios*) __INTRODUCED_IN(21);
@@ -50,9 +52,6 @@
 int tcsetattr(int, int, const struct termios*) __INTRODUCED_IN(21);
 #endif
 
-int cfsetspeed(struct termios*, speed_t) __INTRODUCED_IN(21);
-int tcdrain(int) __INTRODUCED_IN(21);
-
 __END_DECLS
 
 #include <android/legacy_termios_inlines.h>
diff --git a/libc/kernel/tools/defaults.py b/libc/kernel/tools/defaults.py
index d5577da..620bb31 100644
--- a/libc/kernel/tools/defaults.py
+++ b/libc/kernel/tools/defaults.py
@@ -75,6 +75,8 @@
     "SIGRTMAX": "__SIGRTMAX",
     # We want to support both BSD and Linux member names in struct udphdr.
     "udphdr": "__kernel_udphdr",
+    # The kernel's struct epoll_event just has __u64 for the data.
+    "epoll_event": "__kernel_uapi_epoll_event",
     }
 
 # this is the set of known static inline functions that we want to keep
diff --git a/libc/kernel/uapi/linux/eventpoll.h b/libc/kernel/uapi/linux/eventpoll.h
index ec51b9f..6c7e355 100644
--- a/libc/kernel/uapi/linux/eventpoll.h
+++ b/libc/kernel/uapi/linux/eventpoll.h
@@ -37,7 +37,7 @@
 #define EPOLL_PACKED
 /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
 #endif
-struct epoll_event {
+struct __kernel_uapi_epoll_event {
   __u32 events;
   __u64 data;
 /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
diff --git a/libc/seccomp/arm_policy.c b/libc/seccomp/arm_policy.c
index 8f5cb0d..f175d6a 100644
--- a/libc/seccomp/arm_policy.c
+++ b/libc/seccomp/arm_policy.c
@@ -6,76 +6,76 @@
 #include "seccomp_policy.h"
 const struct sock_filter arm_filter[] = {
 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 0, 0, 125),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 150, 63, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 74, 31, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 41, 15, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 19, 7, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 8, 3, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 3, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 2, 119, 118), //restart_syscall|exit
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 7, 118, 117), //read|write|open|close
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 10, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 9, 116, 115), //creat
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 13, 115, 114), //unlink|execve|chdir
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 33, 3, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 26, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 22, 112, 111), //lseek|getpid|mount
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 27, 111, 110), //ptrace
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 36, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 34, 109, 108), //access
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 38, 108, 107), //sync|kill
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 57, 7, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 51, 3, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 45, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 44, 104, 103), //dup|pipe|times
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 46, 103, 102), //brk
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 54, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 53, 101, 100), //acct|umount2
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 55, 100, 99), //ioctl
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 64, 3, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 60, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 58, 97, 96), //setpgid
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 62, 96, 95), //umask|chroot
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 66, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 65, 94, 93), //getppid
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 68, 93, 92), //setsid|sigaction
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 114, 15, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 91, 7, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 85, 3, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 77, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 76, 88, 87), //sethostname|setrlimit
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 80, 87, 86), //getrusage|gettimeofday|settimeofday
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 87, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 86, 85, 84), //readlink
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 89, 84, 83), //swapon|reboot
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 96, 3, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 94, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 93, 81, 80), //munmap|truncate
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 95, 80, 79), //fchmod
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 103, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 98, 78, 77), //getpriority|setpriority
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 106, 77, 76), //syslog|setitimer|getitimer
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 131, 7, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 124, 3, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 118, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 117, 73, 72), //wait4|swapoff|sysinfo
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 123, 72, 71), //fsync|sigreturn|clone|setdomainname|uname
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 128, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 126, 70, 69), //adjtimex|mprotect
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 130, 69, 68), //init_module|delete_module
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 138, 3, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 136, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 134, 66, 65), //quotactl|getpgid|fchdir
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 137, 65, 64), //personality
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 143, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 142, 63, 62), //setfsuid|setfsgid|_llseek|getdents
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 149, 62, 61), //flock|msync|readv|writev|getsid|fdatasync
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 168, 63, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 77, 31, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 45, 15, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 26, 7, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 10, 3, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 8, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 7, 119, 118), //restart_syscall|exit|fork|read|write|open|close
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 9, 118, 117), //creat
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 19, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 13, 116, 115), //unlink|execve|chdir
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 22, 115, 114), //lseek|getpid|mount
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 36, 3, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 33, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 27, 112, 111), //ptrace
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 34, 111, 110), //access
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 41, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 38, 109, 108), //sync|kill
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 44, 108, 107), //dup|pipe|times
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 60, 7, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 54, 3, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 51, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 46, 104, 103), //brk
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 53, 103, 102), //acct|umount2
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 57, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 56, 101, 100), //ioctl|fcntl
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 58, 100, 99), //setpgid
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 66, 3, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 64, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 62, 97, 96), //umask|chroot
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 65, 96, 95), //getppid
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 74, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 68, 94, 93), //setsid|sigaction
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 76, 93, 92), //sethostname|setrlimit
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 118, 15, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 94, 7, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 87, 3, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 85, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 80, 88, 87), //getrusage|gettimeofday|settimeofday
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 86, 87, 86), //readlink
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 91, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 89, 85, 84), //swapon|reboot
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 93, 84, 83), //munmap|truncate
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 103, 3, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 96, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 95, 81, 80), //fchmod
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 98, 80, 79), //getpriority|setpriority
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 114, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 106, 78, 77), //syslog|setitimer|getitimer
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 117, 77, 76), //wait4|swapoff|sysinfo
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 136, 7, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 128, 3, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 124, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 123, 73, 72), //fsync|sigreturn|clone|setdomainname|uname
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 126, 72, 71), //adjtimex|mprotect
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 131, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 130, 70, 69), //init_module|delete_module
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 134, 69, 68), //quotactl|getpgid|fchdir
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 143, 3, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 138, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 137, 66, 65), //personality
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 142, 65, 64), //setfsuid|setfsgid|_llseek|getdents
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 150, 1, 0),
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 149, 63, 62), //flock|msync|readv|writev|getsid|fdatasync
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 164, 62, 61), //mlock|munlock|mlockall|munlockall|sched_setparam|sched_getparam|sched_setscheduler|sched_getscheduler|sched_yield|sched_get_priority_max|sched_get_priority_min|sched_rr_get_interval|nanosleep|mremap
 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 290, 31, 0),
 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 219, 15, 0),
 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 199, 7, 0),
 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 183, 3, 0),
 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 172, 1, 0),
-BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 164, 56, 55), //mlock|munlock|mlockall|munlockall|sched_setparam|sched_getparam|sched_setscheduler|sched_getscheduler|sched_yield|sched_get_priority_max|sched_get_priority_min|sched_rr_get_interval|nanosleep|mremap
+BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 169, 56, 55), //poll
 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 182, 55, 54), //prctl|rt_sigreturn|rt_sigaction|rt_sigprocmask|rt_sigpending|rt_sigtimedwait|rt_sigqueueinfo|rt_sigsuspend|pread64|pwrite64
 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 190, 1, 0),
 BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 188, 53, 52), //getcwd|capget|capset|sigaltstack|sendfile
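
The regenerated table is a binary search over arm syscall numbers: each BPF_JUMP compares the already-loaded number against K and skips jt or jf instructions. A minimal sketch of one such node in isolation, assuming the installer loads seccomp_data.nr before running the table (the allow/trap split at 168 is purely illustrative, not the real policy):

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>

static const struct sock_filter example_filter[] = {
    // Load the syscall number into the accumulator.
    BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
    // If nr >= 168, fall through (jt = 0) to the trap; otherwise skip one
    // instruction (jf = 1) to the allow. Every entry above has this shape.
    BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 168, 0, 1),
    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_TRAP),   // illustrative deny
    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),  // illustrative allow
};
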
diff --git a/tests/sys_ptrace_test.cpp b/tests/sys_ptrace_test.cpp
index 8fe7a29..bce5898 100644
--- a/tests/sys_ptrace_test.cpp
+++ b/tests/sys_ptrace_test.cpp
@@ -28,6 +28,7 @@
 
 #include <gtest/gtest.h>
 
+#include <android-base/macros.h>
 #include <android-base/unique_fd.h>
 
 using android::base::unique_fd;
@@ -37,33 +38,6 @@
 #define TRAP_HWBKPT 4
 #endif
 
-template <typename T>
-static void __attribute__((noreturn)) watchpoint_fork_child(unsigned cpu, T& data) {
-  // Extra precaution: make sure we go away if anything happens to our parent.
-  if (prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0) == -1) {
-    perror("prctl(PR_SET_PDEATHSIG)");
-    _exit(1);
-  }
-
-  cpu_set_t cpus;
-  CPU_ZERO(&cpus);
-  CPU_SET(cpu, &cpus);
-  if (sched_setaffinity(0, sizeof cpus, &cpus) == -1) {
-    perror("sched_setaffinity");
-    _exit(2);
-  }
-  if (ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == -1) {
-    perror("ptrace(PTRACE_TRACEME)");
-    _exit(3);
-  }
-
-  raise(SIGSTOP); // Synchronize with the tracer, let it set the watchpoint.
-
-  data = 1; // Now trigger the watchpoint.
-
-  _exit(0);
-}
-
 class ChildGuard {
  public:
   explicit ChildGuard(pid_t pid) : pid(pid) {}
@@ -109,18 +83,19 @@
   return (dreg_state.dbg_info & 0xff) > 0;
 #elif defined(__i386__) || defined(__x86_64__)
   // We assume watchpoints and breakpoints are always supported on x86.
-  (void) child;
-  (void)feature;
+  UNUSED(child);
+  UNUSED(feature);
   return true;
 #else
   // TODO: mips support.
-  (void) child;
-  (void)feature;
+  UNUSED(child);
+  UNUSED(feature);
   return false;
 #endif
 }
 
-static void set_watchpoint(pid_t child, const void *address, size_t size) {
+static void set_watchpoint(pid_t child, uintptr_t address, size_t size) {
+  ASSERT_EQ(0u, address & 0x7) << "address: " << address;
 #if defined(__arm__) || defined(__aarch64__)
   const unsigned byte_mask = (1 << size) - 1;
   const unsigned type = 2; // Write.
@@ -133,7 +108,7 @@
 #else // aarch64
   user_hwdebug_state dreg_state;
   memset(&dreg_state, 0, sizeof dreg_state);
-  dreg_state.dbg_regs[0].addr = reinterpret_cast<uintptr_t>(address);
+  dreg_state.dbg_regs[0].addr = address;
   dreg_state.dbg_regs[0].ctrl = control;
 
   iovec iov;
@@ -158,19 +133,33 @@
   data |= value;
   ASSERT_EQ(0, ptrace(PTRACE_POKEUSER, child, offsetof(user, u_debugreg[7]), data)) << strerror(errno);
 #else
-  (void) child;
-  (void) address;
-  (void) size;
+  UNUSED(child);
+  UNUSED(address);
+  UNUSED(size);
 #endif
 }
 
-template<typename T>
-static void run_watchpoint_test_impl(unsigned cpu) {
-  alignas(8) T data = 0;
+template <typename T>
+static void run_watchpoint_test(std::function<void(T&)> child_func, size_t offset, size_t size) {
+  alignas(16) T data{};
 
   pid_t child = fork();
   ASSERT_NE(-1, child) << strerror(errno);
-  if (child == 0) watchpoint_fork_child(cpu, data);
+  if (child == 0) {
+    // Extra precaution: make sure we go away if anything happens to our parent.
+    if (prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0) == -1) {
+      perror("prctl(PR_SET_PDEATHSIG)");
+      _exit(1);
+    }
+
+    if (ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == -1) {
+      perror("ptrace(PTRACE_TRACEME)");
+      _exit(2);
+    }
+
+    child_func(data);
+    _exit(0);
+  }
 
   ChildGuard guard(child);
 
@@ -184,7 +173,7 @@
     return;
   }
 
-  set_watchpoint(child, &data, sizeof data);
+  set_watchpoint(child, uintptr_t(&data) + offset, size);
 
   ASSERT_EQ(0, ptrace(PTRACE_CONT, child, nullptr, nullptr)) << strerror(errno);
   ASSERT_EQ(child, waitpid(child, &status, __WALL)) << strerror(errno);
@@ -195,17 +184,29 @@
   ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child, nullptr, &siginfo)) << strerror(errno);
   ASSERT_EQ(TRAP_HWBKPT, siginfo.si_code);
 #if defined(__arm__) || defined(__aarch64__)
-  ASSERT_EQ(&data, siginfo.si_addr);
+  ASSERT_LE(&data, siginfo.si_addr);
+  ASSERT_GT((&data) + 1, siginfo.si_addr);
 #endif
 }
 
-static void run_watchpoint_test(unsigned cpu) {
-  run_watchpoint_test_impl<uint8_t>(cpu);
-  run_watchpoint_test_impl<uint16_t>(cpu);
-  run_watchpoint_test_impl<uint32_t>(cpu);
-#if defined(__LP64__)
-  run_watchpoint_test_impl<uint64_t>(cpu);
-#endif
+template <typename T>
+static void watchpoint_stress_child(unsigned cpu, T& data) {
+  cpu_set_t cpus;
+  CPU_ZERO(&cpus);
+  CPU_SET(cpu, &cpus);
+  if (sched_setaffinity(0, sizeof cpus, &cpus) == -1) {
+    perror("sched_setaffinity");
+    _exit(3);
+  }
+  raise(SIGSTOP);  // Synchronize with the tracer, let it set the watchpoint.
+
+  data = 1;  // Now trigger the watchpoint.
+}
+
+template <typename T>
+static void run_watchpoint_stress(size_t cpu) {
+  run_watchpoint_test<T>(std::bind(watchpoint_stress_child<T>, cpu, std::placeholders::_1), 0,
+                         sizeof(T));
 }
 
 // Test watchpoint API. The test is considered successful if our watchpoints get hit OR the
@@ -217,10 +218,53 @@
 
   for (size_t cpu = 0; cpu < CPU_SETSIZE; ++cpu) {
     if (!CPU_ISSET(cpu, &available_cpus)) continue;
-    run_watchpoint_test(cpu);
+
+    run_watchpoint_stress<uint8_t>(cpu);
+    run_watchpoint_stress<uint16_t>(cpu);
+    run_watchpoint_stress<uint32_t>(cpu);
+#if defined(__LP64__)
+    run_watchpoint_stress<uint64_t>(cpu);
+#endif
   }
 }
 
+struct Uint128_t {
+  uint64_t data[2];
+};
+static void watchpoint_imprecise_child(Uint128_t& data) {
+  raise(SIGSTOP);  // Synchronize with the tracer, let it set the watchpoint.
+
+#if defined(__i386__) || defined(__x86_64__)
+  asm volatile("movdqa %%xmm0, %0" : : "m"(data));
+#elif defined(__arm__)
+  asm volatile("stm %0, { r0, r1, r2, r3 }" : : "r"(&data));
+#elif defined(__aarch64__)
+  asm volatile("stp x0, x1, %0" : : "m"(data));
+#elif defined(__mips__)
+// TODO
+  UNUSED(data);
+#endif
+}
+
+// Test that the kernel is able to handle the case when the instruction writes
+// to a larger block of memory than the one we are watching. If you see this
+// test fail on arm64, you will likely need to cherry-pick fdfeff0f into your
+// kernel.
+TEST(sys_ptrace, watchpoint_imprecise) {
+  // Make sure we get interrupted in case a buggy kernel does not report the
+  // watchpoint hit correctly.
+  struct sigaction action, oldaction;
+  action.sa_handler = [](int) {};
+  sigemptyset(&action.sa_mask);
+  action.sa_flags = 0;
+  ASSERT_EQ(0, sigaction(SIGALRM, &action, &oldaction)) << strerror(errno);
+  alarm(5);
+
+  run_watchpoint_test<Uint128_t>(watchpoint_imprecise_child, 8, 8);
+
+  ASSERT_EQ(0, sigaction(SIGALRM, &oldaction, nullptr)) << strerror(errno);
+}
+
 static void __attribute__((noinline)) breakpoint_func() {
   asm volatile("");
 }
@@ -285,8 +329,8 @@
   ASSERT_EQ(0, ptrace(PTRACE_POKEUSER, child, offsetof(user, u_debugreg[7]), data))
       << strerror(errno);
 #else
-  (void)child;
-  (void)address;
+  UNUSED(child);
+  UNUSED(address);
 #endif
 }
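
With the callable-based refactor, one-off cases no longer need a dedicated *_impl template: a lambda converts to the std::function<void(T&)> parameter of run_watchpoint_test above. A minimal usage sketch (the case itself is illustrative, not one of the tests added here):

static void example_single_word_case() {
  run_watchpoint_test<uint64_t>(
      [](uint64_t& data) {
        raise(SIGSTOP);  // synchronize with the tracer, as the other children do
        data = 1;        // one aligned 8-byte write inside the watched range
      },
      /*offset=*/0, /*size=*/sizeof(uint64_t));
}
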
 
diff --git a/tools/versioner/src/VFS.cpp b/tools/versioner/src/VFS.cpp
index 1aa7229..cdf232f 100644
--- a/tools/versioner/src/VFS.cpp
+++ b/tools/versioner/src/VFS.cpp
@@ -59,7 +59,7 @@
       errx(1, "failed to map header '%s'", file_path);
     }
 
-    if (!vfs->addFile(file_path, ent->fts_statp->st_mtim.tv_sec, std::move(buffer_opt.get()))) {
+    if (!vfs->addFile(file_path, ent->fts_statp->st_mtime, std::move(buffer_opt.get()))) {
       errx(1, "failed to add file '%s'", file_path);
     }
   }
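
st_mtime is the portable, seconds-resolution spelling; on Linux it resolves to st_mtim.tv_sec, so addFile() receives the same value as before. A minimal sketch of the relationship (the helper is illustrative, not part of versioner):

#include <sys/stat.h>
#include <time.h>

static time_t modification_seconds(const struct stat& sb) {
  // Equivalent to sb.st_mtim.tv_sec where st_mtim exists; the st_mtime spelling
  // also builds against C libraries that only expose the classic member.
  return sb.st_mtime;
}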