Support larger guard regions.

This also fixes a long-standing bug where the guard region would be taken
out of the stack itself, rather than being -- as POSIX demands -- additional
space after the stack. Historically a 128KiB stack with a 256KiB guard would
have given you an immediate crash.
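
As a rough illustration of the intended behavior (a standalone sketch, not
part of this change; the 128KiB/256KiB sizes just mirror the example above),
a caller can now do:

  #include <pthread.h>
  #include <stdio.h>

  static void* fn(void* arg) {
    return arg;
  }

  int main() {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    // A 128KiB stack with a 256KiB guard: previously the guard was carved
    // out of the stack itself, leaving no usable stack; with this change
    // the guard is additional space beyond the stack, as POSIX requires.
    pthread_attr_setstacksize(&attr, 128 * 1024);
    pthread_attr_setguardsize(&attr, 256 * 1024);
    pthread_t t;
    int rc = pthread_create(&t, &attr, fn, NULL);
    if (rc != 0) {
      fprintf(stderr, "pthread_create failed: %d\n", rc);
      return 1;
    }
    pthread_join(t, NULL);
    pthread_attr_destroy(&attr);
    return 0;
  }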

Bug: http://b/38413813
Test: builds, boots
Change-Id: Idd12a3899be1d92fea3d3e0fa6882ca2216bd79c
diff --git a/libc/bionic/pthread_attr.cpp b/libc/bionic/pthread_attr.cpp
index 4b6a8f2..39d6e09 100644
--- a/libc/bionic/pthread_attr.cpp
+++ b/libc/bionic/pthread_attr.cpp
@@ -43,7 +43,7 @@
   attr->flags = 0;
   attr->stack_base = NULL;
   attr->stack_size = PTHREAD_STACK_SIZE_DEFAULT;
-  attr->guard_size = PAGE_SIZE;
+  attr->guard_size = PTHREAD_GUARD_SIZE;
   attr->sched_policy = SCHED_NORMAL;
   attr->sched_priority = 0;
   return 0;
diff --git a/libc/bionic/pthread_create.cpp b/libc/bionic/pthread_create.cpp
index 9be86f1..dc1b984 100644
--- a/libc/bionic/pthread_create.cpp
+++ b/libc/bionic/pthread_create.cpp
@@ -55,15 +55,16 @@
   thread->tls[TLS_SLOT_SELF] = thread->tls;
   thread->tls[TLS_SLOT_THREAD_ID] = thread;
 
-  // Add a guard page before and after.
-  size_t allocation_size = BIONIC_TLS_SIZE + 2 * PAGE_SIZE;
+  // Add a guard before and after.
+  size_t allocation_size = BIONIC_TLS_SIZE + (2 * PTHREAD_GUARD_SIZE);
   void* allocation = mmap(nullptr, allocation_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (allocation == MAP_FAILED) {
     async_safe_fatal("failed to allocate TLS");
   }
-  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, allocation, allocation_size, "bionic TLS guard page");
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, allocation, allocation_size, "bionic TLS guard");
 
-  thread->bionic_tls = reinterpret_cast<bionic_tls*>(static_cast<char*>(allocation) + PAGE_SIZE);
+  thread->bionic_tls = reinterpret_cast<bionic_tls*>(static_cast<char*>(allocation) +
+                                                     PTHREAD_GUARD_SIZE);
   if (mprotect(thread->bionic_tls, BIONIC_TLS_SIZE, PROT_READ | PROT_WRITE) != 0) {
     async_safe_fatal("failed to mprotect TLS");
   }
@@ -79,15 +80,14 @@
   // Create and set an alternate signal stack.
   void* stack_base = mmap(NULL, SIGNAL_STACK_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   if (stack_base != MAP_FAILED) {
-
-    // Create a guard page to catch stack overflows in signal handlers.
-    if (mprotect(stack_base, PAGE_SIZE, PROT_NONE) == -1) {
+    // Create a guard to catch stack overflows in signal handlers.
+    if (mprotect(stack_base, PTHREAD_GUARD_SIZE, PROT_NONE) == -1) {
       munmap(stack_base, SIGNAL_STACK_SIZE);
       return;
     }
     stack_t ss;
-    ss.ss_sp = reinterpret_cast<uint8_t*>(stack_base) + PAGE_SIZE;
-    ss.ss_size = SIGNAL_STACK_SIZE - PAGE_SIZE;
+    ss.ss_sp = reinterpret_cast<uint8_t*>(stack_base) + PTHREAD_GUARD_SIZE;
+    ss.ss_size = SIGNAL_STACK_SIZE - PTHREAD_GUARD_SIZE;
     ss.ss_flags = 0;
     sigaltstack(&ss, NULL);
     thread->alternate_signal_stack = stack_base;
@@ -95,7 +95,7 @@
     // We can only use const static allocated string for mapped region name, as Android kernel
     // uses the string pointer directly when dumping /proc/pid/maps.
     prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ss.ss_sp, ss.ss_size, "thread signal stack");
-    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, stack_base, PAGE_SIZE, "thread signal stack guard page");
+    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, stack_base, PTHREAD_GUARD_SIZE, "thread signal stack guard");
   }
 }
 
@@ -149,7 +149,7 @@
     munmap(space, mmap_size);
     return NULL;
   }
-  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, space, stack_guard_size, "thread stack guard page");
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, space, stack_guard_size, "thread stack guard");
 
   return space;
 }
@@ -161,7 +161,9 @@
   if (attr->stack_base == NULL) {
     // The caller didn't provide a stack, so allocate one.
     // Make sure the stack size and guard size are multiples of PAGE_SIZE.
-    mmap_size = BIONIC_ALIGN(attr->stack_size + sizeof(pthread_internal_t), PAGE_SIZE);
+    if (__builtin_add_overflow(attr->stack_size, attr->guard_size, &mmap_size)) return EAGAIN;
+    if (__builtin_add_overflow(mmap_size, sizeof(pthread_internal_t), &mmap_size)) return EAGAIN;
+    mmap_size = BIONIC_ALIGN(mmap_size, PAGE_SIZE);
     attr->guard_size = BIONIC_ALIGN(attr->guard_size, PAGE_SIZE);
     attr->stack_base = __create_thread_mapped_space(mmap_size, attr->guard_size);
     if (attr->stack_base == NULL) {
@@ -176,7 +178,7 @@
 
   // Mapped space(or user allocated stack) is used for:
   //   pthread_internal_t
-  //   thread stack (including guard page)
+  //   thread stack (including guard)
 
   // To safely access the pthread_internal_t and thread stack, we need to find a 16-byte aligned boundary.
   stack_top = reinterpret_cast<uint8_t*>(
diff --git a/libc/bionic/pthread_exit.cpp b/libc/bionic/pthread_exit.cpp
index 9adf405..ac29c1d 100644
--- a/libc/bionic/pthread_exit.cpp
+++ b/libc/bionic/pthread_exit.cpp
@@ -93,8 +93,8 @@
   }
 
   // Unmap the bionic TLS, including guard pages.
-  void* allocation = reinterpret_cast<char*>(thread->bionic_tls) - PAGE_SIZE;
-  munmap(allocation, BIONIC_TLS_SIZE + 2 * PAGE_SIZE);
+  void* allocation = reinterpret_cast<char*>(thread->bionic_tls) - PTHREAD_GUARD_SIZE;
+  munmap(allocation, BIONIC_TLS_SIZE + 2 * PTHREAD_GUARD_SIZE);
 
   ThreadJoinState old_state = THREAD_NOT_JOINED;
   while (old_state == THREAD_NOT_JOINED &&
diff --git a/libc/bionic/pthread_internal.h b/libc/bionic/pthread_internal.h
index 6faf5a4..77bdd85 100644
--- a/libc/bionic/pthread_internal.h
+++ b/libc/bionic/pthread_internal.h
@@ -141,25 +141,29 @@
 
 __LIBC_HIDDEN__ void pthread_key_clean_all(void);
 
-// SIGSTKSZ (8kB) is not big enough.
-// snprintf to a stack buffer of size PATH_MAX consumes ~7kB of stack.
-// Also, on 64-bit, logging uses more than 8kB by itself:
-// https://code.google.com/p/android/issues/detail?id=187064
-#define SIGNAL_STACK_SIZE_WITHOUT_GUARD_PAGE (16 * 1024)
+// Address space is precious on LP32, so use the minimum unit: one page.
+// On LP64, we could use more but there's no obvious advantage to doing
+// so, and the various media processes use RLIMIT_AS as a way to limit
+// the amount of allocation they'll do.
+#define PTHREAD_GUARD_SIZE PAGE_SIZE
 
-/*
- * Traditionally we gave threads a 1MiB stack. When we started
- * allocating per-thread alternate signal stacks to ease debugging of
- * stack overflows, we subtracted the same amount we were using there
- * from the default thread stack size. This should keep memory usage
- * roughly constant.
- */
-#define PTHREAD_STACK_SIZE_DEFAULT ((1 * 1024 * 1024) - SIGNAL_STACK_SIZE_WITHOUT_GUARD_PAGE)
+// SIGSTKSZ (8KiB) is not big enough.
+// An snprintf to a stack buffer of size PATH_MAX consumes ~7KiB of stack.
+// Also, on 64-bit, logging uses more than 8KiB by itself:
+// https://code.google.com/p/android/issues/detail?id=187064
+#define SIGNAL_STACK_SIZE_WITHOUT_GUARD (16 * 1024)
+
+// Traditionally we gave threads a 1MiB stack. When we started
+// allocating per-thread alternate signal stacks to ease debugging of
+// stack overflows, we subtracted the same amount we were using there
+// from the default thread stack size. This should keep memory usage
+// roughly constant.
+#define PTHREAD_STACK_SIZE_DEFAULT ((1 * 1024 * 1024) - SIGNAL_STACK_SIZE_WITHOUT_GUARD)
 
 // Leave room for a guard page in the internally created signal stacks.
-#define SIGNAL_STACK_SIZE (SIGNAL_STACK_SIZE_WITHOUT_GUARD_PAGE + PAGE_SIZE)
+#define SIGNAL_STACK_SIZE (SIGNAL_STACK_SIZE_WITHOUT_GUARD + PTHREAD_GUARD_SIZE)
 
-/* Needed by fork. */
+// Needed by fork.
 __LIBC_HIDDEN__ extern void __bionic_atfork_run_prepare();
 __LIBC_HIDDEN__ extern void __bionic_atfork_run_child();
 __LIBC_HIDDEN__ extern void __bionic_atfork_run_parent();