Update to v4.15 kernel headers.

Test: Compiles and boots on bullhead/hikey960.
Change-Id: I118beb8b6cac0881b1270f9bf6981959297a41a8
diff --git a/libc/kernel/uapi/asm-arm64/asm/bpf_perf_event.h b/libc/kernel/uapi/asm-arm64/asm/bpf_perf_event.h
new file mode 100644
index 0000000..cad04bf
--- /dev/null
+++ b/libc/kernel/uapi/asm-arm64/asm/bpf_perf_event.h
@@ -0,0 +1,23 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+#include <asm/ptrace.h>
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+#endif
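
The new asm/bpf_perf_event.h only maps bpf_user_pt_regs_t onto arm64's struct user_pt_regs (via asm/ptrace.h). Illustration only, not part of this change: a minimal sketch of code consuming that typedef, assuming the usual user_pt_regs fields (regs[31], sp, pc, pstate):

#include <asm/bpf_perf_event.h>  /* bpf_user_pt_regs_t */

/* Sketch: given a register snapshot attached to a perf event sample,
 * return the interrupted program counter.  On arm64 bpf_user_pt_regs_t
 * is struct user_pt_regs, so .pc is available directly. */
static unsigned long long sampled_pc(const bpf_user_pt_regs_t *regs) {
  return regs->pc;
}
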
diff --git a/libc/kernel/uapi/asm-arm64/asm/hwcap.h b/libc/kernel/uapi/asm-arm64/asm/hwcap.h
index 60b8305..4e95b95 100644
--- a/libc/kernel/uapi/asm-arm64/asm/hwcap.h
+++ b/libc/kernel/uapi/asm-arm64/asm/hwcap.h
@@ -35,4 +35,10 @@
 #define HWCAP_FCMA (1 << 14)
 #define HWCAP_LRCPC (1 << 15)
 #define HWCAP_DCPOP (1 << 16)
+#define HWCAP_SHA3 (1 << 17)
+#define HWCAP_SM3 (1 << 18)
+#define HWCAP_SM4 (1 << 19)
+#define HWCAP_ASIMDDP (1 << 20)
+#define HWCAP_SHA512 (1 << 21)
+#define HWCAP_SVE (1 << 22)
 #endif
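
The six new HWCAP_* bits are reported to userspace through the AT_HWCAP auxv entry. Illustration only (getauxval/AT_HWCAP are not part of this change): a minimal sketch probing two of the new bits:

#include <stdio.h>
#include <sys/auxv.h>   /* getauxval, AT_HWCAP */
#include <asm/hwcap.h>  /* HWCAP_SVE, HWCAP_ASIMDDP, ... */

int main(void) {
  unsigned long hwcaps = getauxval(AT_HWCAP);

  /* Sketch: report whether the kernel advertises SVE and the
   * ARMv8.2 dot-product instructions on this device. */
  printf("SVE:     %s\n", (hwcaps & HWCAP_SVE) ? "yes" : "no");
  printf("ASIMDDP: %s\n", (hwcaps & HWCAP_ASIMDDP) ? "yes" : "no");
  return 0;
}
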
diff --git a/libc/kernel/uapi/asm-arm64/asm/kvm.h b/libc/kernel/uapi/asm-arm64/asm/kvm.h
index a68a8a2..c4178a1 100644
--- a/libc/kernel/uapi/asm-arm64/asm/kvm.h
+++ b/libc/kernel/uapi/asm-arm64/asm/kvm.h
@@ -117,6 +117,9 @@
 #define ARM64_SYS_REG_SHIFT_MASK(x,n) (((x) << KVM_REG_ARM64_SYSREG_ ##n ##_SHIFT) & KVM_REG_ARM64_SYSREG_ ##n ##_MASK)
 #define __ARM64_SYS_REG(op0,op1,crn,crm,op2) (KVM_REG_ARM64 | KVM_REG_ARM64_SYSREG | ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | ARM64_SYS_REG_SHIFT_MASK(op2, OP2))
 #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
+#define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1)
 #define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -144,6 +147,7 @@
 #define KVM_DEV_ARM_ITS_SAVE_TABLES 1
 #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define KVM_DEV_ARM_ITS_CTRL_RESET 4
 #define KVM_ARM_VCPU_PMU_V3_CTRL 0
 #define KVM_ARM_VCPU_PMU_V3_IRQ 0
 #define KVM_ARM_VCPU_PMU_V3_INIT 1
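
KVM_REG_ARM_PTIMER_{CTL,CVAL,CNT} are ordinary ONE_REG IDs, so a VMM accesses them through the existing KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls; struct kvm_one_reg and the ioctl numbers come from linux/kvm.h and are not part of this change. Minimal sketch, assuming an already-created vCPU fd:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>  /* struct kvm_one_reg, KVM_GET_ONE_REG; pulls in asm/kvm.h */

/* Sketch: read the guest's physical counter via the new ONE_REG ID.
 * Returns the ioctl result: 0 on success, -1 (errno set) on failure. */
static int read_ptimer_cnt(int vcpu_fd, uint64_t *cnt) {
  struct kvm_one_reg reg = {
    .id = KVM_REG_ARM_PTIMER_CNT,
    .addr = (uint64_t)(uintptr_t)cnt,
  };
  return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
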
diff --git a/libc/kernel/uapi/asm-arm64/asm/ptrace.h b/libc/kernel/uapi/asm-arm64/asm/ptrace.h
index 0eccf79..4e6c755 100644
--- a/libc/kernel/uapi/asm-arm64/asm/ptrace.h
+++ b/libc/kernel/uapi/asm-arm64/asm/ptrace.h
@@ -20,6 +20,7 @@
 #define _UAPI__ASM_PTRACE_H
 #include <linux/types.h>
 #include <asm/hwcap.h>
+#include <asm/sigcontext.h>
 #define PSR_MODE_EL0t 0x00000000
 #define PSR_MODE_EL1t 0x00000004
 #define PSR_MODE_EL1h 0x00000005
@@ -35,7 +36,6 @@
 #define PSR_D_BIT 0x00000200
 #define PSR_PAN_BIT 0x00400000
 #define PSR_UAO_BIT 0x00800000
-#define PSR_Q_BIT 0x08000000
 #define PSR_V_BIT 0x10000000
 #define PSR_C_BIT 0x20000000
 #define PSR_Z_BIT 0x40000000
@@ -45,6 +45,7 @@
 #define PSR_x 0x0000ff00
 #define PSR_c 0x000000ff
 #ifndef __ASSEMBLY__
+#include <linux/prctl.h>
 struct user_pt_regs {
   __u64 regs[31];
   __u64 sp;
@@ -66,5 +67,39 @@
     __u32 pad;
   } dbg_regs[16];
 };
+struct user_sve_header {
+  __u32 size;
+  __u32 max_size;
+  __u16 vl;
+  __u16 max_vl;
+  __u16 flags;
+  __u16 __reserved;
+};
+#define SVE_PT_REGS_MASK (1 << 0)
+#define SVE_PT_REGS_FPSIMD 0
+#define SVE_PT_REGS_SVE SVE_PT_REGS_MASK
+#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16)
+#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16)
+#define SVE_PT_REGS_OFFSET ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_FPSIMD_OFFSET SVE_PT_REGS_OFFSET
+#define SVE_PT_FPSIMD_SIZE(vq,flags) (sizeof(struct user_fpsimd_state))
+#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq)
+#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
+#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq)
+#define SVE_PT_SVE_FPSR_SIZE sizeof(__u32)
+#define SVE_PT_SVE_FPCR_SIZE sizeof(__u32)
+#define __SVE_SIG_TO_PT(offset) ((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
+#define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET
+#define SVE_PT_SVE_ZREGS_OFFSET __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
+#define SVE_PT_SVE_ZREG_OFFSET(vq,n) __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
+#define SVE_PT_SVE_ZREGS_SIZE(vq) (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
+#define SVE_PT_SVE_PREGS_OFFSET(vq) __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
+#define SVE_PT_SVE_PREG_OFFSET(vq,n) __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
+#define SVE_PT_SVE_PREGS_SIZE(vq) (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_PT_SVE_PREGS_OFFSET(vq))
+#define SVE_PT_SVE_FFR_OFFSET(vq) __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
+#define SVE_PT_SVE_FPSR_OFFSET(vq) ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + (SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_SVE_FPCR_OFFSET(vq) (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
+#define SVE_PT_SVE_SIZE(vq,flags) ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE - SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_SIZE(vq,flags) (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))
 #endif
 #endif
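
The new user_sve_header / SVE_PT_* layout describes what PTRACE_GETREGSET returns for the SVE register set. Minimal sketch of fetching just the header to learn a stopped tracee's vector length; PTRACE_GETREGSET, struct iovec and NT_ARM_SVE (from linux/elf.h) are assumptions outside this change:

#include <stdint.h>      /* uintptr_t */
#include <sys/ptrace.h>  /* ptrace, PTRACE_GETREGSET */
#include <sys/types.h>   /* pid_t */
#include <sys/uio.h>     /* struct iovec */
#include <linux/elf.h>   /* NT_ARM_SVE */
#include <asm/ptrace.h>  /* struct user_sve_header, SVE_PT_* */

/* Sketch: read only the fixed-size header; hdr->vl then gives the
 * current vector length and hdr->size the buffer needed for the
 * full regset (per SVE_PT_SIZE()). */
static long read_sve_header(pid_t pid, struct user_sve_header *hdr) {
  struct iovec iov = { .iov_base = hdr, .iov_len = sizeof(*hdr) };
  return ptrace(PTRACE_GETREGSET, pid, (void *)(uintptr_t)NT_ARM_SVE, &iov);
}
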
diff --git a/libc/kernel/uapi/asm-arm64/asm/sigcontext.h b/libc/kernel/uapi/asm-arm64/asm/sigcontext.h
index be2464e..b0617de 100644
--- a/libc/kernel/uapi/asm-arm64/asm/sigcontext.h
+++ b/libc/kernel/uapi/asm-arm64/asm/sigcontext.h
@@ -18,6 +18,7 @@
  ****************************************************************************/
 #ifndef _UAPI__ASM_SIGCONTEXT_H
 #define _UAPI__ASM_SIGCONTEXT_H
+#ifndef __ASSEMBLY__
 #include <linux/types.h>
 struct sigcontext {
   __u64 fault_address;
@@ -50,4 +51,34 @@
   __u32 size;
   __u32 __reserved[3];
 };
+#define SVE_MAGIC 0x53564501
+struct sve_context {
+  struct _aarch64_ctx head;
+  __u16 vl;
+  __u16 __reserved[3];
+};
+#endif
+#define SVE_VQ_BYTES 16
+#define SVE_VQ_MIN 1
+#define SVE_VQ_MAX 512
+#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
+#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)
+#define SVE_NUM_ZREGS 32
+#define SVE_NUM_PREGS 16
+#define sve_vl_valid(vl) ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
+#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
+#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES)
+#define SVE_SIG_ZREG_SIZE(vq) ((__u32) (vq) * SVE_VQ_BYTES)
+#define SVE_SIG_PREG_SIZE(vq) ((__u32) (vq) * (SVE_VQ_BYTES / 8))
+#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
+#define SVE_SIG_REGS_OFFSET ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREG_OFFSET(vq,n) (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
+#define SVE_SIG_ZREGS_SIZE(vq) (SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+#define SVE_SIG_PREGS_OFFSET(vq) (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+#define SVE_SIG_PREG_OFFSET(vq,n) (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
+#define SVE_SIG_PREGS_SIZE(vq) (SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+#define SVE_SIG_FFR_OFFSET(vq) (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+#define SVE_SIG_REGS_SIZE(vq) (SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
+#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
 #endif
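
The SVE register state reaches signal handlers as an extra _aarch64_ctx record tagged SVE_MAGIC in the mcontext __reserved area, sized per SVE_SIG_CONTEXT_SIZE(vq). Minimal sketch of locating that record; the arm64 mcontext_t layout with its __reserved[] byte array comes from the libc side and is an assumption, not part of this diff:

#include <string.h>          /* memcpy */
#include <ucontext.h>        /* ucontext_t */
#include <asm/sigcontext.h>  /* struct _aarch64_ctx, struct sve_context, SVE_MAGIC */

/* Sketch: walk the _aarch64_ctx record list in the signal frame.
 * Each record starts with { __u32 magic; __u32 size; }; magic == 0
 * terminates the list.  Returns the SVE record, or NULL if absent. */
static struct sve_context *find_sve_context(ucontext_t *uc) {
  unsigned char *p = (unsigned char *)uc->uc_mcontext.__reserved;
  for (;;) {
    struct _aarch64_ctx head;
    memcpy(&head, p, sizeof(head));
    if (head.magic == 0)
      return NULL;                      /* terminator: no SVE record */
    if (head.magic == SVE_MAGIC)
      return (struct sve_context *)p;
    if (head.size < sizeof(head))
      return NULL;                      /* malformed frame; bail out */
    p += head.size;
  }
}
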