Update to v5.0 kernel headers.

Test: Builds and boots on taimen.
Change-Id: I13843bf1ab30ea89a50852adc88f2cba8401bded
diff --git a/libc/kernel/uapi/asm-arm64/asm/sve_context.h b/libc/kernel/uapi/asm-arm64/asm/sve_context.h
new file mode 100644
index 0000000..ff0063e
--- /dev/null
+++ b/libc/kernel/uapi/asm-arm64/asm/sve_context.h
@@ -0,0 +1,42 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
 ***   Any manual change here will be lost the next time this script
 ***   is run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef _UAPI__ASM_SVE_CONTEXT_H
+#define _UAPI__ASM_SVE_CONTEXT_H
+#include <linux/types.h>
+#define __SVE_VQ_BYTES 16
+#define __SVE_VQ_MIN 1
+#define __SVE_VQ_MAX 512
+#define __SVE_VL_MIN (__SVE_VQ_MIN * __SVE_VQ_BYTES)
+#define __SVE_VL_MAX (__SVE_VQ_MAX * __SVE_VQ_BYTES)
+#define __SVE_NUM_ZREGS 32
+#define __SVE_NUM_PREGS 16
+#define __sve_vl_valid(vl) ((vl) % __SVE_VQ_BYTES == 0 && (vl) >= __SVE_VL_MIN && (vl) <= __SVE_VL_MAX)
+#define __sve_vq_from_vl(vl) ((vl) / __SVE_VQ_BYTES)
+#define __sve_vl_from_vq(vq) ((vq) * __SVE_VQ_BYTES)
+#define __SVE_ZREG_SIZE(vq) ((__u32) (vq) * __SVE_VQ_BYTES)
+#define __SVE_PREG_SIZE(vq) ((__u32) (vq) * (__SVE_VQ_BYTES / 8))
+#define __SVE_FFR_SIZE(vq) __SVE_PREG_SIZE(vq)
+#define __SVE_ZREGS_OFFSET 0
+#define __SVE_ZREG_OFFSET(vq,n) (__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
+#define __SVE_ZREGS_SIZE(vq) (__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)
+#define __SVE_PREGS_OFFSET(vq) (__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
+#define __SVE_PREG_OFFSET(vq,n) (__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n))
+#define __SVE_PREGS_SIZE(vq) (__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq))
+#define __SVE_FFR_OFFSET(vq) (__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq))
+#endif
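
The new header only defines layout macros: per these definitions, SVE state is
packed as 32 Z (vector) registers at offset 0, then 16 P (predicate) registers,
then FFR, with all sizes derived from the vector length. A minimal standalone
sketch of how userspace might use the macros, assuming an arm64 toolchain where
the header is reachable as <asm/sve_context.h> (adjust to your sysroot); the
64-byte vector length is only an example value:

#include <stdio.h>
#include <asm/sve_context.h> /* assumed include path for the new header */

int main(void) {
  unsigned int vl = 64; /* example vector length in bytes (512-bit vectors) */

  /* VL must be a multiple of __SVE_VQ_BYTES within [__SVE_VL_MIN, __SVE_VL_MAX]. */
  if (!__sve_vl_valid(vl)) return 1;

  unsigned int vq = __sve_vq_from_vl(vl); /* length in 128-bit quadwords: 4 */

  /* 32 Z registers of vl bytes each, starting at offset 0. */
  printf("Z block: offset %u, %u bytes per reg, %u bytes total\n",
         __SVE_ZREG_OFFSET(vq, 0), __SVE_ZREG_SIZE(vq), __SVE_ZREGS_SIZE(vq));

  /* 16 P registers of vl/8 bytes each follow the Z block, then FFR. */
  printf("P block: offset %u, %u bytes per reg; FFR at offset %u\n",
         __SVE_PREGS_OFFSET(vq), __SVE_PREG_SIZE(vq), __SVE_FFR_OFFSET(vq));

  return 0;
}

With vl = 64 this prints a 2048-byte Z block, 8-byte predicate registers
(__SVE_PREG_SIZE(vq) == vl / 8), and FFR at offset 2176.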