Update to v6.8 kernel headers.

Kernel headers coming from:

Git: https://android.googlesource.com/kernel/common/
Branch: android-mainline
Tag: android-mainline-6.8

Test: Builds and bionic unit tests pass on raven.
Test: Able to log in to an Android Go 32-bit device.
Change-Id: I0022cdb20c19726f526acaab2866f1e25794b77e
diff --git a/libc/kernel/uapi/drm/drm.h b/libc/kernel/uapi/drm/drm.h
index 0fe5c26..c8fab3c 100644
--- a/libc/kernel/uapi/drm/drm.h
+++ b/libc/kernel/uapi/drm/drm.h
@@ -353,6 +353,7 @@
 #define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
 #define DRM_CAP_SYNCOBJ 0x13
 #define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
 struct drm_get_cap {
   __u64 capability;
   __u64 value;
@@ -362,6 +363,7 @@
 #define DRM_CLIENT_CAP_ATOMIC 3
 #define DRM_CLIENT_CAP_ASPECT_RATIO 4
 #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
 struct drm_set_client_cap {
   __u64 capability;
   __u64 value;
@@ -401,6 +403,7 @@
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3)
 struct drm_syncobj_wait {
   __u64 handles;
   __s64 timeout_nsec;
@@ -408,6 +411,7 @@
   __u32 flags;
   __u32 first_signaled;
   __u32 pad;
+  __u64 deadline_nsec;
 };
 struct drm_syncobj_timeline_wait {
   __u64 handles;
@@ -417,6 +421,7 @@
   __u32 flags;
   __u32 first_signaled;
   __u32 pad;
+  __u64 deadline_nsec;
 };
 struct drm_syncobj_eventfd {
   __u32 handle;
@@ -570,6 +575,7 @@
 #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
 #define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
 #define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
 #define DRM_COMMAND_BASE 0x40
 #define DRM_COMMAND_END 0xA0
 struct drm_event {
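For reference, a minimal userspace sketch of probing the new DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP capability through the existing DRM_IOCTL_GET_CAP ioctl and the struct drm_get_cap shown above. The /dev/dri/card0 node path and the <drm/drm.h> include path are assumptions for illustration only, not something this change defines.

/* Sketch: ask the driver whether it reports atomic async page flip support. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>  /* assumed install location of the uapi header */

int main(void) {
  int fd = open("/dev/dri/card0", O_RDWR);  /* assumed device node */
  if (fd < 0) {
    perror("open");
    return 1;
  }
  struct drm_get_cap cap = { .capability = DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP };
  if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0) {
    printf("DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP = %llu\n",
           (unsigned long long) cap.value);
  }
  close(fd);
  return 0;
}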
diff --git a/libc/kernel/uapi/drm/drm_mode.h b/libc/kernel/uapi/drm/drm_mode.h
index f7b65b2..9e6296c 100644
--- a/libc/kernel/uapi/drm/drm_mode.h
+++ b/libc/kernel/uapi/drm/drm_mode.h
@@ -357,6 +357,9 @@
 struct drm_color_ctm {
   __u64 matrix[9];
 };
+struct drm_color_ctm_3x4 {
+  __u64 matrix[12];
+};
 struct drm_color_lut {
   __u16 red;
   __u16 green;
@@ -483,6 +486,10 @@
   __s32 x2;
   __s32 y2;
 };
+struct drm_mode_closefb {
+  __u32 fb_id;
+  __u32 pad;
+};
 #ifdef __cplusplus
 }
 #endif
diff --git a/libc/kernel/uapi/drm/habanalabs_accel.h b/libc/kernel/uapi/drm/habanalabs_accel.h
index 158e937..47afb82 100644
--- a/libc/kernel/uapi/drm/habanalabs_accel.h
+++ b/libc/kernel/uapi/drm/habanalabs_accel.h
@@ -656,6 +656,7 @@
 #define HL_INFO_HW_ERR_EVENT 36
 #define HL_INFO_FW_ERR_EVENT 37
 #define HL_INFO_USER_ENGINE_ERR_EVENT 38
+#define HL_INFO_DEV_SIGNED 40
 #define HL_INFO_VERSION_MAX_LEN 128
 #define HL_INFO_CARD_NAME_MAX_LEN 16
 #define HL_ENGINES_DATA_MAX_SIZE SZ_1M
@@ -849,6 +850,7 @@
 #define SEC_SIGNATURE_BUF_SZ 255
 #define SEC_PUB_DATA_BUF_SZ 510
 #define SEC_CERTIFICATE_BUF_SZ 2046
+#define SEC_DEV_INFO_BUF_SZ 5120
 struct hl_info_sec_attest {
   __u32 nonce;
   __u16 pcr_quote_len;
@@ -864,6 +866,18 @@
   __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
   __u8 pad0[2];
 };
+struct hl_info_signed {
+  __u32 nonce;
+  __u16 pub_data_len;
+  __u16 certificate_len;
+  __u8 info_sig_len;
+  __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+  __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+  __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+  __u16 dev_info_len;
+  __u8 dev_info[SEC_DEV_INFO_BUF_SZ];
+  __u8 pad[2];
+};
 struct hl_page_fault_info {
   __s64 timestamp;
   __u64 addr;
diff --git a/libc/kernel/uapi/drm/ivpu_accel.h b/libc/kernel/uapi/drm/ivpu_accel.h
index ca2014f..fcbf6f7 100644
--- a/libc/kernel/uapi/drm/ivpu_accel.h
+++ b/libc/kernel/uapi/drm/ivpu_accel.h
@@ -43,6 +43,11 @@
 #define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
 #define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
 #define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
+#define DRM_IVPU_JOB_PRIORITY_DEFAULT 0
+#define DRM_IVPU_JOB_PRIORITY_IDLE 1
+#define DRM_IVPU_JOB_PRIORITY_NORMAL 2
+#define DRM_IVPU_JOB_PRIORITY_FOCUS 3
+#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
 #define DRM_IVPU_CAP_METRIC_STREAMER 1
 #define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2
 struct drm_ivpu_param {
@@ -80,8 +85,10 @@
   __u32 engine;
   __u32 flags;
   __u32 commands_offset;
+  __u32 priority;
 };
 #define DRM_IVPU_JOB_STATUS_SUCCESS 0
+#define DRM_IVPU_JOB_STATUS_ABORTED 256
 struct drm_ivpu_bo_wait {
   __u32 handle;
   __u32 flags;
diff --git a/libc/kernel/uapi/drm/msm_drm.h b/libc/kernel/uapi/drm/msm_drm.h
index d180f70..4d83744 100644
--- a/libc/kernel/uapi/drm/msm_drm.h
+++ b/libc/kernel/uapi/drm/msm_drm.h
@@ -36,6 +36,7 @@
 #define MSM_PARAM_CMDLINE 0x0d
 #define MSM_PARAM_VA_START 0x0e
 #define MSM_PARAM_VA_SIZE 0x0f
+#define MSM_PARAM_HIGHEST_BANK_BIT 0x10
 #define MSM_PARAM_NR_RINGS MSM_PARAM_PRIORITIES
 struct drm_msm_param {
   __u32 pipe;
@@ -63,6 +64,8 @@
 #define MSM_INFO_GET_NAME 0x03
 #define MSM_INFO_SET_IOVA 0x04
 #define MSM_INFO_GET_FLAGS 0x05
+#define MSM_INFO_SET_METADATA 0x06
+#define MSM_INFO_GET_METADATA 0x07
 struct drm_msm_gem_info {
   __u32 handle;
   __u32 info;
diff --git a/libc/kernel/uapi/drm/nouveau_drm.h b/libc/kernel/uapi/drm/nouveau_drm.h
index 221ff21..f7d870e 100644
--- a/libc/kernel/uapi/drm/nouveau_drm.h
+++ b/libc/kernel/uapi/drm/nouveau_drm.h
@@ -23,6 +23,8 @@
 #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
 #define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
 #define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
+#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
+#define NOUVEAU_GETPARAM_VRAM_USED 19
 struct drm_nouveau_getparam {
   __u64 param;
   __u64 value;
diff --git a/libc/kernel/uapi/drm/pvr_drm.h b/libc/kernel/uapi/drm/pvr_drm.h
new file mode 100644
index 0000000..c68c719
--- /dev/null
+++ b/libc/kernel/uapi/drm/pvr_drm.h
@@ -0,0 +1,271 @@
+/*
+ * This file is auto-generated. Modifications will be lost.
+ *
+ * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
+ * for more information.
+ */
+#ifndef PVR_DRM_UAPI_H
+#define PVR_DRM_UAPI_H
+#include "drm.h"
+#include <linux/const.h>
+#include <linux/types.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+struct drm_pvr_obj_array {
+  __u32 stride;
+  __u32 count;
+  __u64 array;
+};
+#define DRM_PVR_OBJ_ARRAY(cnt,ptr) {.stride = sizeof((ptr)[0]),.count = (cnt),.array = (__u64) (uintptr_t) (ptr) }
+#define PVR_IOCTL(_ioctl,_mode,_data) _mode(DRM_COMMAND_BASE + (_ioctl), struct drm_pvr_ioctl_ ##_data ##_args)
+#define DRM_IOCTL_PVR_DEV_QUERY PVR_IOCTL(0x00, DRM_IOWR, dev_query)
+#define DRM_IOCTL_PVR_CREATE_BO PVR_IOCTL(0x01, DRM_IOWR, create_bo)
+#define DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET PVR_IOCTL(0x02, DRM_IOWR, get_bo_mmap_offset)
+#define DRM_IOCTL_PVR_CREATE_VM_CONTEXT PVR_IOCTL(0x03, DRM_IOWR, create_vm_context)
+#define DRM_IOCTL_PVR_DESTROY_VM_CONTEXT PVR_IOCTL(0x04, DRM_IOW, destroy_vm_context)
+#define DRM_IOCTL_PVR_VM_MAP PVR_IOCTL(0x05, DRM_IOW, vm_map)
+#define DRM_IOCTL_PVR_VM_UNMAP PVR_IOCTL(0x06, DRM_IOW, vm_unmap)
+#define DRM_IOCTL_PVR_CREATE_CONTEXT PVR_IOCTL(0x07, DRM_IOWR, create_context)
+#define DRM_IOCTL_PVR_DESTROY_CONTEXT PVR_IOCTL(0x08, DRM_IOW, destroy_context)
+#define DRM_IOCTL_PVR_CREATE_FREE_LIST PVR_IOCTL(0x09, DRM_IOWR, create_free_list)
+#define DRM_IOCTL_PVR_DESTROY_FREE_LIST PVR_IOCTL(0x0a, DRM_IOW, destroy_free_list)
+#define DRM_IOCTL_PVR_CREATE_HWRT_DATASET PVR_IOCTL(0x0b, DRM_IOWR, create_hwrt_dataset)
+#define DRM_IOCTL_PVR_DESTROY_HWRT_DATASET PVR_IOCTL(0x0c, DRM_IOW, destroy_hwrt_dataset)
+#define DRM_IOCTL_PVR_SUBMIT_JOBS PVR_IOCTL(0x0d, DRM_IOW, submit_jobs)
+struct drm_pvr_dev_query_gpu_info {
+  __u64 gpu_id;
+  __u32 num_phantoms;
+  __u32 _padding_c;
+};
+struct drm_pvr_dev_query_runtime_info {
+  __u64 free_list_min_pages;
+  __u64 free_list_max_pages;
+  __u32 common_store_alloc_region_size;
+  __u32 common_store_partition_space_size;
+  __u32 max_coeffs;
+  __u32 cdm_max_local_mem_size_regs;
+};
+struct drm_pvr_dev_query_quirks {
+  __u64 quirks;
+  __u16 count;
+  __u16 musthave_count;
+  __u32 _padding_c;
+};
+struct drm_pvr_dev_query_enhancements {
+  __u64 enhancements;
+  __u16 count;
+  __u16 _padding_a;
+  __u32 _padding_c;
+};
+enum drm_pvr_heap_id {
+  DRM_PVR_HEAP_GENERAL = 0,
+  DRM_PVR_HEAP_PDS_CODE_DATA,
+  DRM_PVR_HEAP_USC_CODE,
+  DRM_PVR_HEAP_RGNHDR,
+  DRM_PVR_HEAP_VIS_TEST,
+  DRM_PVR_HEAP_TRANSFER_FRAG,
+  DRM_PVR_HEAP_COUNT
+};
+struct drm_pvr_heap {
+  __u64 base;
+  __u64 size;
+  __u32 flags;
+  __u32 page_size_log2;
+};
+struct drm_pvr_dev_query_heap_info {
+  struct drm_pvr_obj_array heaps;
+};
+enum drm_pvr_static_data_area_usage {
+  DRM_PVR_STATIC_DATA_AREA_EOT = 0,
+  DRM_PVR_STATIC_DATA_AREA_FENCE,
+  DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+  DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+};
+struct drm_pvr_static_data_area {
+  __u16 area_usage;
+  __u16 location_heap_id;
+  __u32 size;
+  __u64 offset;
+};
+struct drm_pvr_dev_query_static_data_areas {
+  struct drm_pvr_obj_array static_data_areas;
+};
+enum drm_pvr_dev_query {
+  DRM_PVR_DEV_QUERY_GPU_INFO_GET = 0,
+  DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
+  DRM_PVR_DEV_QUERY_QUIRKS_GET,
+  DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
+  DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
+  DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
+};
+struct drm_pvr_ioctl_dev_query_args {
+  __u32 type;
+  __u32 size;
+  __u64 pointer;
+};
+#define DRM_PVR_BO_BYPASS_DEVICE_CACHE _BITULL(0)
+#define DRM_PVR_BO_PM_FW_PROTECT _BITULL(1)
+#define DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS _BITULL(2)
+#define DRM_PVR_BO_FLAGS_MASK (DRM_PVR_BO_BYPASS_DEVICE_CACHE | DRM_PVR_BO_PM_FW_PROTECT | DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS)
+struct drm_pvr_ioctl_create_bo_args {
+  __u64 size;
+  __u32 handle;
+  __u32 _padding_c;
+  __u64 flags;
+};
+struct drm_pvr_ioctl_get_bo_mmap_offset_args {
+  __u32 handle;
+  __u32 _padding_4;
+  __u64 offset;
+};
+struct drm_pvr_ioctl_create_vm_context_args {
+  __u32 handle;
+  __u32 _padding_4;
+};
+struct drm_pvr_ioctl_destroy_vm_context_args {
+  __u32 handle;
+  __u32 _padding_4;
+};
+struct drm_pvr_ioctl_vm_map_args {
+  __u32 vm_context_handle;
+  __u32 flags;
+  __u64 device_addr;
+  __u32 handle;
+  __u32 _padding_14;
+  __u64 offset;
+  __u64 size;
+};
+struct drm_pvr_ioctl_vm_unmap_args {
+  __u32 vm_context_handle;
+  __u32 _padding_4;
+  __u64 device_addr;
+  __u64 size;
+};
+enum drm_pvr_ctx_priority {
+  DRM_PVR_CTX_PRIORITY_LOW = - 512,
+  DRM_PVR_CTX_PRIORITY_NORMAL = 0,
+  DRM_PVR_CTX_PRIORITY_HIGH = 512,
+};
+enum drm_pvr_ctx_type {
+  DRM_PVR_CTX_TYPE_RENDER = 0,
+  DRM_PVR_CTX_TYPE_COMPUTE,
+  DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
+};
+struct drm_pvr_ioctl_create_context_args {
+  __u32 type;
+  __u32 flags;
+  __s32 priority;
+  __u32 handle;
+  __u64 static_context_state;
+  __u32 static_context_state_len;
+  __u32 vm_context_handle;
+  __u64 callstack_addr;
+};
+struct drm_pvr_ioctl_destroy_context_args {
+  __u32 handle;
+  __u32 _padding_4;
+};
+struct drm_pvr_ioctl_create_free_list_args {
+  __u64 free_list_gpu_addr;
+  __u32 initial_num_pages;
+  __u32 max_num_pages;
+  __u32 grow_num_pages;
+  __u32 grow_threshold;
+  __u32 vm_context_handle;
+  __u32 handle;
+};
+struct drm_pvr_ioctl_destroy_free_list_args {
+  __u32 handle;
+  __u32 _padding_4;
+};
+struct drm_pvr_create_hwrt_geom_data_args {
+  __u64 tpc_dev_addr;
+  __u32 tpc_size;
+  __u32 tpc_stride;
+  __u64 vheap_table_dev_addr;
+  __u64 rtc_dev_addr;
+};
+struct drm_pvr_create_hwrt_rt_data_args {
+  __u64 pm_mlist_dev_addr;
+  __u64 macrotile_array_dev_addr;
+  __u64 region_header_dev_addr;
+};
+#define PVR_DRM_HWRT_FREE_LIST_LOCAL 0
+#define PVR_DRM_HWRT_FREE_LIST_GLOBAL 1U
+struct drm_pvr_ioctl_create_hwrt_dataset_args {
+  struct drm_pvr_create_hwrt_geom_data_args geom_data_args;
+  struct drm_pvr_create_hwrt_rt_data_args rt_data_args[2];
+  __u32 free_list_handles[2];
+  __u32 width;
+  __u32 height;
+  __u32 samples;
+  __u32 layers;
+  __u32 isp_merge_lower_x;
+  __u32 isp_merge_lower_y;
+  __u32 isp_merge_scale_x;
+  __u32 isp_merge_scale_y;
+  __u32 isp_merge_upper_x;
+  __u32 isp_merge_upper_y;
+  __u32 region_header_size;
+  __u32 handle;
+};
+struct drm_pvr_ioctl_destroy_hwrt_dataset_args {
+  __u32 handle;
+  __u32 _padding_4;
+};
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK 0xf
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ 0
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ 1
+#define DRM_PVR_SYNC_OP_FLAG_SIGNAL _BITULL(31)
+#define DRM_PVR_SYNC_OP_FLAG_WAIT 0
+#define DRM_PVR_SYNC_OP_FLAGS_MASK (DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK | DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+struct drm_pvr_sync_op {
+  __u32 handle;
+  __u32 flags;
+  __u64 value;
+};
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK (DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST | DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST | DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP _BITULL(3)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER _BITULL(4)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS _BITULL(5)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER _BITULL(6)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE _BITULL(7)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK (DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE | DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER | DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER | DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP | DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER | DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS | DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER | DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK (DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP | DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+enum drm_pvr_job_type {
+  DRM_PVR_JOB_TYPE_GEOMETRY = 0,
+  DRM_PVR_JOB_TYPE_FRAGMENT,
+  DRM_PVR_JOB_TYPE_COMPUTE,
+  DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
+};
+struct drm_pvr_hwrt_data_ref {
+  __u32 set_handle;
+  __u32 data_index;
+};
+struct drm_pvr_job {
+  __u32 type;
+  __u32 context_handle;
+  __u32 flags;
+  __u32 cmd_stream_len;
+  __u64 cmd_stream;
+  struct drm_pvr_obj_array sync_ops;
+  struct drm_pvr_hwrt_data_ref hwrt;
+};
+struct drm_pvr_ioctl_submit_jobs_args {
+  struct drm_pvr_obj_array jobs;
+};
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libc/kernel/uapi/drm/v3d_drm.h b/libc/kernel/uapi/drm/v3d_drm.h
index 594856f..4000fd3 100644
--- a/libc/kernel/uapi/drm/v3d_drm.h
+++ b/libc/kernel/uapi/drm/v3d_drm.h
@@ -21,6 +21,7 @@
 #define DRM_V3D_PERFMON_CREATE 0x08
 #define DRM_V3D_PERFMON_DESTROY 0x09
 #define DRM_V3D_PERFMON_GET_VALUES 0x0a
+#define DRM_V3D_SUBMIT_CPU 0x0b
 #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
 #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
 #define DRM_IOCTL_V3D_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo)
@@ -32,12 +33,19 @@
 #define DRM_IOCTL_V3D_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_CREATE, struct drm_v3d_perfmon_create)
 #define DRM_IOCTL_V3D_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_DESTROY, struct drm_v3d_perfmon_destroy)
 #define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, struct drm_v3d_perfmon_get_values)
+#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
 #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
 #define DRM_V3D_SUBMIT_EXTENSION 0x02
 struct drm_v3d_extension {
   __u64 next;
   __u32 id;
 #define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02
+#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03
+#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04
+#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05
+#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06
+#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07
   __u32 flags;
 };
 struct drm_v3d_sem {
@@ -52,6 +60,7 @@
   V3D_TFU,
   V3D_CSD,
   V3D_CACHE_CLEAN,
+  V3D_CPU,
 };
 struct drm_v3d_multi_sync {
   struct drm_v3d_extension base;
@@ -109,6 +118,7 @@
   DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
   DRM_V3D_PARAM_SUPPORTS_PERFMON,
   DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
+  DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
 };
 struct drm_v3d_get_param {
   __u32 param;
@@ -133,6 +143,10 @@
   __u32 out_sync;
   __u32 flags;
   __u64 extensions;
+  struct {
+    __u32 ioc;
+    __u32 pad;
+  } v71;
 };
 struct drm_v3d_submit_csd {
   __u32 cfg[7];
@@ -146,6 +160,66 @@
   __u32 flags;
   __u32 pad;
 };
+struct drm_v3d_indirect_csd {
+  struct drm_v3d_extension base;
+  struct drm_v3d_submit_csd submit;
+  __u32 indirect;
+  __u32 offset;
+  __u32 wg_size;
+  __u32 wg_uniform_offsets[3];
+};
+struct drm_v3d_timestamp_query {
+  struct drm_v3d_extension base;
+  __u64 offsets;
+  __u64 syncs;
+  __u32 count;
+  __u32 pad;
+};
+struct drm_v3d_reset_timestamp_query {
+  struct drm_v3d_extension base;
+  __u64 syncs;
+  __u32 offset;
+  __u32 count;
+};
+struct drm_v3d_copy_timestamp_query {
+  struct drm_v3d_extension base;
+  __u8 do_64bit;
+  __u8 do_partial;
+  __u8 availability_bit;
+  __u8 pad;
+  __u32 offset;
+  __u32 stride;
+  __u32 count;
+  __u64 offsets;
+  __u64 syncs;
+};
+struct drm_v3d_reset_performance_query {
+  struct drm_v3d_extension base;
+  __u64 syncs;
+  __u32 count;
+  __u32 nperfmons;
+  __u64 kperfmon_ids;
+};
+struct drm_v3d_copy_performance_query {
+  struct drm_v3d_extension base;
+  __u8 do_64bit;
+  __u8 do_partial;
+  __u8 availability_bit;
+  __u8 pad;
+  __u32 offset;
+  __u32 stride;
+  __u32 nperfmons;
+  __u32 ncounters;
+  __u32 count;
+  __u64 syncs;
+  __u64 kperfmon_ids;
+};
+struct drm_v3d_submit_cpu {
+  __u64 bo_handles;
+  __u32 bo_handle_count;
+  __u32 flags;
+  __u64 extensions;
+};
 enum {
   V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
   V3D_PERFCNT_FEP_VALID_PRIMS,
diff --git a/libc/kernel/uapi/drm/virtgpu_drm.h b/libc/kernel/uapi/drm/virtgpu_drm.h
index 6babe2d..d79c7d9 100644
--- a/libc/kernel/uapi/drm/virtgpu_drm.h
+++ b/libc/kernel/uapi/drm/virtgpu_drm.h
@@ -58,6 +58,7 @@
 #define VIRTGPU_PARAM_CROSS_DEVICE 5
 #define VIRTGPU_PARAM_CONTEXT_INIT 6
 #define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7
+#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8
 struct drm_virtgpu_getparam {
   __u64 param;
   __u64 value;
@@ -140,6 +141,7 @@
 #define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
 #define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
 #define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
 struct drm_virtgpu_context_set_param {
   __u64 param;
   __u64 value;
diff --git a/libc/kernel/uapi/drm/xe_drm.h b/libc/kernel/uapi/drm/xe_drm.h
new file mode 100644
index 0000000..1e9f128
--- /dev/null
+++ b/libc/kernel/uapi/drm/xe_drm.h
@@ -0,0 +1,296 @@
+/*
+ * This file is auto-generated. Modifications will be lost.
+ *
+ * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
+ * for more information.
+ */
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
+#include "drm.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#define DRM_XE_DEVICE_QUERY 0x00
+#define DRM_XE_GEM_CREATE 0x01
+#define DRM_XE_GEM_MMAP_OFFSET 0x02
+#define DRM_XE_VM_CREATE 0x03
+#define DRM_XE_VM_DESTROY 0x04
+#define DRM_XE_VM_BIND 0x05
+#define DRM_XE_EXEC_QUEUE_CREATE 0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
+#define DRM_XE_EXEC 0x09
+#define DRM_XE_WAIT_USER_FENCE 0x0a
+#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
+#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
+#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
+#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
+#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
+#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
+#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
+#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
+#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
+#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
+#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
+struct drm_xe_user_extension {
+  __u64 next_extension;
+  __u32 name;
+  __u32 pad;
+};
+struct drm_xe_ext_set_property {
+  struct drm_xe_user_extension base;
+  __u32 property;
+  __u32 pad;
+  __u64 value;
+  __u64 reserved[2];
+};
+struct drm_xe_engine_class_instance {
+#define DRM_XE_ENGINE_CLASS_RENDER 0
+#define DRM_XE_ENGINE_CLASS_COPY 1
+#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2
+#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
+#define DRM_XE_ENGINE_CLASS_COMPUTE 4
+#define DRM_XE_ENGINE_CLASS_VM_BIND 5
+  __u16 engine_class;
+  __u16 engine_instance;
+  __u16 gt_id;
+  __u16 pad;
+};
+struct drm_xe_engine {
+  struct drm_xe_engine_class_instance instance;
+  __u64 reserved[3];
+};
+struct drm_xe_query_engines {
+  __u32 num_engines;
+  __u32 pad;
+  struct drm_xe_engine engines[];
+};
+enum drm_xe_memory_class {
+  DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
+  DRM_XE_MEM_REGION_CLASS_VRAM
+};
+struct drm_xe_mem_region {
+  __u16 mem_class;
+  __u16 instance;
+  __u32 min_page_size;
+  __u64 total_size;
+  __u64 used;
+  __u64 cpu_visible_size;
+  __u64 cpu_visible_used;
+  __u64 reserved[6];
+};
+struct drm_xe_query_mem_regions {
+  __u32 num_mem_regions;
+  __u32 pad;
+  struct drm_xe_mem_region mem_regions[];
+};
+struct drm_xe_query_config {
+  __u32 num_params;
+  __u32 pad;
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
+#define DRM_XE_QUERY_CONFIG_FLAGS 1
+#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
+#define DRM_XE_QUERY_CONFIG_VA_BITS 3
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
+  __u64 info[];
+};
+struct drm_xe_gt {
+#define DRM_XE_QUERY_GT_TYPE_MAIN 0
+#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
+  __u16 type;
+  __u16 tile_id;
+  __u16 gt_id;
+  __u16 pad[3];
+  __u32 reference_clock;
+  __u64 near_mem_regions;
+  __u64 far_mem_regions;
+  __u64 reserved[8];
+};
+struct drm_xe_query_gt_list {
+  __u32 num_gt;
+  __u32 pad;
+  struct drm_xe_gt gt_list[];
+};
+struct drm_xe_query_topology_mask {
+  __u16 gt_id;
+#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
+#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
+#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
+  __u16 type;
+  __u32 num_bytes;
+  __u8 mask[];
+};
+struct drm_xe_query_engine_cycles {
+  struct drm_xe_engine_class_instance eci;
+  __s32 clockid;
+  __u32 width;
+  __u64 engine_cycles;
+  __u64 cpu_timestamp;
+  __u64 cpu_delta;
+};
+struct drm_xe_device_query {
+  __u64 extensions;
+#define DRM_XE_DEVICE_QUERY_ENGINES 0
+#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
+#define DRM_XE_DEVICE_QUERY_CONFIG 2
+#define DRM_XE_DEVICE_QUERY_GT_LIST 3
+#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
+#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5
+#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
+  __u32 query;
+  __u32 size;
+  __u64 data;
+  __u64 reserved[2];
+};
+struct drm_xe_gem_create {
+  __u64 extensions;
+  __u64 size;
+  __u32 placement;
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
+  __u32 flags;
+  __u32 vm_id;
+  __u32 handle;
+#define DRM_XE_GEM_CPU_CACHING_WB 1
+#define DRM_XE_GEM_CPU_CACHING_WC 2
+  __u16 cpu_caching;
+  __u16 pad[3];
+  __u64 reserved[2];
+};
+struct drm_xe_gem_mmap_offset {
+  __u64 extensions;
+  __u32 handle;
+  __u32 flags;
+  __u64 offset;
+  __u64 reserved[2];
+};
+struct drm_xe_vm_create {
+  __u64 extensions;
+#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
+#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)
+  __u32 flags;
+  __u32 vm_id;
+  __u64 reserved[2];
+};
+struct drm_xe_vm_destroy {
+  __u32 vm_id;
+  __u32 pad;
+  __u64 reserved[2];
+};
+struct drm_xe_vm_bind_op {
+  __u64 extensions;
+  __u32 obj;
+  __u16 pat_index;
+  __u16 pad;
+  union {
+    __u64 obj_offset;
+    __u64 userptr;
+  };
+  __u64 range;
+  __u64 addr;
+#define DRM_XE_VM_BIND_OP_MAP 0x0
+#define DRM_XE_VM_BIND_OP_UNMAP 0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
+  __u32 op;
+#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
+#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
+  __u32 flags;
+  __u32 prefetch_mem_region_instance;
+  __u32 pad2;
+  __u64 reserved[3];
+};
+struct drm_xe_vm_bind {
+  __u64 extensions;
+  __u32 vm_id;
+  __u32 exec_queue_id;
+  __u32 pad;
+  __u32 num_binds;
+  union {
+    struct drm_xe_vm_bind_op bind;
+    __u64 vector_of_binds;
+  };
+  __u32 pad2;
+  __u32 num_syncs;
+  __u64 syncs;
+  __u64 reserved[2];
+};
+struct drm_xe_exec_queue_create {
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
+  __u64 extensions;
+  __u16 width;
+  __u16 num_placements;
+  __u32 vm_id;
+  __u32 flags;
+  __u32 exec_queue_id;
+  __u64 instances;
+  __u64 reserved[2];
+};
+struct drm_xe_exec_queue_destroy {
+  __u32 exec_queue_id;
+  __u32 pad;
+  __u64 reserved[2];
+};
+struct drm_xe_exec_queue_get_property {
+  __u64 extensions;
+  __u32 exec_queue_id;
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
+  __u32 property;
+  __u64 value;
+  __u64 reserved[2];
+};
+struct drm_xe_sync {
+  __u64 extensions;
+#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0
+#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1
+#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2
+  __u32 type;
+#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
+  __u32 flags;
+  union {
+    __u32 handle;
+    __u64 addr;
+  };
+  __u64 timeline_value;
+  __u64 reserved[2];
+};
+struct drm_xe_exec {
+  __u64 extensions;
+  __u32 exec_queue_id;
+  __u32 num_syncs;
+  __u64 syncs;
+  __u64 address;
+  __u16 num_batch_buffer;
+  __u16 pad[3];
+  __u64 reserved[2];
+};
+struct drm_xe_wait_user_fence {
+  __u64 extensions;
+  __u64 addr;
+#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
+#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
+#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
+#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
+#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
+#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
+  __u16 op;
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
+  __u16 flags;
+  __u32 pad;
+  __u64 value;
+  __u64 mask;
+  __s64 timeout;
+  __u32 exec_queue_id;
+  __u32 pad2;
+  __u64 reserved[2];
+};
+#ifdef __cplusplus
+}
+#endif
+#endif
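For reference, a minimal sketch of the new xe device-query ioctl, assuming the driver follows the usual two-call query convention (a first call with size == 0 asks the kernel to report the required buffer size, a second call fills the buffer). The device node path and the <drm/xe_drm.h> include path are likewise assumptions for illustration only.

/* Sketch: fetch the engine list via DRM_IOCTL_XE_DEVICE_QUERY. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/xe_drm.h>  /* assumed install location of the uapi header */

int main(void) {
  int fd = open("/dev/dri/card0", O_RDWR);  /* assumed device node */
  if (fd < 0) {
    perror("open");
    return 1;
  }
  struct drm_xe_device_query q;
  memset(&q, 0, sizeof(q));
  q.query = DRM_XE_DEVICE_QUERY_ENGINES;
  /* First call with size == 0: assumed to make the kernel report the size. */
  if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q) != 0 || q.size == 0) {
    perror("DRM_IOCTL_XE_DEVICE_QUERY (size probe)");
    close(fd);
    return 1;
  }
  struct drm_xe_query_engines* engines = calloc(1, q.size);
  if (engines == NULL) {
    close(fd);
    return 1;
  }
  q.data = (__u64) (uintptr_t) engines;
  /* Second call: the kernel copies the engine list into the buffer. */
  if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q) == 0) {
    printf("num_engines = %u\n", engines->num_engines);
  }
  free(engines);
  close(fd);
  return 0;
}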