Update to v6.10 kernel headers.

Kernel headers coming from:

Git: https://android.googlesource.com/kernel/common/
Branch: android-mainline
Tag: android-mainline-6.10

Test: Builds and bionic unit tests pass on raven.
Test: Able to log in to an Android Go 32-bit device.
Change-Id: Ic444690472155421388fdc76dfd1e84834efac63
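
Usage note (not part of the imported headers): the new panthor_drm.h added below
defines a device-query ioctl; a minimal userspace sketch of probing GPU info
through it follows. The render-node path and the <drm/...> include path are
assumptions for illustration, not taken from this change.

  /* Hedged sketch: query Panthor GPU info via the new uapi header. */
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <drm/panthor_drm.h>   /* assumed install path for the uapi header */

  int main(void) {
    int fd = open("/dev/dri/renderD128", O_RDWR);  /* assumed render node */
    if (fd < 0) {
      perror("open");
      return 1;
    }
    struct drm_panthor_gpu_info info;
    memset(&info, 0, sizeof(info));
    /* struct drm_panthor_dev_query: type selects the query, pointer/size
     * describe the userspace buffer the kernel fills in. */
    struct drm_panthor_dev_query query = {
      .type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
      .size = sizeof(info),
      .pointer = (__u64)(uintptr_t)&info,
    };
    if (ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query) == 0) {
      printf("arch major %u, product major %u\n",
             DRM_PANTHOR_ARCH_MAJOR(info.gpu_id),
             DRM_PANTHOR_PRODUCT_MAJOR(info.gpu_id));
    } else {
      perror("DRM_IOCTL_PANTHOR_DEV_QUERY");
    }
    close(fd);
    return 0;
  }
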
diff --git a/libc/kernel/uapi/drm/drm_mode.h b/libc/kernel/uapi/drm/drm_mode.h
index 9e6296c..8fccdaf 100644
--- a/libc/kernel/uapi/drm/drm_mode.h
+++ b/libc/kernel/uapi/drm/drm_mode.h
@@ -366,6 +366,10 @@
   __u16 blue;
   __u16 reserved;
 };
+struct drm_plane_size_hint {
+  __u16 width;
+  __u16 height;
+};
 struct hdr_metadata_infoframe {
   __u8 eotf;
   __u8 metadata_type;
diff --git a/libc/kernel/uapi/drm/i915_drm.h b/libc/kernel/uapi/drm/i915_drm.h
index 0738cee..13eda7c 100644
--- a/libc/kernel/uapi/drm/i915_drm.h
+++ b/libc/kernel/uapi/drm/i915_drm.h
@@ -365,6 +365,7 @@
 #define I915_PARAM_HAS_USERPTR_PROBE 56
 #define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57
 #define I915_PARAM_PXP_STATUS 58
+#define I915_PARAM_HAS_CONTEXT_FREQ_HINT 59
 struct drm_i915_getparam {
   __s32 param;
   int  * value;
@@ -743,6 +744,7 @@
 #define I915_CONTEXT_PARAM_PERSISTENCE 0xb
 #define I915_CONTEXT_PARAM_RINGSIZE 0xc
 #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
+#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe
   __u64 value;
 };
 struct drm_i915_gem_context_param_sseu {
diff --git a/libc/kernel/uapi/drm/nouveau_drm.h b/libc/kernel/uapi/drm/nouveau_drm.h
index f7d870e..01897af 100644
--- a/libc/kernel/uapi/drm/nouveau_drm.h
+++ b/libc/kernel/uapi/drm/nouveau_drm.h
@@ -25,10 +25,16 @@
 #define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
 #define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
 #define NOUVEAU_GETPARAM_VRAM_USED 19
+#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
 struct drm_nouveau_getparam {
   __u64 param;
   __u64 value;
 };
+#define NOUVEAU_FIFO_ENGINE_GR 0x01
+#define NOUVEAU_FIFO_ENGINE_VP 0x02
+#define NOUVEAU_FIFO_ENGINE_PPP 0x04
+#define NOUVEAU_FIFO_ENGINE_BSP 0x08
+#define NOUVEAU_FIFO_ENGINE_CE 0x30
 struct drm_nouveau_channel_alloc {
   __u32 fb_ctxdma_handle;
   __u32 tt_ctxdma_handle;
@@ -44,6 +50,16 @@
 struct drm_nouveau_channel_free {
   __s32 channel;
 };
+struct drm_nouveau_notifierobj_alloc {
+  __u32 channel;
+  __u32 handle;
+  __u32 size;
+  __u32 offset;
+};
+struct drm_nouveau_gpuobj_free {
+  __s32 channel;
+  __u32 handle;
+};
 #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
 #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
 #define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
diff --git a/libc/kernel/uapi/drm/panthor_drm.h b/libc/kernel/uapi/drm/panthor_drm.h
new file mode 100644
index 0000000..b45c1dc
--- /dev/null
+++ b/libc/kernel/uapi/drm/panthor_drm.h
@@ -0,0 +1,240 @@
+/*
+ * This file is auto-generated. Modifications will be lost.
+ *
+ * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
+ * for more information.
+ */
+#ifndef _PANTHOR_DRM_H_
+#define _PANTHOR_DRM_H_
+#include "drm.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT (1ull << 43)
+#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT (1ull << 56)
+#define DRM_PANTHOR_USER_MMIO_OFFSET (sizeof(unsigned long) < 8 ? DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
+#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET (DRM_PANTHOR_USER_MMIO_OFFSET | 0)
+enum drm_panthor_ioctl_id {
+  DRM_PANTHOR_DEV_QUERY = 0,
+  DRM_PANTHOR_VM_CREATE,
+  DRM_PANTHOR_VM_DESTROY,
+  DRM_PANTHOR_VM_BIND,
+  DRM_PANTHOR_VM_GET_STATE,
+  DRM_PANTHOR_BO_CREATE,
+  DRM_PANTHOR_BO_MMAP_OFFSET,
+  DRM_PANTHOR_GROUP_CREATE,
+  DRM_PANTHOR_GROUP_DESTROY,
+  DRM_PANTHOR_GROUP_SUBMIT,
+  DRM_PANTHOR_GROUP_GET_STATE,
+  DRM_PANTHOR_TILER_HEAP_CREATE,
+  DRM_PANTHOR_TILER_HEAP_DESTROY,
+};
+#define DRM_IOCTL_PANTHOR(__access,__id,__type) DRM_IO ##__access(DRM_COMMAND_BASE + DRM_PANTHOR_ ##__id, struct drm_panthor_ ##__type)
+#define DRM_IOCTL_PANTHOR_DEV_QUERY DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query)
+#define DRM_IOCTL_PANTHOR_VM_CREATE DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create)
+#define DRM_IOCTL_PANTHOR_VM_DESTROY DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy)
+#define DRM_IOCTL_PANTHOR_VM_BIND DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind)
+#define DRM_IOCTL_PANTHOR_VM_GET_STATE DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state)
+#define DRM_IOCTL_PANTHOR_BO_CREATE DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create)
+#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset)
+#define DRM_IOCTL_PANTHOR_GROUP_CREATE DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create)
+#define DRM_IOCTL_PANTHOR_GROUP_DESTROY DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy)
+#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit)
+#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state)
+#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create)
+#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy)
+struct drm_panthor_obj_array {
+  __u32 stride;
+  __u32 count;
+  __u64 array;
+};
+#define DRM_PANTHOR_OBJ_ARRAY(cnt,ptr) {.stride = sizeof((ptr)[0]),.count = (cnt),.array = (__u64) (uintptr_t) (ptr) }
+enum drm_panthor_sync_op_flags {
+  DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,
+  DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,
+  DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,
+  DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,
+  DRM_PANTHOR_SYNC_OP_SIGNAL = (int) (1u << 31),
+};
+struct drm_panthor_sync_op {
+  __u32 flags;
+  __u32 handle;
+  __u64 timeline_value;
+};
+enum drm_panthor_dev_query_type {
+  DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,
+  DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
+};
+struct drm_panthor_gpu_info {
+  __u32 gpu_id;
+#define DRM_PANTHOR_ARCH_MAJOR(x) ((x) >> 28)
+#define DRM_PANTHOR_ARCH_MINOR(x) (((x) >> 24) & 0xf)
+#define DRM_PANTHOR_ARCH_REV(x) (((x) >> 20) & 0xf)
+#define DRM_PANTHOR_PRODUCT_MAJOR(x) (((x) >> 16) & 0xf)
+#define DRM_PANTHOR_VERSION_MAJOR(x) (((x) >> 12) & 0xf)
+#define DRM_PANTHOR_VERSION_MINOR(x) (((x) >> 4) & 0xff)
+#define DRM_PANTHOR_VERSION_STATUS(x) ((x) & 0xf)
+  __u32 gpu_rev;
+  __u32 csf_id;
+#define DRM_PANTHOR_CSHW_MAJOR(x) (((x) >> 26) & 0x3f)
+#define DRM_PANTHOR_CSHW_MINOR(x) (((x) >> 20) & 0x3f)
+#define DRM_PANTHOR_CSHW_REV(x) (((x) >> 16) & 0xf)
+#define DRM_PANTHOR_MCU_MAJOR(x) (((x) >> 10) & 0x3f)
+#define DRM_PANTHOR_MCU_MINOR(x) (((x) >> 4) & 0x3f)
+#define DRM_PANTHOR_MCU_REV(x) ((x) & 0xf)
+  __u32 l2_features;
+  __u32 tiler_features;
+  __u32 mem_features;
+  __u32 mmu_features;
+#define DRM_PANTHOR_MMU_VA_BITS(x) ((x) & 0xff)
+  __u32 thread_features;
+  __u32 max_threads;
+  __u32 thread_max_workgroup_size;
+  __u32 thread_max_barrier_size;
+  __u32 coherency_features;
+  __u32 texture_features[4];
+  __u32 as_present;
+  __u64 shader_present;
+  __u64 l2_present;
+  __u64 tiler_present;
+  __u32 core_features;
+  __u32 pad;
+};
+struct drm_panthor_csif_info {
+  __u32 csg_slot_count;
+  __u32 cs_slot_count;
+  __u32 cs_reg_count;
+  __u32 scoreboard_slot_count;
+  __u32 unpreserved_cs_reg_count;
+  __u32 pad;
+};
+struct drm_panthor_dev_query {
+  __u32 type;
+  __u32 size;
+  __u64 pointer;
+};
+struct drm_panthor_vm_create {
+  __u32 flags;
+  __u32 id;
+  __u64 user_va_range;
+};
+struct drm_panthor_vm_destroy {
+  __u32 id;
+  __u32 pad;
+};
+enum drm_panthor_vm_bind_op_flags {
+  DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,
+  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,
+  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,
+  DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int) (0xfu << 28),
+  DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,
+  DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,
+  DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
+};
+struct drm_panthor_vm_bind_op {
+  __u32 flags;
+  __u32 bo_handle;
+  __u64 bo_offset;
+  __u64 va;
+  __u64 size;
+  struct drm_panthor_obj_array syncs;
+};
+enum drm_panthor_vm_bind_flags {
+  DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
+};
+struct drm_panthor_vm_bind {
+  __u32 vm_id;
+  __u32 flags;
+  struct drm_panthor_obj_array ops;
+};
+enum drm_panthor_vm_state {
+  DRM_PANTHOR_VM_STATE_USABLE,
+  DRM_PANTHOR_VM_STATE_UNUSABLE,
+};
+struct drm_panthor_vm_get_state {
+  __u32 vm_id;
+  __u32 state;
+};
+enum drm_panthor_bo_flags {
+  DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
+};
+struct drm_panthor_bo_create {
+  __u64 size;
+  __u32 flags;
+  __u32 exclusive_vm_id;
+  __u32 handle;
+  __u32 pad;
+};
+struct drm_panthor_bo_mmap_offset {
+  __u32 handle;
+  __u32 pad;
+  __u64 offset;
+};
+struct drm_panthor_queue_create {
+  __u8 priority;
+  __u8 pad[3];
+  __u32 ringbuf_size;
+};
+enum drm_panthor_group_priority {
+  PANTHOR_GROUP_PRIORITY_LOW = 0,
+  PANTHOR_GROUP_PRIORITY_MEDIUM,
+  PANTHOR_GROUP_PRIORITY_HIGH,
+};
+struct drm_panthor_group_create {
+  struct drm_panthor_obj_array queues;
+  __u8 max_compute_cores;
+  __u8 max_fragment_cores;
+  __u8 max_tiler_cores;
+  __u8 priority;
+  __u32 pad;
+  __u64 compute_core_mask;
+  __u64 fragment_core_mask;
+  __u64 tiler_core_mask;
+  __u32 vm_id;
+  __u32 group_handle;
+};
+struct drm_panthor_group_destroy {
+  __u32 group_handle;
+  __u32 pad;
+};
+struct drm_panthor_queue_submit {
+  __u32 queue_index;
+  __u32 stream_size;
+  __u64 stream_addr;
+  __u32 latest_flush;
+  __u32 pad;
+  struct drm_panthor_obj_array syncs;
+};
+struct drm_panthor_group_submit {
+  __u32 group_handle;
+  __u32 pad;
+  struct drm_panthor_obj_array queue_submits;
+};
+enum drm_panthor_group_state_flags {
+  DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,
+  DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,
+};
+struct drm_panthor_group_get_state {
+  __u32 group_handle;
+  __u32 state;
+  __u32 fatal_queues;
+  __u32 pad;
+};
+struct drm_panthor_tiler_heap_create {
+  __u32 vm_id;
+  __u32 initial_chunk_count;
+  __u32 chunk_size;
+  __u32 max_chunks;
+  __u32 target_in_flight;
+  __u32 handle;
+  __u64 tiler_heap_ctx_gpu_va;
+  __u64 first_heap_chunk_gpu_va;
+};
+struct drm_panthor_tiler_heap_destroy {
+  __u32 handle;
+  __u32 pad;
+};
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libc/kernel/uapi/drm/xe_drm.h b/libc/kernel/uapi/drm/xe_drm.h
index dd0da6f..d1b6dad 100644
--- a/libc/kernel/uapi/drm/xe_drm.h
+++ b/libc/kernel/uapi/drm/xe_drm.h
@@ -105,7 +105,11 @@
   __u32 reference_clock;
   __u64 near_mem_regions;
   __u64 far_mem_regions;
-  __u64 reserved[8];
+  __u16 ip_ver_major;
+  __u16 ip_ver_minor;
+  __u16 ip_ver_rev;
+  __u16 pad2;
+  __u64 reserved[7];
 };
 struct drm_xe_query_gt_list {
   __u32 num_gt;
@@ -114,9 +118,9 @@
 };
 struct drm_xe_query_topology_mask {
   __u16 gt_id;
-#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
-#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
-#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
+#define DRM_XE_TOPO_DSS_GEOMETRY 1
+#define DRM_XE_TOPO_DSS_COMPUTE 2
+#define DRM_XE_TOPO_EU_PER_DSS 4
   __u16 type;
   __u32 num_bytes;
   __u8 mask[];
@@ -131,6 +135,7 @@
 };
 struct drm_xe_query_uc_fw_version {
 #define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0
+#define XE_QUERY_UC_TYPE_HUC 1
   __u16 uc_type;
   __u16 pad;
   __u32 branch_ver;
@@ -209,6 +214,8 @@
 #define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
 #define DRM_XE_VM_BIND_OP_PREFETCH 0x4
   __u32 op;
+#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
 #define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
 #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
   __u32 flags;