/*
 * This file is auto-generated. Modifications will be lost.
 *
 * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
 * for more information.
 */
|  | 7 | #ifndef _PANTHOR_DRM_H_ | 
|  | 8 | #define _PANTHOR_DRM_H_ | 
|  | 9 | #include "drm.h" | 
|  | 10 | #ifdef __cplusplus | 
|  | 11 | extern "C" { | 
|  | 12 | #endif | 
/*
 * Base fake-mmap offsets used for user MMIO mappings. The 32-bit variant is
 * selected when the calling process's 'unsigned long' is narrower than 8
 * bytes (i.e. a 32-bit process), so the offset stays representable there.
 */
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT (1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT (1ull << 56)
/* Resolves to the 32-bit or 64-bit base depending on sizeof(unsigned long)
 * in the compilation environment of the caller. */
#define DRM_PANTHOR_USER_MMIO_OFFSET (sizeof(unsigned long) < 8 ? DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
/* mmap offset of the flush-ID MMIO page (base | 0, i.e. the first page).
 * NOTE(review): presumably backs the 'latest_flush' value consumed by
 * drm_panthor_queue_submit — confirm against the kernel driver. */
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET (DRM_PANTHOR_USER_MMIO_OFFSET | 0)
/*
 * Per-driver ioctl numbers for the Panthor DRM driver. These are offsets
 * from DRM_COMMAND_BASE (see the DRM_IOCTL_PANTHOR() builder below), so
 * their order and values are ABI and must never change.
 */
enum drm_panthor_ioctl_id {
  /* Query device properties (GPU / CS interface info). */
  DRM_PANTHOR_DEV_QUERY = 0,
  /* Create / destroy a GPU virtual address space. */
  DRM_PANTHOR_VM_CREATE,
  DRM_PANTHOR_VM_DESTROY,
  /* Map/unmap buffer objects in a VM. */
  DRM_PANTHOR_VM_BIND,
  /* Query whether a VM is still usable. */
  DRM_PANTHOR_VM_GET_STATE,
  /* Buffer-object creation and mmap-offset retrieval. */
  DRM_PANTHOR_BO_CREATE,
  DRM_PANTHOR_BO_MMAP_OFFSET,
  /* Scheduling-group lifecycle and submission. */
  DRM_PANTHOR_GROUP_CREATE,
  DRM_PANTHOR_GROUP_DESTROY,
  DRM_PANTHOR_GROUP_SUBMIT,
  DRM_PANTHOR_GROUP_GET_STATE,
  /* Tiler-heap lifecycle. */
  DRM_PANTHOR_TILER_HEAP_CREATE,
  DRM_PANTHOR_TILER_HEAP_DESTROY,
};
/*
 * Helper that builds a full DRM ioctl number from an access mode (__access,
 * pasted onto DRM_IO, e.g. WR -> DRM_IOWR), an id from enum
 * drm_panthor_ioctl_id (__id) and the argument struct suffix (__type).
 */
#define DRM_IOCTL_PANTHOR(__access,__id,__type) DRM_IO ##__access(DRM_COMMAND_BASE + DRM_PANTHOR_ ##__id, struct drm_panthor_ ##__type)
/* One read/write ioctl per entry of enum drm_panthor_ioctl_id. */
#define DRM_IOCTL_PANTHOR_DEV_QUERY DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query)
#define DRM_IOCTL_PANTHOR_VM_CREATE DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create)
#define DRM_IOCTL_PANTHOR_VM_DESTROY DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy)
#define DRM_IOCTL_PANTHOR_VM_BIND DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind)
#define DRM_IOCTL_PANTHOR_VM_GET_STATE DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state)
#define DRM_IOCTL_PANTHOR_BO_CREATE DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create)
#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset)
#define DRM_IOCTL_PANTHOR_GROUP_CREATE DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create)
#define DRM_IOCTL_PANTHOR_GROUP_DESTROY DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy)
#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit)
#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy)
/*
 * Generic descriptor for a userspace-supplied array of objects, used by
 * ioctls that take variable-length lists (sync ops, bind ops, queue
 * submits). The stride lets the kernel accept older/newer element layouts.
 */
struct drm_panthor_obj_array {
  __u32 stride; /* size in bytes of one array element */
  __u32 count;  /* number of elements in the array */
  __u64 array;  /* userspace address of the first element */
};
/* Convenience initializer: derives stride from the pointee type of ptr. */
#define DRM_PANTHOR_OBJ_ARRAY(cnt,ptr) {.stride = sizeof((ptr)[0]),.count = (cnt),.array = (__u64) (uintptr_t) (ptr) }
/*
 * Flags for drm_panthor_sync_op::flags. The low byte encodes the handle
 * type; bit 31 selects wait vs. signal semantics.
 */
enum drm_panthor_sync_op_flags {
  /* Mask covering the handle-type field (bits 7:0). */
  DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,
  DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,
  DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,
  /* Bit 31 clear: wait on the object before running the operation. */
  DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,
  /* Bit 31 set: signal the object when the operation completes. The cast
   * keeps 1u << 31 representable as the enum's int underlying type. */
  DRM_PANTHOR_SYNC_OP_SIGNAL = (int) (1u << 31),
};
/* One synchronization operation attached to a submission or bind. */
struct drm_panthor_sync_op {
  __u32 flags;          /* combination of drm_panthor_sync_op_flags */
  __u32 handle;         /* syncobj handle (type given by flags) */
  __u64 timeline_value; /* point on the timeline; relevant for
                         * TIMELINE_SYNCOBJ handles */
};
/* Selector for drm_panthor_dev_query::type. */
enum drm_panthor_dev_query_type {
  /* Returns a struct drm_panthor_gpu_info. */
  DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,
  /* Returns a struct drm_panthor_csif_info. */
  DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
};
/*
 * GPU identification and feature information, filled in by the
 * DEV_QUERY_GPU_INFO device query. The DRM_PANTHOR_* macros below decode
 * bit-fields out of the adjacent register-style words.
 */
struct drm_panthor_gpu_info {
  __u32 gpu_id; /* arch/product/version id word, decoded below */
/* gpu_id decoders: arch 31:28, arch-minor 27:24, arch-rev 23:20,
 * product 19:16, version 15:12, version-minor 11:4, status 3:0. */
#define DRM_PANTHOR_ARCH_MAJOR(x) ((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x) (((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x) (((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x) (((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x) (((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x) (((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x) ((x) & 0xf)
  __u32 gpu_rev; /* GPU revision */
  __u32 csf_id;  /* command-stream-frontend id word, decoded below */
/* csf_id decoders: CS hardware 31:26 / 25:20 / 19:16,
 * MCU firmware 15:10 / 9:4 / 3:0. */
#define DRM_PANTHOR_CSHW_MAJOR(x) (((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x) (((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x) (((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x) (((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x) (((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x) ((x) & 0xf)
  __u32 l2_features;    /* L2-cache feature word */
  __u32 tiler_features; /* tiler feature word */
  __u32 mem_features;   /* memory-system feature word */
  __u32 mmu_features;   /* MMU feature word, decoded below */
/* Number of virtual-address bits supported by the MMU (bits 7:0). */
#define DRM_PANTHOR_MMU_VA_BITS(x) ((x) & 0xff)
  __u32 thread_features;          /* shader-thread feature word */
  __u32 max_threads;              /* maximum number of threads */
  __u32 thread_max_workgroup_size;/* maximum workgroup size */
  __u32 thread_max_barrier_size;  /* maximum threads per barrier */
  __u32 coherency_features;       /* supported coherency modes */
  __u32 texture_features[4];      /* supported texture-format words */
  __u32 as_present;               /* bitmask of available address spaces */
  __u64 shader_present;           /* bitmask of present shader cores */
  __u64 l2_present;               /* bitmask of present L2 slices */
  __u64 tiler_present;            /* bitmask of present tiler units */
  __u32 core_features;            /* core feature word */
  __u32 pad;                      /* padding; keeps 64-bit alignment */
};
/*
 * Command-stream-interface properties, filled in by the
 * DEV_QUERY_CSIF_INFO device query.
 */
struct drm_panthor_csif_info {
  __u32 csg_slot_count;         /* number of command-stream-group slots */
  __u32 cs_slot_count;          /* number of command-stream slots per group */
  __u32 cs_reg_count;           /* number of command-stream registers */
  __u32 scoreboard_slot_count;  /* number of scoreboard slots */
  __u32 unpreserved_cs_reg_count; /* registers not preserved across events */
  __u32 pad;                    /* padding for alignment */
};
/* Argument of DRM_IOCTL_PANTHOR_DEV_QUERY. */
struct drm_panthor_dev_query {
  __u32 type;    /* one of enum drm_panthor_dev_query_type */
  __u32 size;    /* size in bytes of the buffer at 'pointer' */
  __u64 pointer; /* userspace address of the result buffer
                  * (NOTE(review): if 0, presumably the kernel reports the
                  * required size in 'size' — confirm against the driver) */
};
/* Argument of DRM_IOCTL_PANTHOR_VM_CREATE. */
struct drm_panthor_vm_create {
  __u32 flags;        /* creation flags; no flags defined in this header */
  __u32 id;           /* id of the created VM, returned by the kernel */
  __u64 user_va_range;/* size of the VA range reserved for userspace
                       * mappings (0 presumably means a driver default —
                       * TODO confirm) */
};
/* Argument of DRM_IOCTL_PANTHOR_VM_DESTROY. */
struct drm_panthor_vm_destroy {
  __u32 id;  /* id of the VM to destroy (from drm_panthor_vm_create) */
  __u32 pad; /* padding for alignment */
};
/*
 * Flags for drm_panthor_vm_bind_op::flags. The low bits are per-mapping
 * attributes; bits 31:28 encode the operation type.
 */
enum drm_panthor_vm_bind_op_flags {
  DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,
  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,
  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,
  /* Mask covering the operation-type field (bits 31:28). The cast keeps
   * 0xfu << 28 representable as the enum's int underlying type. */
  DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int) (0xfu << 28),
  DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,
  DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,
  /* No map/unmap; the op only carries sync operations. */
  DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};
/* One map/unmap operation inside a DRM_IOCTL_PANTHOR_VM_BIND batch. */
struct drm_panthor_vm_bind_op {
  __u32 flags;     /* combination of drm_panthor_vm_bind_op_flags */
  __u32 bo_handle; /* buffer object to map (map ops) */
  __u64 bo_offset; /* offset into the buffer object */
  __u64 va;        /* GPU virtual address of the mapping */
  __u64 size;      /* size in bytes of the mapping */
  /* Array of struct drm_panthor_sync_op attached to this operation. */
  struct drm_panthor_obj_array syncs;
};
/* Flags for drm_panthor_vm_bind::flags. */
enum drm_panthor_vm_bind_flags {
  /* Execute the bind operations asynchronously; completion is observed
   * through the per-op sync operations. */
  DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};
/* Argument of DRM_IOCTL_PANTHOR_VM_BIND. */
struct drm_panthor_vm_bind {
  __u32 vm_id; /* VM targeted by the bind operations */
  __u32 flags; /* combination of drm_panthor_vm_bind_flags */
  /* Array of struct drm_panthor_vm_bind_op to execute. */
  struct drm_panthor_obj_array ops;
};
/* VM state reported by DRM_IOCTL_PANTHOR_VM_GET_STATE. */
enum drm_panthor_vm_state {
  DRM_PANTHOR_VM_STATE_USABLE,
  /* The VM can no longer accept new work (e.g. after an unrecoverable
   * fault — NOTE(review): exact trigger is kernel-side; confirm there). */
  DRM_PANTHOR_VM_STATE_UNUSABLE,
};
/* Argument of DRM_IOCTL_PANTHOR_VM_GET_STATE. */
struct drm_panthor_vm_get_state {
  __u32 vm_id; /* VM to query */
  __u32 state; /* returned value, one of enum drm_panthor_vm_state */
};
/* Flags for drm_panthor_bo_create::flags. */
enum drm_panthor_bo_flags {
  /* The buffer object will never be CPU-mapped. */
  DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};
/* Argument of DRM_IOCTL_PANTHOR_BO_CREATE. */
struct drm_panthor_bo_create {
  __u64 size;            /* requested size in bytes */
  __u32 flags;           /* combination of drm_panthor_bo_flags */
  __u32 exclusive_vm_id; /* if non-zero, presumably restricts the BO to
                          * this VM (not shareable) — TODO confirm */
  __u32 handle;          /* handle of the created BO, returned by kernel */
  __u32 pad;             /* padding for alignment */
};
/* Argument of DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET. */
struct drm_panthor_bo_mmap_offset {
  __u32 handle; /* BO to get an mmap offset for */
  __u32 pad;    /* padding for alignment */
  __u64 offset; /* returned fake offset to pass to mmap() on the DRM fd */
};
/* Description of one queue inside a drm_panthor_group_create request. */
struct drm_panthor_queue_create {
  __u8 priority;      /* queue priority within the group */
  __u8 pad[3];        /* padding for alignment */
  __u32 ringbuf_size; /* size in bytes of the queue ring buffer */
};
/* Scheduling priority of a group (drm_panthor_group_create::priority). */
enum drm_panthor_group_priority {
  PANTHOR_GROUP_PRIORITY_LOW = 0,
  PANTHOR_GROUP_PRIORITY_MEDIUM,
  PANTHOR_GROUP_PRIORITY_HIGH,
};
/* Argument of DRM_IOCTL_PANTHOR_GROUP_CREATE. */
struct drm_panthor_group_create {
  /* Array of struct drm_panthor_queue_create, one per queue. */
  struct drm_panthor_obj_array queues;
  __u8 max_compute_cores;  /* max cores used for compute jobs */
  __u8 max_fragment_cores; /* max cores used for fragment jobs */
  __u8 max_tiler_cores;    /* max tiler units used */
  __u8 priority;           /* one of enum drm_panthor_group_priority */
  __u32 pad;               /* padding for alignment */
  __u64 compute_core_mask; /* mask of cores usable for compute jobs */
  __u64 fragment_core_mask;/* mask of cores usable for fragment jobs */
  __u64 tiler_core_mask;   /* mask of usable tiler units */
  __u32 vm_id;             /* VM the group executes in */
  __u32 group_handle;      /* handle of the new group, returned by kernel */
};
/* Argument of DRM_IOCTL_PANTHOR_GROUP_DESTROY. */
struct drm_panthor_group_destroy {
  __u32 group_handle; /* group to destroy */
  __u32 pad;          /* padding for alignment */
};
/* One per-queue submission inside a drm_panthor_group_submit batch. */
struct drm_panthor_queue_submit {
  __u32 queue_index;  /* index of the queue within the group */
  __u32 stream_size;  /* size in bytes of the command stream */
  __u64 stream_addr;  /* GPU address of the command stream */
  __u32 latest_flush; /* flush ID read by userspace at stream build time
                       * (see DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET) */
  __u32 pad;          /* padding for alignment */
  /* Array of struct drm_panthor_sync_op for this submission. */
  struct drm_panthor_obj_array syncs;
};
/* Argument of DRM_IOCTL_PANTHOR_GROUP_SUBMIT. */
struct drm_panthor_group_submit {
  __u32 group_handle; /* group receiving the submissions */
  __u32 pad;          /* padding for alignment */
  /* Array of struct drm_panthor_queue_submit to dispatch. */
  struct drm_panthor_obj_array queue_submits;
};
/* Flags reported in drm_panthor_group_get_state::state. */
enum drm_panthor_group_state_flags {
  /* The group hit a timeout. */
  DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,
  /* The group took an unrecoverable fault; see 'fatal_queues'. */
  DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,
};
/* Argument of DRM_IOCTL_PANTHOR_GROUP_GET_STATE. */
struct drm_panthor_group_get_state {
  __u32 group_handle; /* group to query */
  __u32 state;        /* returned drm_panthor_group_state_flags bits */
  __u32 fatal_queues; /* returned bitmask of queues that faulted */
  __u32 pad;          /* padding for alignment */
};
/* Argument of DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE. */
struct drm_panthor_tiler_heap_create {
  __u32 vm_id;               /* VM the heap is mapped in */
  __u32 initial_chunk_count; /* number of chunks allocated up front */
  __u32 chunk_size;          /* size in bytes of one heap chunk */
  __u32 max_chunks;          /* maximum number of chunks */
  __u32 target_in_flight;    /* target number of in-flight render passes
                              * (NOTE(review): exact throttling semantics
                              * are kernel-side; confirm there) */
  __u32 handle;              /* heap handle, returned by the kernel */
  __u64 tiler_heap_ctx_gpu_va;  /* returned GPU VA of the heap context */
  __u64 first_heap_chunk_gpu_va;/* returned GPU VA of the first chunk */
};
/* Argument of DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY. */
struct drm_panthor_tiler_heap_destroy {
  __u32 handle; /* heap to destroy (from drm_panthor_tiler_heap_create) */
  __u32 pad;    /* padding for alignment */
};
|  | 237 | #ifdef __cplusplus | 
|  | 238 | } | 
|  | 239 | #endif | 
|  | 240 | #endif |