/*
 * This file is auto-generated. Modifications will be lost.
 *
 * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
 * for more information.
 */
#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_
#include "drm.h"
#ifdef __cplusplus
extern "C" {
#endif
#define DRM_XE_DEVICE_QUERY 0x00
#define DRM_XE_GEM_CREATE 0x01
#define DRM_XE_GEM_MMAP_OFFSET 0x02
#define DRM_XE_VM_CREATE 0x03
#define DRM_XE_VM_DESTROY 0x04
#define DRM_XE_VM_BIND 0x05
#define DRM_XE_EXEC_QUEUE_CREATE 0x06
#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
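/*
 * Illustrative sketch, not part of the generated header: these ioctls follow
 * the standard DRM pattern of filling in a request struct and passing it to
 * ioctl() on an open render node. The device path below is an assumption, and
 * includes and error handling are omitted.
 *
 *   #include <fcntl.h>
 *   #include <sys/ioctl.h>
 *
 *   int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *   struct drm_xe_vm_create req = { .flags = 0 };
 *   if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &req) == 0) {
 *     // req.vm_id now identifies the newly created VM
 *   }
 */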
struct drm_xe_user_extension {
  __u64 next_extension;
  __u32 name;
  __u32 pad;
};
struct drm_xe_ext_set_property {
  struct drm_xe_user_extension base;
  __u32 property;
  __u32 pad;
  __u64 value;
  __u64 reserved[2];
};
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER 0
#define DRM_XE_ENGINE_CLASS_COPY 1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
#define DRM_XE_ENGINE_CLASS_COMPUTE 4
#define DRM_XE_ENGINE_CLASS_VM_BIND 5
  __u16 engine_class;
  __u16 engine_instance;
  __u16 gt_id;
  __u16 pad;
};
struct drm_xe_engine {
  struct drm_xe_engine_class_instance instance;
  __u64 reserved[3];
};
struct drm_xe_query_engines {
  __u32 num_engines;
  __u32 pad;
  struct drm_xe_engine engines[];
};
enum drm_xe_memory_class {
  DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
  DRM_XE_MEM_REGION_CLASS_VRAM
};
struct drm_xe_mem_region {
  __u16 mem_class;
  __u16 instance;
  __u32 min_page_size;
  __u64 total_size;
  __u64 used;
  __u64 cpu_visible_size;
  __u64 cpu_visible_used;
  __u64 reserved[6];
};
struct drm_xe_query_mem_regions {
  __u32 num_mem_regions;
  __u32 pad;
  struct drm_xe_mem_region mem_regions[];
};
struct drm_xe_query_config {
  __u32 num_params;
  __u32 pad;
#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define DRM_XE_QUERY_CONFIG_FLAGS 1
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
  __u64 info[];
};
struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN 0
#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
  __u16 type;
  __u16 tile_id;
  __u16 gt_id;
  __u16 pad[3];
  __u32 reference_clock;
  __u64 near_mem_regions;
  __u64 far_mem_regions;
  __u64 reserved[8];
};
struct drm_xe_query_gt_list {
  __u32 num_gt;
  __u32 pad;
  struct drm_xe_gt gt_list[];
};
struct drm_xe_query_topology_mask {
  __u16 gt_id;
#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
  __u16 type;
  __u32 num_bytes;
  __u8 mask[];
};
struct drm_xe_query_engine_cycles {
  struct drm_xe_engine_class_instance eci;
  __s32 clockid;
  __u32 width;
  __u64 engine_cycles;
  __u64 cpu_timestamp;
  __u64 cpu_delta;
};
struct drm_xe_device_query {
  __u64 extensions;
#define DRM_XE_DEVICE_QUERY_ENGINES 0
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
#define DRM_XE_DEVICE_QUERY_CONFIG 2
#define DRM_XE_DEVICE_QUERY_GT_LIST 3
#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
  __u32 query;
  __u32 size;
  __u64 data;
  __u64 reserved[2];
};
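/*
 * Illustrative sketch, assuming the usual two-call query pattern: a first
 * ioctl with size == 0 asks the kernel for the required buffer size, a second
 * call fills the caller-provided buffer. `fd` is an already-open xe render
 * node; includes and error handling are omitted.
 *
 *   struct drm_xe_device_query query = {
 *     .query = DRM_XE_DEVICE_QUERY_ENGINES,
 *     .size = 0,
 *     .data = 0,
 *   };
 *   ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);   // reports required size
 *   struct drm_xe_query_engines *engines = malloc(query.size);
 *   query.data = (__u64)(uintptr_t)engines;
 *   ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);   // fills the buffer
 *   // engines->num_engines entries are now in engines->engines[]
 */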
struct drm_xe_gem_create {
  __u64 extensions;
  __u64 size;
  __u32 placement;
#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
  __u32 flags;
  __u32 vm_id;
  __u32 handle;
#define DRM_XE_GEM_CPU_CACHING_WB 1
#define DRM_XE_GEM_CPU_CACHING_WC 2
  __u16 cpu_caching;
  __u16 pad[3];
  __u64 reserved[2];
};
struct drm_xe_gem_mmap_offset {
  __u64 extensions;
  __u32 handle;
  __u32 flags;
  __u64 offset;
  __u64 reserved[2];
};
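/*
 * Illustrative sketch: creating a buffer object and mapping it into the CPU
 * address space goes through DRM_IOCTL_XE_GEM_CREATE followed by
 * DRM_IOCTL_XE_GEM_MMAP_OFFSET and a regular mmap() of the returned offset.
 * The placement bit, size, and caching mode below are assumptions; real code
 * derives them from the memory-region query. `fd` is an open xe render node;
 * includes and error handling are omitted.
 *
 *   struct drm_xe_gem_create create = {
 *     .size = 65536,                         // assumed multiple of min_page_size
 *     .placement = 1 << 0,                   // assumed: system memory instance
 *     .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
 *   };
 *   ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 *   struct drm_xe_gem_mmap_offset mmo = { .handle = create.handle };
 *   ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *   void *cpu_ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                        fd, mmo.offset);
 */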
struct drm_xe_vm_create {
  __u64 extensions;
#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)
  __u32 flags;
  __u32 vm_id;
  __u64 reserved[2];
};
struct drm_xe_vm_destroy {
  __u32 vm_id;
  __u32 pad;
  __u64 reserved[2];
};
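/*
 * Illustrative sketch: a VM is created once per GPU address space and
 * destroyed when no longer needed; the DRM_XE_VM_CREATE_FLAG_* bits above
 * select optional behaviours and are left clear here. `fd` is an open xe
 * render node; error handling is omitted.
 *
 *   struct drm_xe_vm_create vm_create = { .flags = 0 };
 *   ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm_create);
 *   // ... bind buffers and submit work against vm_create.vm_id ...
 *   struct drm_xe_vm_destroy vm_destroy = { .vm_id = vm_create.vm_id };
 *   ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &vm_destroy);
 */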
struct drm_xe_vm_bind_op {
  __u64 extensions;
  __u32 obj;
  __u16 pat_index;
  __u16 pad;
  union {
    __u64 obj_offset;
    __u64 userptr;
  };
  __u64 range;
  __u64 addr;
#define DRM_XE_VM_BIND_OP_MAP 0x0
#define DRM_XE_VM_BIND_OP_UNMAP 0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
  __u32 op;
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
  __u32 flags;
  __u32 prefetch_mem_region_instance;
  __u32 pad2;
  __u64 reserved[3];
};
struct drm_xe_vm_bind {
  __u64 extensions;
  __u32 vm_id;
  __u32 exec_queue_id;
  __u32 pad;
  __u32 num_binds;
  union {
    struct drm_xe_vm_bind_op bind;
    __u64 vector_of_binds;
  };
  __u32 pad2;
  __u32 num_syncs;
  __u64 syncs;
  __u64 reserved[2];
};
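/*
 * Illustrative sketch: mapping one buffer object at a GPU virtual address
 * with a single inline bind operation (num_binds == 1 uses the embedded
 * `bind` member; larger batches pass an array through vector_of_binds). The
 * pat_index, address, and size values are assumptions, and `vm_id` /
 * `bo_handle` come from earlier VM_CREATE / GEM_CREATE calls.
 *
 *   struct drm_xe_vm_bind bind = {
 *     .vm_id = vm_id,
 *     .num_binds = 1,
 *     .bind = {
 *       .obj = bo_handle,
 *       .obj_offset = 0,
 *       .range = 65536,            // size of the mapping
 *       .addr = 0x100000,          // GPU virtual address (assumed free)
 *       .op = DRM_XE_VM_BIND_OP_MAP,
 *       .pat_index = 0,            // assumed; platform specific cache attribute
 *     },
 *   };
 *   ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */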
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
  __u64 extensions;
  __u16 width;
  __u16 num_placements;
  __u32 vm_id;
  __u32 flags;
  __u32 exec_queue_id;
  __u64 instances;
  __u64 reserved[2];
};
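/*
 * Illustrative sketch: an exec queue is created against a VM and one or more
 * engine placements; `instances` points at width * num_placements entries of
 * struct drm_xe_engine_class_instance. The engine chosen below is an
 * assumption; real code picks one returned by DRM_XE_DEVICE_QUERY_ENGINES.
 *
 *   struct drm_xe_engine_class_instance instance = {
 *     .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *     .engine_instance = 0,
 *     .gt_id = 0,
 *   };
 *   struct drm_xe_exec_queue_create queue = {
 *     .width = 1,
 *     .num_placements = 1,
 *     .vm_id = vm_id,
 *     .instances = (__u64)(uintptr_t)&instance,
 *   };
 *   ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &queue);
 *   // queue.exec_queue_id identifies the new queue
 */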
struct drm_xe_exec_queue_destroy {
  __u32 exec_queue_id;
  __u32 pad;
  __u64 reserved[2];
};
struct drm_xe_exec_queue_get_property {
  __u64 extensions;
  __u32 exec_queue_id;
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
  __u32 property;
  __u64 value;
  __u64 reserved[2];
};
struct drm_xe_sync {
  __u64 extensions;
#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0
#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1
#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2
  __u32 type;
#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
  __u32 flags;
  union {
    __u32 handle;
    __u64 addr;
  };
  __u64 timeline_value;
  __u64 reserved[2];
};
struct drm_xe_exec {
  __u64 extensions;
  __u32 exec_queue_id;
  __u32 num_syncs;
  __u64 syncs;
  __u64 address;
  __u16 num_batch_buffer;
  __u16 pad[3];
  __u64 reserved[2];
};
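/*
 * Illustrative sketch: submitting one batch buffer on an exec queue and
 * signalling a syncobj on completion. `exec_queue_id` comes from
 * EXEC_QUEUE_CREATE, `batch_addr` is the GPU virtual address of a bound batch
 * buffer, and `syncobj_handle` is a DRM syncobj created elsewhere (all
 * assumptions here); error handling is omitted.
 *
 *   struct drm_xe_sync sync = {
 *     .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *     .flags = DRM_XE_SYNC_FLAG_SIGNAL,
 *     .handle = syncobj_handle,
 *   };
 *   struct drm_xe_exec exec = {
 *     .exec_queue_id = exec_queue_id,
 *     .num_syncs = 1,
 *     .syncs = (__u64)(uintptr_t)&sync,
 *     .address = batch_addr,
 *     .num_batch_buffer = 1,
 *   };
 *   ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 */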
struct drm_xe_wait_user_fence {
  __u64 extensions;
  __u64 addr;
#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
  __u16 op;
#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
  __u16 flags;
  __u32 pad;
  __u64 value;
  __u64 mask;
  __s64 timeout;
  __u32 exec_queue_id;
  __u32 pad2;
  __u64 reserved[2];
};
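/*
 * Illustrative sketch: waiting for a 64-bit fence value written by the GPU
 * (set up via a DRM_XE_SYNC_TYPE_USER_FENCE sync) to match an expected value.
 * `fence_addr`, the expected value, and the one-second timeout are
 * assumptions; the timeout is in nanoseconds and is relative unless
 * DRM_XE_UFENCE_WAIT_FLAG_ABSTIME is set.
 *
 *   struct drm_xe_wait_user_fence wait = {
 *     .addr = (__u64)(uintptr_t)fence_addr,
 *     .op = DRM_XE_UFENCE_WAIT_OP_EQ,
 *     .value = 1,
 *     .mask = ~0ull,
 *     .timeout = 1000000000,       // 1 s, relative
 *     .exec_queue_id = exec_queue_id,
 *   };
 *   ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
 */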
#ifdef __cplusplus
}
#endif
#endif