/****************************************************************************
 ****************************************************************************
 ***
 ***   This header was automatically generated from a Linux kernel header
 ***   of the same name, to make information necessary for userspace to
 ***   call into the kernel available to libc. It contains only constants,
 ***   structures, and macros generated from the original header, and thus,
 ***   contains no copyrightable information.
 ***
 ***   To edit the content of this header, modify the corresponding
 ***   source file (e.g. under external/kernel-headers/original/) then
 ***   run bionic/libc/kernel/tools/update_all.py
 ***
 ***   Any manual change here will be lost the next time this script will
 ***   be run. You've been warned!
 ***
 ****************************************************************************
 ****************************************************************************/
#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED
#include <drm/drm.h>
#include <linux/ioctl.h>
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 11
struct kfd_ioctl_get_version_args {
  __u32 major_version;
  __u32 minor_version;
};
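/* Queue types: values for kfd_ioctl_create_queue_args.queue_type. */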
#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
struct kfd_ioctl_create_queue_args {
  __u64 ring_base_address;
  __u64 write_pointer_address;
  __u64 read_pointer_address;
  __u64 doorbell_offset;
  __u32 ring_size;
  __u32 gpu_id;
  __u32 queue_type;
  __u32 queue_percentage;
  __u32 queue_priority;
  __u32 queue_id;
  __u64 eop_buffer_address;
  __u64 eop_buffer_size;
  __u64 ctx_save_restore_address;
  __u32 ctx_save_restore_size;
  __u32 ctl_stack_size;
};
struct kfd_ioctl_destroy_queue_args {
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_update_queue_args {
  __u64 ring_base_address;
  __u32 queue_id;
  __u32 ring_size;
  __u32 queue_percentage;
  __u32 queue_priority;
};
struct kfd_ioctl_set_cu_mask_args {
  __u32 queue_id;
  __u32 num_cu_mask;
  __u64 cu_mask_ptr;
};
struct kfd_ioctl_get_queue_wave_state_args {
  __u64 ctl_stack_address;
  __u32 ctl_stack_used_size;
  __u32 save_area_used_size;
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_get_available_memory_args {
  __u64 available;
  __u32 gpu_id;
  __u32 pad;
};
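/* Cache policies: values for kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy. */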
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
struct kfd_ioctl_set_memory_policy_args {
  __u64 alternate_aperture_base;
  __u64 alternate_aperture_size;
  __u32 gpu_id;
  __u32 default_policy;
  __u32 alternate_policy;
  __u32 pad;
};
struct kfd_ioctl_get_clock_counters_args {
  __u64 gpu_clock_counter;
  __u64 cpu_clock_counter;
  __u64 system_clock_counter;
  __u64 system_clock_freq;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_process_device_apertures {
  __u64 lds_base;
  __u64 lds_limit;
  __u64 scratch_base;
  __u64 scratch_limit;
  __u64 gpuvm_base;
  __u64 gpuvm_limit;
  __u32 gpu_id;
  __u32 pad;
};
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
  struct kfd_process_device_apertures process_apertures[NUM_OF_SUPPORTED_GPUS];
  __u32 num_of_nodes;
  __u32 pad;
};
struct kfd_ioctl_get_process_apertures_new_args {
  __u64 kfd_process_device_apertures_ptr;
  __u32 num_of_nodes;
  __u32 pad;
};
#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128
struct kfd_ioctl_dbg_register_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_unregister_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_address_watch_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
struct kfd_ioctl_dbg_wave_control_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
#define KFD_INVALID_FD 0xffffffff
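/* Event types: values for kfd_ioctl_create_event_args.event_type. */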
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
#define KFD_IOC_EVENT_HW_EXCEPTION 3
#define KFD_IOC_EVENT_SYSTEM_EVENT 4
#define KFD_IOC_EVENT_DEBUG_EVENT 5
#define KFD_IOC_EVENT_PROFILE_EVENT 6
#define KFD_IOC_EVENT_QUEUE_EVENT 7
#define KFD_IOC_EVENT_MEMORY 8
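/* Wait results: values returned in kfd_ioctl_wait_events_args.wait_result. */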
#define KFD_IOC_WAIT_RESULT_COMPLETE 0
#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
#define KFD_IOC_WAIT_RESULT_FAIL 2
#define KFD_SIGNAL_EVENT_LIMIT 4096
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
#define KFD_HW_EXCEPTION_GPU_HANG 0
#define KFD_HW_EXCEPTION_ECC 1
#define KFD_MEM_ERR_NO_RAS 0
#define KFD_MEM_ERR_SRAM_ECC 1
#define KFD_MEM_ERR_POISON_CONSUMED 2
#define KFD_MEM_ERR_GPU_HANG 3
struct kfd_ioctl_create_event_args {
  __u64 event_page_offset;
  __u32 event_trigger_data;
  __u32 event_type;
  __u32 auto_reset;
  __u32 node_id;
  __u32 event_id;
  __u32 event_slot_index;
};
struct kfd_ioctl_destroy_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_set_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_reset_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_memory_exception_failure {
  __u32 NotPresent;
  __u32 ReadOnly;
  __u32 NoExecute;
  __u32 imprecise;
};
struct kfd_hsa_memory_exception_data {
  struct kfd_memory_exception_failure failure;
  __u64 va;
  __u32 gpu_id;
  __u32 ErrorType;
};
struct kfd_hsa_hw_exception_data {
  __u32 reset_type;
  __u32 reset_cause;
  __u32 memory_lost;
  __u32 gpu_id;
};
struct kfd_event_data {
  union {
    struct kfd_hsa_memory_exception_data memory_exception_data;
    struct kfd_hsa_hw_exception_data hw_exception_data;
  };
  __u64 kfd_event_data_ext;
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_wait_events_args {
  __u64 events_ptr;
  __u32 num_events;
  __u32 wait_for_all;
  __u32 timeout;
  __u32 wait_result;
};
struct kfd_ioctl_set_scratch_backing_va_args {
  __u64 va_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_get_tile_config_args {
  __u64 tile_config_ptr;
  __u64 macro_tile_config_ptr;
  __u32 num_tile_configs;
  __u32 num_macro_tile_configs;
  __u32 gpu_id;
  __u32 gb_addr_config;
  __u32 num_banks;
  __u32 num_ranks;
};
struct kfd_ioctl_set_trap_handler_args {
  __u64 tba_addr;
  __u64 tma_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_acquire_vm_args {
  __u32 drm_fd;
  __u32 gpu_id;
};
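/* Allocation flags for kfd_ioctl_alloc_memory_of_gpu_args.flags: memory type in the low bits, allocation attributes in the high bits. */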
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
struct kfd_ioctl_alloc_memory_of_gpu_args {
  __u64 va_addr;
  __u64 size;
  __u64 handle;
  __u64 mmap_offset;
  __u32 gpu_id;
  __u32 flags;
};
struct kfd_ioctl_free_memory_of_gpu_args {
  __u64 handle;
};
struct kfd_ioctl_map_memory_to_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
struct kfd_ioctl_unmap_memory_from_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
struct kfd_ioctl_alloc_queue_gws_args {
  __u32 queue_id;
  __u32 num_gws;
  __u32 first_gws;
  __u32 pad;
};
struct kfd_ioctl_get_dmabuf_info_args {
  __u64 size;
  __u64 metadata_ptr;
  __u32 metadata_size;
  __u32 gpu_id;
  __u32 flags;
  __u32 dmabuf_fd;
};
struct kfd_ioctl_import_dmabuf_args {
  __u64 va_addr;
  __u64 handle;
  __u32 gpu_id;
  __u32 dmabuf_fd;
};
enum kfd_smi_event {
  KFD_SMI_EVENT_NONE = 0,
  KFD_SMI_EVENT_VMFAULT = 1,
  KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
  KFD_SMI_EVENT_GPU_PRE_RESET = 3,
  KFD_SMI_EVENT_GPU_POST_RESET = 4,
  KFD_SMI_EVENT_MIGRATE_START = 5,
  KFD_SMI_EVENT_MIGRATE_END = 6,
  KFD_SMI_EVENT_PAGE_FAULT_START = 7,
  KFD_SMI_EVENT_PAGE_FAULT_END = 8,
  KFD_SMI_EVENT_QUEUE_EVICTION = 9,
  KFD_SMI_EVENT_QUEUE_RESTORE = 10,
  KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
  KFD_SMI_EVENT_ALL_PROCESS = 64
};
enum KFD_MIGRATE_TRIGGERS {
  KFD_MIGRATE_TRIGGER_PREFETCH,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
  KFD_MIGRATE_TRIGGER_TTM_EVICTION
};
enum KFD_QUEUE_EVICTION_TRIGGERS {
  KFD_QUEUE_EVICTION_TRIGGER_SVM,
  KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
  KFD_QUEUE_EVICTION_TRIGGER_TTM,
  KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
  KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
  KFD_QUEUE_EVICTION_CRIU_RESTORE
};
enum KFD_SVM_UNMAP_TRIGGERS {
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
  KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
};
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE 96
struct kfd_ioctl_smi_events_args {
  __u32 gpuid;
  __u32 anon_fd;
};
enum kfd_criu_op {
  KFD_CRIU_OP_PROCESS_INFO,
  KFD_CRIU_OP_CHECKPOINT,
  KFD_CRIU_OP_UNPAUSE,
  KFD_CRIU_OP_RESTORE,
  KFD_CRIU_OP_RESUME,
};
struct kfd_ioctl_criu_args {
  __u64 devices;
  __u64 bos;
  __u64 priv_data;
  __u64 priv_data_size;
  __u32 num_devices;
  __u32 num_bos;
  __u32 num_objects;
  __u32 pid;
  __u32 op;
};
struct kfd_criu_device_bucket {
  __u32 user_gpu_id;
  __u32 actual_gpu_id;
  __u32 drm_fd;
  __u32 pad;
};
struct kfd_criu_bo_bucket {
  __u64 addr;
  __u64 size;
  __u64 offset;
  __u64 restored_offset;
  __u32 gpu_id;
  __u32 alloc_flags;
  __u32 dmabuf_fd;
  __u32 pad;
};
enum kfd_mmio_remap {
  KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
  KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
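/* SVM range flags, used as the value of the KFD_IOCTL_SVM_ATTR_SET_FLAGS / KFD_IOCTL_SVM_ATTR_CLR_FLAGS attributes. */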
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
enum kfd_ioctl_svm_op {
  KFD_IOCTL_SVM_OP_SET_ATTR,
  KFD_IOCTL_SVM_OP_GET_ATTR
};
enum kfd_ioctl_svm_location {
  KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
  KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};
enum kfd_ioctl_svm_attr_type {
  KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
  KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
  KFD_IOCTL_SVM_ATTR_ACCESS,
  KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
  KFD_IOCTL_SVM_ATTR_NO_ACCESS,
  KFD_IOCTL_SVM_ATTR_SET_FLAGS,
  KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
  KFD_IOCTL_SVM_ATTR_GRANULARITY
};
struct kfd_ioctl_svm_attribute {
  __u32 type;
  __u32 value;
};
struct kfd_ioctl_svm_args {
  __u64 start_addr;
  __u64 size;
  __u32 op;
  __u32 nattr;
  struct kfd_ioctl_svm_attribute attrs[];
};
struct kfd_ioctl_set_xnack_mode_args {
  __s32 xnack_enabled;
};
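/* AMDKFD ioctl numbers use the 'K' magic; commands run from AMDKFD_COMMAND_START (0x01) up to, but not including, AMDKFD_COMMAND_END. */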
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr,type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr,type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr,type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOC_GET_VERSION AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
#define AMDKFD_IOC_CREATE_QUEUE AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
#define AMDKFD_IOC_DESTROY_QUEUE AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
#define AMDKFD_IOC_SET_MEMORY_POLICY AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
#define AMDKFD_IOC_GET_CLOCK_COUNTERS AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
#define AMDKFD_IOC_UPDATE_QUEUE AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
#define AMDKFD_IOC_CREATE_EVENT AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
#define AMDKFD_IOC_DESTROY_EVENT AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
#define AMDKFD_IOC_SET_EVENT AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
#define AMDKFD_IOC_RESET_EVENT AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
#define AMDKFD_IOC_WAIT_EVENTS AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
#define AMDKFD_IOC_GET_TILE_CONFIG AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
#define AMDKFD_IOC_SET_TRAP_HANDLER AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW AMDKFD_IOWR(0x14, struct kfd_ioctl_get_process_apertures_new_args)
#define AMDKFD_IOC_ACQUIRE_VM AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
#define AMDKFD_IOC_FREE_MEMORY_OF_GPU AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
#define AMDKFD_IOC_MAP_MEMORY_TO_GPU AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
#define AMDKFD_IOC_SET_CU_MASK AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
#define AMDKFD_IOC_GET_DMABUF_INFO AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
#define AMDKFD_IOC_IMPORT_DMABUF AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
#define AMDKFD_IOC_ALLOC_QUEUE_GWS AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
#define AMDKFD_IOC_SMI_EVENTS AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
#define AMDKFD_IOC_SET_XNACK_MODE AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
#define AMDKFD_IOC_CRIU_OP AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
#define AMDKFD_IOC_AVAILABLE_MEMORY AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x24
#endif