/*
 * This file is auto-generated. Modifications will be lost.
 *
 * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
 * for more information.
 */
#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED
#include <drm/drm.h>
#include <linux/ioctl.h>
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 14
struct kfd_ioctl_get_version_args {
  __u32 major_version;
  __u32 minor_version;
};
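/*
 * Illustrative usage sketch (not part of the generated header): querying the
 * KFD interface version. Assumes the standard /dev/kfd device node; error
 * handling is minimal and AMDKFD_IOC_GET_VERSION is defined near the end of
 * this file.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <sys/ioctl.h>
 *   #include <unistd.h>
 *
 *   void print_kfd_version(void) {
 *     int fd = open("/dev/kfd", O_RDWR);  // KFD character device
 *     struct kfd_ioctl_get_version_args args = { 0 };
 *     if (fd >= 0 && ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
 *       printf("KFD ioctl interface %u.%u\n", args.major_version, args.minor_version);
 *     if (fd >= 0)
 *       close(fd);
 *   }
 */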
#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
struct kfd_ioctl_create_queue_args {
  __u64 ring_base_address;
  __u64 write_pointer_address;
  __u64 read_pointer_address;
  __u64 doorbell_offset;
  __u32 ring_size;
  __u32 gpu_id;
  __u32 queue_type;
  __u32 queue_percentage;
  __u32 queue_priority;
  __u32 queue_id;
  __u64 eop_buffer_address;
  __u64 eop_buffer_size;
  __u64 ctx_save_restore_address;
  __u32 ctx_save_restore_size;
  __u32 ctl_stack_size;
};
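/*
 * Illustrative usage sketch (not part of the generated header): filling in
 * kfd_ioctl_create_queue_args for a plain compute queue. A real runtime
 * (e.g. ROCr) first allocates GPU-visible memory for the ring and the
 * read/write pointers; here `ring` and `rwptrs` are assumed to be such
 * buffers supplied by the caller, and error handling is elided.
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *
 *   int create_compute_queue(int kfd_fd, __u32 gpu_id,
 *                            void *ring, void *rwptrs, __u32 *queue_id) {
 *     struct kfd_ioctl_create_queue_args args;
 *     memset(&args, 0, sizeof(args));
 *     args.ring_base_address = (__u64) (uintptr_t) ring;
 *     args.ring_size = 4096;
 *     args.write_pointer_address = (__u64) (uintptr_t) rwptrs;
 *     args.read_pointer_address = (__u64) (uintptr_t) rwptrs + 8;
 *     args.gpu_id = gpu_id;
 *     args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE;
 *     args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *     args.queue_priority = KFD_MAX_QUEUE_PRIORITY / 2;
 *     if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) < 0)
 *       return -1;
 *     *queue_id = args.queue_id;  // queue_id and doorbell_offset are outputs
 *     return 0;
 *   }
 */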
struct kfd_ioctl_destroy_queue_args {
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_update_queue_args {
  __u64 ring_base_address;
  __u32 queue_id;
  __u32 ring_size;
  __u32 queue_percentage;
  __u32 queue_priority;
};
struct kfd_ioctl_set_cu_mask_args {
  __u32 queue_id;
  __u32 num_cu_mask;
  __u64 cu_mask_ptr;
};
struct kfd_ioctl_get_queue_wave_state_args {
  __u64 ctl_stack_address;
  __u32 ctl_stack_used_size;
  __u32 save_area_used_size;
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_get_available_memory_args {
  __u64 available;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_dbg_device_info_entry {
  __u64 exception_status;
  __u64 lds_base;
  __u64 lds_limit;
  __u64 scratch_base;
  __u64 scratch_limit;
  __u64 gpuvm_base;
  __u64 gpuvm_limit;
  __u32 gpu_id;
  __u32 location_id;
  __u32 vendor_id;
  __u32 device_id;
  __u32 revision_id;
  __u32 subsystem_vendor_id;
  __u32 subsystem_device_id;
  __u32 fw_version;
  __u32 gfx_target_version;
  __u32 simd_count;
  __u32 max_waves_per_simd;
  __u32 array_count;
  __u32 simd_arrays_per_engine;
  __u32 num_xcc;
  __u32 capability;
  __u32 debug_prop;
};
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
struct kfd_ioctl_set_memory_policy_args {
  __u64 alternate_aperture_base;
  __u64 alternate_aperture_size;
  __u32 gpu_id;
  __u32 default_policy;
  __u32 alternate_policy;
  __u32 pad;
};
struct kfd_ioctl_get_clock_counters_args {
  __u64 gpu_clock_counter;
  __u64 cpu_clock_counter;
  __u64 system_clock_counter;
  __u64 system_clock_freq;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_process_device_apertures {
  __u64 lds_base;
  __u64 lds_limit;
  __u64 scratch_base;
  __u64 scratch_limit;
  __u64 gpuvm_base;
  __u64 gpuvm_limit;
  __u32 gpu_id;
  __u32 pad;
};
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
  struct kfd_process_device_apertures process_apertures[NUM_OF_SUPPORTED_GPUS];
  __u32 num_of_nodes;
  __u32 pad;
};
struct kfd_ioctl_get_process_apertures_new_args {
  __u64 kfd_process_device_apertures_ptr;
  __u32 num_of_nodes;
  __u32 pad;
};
#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128
struct kfd_ioctl_dbg_register_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_unregister_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_address_watch_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
struct kfd_ioctl_dbg_wave_control_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
#define KFD_INVALID_FD 0xffffffff
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
#define KFD_IOC_EVENT_HW_EXCEPTION 3
#define KFD_IOC_EVENT_SYSTEM_EVENT 4
#define KFD_IOC_EVENT_DEBUG_EVENT 5
#define KFD_IOC_EVENT_PROFILE_EVENT 6
#define KFD_IOC_EVENT_QUEUE_EVENT 7
#define KFD_IOC_EVENT_MEMORY 8
#define KFD_IOC_WAIT_RESULT_COMPLETE 0
#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
#define KFD_IOC_WAIT_RESULT_FAIL 2
#define KFD_SIGNAL_EVENT_LIMIT 4096
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
#define KFD_HW_EXCEPTION_GPU_HANG 0
#define KFD_HW_EXCEPTION_ECC 1
#define KFD_MEM_ERR_NO_RAS 0
#define KFD_MEM_ERR_SRAM_ECC 1
#define KFD_MEM_ERR_POISON_CONSUMED 2
#define KFD_MEM_ERR_GPU_HANG 3
struct kfd_ioctl_create_event_args {
  __u64 event_page_offset;
  __u32 event_trigger_data;
  __u32 event_type;
  __u32 auto_reset;
  __u32 node_id;
  __u32 event_id;
  __u32 event_slot_index;
};
struct kfd_ioctl_destroy_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_set_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_reset_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_memory_exception_failure {
  __u32 NotPresent;
  __u32 ReadOnly;
  __u32 NoExecute;
  __u32 imprecise;
};
struct kfd_hsa_memory_exception_data {
  struct kfd_memory_exception_failure failure;
  __u64 va;
  __u32 gpu_id;
  __u32 ErrorType;
};
struct kfd_hsa_hw_exception_data {
  __u32 reset_type;
  __u32 reset_cause;
  __u32 memory_lost;
  __u32 gpu_id;
};
struct kfd_hsa_signal_event_data {
  __u64 last_event_age;
};
struct kfd_event_data {
  union {
    struct kfd_hsa_memory_exception_data memory_exception_data;
    struct kfd_hsa_hw_exception_data hw_exception_data;
    struct kfd_hsa_signal_event_data signal_event_data;
  };
  __u64 kfd_event_data_ext;
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_wait_events_args {
  __u64 events_ptr;
  __u32 num_events;
  __u32 wait_for_all;
  __u32 timeout;
  __u32 wait_result;
};
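/*
 * Illustrative usage sketch (not part of the generated header): creating an
 * auto-reset signal event and blocking on it. kfd_fd is an open /dev/kfd
 * descriptor; the timeout is assumed to be in milliseconds, and the error
 * paths leak the event for brevity.
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *
 *   int wait_for_signal_event(int kfd_fd) {
 *     struct kfd_ioctl_create_event_args create;
 *     memset(&create, 0, sizeof(create));
 *     create.event_type = KFD_IOC_EVENT_SIGNAL;
 *     create.auto_reset = 1;
 *     if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &create) < 0)
 *       return -1;
 *     struct kfd_event_data data;
 *     memset(&data, 0, sizeof(data));
 *     data.event_id = create.event_id;
 *     struct kfd_ioctl_wait_events_args wait;
 *     memset(&wait, 0, sizeof(wait));
 *     wait.events_ptr = (__u64) (uintptr_t) &data;
 *     wait.num_events = 1;
 *     wait.wait_for_all = 1;
 *     wait.timeout = 1000;
 *     if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait) < 0)
 *       return -1;
 *     return wait.wait_result == KFD_IOC_WAIT_RESULT_COMPLETE ? 0 : -1;
 *   }
 */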
struct kfd_ioctl_set_scratch_backing_va_args {
  __u64 va_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_get_tile_config_args {
  __u64 tile_config_ptr;
  __u64 macro_tile_config_ptr;
  __u32 num_tile_configs;
  __u32 num_macro_tile_configs;
  __u32 gpu_id;
  __u32 gb_addr_config;
  __u32 num_banks;
  __u32 num_ranks;
};
struct kfd_ioctl_set_trap_handler_args {
  __u64 tba_addr;
  __u64 tma_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_acquire_vm_args {
  __u32 drm_fd;
  __u32 gpu_id;
};
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
struct kfd_ioctl_alloc_memory_of_gpu_args {
  __u64 va_addr;
  __u64 size;
  __u64 handle;
  __u64 mmap_offset;
  __u32 gpu_id;
  __u32 flags;
};
struct kfd_ioctl_free_memory_of_gpu_args {
  __u64 handle;
};
struct kfd_ioctl_map_memory_to_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
struct kfd_ioctl_unmap_memory_from_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
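/*
 * Illustrative usage sketch (not part of the generated header): the typical
 * allocate-then-map flow for VRAM. It assumes the process has already bound
 * a DRM render-node fd to this GPU with AMDKFD_IOC_ACQUIRE_VM, and that va
 * and size satisfy the driver's alignment rules; error handling is elided.
 *
 *   #include <stdint.h>
 *   #include <sys/ioctl.h>
 *
 *   int alloc_and_map_vram(int kfd_fd, __u32 gpu_id, __u64 va, __u64 size) {
 *     struct kfd_ioctl_alloc_memory_of_gpu_args alloc = { 0 };
 *     alloc.va_addr = va;
 *     alloc.size = size;
 *     alloc.gpu_id = gpu_id;
 *     alloc.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;
 *     if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc) < 0)
 *       return -1;
 *     __u32 devices[1] = { gpu_id };
 *     struct kfd_ioctl_map_memory_to_gpu_args map = { 0 };
 *     map.handle = alloc.handle;  // opaque handle returned by the allocation
 *     map.device_ids_array_ptr = (__u64) (uintptr_t) devices;
 *     map.n_devices = 1;
 *     return ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map);  // n_success is updated
 *   }
 */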
struct kfd_ioctl_alloc_queue_gws_args {
  __u32 queue_id;
  __u32 num_gws;
  __u32 first_gws;
  __u32 pad;
};
struct kfd_ioctl_get_dmabuf_info_args {
  __u64 size;
  __u64 metadata_ptr;
  __u32 metadata_size;
  __u32 gpu_id;
  __u32 flags;
  __u32 dmabuf_fd;
};
struct kfd_ioctl_import_dmabuf_args {
  __u64 va_addr;
  __u64 handle;
  __u32 gpu_id;
  __u32 dmabuf_fd;
};
struct kfd_ioctl_export_dmabuf_args {
  __u64 handle;
  __u32 flags;
  __u32 dmabuf_fd;
};
enum kfd_smi_event {
  KFD_SMI_EVENT_NONE = 0,
  KFD_SMI_EVENT_VMFAULT = 1,
  KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
  KFD_SMI_EVENT_GPU_PRE_RESET = 3,
  KFD_SMI_EVENT_GPU_POST_RESET = 4,
  KFD_SMI_EVENT_MIGRATE_START = 5,
  KFD_SMI_EVENT_MIGRATE_END = 6,
  KFD_SMI_EVENT_PAGE_FAULT_START = 7,
  KFD_SMI_EVENT_PAGE_FAULT_END = 8,
  KFD_SMI_EVENT_QUEUE_EVICTION = 9,
  KFD_SMI_EVENT_QUEUE_RESTORE = 10,
  KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
  KFD_SMI_EVENT_ALL_PROCESS = 64
};
enum KFD_MIGRATE_TRIGGERS {
  KFD_MIGRATE_TRIGGER_PREFETCH,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
  KFD_MIGRATE_TRIGGER_TTM_EVICTION
};
enum KFD_QUEUE_EVICTION_TRIGGERS {
  KFD_QUEUE_EVICTION_TRIGGER_SVM,
  KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
  KFD_QUEUE_EVICTION_TRIGGER_TTM,
  KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
  KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
  KFD_QUEUE_EVICTION_CRIU_RESTORE
};
enum KFD_SVM_UNMAP_TRIGGERS {
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
  KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
};
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE 96
struct kfd_ioctl_smi_events_args {
  __u32 gpuid;
  __u32 anon_fd;
};
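/*
 * Illustrative usage sketch (not part of the generated header): subscribing
 * to SMI events. The ioctl returns an anonymous file descriptor in anon_fd;
 * events then arrive as text records of at most KFD_SMI_EVENT_MSG_SIZE
 * bytes. Writing a binary __u64 event mask to the fd to choose which events
 * are delivered is an assumption based on current kernel behavior.
 *
 *   #include <stdio.h>
 *   #include <sys/ioctl.h>
 *   #include <unistd.h>
 *
 *   int watch_vm_faults(int kfd_fd, __u32 gpu_id) {
 *     struct kfd_ioctl_smi_events_args args = { 0 };
 *     args.gpuid = gpu_id;
 *     if (ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args) < 0)
 *       return -1;
 *     __u64 mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);
 *     write(args.anon_fd, &mask, sizeof(mask));          // enable VM-fault events only
 *     char msg[KFD_SMI_EVENT_MSG_SIZE];
 *     ssize_t n = read(args.anon_fd, msg, sizeof(msg));  // blocks until an event fires
 *     if (n > 0)
 *       fwrite(msg, 1, (size_t) n, stdout);
 *     return args.anon_fd;
 *   }
 */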
enum kfd_criu_op {
  KFD_CRIU_OP_PROCESS_INFO,
  KFD_CRIU_OP_CHECKPOINT,
  KFD_CRIU_OP_UNPAUSE,
  KFD_CRIU_OP_RESTORE,
  KFD_CRIU_OP_RESUME,
};
struct kfd_ioctl_criu_args {
  __u64 devices;
  __u64 bos;
  __u64 priv_data;
  __u64 priv_data_size;
  __u32 num_devices;
  __u32 num_bos;
  __u32 num_objects;
  __u32 pid;
  __u32 op;
};
struct kfd_criu_device_bucket {
  __u32 user_gpu_id;
  __u32 actual_gpu_id;
  __u32 drm_fd;
  __u32 pad;
};
struct kfd_criu_bo_bucket {
  __u64 addr;
  __u64 size;
  __u64 offset;
  __u64 restored_offset;
  __u32 gpu_id;
  __u32 alloc_flags;
  __u32 dmabuf_fd;
  __u32 pad;
};
enum kfd_mmio_remap {
  KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
  KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
enum kfd_ioctl_svm_op {
  KFD_IOCTL_SVM_OP_SET_ATTR,
  KFD_IOCTL_SVM_OP_GET_ATTR
};
enum kfd_ioctl_svm_location {
  KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
  KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};
enum kfd_ioctl_svm_attr_type {
  KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
  KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
  KFD_IOCTL_SVM_ATTR_ACCESS,
  KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
  KFD_IOCTL_SVM_ATTR_NO_ACCESS,
  KFD_IOCTL_SVM_ATTR_SET_FLAGS,
  KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
  KFD_IOCTL_SVM_ATTR_GRANULARITY
};
struct kfd_ioctl_svm_attribute {
  __u32 type;
  __u32 value;
};
struct kfd_ioctl_svm_args {
  __u64 start_addr;
  __u64 size;
  __u32 op;
  __u32 nattr;
  struct kfd_ioctl_svm_attribute attrs[];
};
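/*
 * Illustrative usage sketch (not part of the generated header):
 * kfd_ioctl_svm_args ends in a flexible array member, so the argument block
 * and its attribute list are allocated together. This sketch marks a range
 * read-only for the GPU; address and size alignment requirements are
 * elided.
 *
 *   #include <stdlib.h>
 *   #include <sys/ioctl.h>
 *
 *   int svm_set_gpu_readonly(int kfd_fd, __u64 addr, __u64 size) {
 *     struct kfd_ioctl_svm_args *args =
 *         calloc(1, sizeof(*args) + sizeof(struct kfd_ioctl_svm_attribute));
 *     if (!args)
 *       return -1;
 *     args->start_addr = addr;
 *     args->size = size;
 *     args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *     args->nattr = 1;
 *     args->attrs[0].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
 *     args->attrs[0].value = KFD_IOCTL_SVM_FLAG_GPU_RO;
 *     int ret = ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 *     free(args);
 *     return ret;
 *   }
 */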
struct kfd_ioctl_set_xnack_mode_args {
  __s32 xnack_enabled;
};
enum kfd_dbg_trap_override_mode {
  KFD_DBG_TRAP_OVERRIDE_OR = 0,
  KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};
enum kfd_dbg_trap_mask {
  KFD_DBG_TRAP_MASK_FP_INVALID = 1,
  KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
  KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
  KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
  KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
  KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
  KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
  KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
  KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
  KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
  KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};
enum kfd_dbg_trap_wave_launch_mode {
  KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
  KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
  KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};
enum kfd_dbg_trap_address_watch_mode {
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};
enum kfd_dbg_trap_flags {
  KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
};
enum kfd_dbg_trap_exception_code {
  EC_NONE = 0,
  EC_QUEUE_WAVE_ABORT = 1,
  EC_QUEUE_WAVE_TRAP = 2,
  EC_QUEUE_WAVE_MATH_ERROR = 3,
  EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
  EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
  EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
  EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
  EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
  EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
  EC_QUEUE_PACKET_RESERVED = 19,
  EC_QUEUE_PACKET_UNSUPPORTED = 20,
  EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
  EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
  EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
  EC_QUEUE_PREEMPTION_ERROR = 30,
  EC_QUEUE_NEW = 31,
  EC_DEVICE_QUEUE_DELETE = 32,
  EC_DEVICE_MEMORY_VIOLATION = 33,
  EC_DEVICE_RAS_ERROR = 34,
  EC_DEVICE_FATAL_HALT = 35,
  EC_DEVICE_NEW = 36,
  EC_PROCESS_RUNTIME = 48,
  EC_PROCESS_DEVICE_REMOVE = 49,
  EC_MAX
};
#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1))
#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) (! ! (KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) (! ! (KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) (! ! (KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
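/*
 * Illustrative usage sketch (not part of the generated header): KFD_EC_MASK
 * turns an exception code into a one-hot bit, so the three predicates above
 * let a debugger classify a code by its source.
 *
 *   static const char *ec_source(enum kfd_dbg_trap_exception_code ec) {
 *     if (KFD_DBG_EC_TYPE_IS_QUEUE(ec)) return "queue";
 *     if (KFD_DBG_EC_TYPE_IS_DEVICE(ec)) return "device";
 *     if (KFD_DBG_EC_TYPE_IS_PROCESS(ec)) return "process";
 *     return "none";
 *   }
 */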
enum kfd_dbg_runtime_state {
  DEBUG_RUNTIME_STATE_DISABLED = 0,
  DEBUG_RUNTIME_STATE_ENABLED = 1,
  DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
  DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};
struct kfd_runtime_info {
  __u64 r_debug;
  __u32 runtime_state;
  __u32 ttmp_setup;
};
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK 1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK 2
struct kfd_ioctl_runtime_enable_args {
  __u64 r_debug;
  __u32 mode_mask;
  __u32 capabilities_mask;
};
struct kfd_queue_snapshot_entry {
  __u64 exception_status;
  __u64 ring_base_address;
  __u64 write_pointer_address;
  __u64 read_pointer_address;
  __u64 ctx_save_restore_address;
  __u32 queue_id;
  __u32 gpu_id;
  __u32 ring_size;
  __u32 queue_type;
  __u32 ctx_save_restore_area_size;
  __u32 reserved;
};
#define KFD_DBG_QUEUE_ERROR_BIT 30
#define KFD_DBG_QUEUE_INVALID_BIT 31
#define KFD_DBG_QUEUE_ERROR_MASK (1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK (1 << KFD_DBG_QUEUE_INVALID_BIT)
struct kfd_context_save_area_header {
  struct {
    __u32 control_stack_offset;
    __u32 control_stack_size;
    __u32 wave_state_offset;
    __u32 wave_state_size;
  } wave_state;
  __u32 debug_offset;
  __u32 debug_size;
  __u64 err_payload_addr;
  __u32 err_event_id;
  __u32 reserved1;
};
enum kfd_dbg_trap_operations {
  KFD_IOC_DBG_TRAP_ENABLE = 0,
  KFD_IOC_DBG_TRAP_DISABLE = 1,
  KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
  KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
  KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,
  KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,
  KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,
  KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,
  KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,
  KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,
  KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
  KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
  KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
  KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
  KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};
struct kfd_ioctl_dbg_trap_enable_args {
  __u64 exception_mask;
  __u64 rinfo_ptr;
  __u32 rinfo_size;
  __u32 dbg_fd;
};
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
  __u64 exception_mask;
  __u32 gpu_id;
  __u32 queue_id;
};
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
  __u64 exception_mask;
};
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
  __u32 override_mode;
  __u32 enable_mask;
  __u32 support_request_mask;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
  __u32 launch_mode;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_suspend_queues_args {
  __u64 exception_mask;
  __u64 queue_array_ptr;
  __u32 num_queues;
  __u32 grace_period;
};
struct kfd_ioctl_dbg_trap_resume_queues_args {
  __u64 queue_array_ptr;
  __u32 num_queues;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
  __u64 address;
  __u32 mode;
  __u32 mask;
  __u32 gpu_id;
  __u32 id;
};
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
  __u32 gpu_id;
  __u32 id;
};
struct kfd_ioctl_dbg_trap_set_flags_args {
  __u32 flags;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_query_debug_event_args {
  __u64 exception_mask;
  __u32 gpu_id;
  __u32 queue_id;
};
struct kfd_ioctl_dbg_trap_query_exception_info_args {
  __u64 info_ptr;
  __u32 info_size;
  __u32 source_id;
  __u32 exception_code;
  __u32 clear_exception;
};
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
  __u64 exception_mask;
  __u64 snapshot_buf_ptr;
  __u32 num_queues;
  __u32 entry_size;
};
struct kfd_ioctl_dbg_trap_device_snapshot_args {
  __u64 exception_mask;
  __u64 snapshot_buf_ptr;
  __u32 num_devices;
  __u32 entry_size;
};
struct kfd_ioctl_dbg_trap_args {
  __u32 pid;
  __u32 op;
  union {
    struct kfd_ioctl_dbg_trap_enable_args enable;
    struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
    struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
    struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
    struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
    struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
    struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
    struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
    struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
    struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
    struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
    struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
    struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
    struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
  };
};
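/*
 * Illustrative usage sketch (not part of the generated header): every
 * debug-trap operation goes through the single AMDKFD_IOC_DBG_TRAP ioctl
 * (defined below); op selects which union member the kernel reads. Enabling
 * debugging of a target process might look like this, where dbg_fd is
 * assumed to be a descriptor the debugger polls for events.
 *
 *   #include <stdint.h>
 *   #include <sys/ioctl.h>
 *
 *   int dbg_trap_enable(int kfd_fd, __u32 target_pid, __u32 dbg_fd,
 *                       struct kfd_runtime_info *rinfo) {
 *     struct kfd_ioctl_dbg_trap_args args = { 0 };
 *     args.pid = target_pid;
 *     args.op = KFD_IOC_DBG_TRAP_ENABLE;
 *     args.enable.dbg_fd = dbg_fd;
 *     args.enable.rinfo_ptr = (__u64) (uintptr_t) rinfo;
 *     args.enable.rinfo_size = sizeof(*rinfo);
 *     return ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
 *   }
 */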
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr,type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr,type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr,type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOC_GET_VERSION AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
#define AMDKFD_IOC_CREATE_QUEUE AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
#define AMDKFD_IOC_DESTROY_QUEUE AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
#define AMDKFD_IOC_SET_MEMORY_POLICY AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
#define AMDKFD_IOC_GET_CLOCK_COUNTERS AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
#define AMDKFD_IOC_UPDATE_QUEUE AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
#define AMDKFD_IOC_CREATE_EVENT AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
#define AMDKFD_IOC_DESTROY_EVENT AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
#define AMDKFD_IOC_SET_EVENT AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
#define AMDKFD_IOC_RESET_EVENT AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
#define AMDKFD_IOC_WAIT_EVENTS AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
#define AMDKFD_IOC_GET_TILE_CONFIG AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
#define AMDKFD_IOC_SET_TRAP_HANDLER AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW AMDKFD_IOWR(0x14, struct kfd_ioctl_get_process_apertures_new_args)
#define AMDKFD_IOC_ACQUIRE_VM AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
#define AMDKFD_IOC_FREE_MEMORY_OF_GPU AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
#define AMDKFD_IOC_MAP_MEMORY_TO_GPU AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
#define AMDKFD_IOC_SET_CU_MASK AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
#define AMDKFD_IOC_GET_DMABUF_INFO AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
#define AMDKFD_IOC_IMPORT_DMABUF AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
#define AMDKFD_IOC_ALLOC_QUEUE_GWS AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
#define AMDKFD_IOC_SMI_EVENTS AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
#define AMDKFD_IOC_SET_XNACK_MODE AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
#define AMDKFD_IOC_CRIU_OP AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
#define AMDKFD_IOC_AVAILABLE_MEMORY AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
#define AMDKFD_IOC_EXPORT_DMABUF AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)
#define AMDKFD_IOC_RUNTIME_ENABLE AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)
#define AMDKFD_IOC_DBG_TRAP AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x27
#endif