/*
* This file is auto-generated. Modifications will be lost.
*
* See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
* for more information.
*/
#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_
#include "drm.h"
#ifdef __cplusplus
extern "C" {
#endif
#define DRM_XE_DEVICE_QUERY 0x00
#define DRM_XE_GEM_CREATE 0x01
#define DRM_XE_GEM_MMAP_OFFSET 0x02
#define DRM_XE_VM_CREATE 0x03
#define DRM_XE_VM_DESTROY 0x04
#define DRM_XE_VM_BIND 0x05
#define DRM_XE_EXEC_QUEUE_CREATE 0x06
#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
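/*
 * Editorial usage note, not part of the generated header: the request codes
 * above are passed to ioctl(2) on an open Xe DRM node. A minimal sketch,
 * assuming the render node is /dev/dri/renderD128 (the node name varies per
 * system); the descriptor name xe_fd is illustrative and is reused in the
 * sketches further below.
 *
 *   #include <fcntl.h>
 *   #include <sys/ioctl.h>
 *
 *   int xe_fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *   // xe_fd is then used as the first argument of every
 *   // ioctl(xe_fd, DRM_IOCTL_XE_*, &arg) call on this device.
 */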
struct drm_xe_user_extension {
__u64 next_extension;
__u32 name;
__u32 pad;
};
struct drm_xe_ext_set_property {
struct drm_xe_user_extension base;
__u32 property;
__u32 pad;
__u64 value;
__u64 reserved[2];
};
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER 0
#define DRM_XE_ENGINE_CLASS_COPY 1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
#define DRM_XE_ENGINE_CLASS_COMPUTE 4
#define DRM_XE_ENGINE_CLASS_VM_BIND 5
__u16 engine_class;
__u16 engine_instance;
__u16 gt_id;
__u16 pad;
};
struct drm_xe_engine {
struct drm_xe_engine_class_instance instance;
__u64 reserved[3];
};
struct drm_xe_query_engines {
__u32 num_engines;
__u32 pad;
struct drm_xe_engine engines[];
};
enum drm_xe_memory_class {
DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
DRM_XE_MEM_REGION_CLASS_VRAM
};
struct drm_xe_mem_region {
__u16 mem_class;
__u16 instance;
__u32 min_page_size;
__u64 total_size;
__u64 used;
__u64 cpu_visible_size;
__u64 cpu_visible_used;
__u64 reserved[6];
};
struct drm_xe_query_mem_regions {
__u32 num_mem_regions;
__u32 pad;
struct drm_xe_mem_region mem_regions[];
};
struct drm_xe_query_config {
__u32 num_params;
__u32 pad;
#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define DRM_XE_QUERY_CONFIG_FLAGS 1
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
__u64 info[];
};
struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN 0
#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
__u16 type;
__u16 tile_id;
__u16 gt_id;
__u16 pad[3];
__u32 reference_clock;
__u64 near_mem_regions;
__u64 far_mem_regions;
__u16 ip_ver_major;
__u16 ip_ver_minor;
__u16 ip_ver_rev;
__u16 pad2;
__u64 reserved[7];
};
struct drm_xe_query_gt_list {
__u32 num_gt;
__u32 pad;
struct drm_xe_gt gt_list[];
};
struct drm_xe_query_topology_mask {
__u16 gt_id;
#define DRM_XE_TOPO_DSS_GEOMETRY 1
#define DRM_XE_TOPO_DSS_COMPUTE 2
#define DRM_XE_TOPO_L3_BANK 3
#define DRM_XE_TOPO_EU_PER_DSS 4
__u16 type;
__u32 num_bytes;
__u8 mask[];
};
struct drm_xe_query_engine_cycles {
struct drm_xe_engine_class_instance eci;
__s32 clockid;
__u32 width;
__u64 engine_cycles;
__u64 cpu_timestamp;
__u64 cpu_delta;
};
struct drm_xe_query_uc_fw_version {
#define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0
#define XE_QUERY_UC_TYPE_HUC 1
__u16 uc_type;
__u16 pad;
__u32 branch_ver;
__u32 major_ver;
__u32 minor_ver;
__u32 patch_ver;
__u32 pad2;
__u64 reserved;
};
struct drm_xe_device_query {
__u64 extensions;
#define DRM_XE_DEVICE_QUERY_ENGINES 0
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
#define DRM_XE_DEVICE_QUERY_CONFIG 2
#define DRM_XE_DEVICE_QUERY_GT_LIST 3
#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7
#define DRM_XE_DEVICE_QUERY_OA_UNITS 8
__u32 query;
__u32 size;
__u64 data;
__u64 reserved[2];
};
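/*
 * Editorial usage sketch, assuming the standard two-call query pattern and
 * the xe_fd descriptor from the note above: call the ioctl once with
 * size == 0 to learn the required buffer size, allocate, then call again
 * with data pointing at the buffer.
 *
 *   struct drm_xe_device_query query = {
 *       .query = DRM_XE_DEVICE_QUERY_ENGINES,
 *   };
 *   ioctl(xe_fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);  // fills query.size
 *   struct drm_xe_query_engines *engines = malloc(query.size);
 *   query.data = (__u64)(uintptr_t)engines;
 *   ioctl(xe_fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *   for (__u32 i = 0; i < engines->num_engines; i++) {
 *       // engines->engines[i].instance describes one hardware engine
 *   }
 */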
struct drm_xe_gem_create {
__u64 extensions;
__u64 size;
__u32 placement;
#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
__u32 flags;
__u32 vm_id;
__u32 handle;
#define DRM_XE_GEM_CPU_CACHING_WB 1
#define DRM_XE_GEM_CPU_CACHING_WC 2
__u16 cpu_caching;
__u16 pad[3];
__u64 reserved[2];
};
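/*
 * Editorial usage sketch: create a 64 KiB buffer object. .placement is a
 * bitmask of memory region instances reported by
 * DRM_XE_DEVICE_QUERY_MEM_REGIONS, and .cpu_caching must be set. Instance 0
 * being system memory is an assumption here; check the query results.
 *
 *   struct drm_xe_gem_create gem = {
 *       .size = 64 * 1024,
 *       .placement = 1 << 0,                  // assumed SYSMEM instance
 *       .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
 *   };
 *   if (ioctl(xe_fd, DRM_IOCTL_XE_GEM_CREATE, &gem) == 0) {
 *       // gem.handle is the new GEM object handle
 *   }
 */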
struct drm_xe_gem_mmap_offset {
__u64 extensions;
__u32 handle;
__u32 flags;
__u64 offset;
__u64 reserved[2];
};
struct drm_xe_vm_create {
__u64 extensions;
#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)
__u32 flags;
__u32 vm_id;
__u64 reserved[2];
};
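/*
 * Editorial usage sketch: create a VM in scratch-page mode, so accesses to
 * unmapped ranges hit a scratch page instead of raising a fault.
 *
 *   struct drm_xe_vm_create vm = {
 *       .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *   };
 *   ioctl(xe_fd, DRM_IOCTL_XE_VM_CREATE, &vm);
 *   // vm.vm_id identifies the new VM; release it later with
 *   // DRM_IOCTL_XE_VM_DESTROY
 */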
struct drm_xe_vm_destroy {
__u32 vm_id;
__u32 pad;
__u64 reserved[2];
};
struct drm_xe_vm_bind_op {
__u64 extensions;
__u32 obj;
__u16 pat_index;
__u16 pad;
union {
__u64 obj_offset;
__u64 userptr;
};
__u64 range;
__u64 addr;
#define DRM_XE_VM_BIND_OP_MAP 0x0
#define DRM_XE_VM_BIND_OP_UNMAP 0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
__u32 op;
#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
__u32 flags;
__u32 prefetch_mem_region_instance;
__u32 pad2;
__u64 reserved[3];
};
struct drm_xe_vm_bind {
__u64 extensions;
__u32 vm_id;
__u32 exec_queue_id;
__u32 pad;
__u32 num_binds;
union {
struct drm_xe_vm_bind_op bind;
__u64 vector_of_binds;
};
__u32 pad2;
__u32 num_syncs;
__u64 syncs;
__u64 reserved[2];
};
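/*
 * Editorial usage sketch: map one GEM object (gem and vm from the sketches
 * above) at a fixed GPU virtual address. With num_binds == 1 the inline
 * .bind member is used; larger batches pass an array of drm_xe_vm_bind_op
 * through .vector_of_binds instead. The GPU VA and pat_index values here are
 * illustrative; valid pat_index values are device-specific.
 *
 *   struct drm_xe_vm_bind bind = {
 *       .vm_id = vm.vm_id,
 *       .num_binds = 1,
 *       .bind = {
 *           .obj = gem.handle,
 *           .obj_offset = 0,
 *           .range = 64 * 1024,
 *           .addr = 0x100000,                 // example GPU VA, page aligned
 *           .op = DRM_XE_VM_BIND_OP_MAP,
 *           .pat_index = 0,                   // device-specific caching index
 *       },
 *   };
 *   ioctl(xe_fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */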
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
__u64 extensions;
__u16 width;
__u16 num_placements;
__u32 vm_id;
__u32 flags;
__u32 exec_queue_id;
__u64 instances;
__u64 reserved[2];
};
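/*
 * Editorial usage sketch for the single-placement case: create an exec queue
 * on one render engine of GT 0, bound to the VM created above.
 *
 *   struct drm_xe_engine_class_instance instance = {
 *       .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *       .engine_instance = 0,
 *       .gt_id = 0,
 *   };
 *   struct drm_xe_exec_queue_create queue = {
 *       .width = 1,
 *       .num_placements = 1,
 *       .vm_id = vm.vm_id,
 *       .instances = (__u64)(uintptr_t)&instance,
 *   };
 *   ioctl(xe_fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &queue);
 *   // queue.exec_queue_id is then used for DRM_IOCTL_XE_EXEC and, if
 *   // desired, for DRM_IOCTL_XE_VM_BIND
 */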
struct drm_xe_exec_queue_destroy {
__u32 exec_queue_id;
__u32 pad;
__u64 reserved[2];
};
struct drm_xe_exec_queue_get_property {
__u64 extensions;
__u32 exec_queue_id;
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
__u32 property;
__u64 value;
__u64 reserved[2];
};
struct drm_xe_sync {
__u64 extensions;
#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0
#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1
#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2
__u32 type;
#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
__u32 flags;
union {
__u32 handle;
__u64 addr;
};
__u64 timeline_value;
__u64 reserved[2];
};
struct drm_xe_exec {
__u64 extensions;
__u32 exec_queue_id;
__u32 num_syncs;
__u64 syncs;
__u64 address;
__u16 num_batch_buffer;
__u16 pad[3];
__u64 reserved[2];
};
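/*
 * Editorial usage sketch: submit one batch buffer whose GPU VA is already
 * bound in the queue's VM, signalling a syncobj on completion. The names
 * queue and syncobj_handle are illustrative; the syncobj would come from the
 * generic DRM syncobj ioctls.
 *
 *   struct drm_xe_sync sync = {
 *       .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *       .flags = DRM_XE_SYNC_FLAG_SIGNAL,
 *       .handle = syncobj_handle,
 *   };
 *   struct drm_xe_exec exec = {
 *       .exec_queue_id = queue.exec_queue_id,
 *       .num_syncs = 1,
 *       .syncs = (__u64)(uintptr_t)&sync,
 *       .address = 0x100000,                  // GPU VA of the batch buffer
 *       .num_batch_buffer = 1,
 *   };
 *   ioctl(xe_fd, DRM_IOCTL_XE_EXEC, &exec);
 */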
struct drm_xe_wait_user_fence {
__u64 extensions;
__u64 addr;
#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
__u16 op;
#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
__u16 flags;
__u32 pad;
__u64 value;
__u64 mask;
__s64 timeout;
__u32 exec_queue_id;
__u32 pad2;
__u64 reserved[2];
};
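/*
 * Editorial usage sketch: block until the 64-bit value at a user-fence
 * address matches an expected value, with a relative timeout in nanoseconds
 * (DRM_XE_UFENCE_WAIT_FLAG_ABSTIME switches to an absolute deadline).
 * fence_ptr and expected_value are illustrative.
 *
 *   struct drm_xe_wait_user_fence wait = {
 *       .addr = (__u64)(uintptr_t)fence_ptr,  // qword-aligned CPU address
 *       .op = DRM_XE_UFENCE_WAIT_OP_EQ,
 *       .value = expected_value,
 *       .mask = ~0ull,
 *       .timeout = 1000000000,                // 1 second
 *       .exec_queue_id = queue.exec_queue_id,
 *   };
 *   ioctl(xe_fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
 */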
enum drm_xe_observation_type {
DRM_XE_OBSERVATION_TYPE_OA,
};
enum drm_xe_observation_op {
DRM_XE_OBSERVATION_OP_STREAM_OPEN,
DRM_XE_OBSERVATION_OP_ADD_CONFIG,
DRM_XE_OBSERVATION_OP_REMOVE_CONFIG,
};
struct drm_xe_observation_param {
__u64 extensions;
__u64 observation_type;
__u64 observation_op;
__u64 param;
};
enum drm_xe_observation_ioctls {
DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0),
DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1),
DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2),
DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3),
DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4),
};
enum drm_xe_oa_unit_type {
DRM_XE_OA_UNIT_TYPE_OAG,
DRM_XE_OA_UNIT_TYPE_OAM,
};
struct drm_xe_oa_unit {
__u64 extensions;
__u32 oa_unit_id;
__u32 oa_unit_type;
__u64 capabilities;
#define DRM_XE_OA_CAPS_BASE (1 << 0)
__u64 oa_timestamp_freq;
__u64 reserved[4];
__u64 num_engines;
struct drm_xe_engine_class_instance eci[];
};
struct drm_xe_query_oa_units {
__u64 extensions;
__u32 num_oa_units;
__u32 pad;
__u64 oa_units[];
};
enum drm_xe_oa_format_type {
DRM_XE_OA_FMT_TYPE_OAG,
DRM_XE_OA_FMT_TYPE_OAR,
DRM_XE_OA_FMT_TYPE_OAM,
DRM_XE_OA_FMT_TYPE_OAC,
DRM_XE_OA_FMT_TYPE_OAM_MPEC,
DRM_XE_OA_FMT_TYPE_PEC,
};
enum drm_xe_oa_property_id {
#define DRM_XE_OA_EXTENSION_SET_PROPERTY 0
DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1,
DRM_XE_OA_PROPERTY_SAMPLE_OA,
DRM_XE_OA_PROPERTY_OA_METRIC_SET,
DRM_XE_OA_PROPERTY_OA_FORMAT,
#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xffu << 0)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xffu << 8)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xffu << 16)
#define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xffu << 24)
DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT,
DRM_XE_OA_PROPERTY_OA_DISABLED,
DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID,
DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE,
DRM_XE_OA_PROPERTY_NO_PREEMPT,
};
struct drm_xe_oa_config {
__u64 extensions;
char uuid[36];
__u32 n_regs;
__u64 regs_ptr;
};
struct drm_xe_oa_stream_status {
__u64 extensions;
__u64 oa_status;
#define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL (1 << 3)
#define DRM_XE_OASTATUS_COUNTER_OVERFLOW (1 << 2)
#define DRM_XE_OASTATUS_BUFFER_OVERFLOW (1 << 1)
#define DRM_XE_OASTATUS_REPORT_LOST (1 << 0)
__u64 reserved[3];
};
struct drm_xe_oa_stream_info {
__u64 extensions;
__u64 oa_buf_size;
__u64 reserved[3];
};
#ifdef __cplusplus
}
#endif
#endif