aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--tools/include/uapi/drm/i915_drm.h254
1 file changed, 181 insertions, 73 deletions
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 397810fa2d33..3a73f5316766 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -63,6 +63,28 @@ extern "C" {
63#define I915_RESET_UEVENT "RESET" 63#define I915_RESET_UEVENT "RESET"
64 64
65/* 65/*
66 * i915_user_extension: Base class for defining a chain of extensions
67 *
68 * Many interfaces need to grow over time. In most cases we can simply
69 * extend the struct and have userspace pass in more data. Another option,
70 * as demonstrated by Vulkan's approach to providing extensions for forward
71 * and backward compatibility, is to use a list of optional structs to
72 * provide those extra details.
73 *
74 * The key advantage to using an extension chain is that it allows us to
75 * redefine the interface more easily than an ever growing struct of
76 * increasing complexity, and for large parts of that interface to be
77 * entirely optional. The downside is more pointer chasing; chasing across
78 * the __user boundary with pointers encapsulated inside u64.
79 */
80struct i915_user_extension {
81 __u64 next_extension;
82 __u32 name;
83 __u32 flags; /* All undefined bits must be zero. */
84 __u32 rsvd[4]; /* Reserved for future use; must be zero. */
85};
86
87/*
66 * MOCS indexes used for GPU surfaces, defining the cacheability of the 88 * MOCS indexes used for GPU surfaces, defining the cacheability of the
67 * surface data and the coherency for this data wrt. CPU vs. GPU accesses. 89 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
68 */ 90 */
@@ -99,9 +121,23 @@ enum drm_i915_gem_engine_class {
99 I915_ENGINE_CLASS_VIDEO = 2, 121 I915_ENGINE_CLASS_VIDEO = 2,
100 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, 122 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
101 123
124 /* should be kept compact */
125
102 I915_ENGINE_CLASS_INVALID = -1 126 I915_ENGINE_CLASS_INVALID = -1
103}; 127};
104 128
129/*
130 * There may be more than one engine fulfilling any role within the system.
131 * Each engine of a class is given a unique instance number and therefore
132 * any engine can be specified by its class:instance tuple. APIs that allow
133 * access to any engine in the system will use struct i915_engine_class_instance
134 * for this identification.
135 */
136struct i915_engine_class_instance {
137 __u16 engine_class; /* see enum drm_i915_gem_engine_class */
138 __u16 engine_instance;
139};
140
105/** 141/**
106 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915 142 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
107 * 143 *
@@ -319,6 +355,7 @@ typedef struct _drm_i915_sarea {
319#define DRM_I915_PERF_ADD_CONFIG 0x37 355#define DRM_I915_PERF_ADD_CONFIG 0x37
320#define DRM_I915_PERF_REMOVE_CONFIG 0x38 356#define DRM_I915_PERF_REMOVE_CONFIG 0x38
321#define DRM_I915_QUERY 0x39 357#define DRM_I915_QUERY 0x39
358/* Must be kept compact -- no holes */
322 359
323#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 360#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
324#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 361#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -367,6 +404,7 @@ typedef struct _drm_i915_sarea {
367#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 404#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
368#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 405#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
369#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 406#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
407#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
370#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 408#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
371#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 409#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
372#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) 410#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
@@ -476,6 +514,7 @@ typedef struct drm_i915_irq_wait {
476#define I915_SCHEDULER_CAP_ENABLED (1ul << 0) 514#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
477#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) 515#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
478#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) 516#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
517#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
479 518
480#define I915_PARAM_HUC_STATUS 42 519#define I915_PARAM_HUC_STATUS 42
481 520
@@ -559,6 +598,8 @@ typedef struct drm_i915_irq_wait {
559 */ 598 */
560#define I915_PARAM_MMAP_GTT_COHERENT 52 599#define I915_PARAM_MMAP_GTT_COHERENT 52
561 600
601/* Must be kept compact -- no holes and well documented */
602
562typedef struct drm_i915_getparam { 603typedef struct drm_i915_getparam {
563 __s32 param; 604 __s32 param;
564 /* 605 /*
@@ -574,6 +615,7 @@ typedef struct drm_i915_getparam {
574#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 615#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
575#define I915_SETPARAM_ALLOW_BATCHBUFFER 3 616#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
576#define I915_SETPARAM_NUM_USED_FENCES 4 617#define I915_SETPARAM_NUM_USED_FENCES 4
618/* Must be kept compact -- no holes */
577 619
578typedef struct drm_i915_setparam { 620typedef struct drm_i915_setparam {
579 int param; 621 int param;
@@ -972,7 +1014,7 @@ struct drm_i915_gem_execbuffer2 {
972 * struct drm_i915_gem_exec_fence *fences. 1014 * struct drm_i915_gem_exec_fence *fences.
973 */ 1015 */
974 __u64 cliprects_ptr; 1016 __u64 cliprects_ptr;
975#define I915_EXEC_RING_MASK (7<<0) 1017#define I915_EXEC_RING_MASK (0x3f)
976#define I915_EXEC_DEFAULT (0<<0) 1018#define I915_EXEC_DEFAULT (0<<0)
977#define I915_EXEC_RENDER (1<<0) 1019#define I915_EXEC_RENDER (1<<0)
978#define I915_EXEC_BSD (2<<0) 1020#define I915_EXEC_BSD (2<<0)
@@ -1120,32 +1162,34 @@ struct drm_i915_gem_busy {
1120 * as busy may become idle before the ioctl is completed. 1162 * as busy may become idle before the ioctl is completed.
1121 * 1163 *
1122 * Furthermore, if the object is busy, which engine is busy is only 1164 * Furthermore, if the object is busy, which engine is busy is only
1123 * provided as a guide. There are race conditions which prevent the 1165 * provided as a guide and only indirectly by reporting its class
1124 * report of which engines are busy from being always accurate. 1166 * (there may be more than one engine in each class). There are race
1125 * However, the converse is not true. If the object is idle, the 1167 * conditions which prevent the report of which engines are busy from
1126 * result of the ioctl, that all engines are idle, is accurate. 1168 * being always accurate. However, the converse is not true. If the
1169 * object is idle, the result of the ioctl, that all engines are idle,
1170 * is accurate.
1127 * 1171 *
1128 * The returned dword is split into two fields to indicate both 1172 * The returned dword is split into two fields to indicate both
1129 * the engines on which the object is being read, and the 1173 * the engine classes on which the object is being read, and the
1130 * engine on which it is currently being written (if any). 1174 * engine class on which it is currently being written (if any).
1131 * 1175 *
1132 * The low word (bits 0:15) indicate if the object is being written 1176 * The low word (bits 0:15) indicate if the object is being written
1133 * to by any engine (there can only be one, as the GEM implicit 1177 * to by any engine (there can only be one, as the GEM implicit
1134 * synchronisation rules force writes to be serialised). Only the 1178 * synchronisation rules force writes to be serialised). Only the
1135 * engine for the last write is reported. 1179 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1180 * 1 not 0 etc) for the last write is reported.
1136 * 1181 *
1137 * The high word (bits 16:31) are a bitmask of which engines 1182 * The high word (bits 16:31) are a bitmask of which engine classes
1138 * currently reading from the object. Multiple engines may be 1183 * are currently reading from the object. Multiple engines may be
1139 * reading from the object simultaneously. 1184 * reading from the object simultaneously.
1140 * 1185 *
1141 * The value of each engine is the same as specified in the 1186 * The value of each engine class is the same as specified in the
1142 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc. 1187 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
1143 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to 1188 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1144 * the I915_EXEC_RENDER engine for execution, and so it is never
1145 * reported as active itself. Some hardware may have parallel 1189 * reported as active itself. Some hardware may have parallel
1146 * execution engines, e.g. multiple media engines, which are 1190 * execution engines, e.g. multiple media engines, which are
1147 * mapped to the same identifier in the EXECBUFFER2 ioctl and 1191 * mapped to the same class identifier and so are not separately
1148 * so are not separately reported for busyness. 1192 * reported for busyness.
1149 * 1193 *
1150 * Caveat emptor: 1194 * Caveat emptor:
1151 * Only the boolean result of this query is reliable; that is whether 1195 * Only the boolean result of this query is reliable; that is whether
@@ -1412,65 +1456,17 @@ struct drm_i915_gem_wait {
1412}; 1456};
1413 1457
1414struct drm_i915_gem_context_create { 1458struct drm_i915_gem_context_create {
1415 /* output: id of new context*/ 1459 __u32 ctx_id; /* output: id of new context*/
1416 __u32 ctx_id;
1417 __u32 pad;
1418};
1419
1420struct drm_i915_gem_context_destroy {
1421 __u32 ctx_id;
1422 __u32 pad; 1460 __u32 pad;
1423}; 1461};
1424 1462
1425struct drm_i915_reg_read { 1463struct drm_i915_gem_context_create_ext {
1426 /* 1464 __u32 ctx_id; /* output: id of new context*/
1427 * Register offset.
1428 * For 64bit wide registers where the upper 32bits don't immediately
1429 * follow the lower 32bits, the offset of the lower 32bits must
1430 * be specified
1431 */
1432 __u64 offset;
1433#define I915_REG_READ_8B_WA (1ul << 0)
1434
1435 __u64 val; /* Return value */
1436};
1437/* Known registers:
1438 *
1439 * Render engine timestamp - 0x2358 + 64bit - gen7+
1440 * - Note this register returns an invalid value if using the default
1441 * single instruction 8byte read, in order to workaround that pass
1442 * flag I915_REG_READ_8B_WA in offset field.
1443 *
1444 */
1445
1446struct drm_i915_reset_stats {
1447 __u32 ctx_id;
1448 __u32 flags;
1449
1450 /* All resets since boot/module reload, for all contexts */
1451 __u32 reset_count;
1452
1453 /* Number of batches lost when active in GPU, for this context */
1454 __u32 batch_active;
1455
1456 /* Number of batches lost pending for execution, for this context */
1457 __u32 batch_pending;
1458
1459 __u32 pad;
1460};
1461
1462struct drm_i915_gem_userptr {
1463 __u64 user_ptr;
1464 __u64 user_size;
1465 __u32 flags; 1465 __u32 flags;
1466#define I915_USERPTR_READ_ONLY 0x1 1466#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
1467#define I915_USERPTR_UNSYNCHRONIZED 0x80000000 1467#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1468 /** 1468 (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
1469 * Returned handle for the object. 1469 __u64 extensions;
1470 *
1471 * Object handles are nonzero.
1472 */
1473 __u32 handle;
1474}; 1470};
1475 1471
1476struct drm_i915_gem_context_param { 1472struct drm_i915_gem_context_param {
@@ -1491,6 +1487,28 @@ struct drm_i915_gem_context_param {
1491 * drm_i915_gem_context_param_sseu. 1487 * drm_i915_gem_context_param_sseu.
1492 */ 1488 */
1493#define I915_CONTEXT_PARAM_SSEU 0x7 1489#define I915_CONTEXT_PARAM_SSEU 0x7
1490
1491/*
1492 * Not all clients may want to attempt automatic recover of a context after
1493 * a hang (for example, some clients may only submit very small incremental
1494 * batches relying on known logical state of previous batches which will never
1495 * recover correctly and each attempt will hang), and so would prefer that
1496 * the context is forever banned instead.
1497 *
1498 * If set to false (0), after a reset, subsequent (and in flight) rendering
1499 * from this context is discarded, and the client will need to create a new
1500 * context to use instead.
1501 *
1502 * If set to true (1), the kernel will automatically attempt to recover the
1503 * context by skipping the hanging batch and executing the next batch starting
1504 * from the default context state (discarding the incomplete logical context
1505 * state lost due to the reset).
1506 *
1507 * On creation, all new contexts are marked as recoverable.
1508 */
1509#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
1510/* Must be kept compact -- no holes and well documented */
1511
1494 __u64 value; 1512 __u64 value;
1495}; 1513};
1496 1514
@@ -1519,8 +1537,7 @@ struct drm_i915_gem_context_param_sseu {
1519 /* 1537 /*
1520 * Engine class & instance to be configured or queried. 1538 * Engine class & instance to be configured or queried.
1521 */ 1539 */
1522 __u16 engine_class; 1540 struct i915_engine_class_instance engine;
1523 __u16 engine_instance;
1524 1541
1525 /* 1542 /*
1526 * Unused for now. Must be cleared to zero. 1543 * Unused for now. Must be cleared to zero.
@@ -1553,6 +1570,96 @@ struct drm_i915_gem_context_param_sseu {
1553 __u32 rsvd; 1570 __u32 rsvd;
1554}; 1571};
1555 1572
1573struct drm_i915_gem_context_create_ext_setparam {
1574#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1575 struct i915_user_extension base;
1576 struct drm_i915_gem_context_param param;
1577};
1578
1579struct drm_i915_gem_context_destroy {
1580 __u32 ctx_id;
1581 __u32 pad;
1582};
1583
1584/*
1585 * DRM_I915_GEM_VM_CREATE -
1586 *
1587 * Create a new virtual memory address space (ppGTT) for use within a context
1588 * on the same file. Extensions can be provided to configure exactly how the
1589 * address space is setup upon creation.
1590 *
1591 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
1592 * returned in the outparam @id.
1593 *
1594 * No flags are defined, with all bits reserved and must be zero.
1595 *
1596 * An extension chain may be provided, starting with @extensions, and terminated
1597 * by the @next_extension being 0. Currently, no extensions are defined.
1598 *
1599 * DRM_I915_GEM_VM_DESTROY -
1600 *
1601 * Destroys a previously created VM id, specified in @id.
1602 *
1603 * No extensions or flags are allowed currently, and so must be zero.
1604 */
1605struct drm_i915_gem_vm_control {
1606 __u64 extensions;
1607 __u32 flags;
1608 __u32 vm_id;
1609};
1610
1611struct drm_i915_reg_read {
1612 /*
1613 * Register offset.
1614 * For 64bit wide registers where the upper 32bits don't immediately
1615 * follow the lower 32bits, the offset of the lower 32bits must
1616 * be specified
1617 */
1618 __u64 offset;
1619#define I915_REG_READ_8B_WA (1ul << 0)
1620
1621 __u64 val; /* Return value */
1622};
1623
1624/* Known registers:
1625 *
1626 * Render engine timestamp - 0x2358 + 64bit - gen7+
1627 * - Note this register returns an invalid value if using the default
1628 * single instruction 8byte read, in order to workaround that pass
1629 * flag I915_REG_READ_8B_WA in offset field.
1630 *
1631 */
1632
1633struct drm_i915_reset_stats {
1634 __u32 ctx_id;
1635 __u32 flags;
1636
1637 /* All resets since boot/module reload, for all contexts */
1638 __u32 reset_count;
1639
1640 /* Number of batches lost when active in GPU, for this context */
1641 __u32 batch_active;
1642
1643 /* Number of batches lost pending for execution, for this context */
1644 __u32 batch_pending;
1645
1646 __u32 pad;
1647};
1648
1649struct drm_i915_gem_userptr {
1650 __u64 user_ptr;
1651 __u64 user_size;
1652 __u32 flags;
1653#define I915_USERPTR_READ_ONLY 0x1
1654#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1655 /**
1656 * Returned handle for the object.
1657 *
1658 * Object handles are nonzero.
1659 */
1660 __u32 handle;
1661};
1662
1556enum drm_i915_oa_format { 1663enum drm_i915_oa_format {
1557 I915_OA_FORMAT_A13 = 1, /* HSW only */ 1664 I915_OA_FORMAT_A13 = 1, /* HSW only */
1558 I915_OA_FORMAT_A29, /* HSW only */ 1665 I915_OA_FORMAT_A29, /* HSW only */
@@ -1714,6 +1821,7 @@ struct drm_i915_perf_oa_config {
1714struct drm_i915_query_item { 1821struct drm_i915_query_item {
1715 __u64 query_id; 1822 __u64 query_id;
1716#define DRM_I915_QUERY_TOPOLOGY_INFO 1 1823#define DRM_I915_QUERY_TOPOLOGY_INFO 1
1824/* Must be kept compact -- no holes and well documented */
1717 1825
1718 /* 1826 /*
1719 * When set to zero by userspace, this is filled with the size of the 1827 * When set to zero by userspace, this is filled with the size of the