Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.h')
 drivers/gpu/drm/i915/intel_ringbuffer.h | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c761fe05ad6f..4be66f60504d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,6 +2,7 @@
 #define _INTEL_RINGBUFFER_H_
 
 #include <linux/hashtable.h>
+#include "i915_gem_batch_pool.h"
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -117,6 +118,7 @@ struct intel_ringbuffer {
 };
 
 struct intel_context;
+struct drm_i915_reg_descriptor;
 
 struct intel_engine_cs {
 	const char *name;
@@ -133,6 +135,13 @@ struct intel_engine_cs {
 	struct drm_device *dev;
 	struct intel_ringbuffer *buffer;
 
+	/*
+	 * A pool of objects to use as shadow copies of client batch buffers
+	 * when the command parser is enabled. Prevents the client from
+	 * modifying the batch contents after software parsing.
+	 */
+	struct i915_gem_batch_pool batch_pool;
+
 	struct intel_hw_status_page status_page;
 
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
@@ -266,6 +275,13 @@ struct intel_engine_cs {
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	struct drm_i915_gem_request *outstanding_lazy_request;
+	/**
+	 * Seqno of request most recently submitted to request_list.
+	 * Used exclusively by hang checker to avoid grabbing lock while
+	 * inspecting request list.
+	 */
+	u32 last_submitted_seqno;
+
 	bool gpu_caches_dirty;
 
 	wait_queue_head_t irq_queue;
@@ -292,14 +308,14 @@ struct intel_engine_cs {
 	/*
 	 * Table of registers allowed in commands that read/write registers.
 	 */
-	const u32 *reg_table;
+	const struct drm_i915_reg_descriptor *reg_table;
 	int reg_count;
 
 	/*
 	 * Table of registers allowed in commands that read/write registers, but
 	 * only from the DRM master.
 	 */
-	const u32 *master_reg_table;
+	const struct drm_i915_reg_descriptor *master_reg_table;
 	int master_reg_count;
 
 	/*
@@ -390,6 +406,8 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 void intel_stop_ring_buffer(struct intel_engine_cs *ring);
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+
 int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
 int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
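
Context for the last_submitted_seqno addition: it lets the hang checker decide whether an engine is idle without taking the lock that protects ring->request_list. A minimal sketch of that idea follows; i915_seqno_passed() is the wraparound-safe comparison i915 provides in i915_drv.h, while ring_idle() here is an illustrative helper under that assumption, not necessarily the exact upstream code.

/* Wraparound-safe seqno comparison, as defined in i915_drv.h. */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}

/*
 * Illustrative sketch: if the seqno the hardware has completed is at or
 * past the last seqno ever submitted to this engine, every request on
 * ring->request_list has finished, so the hang checker can treat the
 * engine as idle without grabbing the lock to walk the list.
 */
static bool ring_idle(struct intel_engine_cs *ring, u32 completed_seqno)
{
	return i915_seqno_passed(completed_seqno, ring->last_submitted_seqno);
}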