about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.h')
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 46
1 file changed, 41 insertions, 5 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 70525d0c2c74..96479c89f4bd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,6 +5,13 @@
5 5
6#define I915_CMD_HASH_ORDER 9 6#define I915_CMD_HASH_ORDER 9
7 7
8/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
9 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
10 * to give some inclination as to some of the magic values used in the various
11 * workarounds!
12 */
13#define CACHELINE_BYTES 64
14
8/* 15/*
9 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" 16 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
10 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" 17 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -90,6 +97,15 @@ struct intel_ringbuffer {
90 struct drm_i915_gem_object *obj; 97 struct drm_i915_gem_object *obj;
91 void __iomem *virtual_start; 98 void __iomem *virtual_start;
92 99
100 struct intel_engine_cs *ring;
101
102 /*
103 * FIXME: This backpointer is an artifact of the history of how the
104 * execlist patches came into being. It will get removed once the basic
105 * code has landed.
106 */
107 struct intel_context *FIXME_lrc_ctx;
108
93 u32 head; 109 u32 head;
94 u32 tail; 110 u32 tail;
95 int space; 111 int space;
@@ -132,6 +148,8 @@ struct intel_engine_cs {
132 148
133 int (*init)(struct intel_engine_cs *ring); 149 int (*init)(struct intel_engine_cs *ring);
134 150
151 int (*init_context)(struct intel_engine_cs *ring);
152
135 void (*write_tail)(struct intel_engine_cs *ring, 153 void (*write_tail)(struct intel_engine_cs *ring,
136 u32 value); 154 u32 value);
137 int __must_check (*flush)(struct intel_engine_cs *ring, 155 int __must_check (*flush)(struct intel_engine_cs *ring,
@@ -214,6 +232,18 @@ struct intel_engine_cs {
214 unsigned int num_dwords); 232 unsigned int num_dwords);
215 } semaphore; 233 } semaphore;
216 234
235 /* Execlists */
236 spinlock_t execlist_lock;
237 struct list_head execlist_queue;
238 u8 next_context_status_buffer;
239 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
240 int (*emit_request)(struct intel_ringbuffer *ringbuf);
241 int (*emit_flush)(struct intel_ringbuffer *ringbuf,
242 u32 invalidate_domains,
243 u32 flush_domains);
244 int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
245 u64 offset, unsigned flags);
246
217 /** 247 /**
218 * List of objects currently involved in rendering from the 248 * List of objects currently involved in rendering from the
219 * ringbuffer. 249 * ringbuffer.
@@ -287,11 +317,7 @@ struct intel_engine_cs {
287 u32 (*get_cmd_length_mask)(u32 cmd_header); 317 u32 (*get_cmd_length_mask)(u32 cmd_header);
288}; 318};
289 319
290static inline bool 320bool intel_ring_initialized(struct intel_engine_cs *ring);
291intel_ring_initialized(struct intel_engine_cs *ring)
292{
293 return ring->buffer && ring->buffer->obj;
294}
295 321
296static inline unsigned 322static inline unsigned
297intel_ring_flag(struct intel_engine_cs *ring) 323intel_ring_flag(struct intel_engine_cs *ring)
@@ -355,6 +381,10 @@ intel_write_status_page(struct intel_engine_cs *ring,
355#define I915_GEM_HWS_SCRATCH_INDEX 0x30 381#define I915_GEM_HWS_SCRATCH_INDEX 0x30
356#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 382#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
357 383
384void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
385int intel_alloc_ringbuffer_obj(struct drm_device *dev,
386 struct intel_ringbuffer *ringbuf);
387
358void intel_stop_ring_buffer(struct intel_engine_cs *ring); 388void intel_stop_ring_buffer(struct intel_engine_cs *ring);
359void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); 389void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
360 390
@@ -372,6 +402,9 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
372 struct intel_ringbuffer *ringbuf = ring->buffer; 402 struct intel_ringbuffer *ringbuf = ring->buffer;
373 ringbuf->tail &= ringbuf->size - 1; 403 ringbuf->tail &= ringbuf->size - 1;
374} 404}
405int __intel_ring_space(int head, int tail, int size);
406int intel_ring_space(struct intel_ringbuffer *ringbuf);
407bool intel_ring_stopped(struct intel_engine_cs *ring);
375void __intel_ring_advance(struct intel_engine_cs *ring); 408void __intel_ring_advance(struct intel_engine_cs *ring);
376 409
377int __must_check intel_ring_idle(struct intel_engine_cs *ring); 410int __must_check intel_ring_idle(struct intel_engine_cs *ring);
@@ -379,6 +412,9 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
379int intel_ring_flush_all_caches(struct intel_engine_cs *ring); 412int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
380int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring); 413int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
381 414
415void intel_fini_pipe_control(struct intel_engine_cs *ring);
416int intel_init_pipe_control(struct intel_engine_cs *ring);
417
382int intel_init_render_ring_buffer(struct drm_device *dev); 418int intel_init_render_ring_buffer(struct drm_device *dev);
383int intel_init_bsd_ring_buffer(struct drm_device *dev); 419int intel_init_bsd_ring_buffer(struct drm_device *dev);
384int intel_init_bsd2_ring_buffer(struct drm_device *dev); 420int intel_init_bsd2_ring_buffer(struct drm_device *dev);