aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2012-07-20 13:02:28 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2012-08-08 03:34:32 -0400
commit6c6cf5aa9c583478b19e23149feaa92d01fb8c2d (patch)
tree188e1c0c4381913f1a2e16f2509704672e7689dd /drivers/gpu/drm/i915/intel_ringbuffer.c
parentab3951eb74e7c33a2f5b7b64d72e82f1eea61571 (diff)
drm/i915: Only apply the SNB pipe control w/a to gen6
The requirement for the sync flush to be emitted prior to the render cache flush is only true for SandyBridge. On IvyBridge and friends we can just emit the flushes with an inline CS stall. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c33
1 file changed, 20 insertions, 13 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c58f1b91d08b..8733da529edf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -214,15 +214,8 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
214 u32 invalidate_domains, u32 flush_domains) 214 u32 invalidate_domains, u32 flush_domains)
215{ 215{
216 u32 flags = 0; 216 u32 flags = 0;
217 struct pipe_control *pc = ring->private;
218 u32 scratch_addr = pc->gtt_offset + 128;
219 int ret; 217 int ret;
220 218
221 /* Force SNB workarounds for PIPE_CONTROL flushes */
222 ret = intel_emit_post_sync_nonzero_flush(ring);
223 if (ret)
224 return ret;
225
226 /* Just flush everything. Experiments have shown that reducing the 219 /* Just flush everything. Experiments have shown that reducing the
227 * number of bits based on the write domains has little performance 220 * number of bits based on the write domains has little performance
228 * impact. 221 * impact.
@@ -242,21 +235,33 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
242 if (flush_domains) 235 if (flush_domains)
243 flags |= PIPE_CONTROL_CS_STALL; 236 flags |= PIPE_CONTROL_CS_STALL;
244 237
245 ret = intel_ring_begin(ring, 6); 238 ret = intel_ring_begin(ring, 4);
246 if (ret) 239 if (ret)
247 return ret; 240 return ret;
248 241
249 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); 242 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
250 intel_ring_emit(ring, flags); 243 intel_ring_emit(ring, flags);
251 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 244 intel_ring_emit(ring, 0);
252 intel_ring_emit(ring, 0); /* lower dword */ 245 intel_ring_emit(ring, 0);
253 intel_ring_emit(ring, 0); /* uppwer dword */
254 intel_ring_emit(ring, MI_NOOP);
255 intel_ring_advance(ring); 246 intel_ring_advance(ring);
256 247
257 return 0; 248 return 0;
258} 249}
259 250
251static int
252gen6_render_ring_flush__wa(struct intel_ring_buffer *ring,
253 u32 invalidate_domains, u32 flush_domains)
254{
255 int ret;
256
257 /* Force SNB workarounds for PIPE_CONTROL flushes */
258 ret = intel_emit_post_sync_nonzero_flush(ring);
259 if (ret)
260 return ret;
261
262 return gen6_render_ring_flush(ring, invalidate_domains, flush_domains);
263}
264
260static void ring_write_tail(struct intel_ring_buffer *ring, 265static void ring_write_tail(struct intel_ring_buffer *ring,
261 u32 value) 266 u32 value)
262{ 267{
@@ -1371,6 +1376,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1371 if (INTEL_INFO(dev)->gen >= 6) { 1376 if (INTEL_INFO(dev)->gen >= 6) {
1372 ring->add_request = gen6_add_request; 1377 ring->add_request = gen6_add_request;
1373 ring->flush = gen6_render_ring_flush; 1378 ring->flush = gen6_render_ring_flush;
1379 if (INTEL_INFO(dev)->gen == 6)
1380 ring->flush = gen6_render_ring_flush__wa;
1374 ring->irq_get = gen6_ring_get_irq; 1381 ring->irq_get = gen6_ring_get_irq;
1375 ring->irq_put = gen6_ring_put_irq; 1382 ring->irq_put = gen6_ring_put_irq;
1376 ring->irq_enable_mask = GT_USER_INTERRUPT; 1383 ring->irq_enable_mask = GT_USER_INTERRUPT;