aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaulo Zanoni <paulo.r.zanoni@intel.com>2012-08-17 17:35:42 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2012-09-03 04:09:26 -0400
commitb31115092724925a434905dc3dbf83a2e752ba4b (patch)
treecbb5a3f3e7f5aeae91862b80df6526769e806582
parent4772eaebcdf86dd65630339dbe58316b90f80aed (diff)
drm/i915: add workarounds directly to gen6_render_ring_flush
Since gen 7+ now runs the new gen7_render_ring_flush function, the SNB workarounds can be applied directly inside gen6_render_ring_flush instead of a separate wrapper. Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c21
1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 074b7d67c1c4..42a4b85b0eae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -218,6 +218,11 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	u32 scratch_addr = pc->gtt_offset + 128;
 	int ret;
 
+	/* Force SNB workarounds for PIPE_CONTROL flushes */
+	ret = intel_emit_post_sync_nonzero_flush(ring);
+	if (ret)
+		return ret;
+
 	/* Just flush everything. Experiments have shown that reducing the
 	 * number of bits based on the write domains has little performance
 	 * impact.
@@ -305,20 +310,6 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static int
-gen6_render_ring_flush__wa(struct intel_ring_buffer *ring,
-			   u32 invalidate_domains, u32 flush_domains)
-{
-	int ret;
-
-	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(ring);
-	if (ret)
-		return ret;
-
-	return gen6_render_ring_flush(ring, invalidate_domains, flush_domains);
-}
-
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -1435,7 +1426,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = gen6_add_request;
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
-			ring->flush = gen6_render_ring_flush__wa;
+			ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;