path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 100
1 file changed, 86 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2346b920bd86..42ff97d667d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -505,13 +505,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = init_ring_common(ring);
 
-	if (INTEL_INFO(dev)->gen > 3) {
+	if (INTEL_INFO(dev)->gen > 3)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
-		if (IS_GEN7(dev))
-			I915_WRITE(GFX_MODE_GEN7,
-				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
-				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-	}
+
+	/* We need to disable the AsyncFlip performance optimisations in order
+	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+	 * programmed to '1' on all products.
+	 */
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+	/* Required for the hardware to program scanline values for waiting */
+	if (INTEL_INFO(dev)->gen == 6)
+		I915_WRITE(GFX_MODE,
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+	if (IS_GEN7(dev))
+		I915_WRITE(GFX_MODE_GEN7,
+			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
 	if (INTEL_INFO(dev)->gen >= 5) {
 		ret = init_pipe_control(ring);
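The MI_MODE and GFX_MODE writes in this hunk rely on the driver's masked-bit convention: the top 16 bits of the register act as a per-bit write-enable mask for the bottom 16 bits, so a single write can flip one workaround bit without a read-modify-write cycle. For context, the helper macros are conventionally defined along these lines (a sketch, not part of this diff):

	/* Assumed form of the masked-bit helpers from i915_reg.h: the upper
	 * halfword selects which of the low bits the write actually touches. */
	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */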
@@ -547,9 +559,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
+
 	if (!ring->private)
 		return;
 
+	if (HAS_BROKEN_CS_TLB(dev))
+		drm_gem_object_unreference(to_gem_object(ring->private));
+
 	cleanup_pipe_control(ring);
 }
 
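render_ring_cleanup() keeps the scratch batch object in ring->private as an opaque pointer, so dropping the extra reference requires converting back to the embedded GEM base object. The to_gem_object() helper used above is defined outside this file; a plausible shape for it, assuming ring->private really holds a struct drm_i915_gem_object, is:

	/* Hypothetical sketch of the conversion helper used by the cleanup hunk:
	 * recover the drm_gem_object embedded in the i915 GEM object. */
	static inline struct drm_gem_object *to_gem_object(void *priv)
	{
		return &((struct drm_i915_gem_object *)priv)->base;
	}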
@@ -969,6 +986,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+#define I830_BATCH_LIMIT (256*1024)
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			 u32 offset, u32 len,
@@ -976,15 +995,47 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 {
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
-	if (ret)
-		return ret;
+	if (flags & I915_DISPATCH_PINNED) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER);
-	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-	intel_ring_emit(ring, offset + len - 8);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+		intel_ring_emit(ring, offset + len - 8);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+	} else {
+		struct drm_i915_gem_object *obj = ring->private;
+		u32 cs_offset = obj->gtt_offset;
+
+		if (len > I830_BATCH_LIMIT)
+			return -ENOSPC;
+
+		ret = intel_ring_begin(ring, 9+3);
+		if (ret)
+			return ret;
+		/* Blit the batch (which has now all relocs applied) to the stable batch
+		 * scratch bo area (so that the CS never stumbles over its tlb
+		 * invalidation bug) ... */
+		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+				XY_SRC_COPY_BLT_WRITE_ALPHA |
+				XY_SRC_COPY_BLT_WRITE_RGB);
+		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
+		intel_ring_emit(ring, MI_FLUSH);
+
+		/* ... and execute it. */
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+		intel_ring_emit(ring, cs_offset + len - 8);
+		intel_ring_advance(ring);
+	}
 
 	return 0;
 }
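In the unpinned path above, the batch is first copied into the pinned scratch bo with the blitter, so the broken CS TLB never walks the user batch directly: the copy is a 32bpp XY_SRC_COPY blit with a 4096-byte pitch and a 1024-pixel row width (1024 * 4 bytes = 4096 bytes per row), so DIV_ROUND_UP(len, 4096) rows cover the whole batch, and intel_ring_begin(ring, 9+3) reserves nine dwords for the blit plus MI_FLUSH and three for the MI_BATCH_BUFFER that then executes from the stable copy. The geometry can be sketched like this (illustrative names only, not part of the patch):

	/* Illustrative helper: how the blit fields above derive from the batch
	 * length.  Each row copies 4096 bytes (1024 pixels at 32bpp). */
	static void i830_wa_blit_geometry(u32 len, u32 *height, u32 *width_px, u32 *pitch)
	{
		*pitch    = 4096;			/* src/dst pitch in bytes */
		*width_px = 1024;			/* 1024 * 4 bytes == one 4096-byte row */
		*height   = DIV_ROUND_UP(len, 4096);	/* rows needed to cover the batch */
	}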
@@ -1596,6 +1647,27 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	ring->init = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
+	/* Workaround batchbuffer to combat CS tlb bug. */
+	if (HAS_BROKEN_CS_TLB(dev)) {
+		struct drm_i915_gem_object *obj;
+		int ret;
+
+		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+		if (obj == NULL) {
+			DRM_ERROR("Failed to allocate batch bo\n");
+			return -ENOMEM;
+		}
+
+		ret = i915_gem_object_pin(obj, 0, true, false);
+		if (ret != 0) {
+			drm_gem_object_unreference(&obj->base);
+			DRM_ERROR("Failed to ping batch bo\n");
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
 	return intel_init_ring_buffer(dev, ring);
 }
 
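The scratch bo is only allocated and pinned when HAS_BROKEN_CS_TLB() reports an affected platform; the predicate comes from i915_drv.h and presumably singles out the 830/845G parts whose command streamer has the TLB invalidation bug, roughly:

	/* Assumed platform check from i915_drv.h: only 830 and 845G need the
	 * workaround batch allocated at ring init. */
	#define HAS_BROKEN_CS_TLB(dev)	(IS_I830(dev) || IS_845G(dev))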