Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	159
1 file changed, 141 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae..89a65be8a3f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -156,28 +156,30 @@ static int init_ring_common(struct drm_device *dev,
 
 	/* G45 ring initialization fails to reset head to zero */
 	if (head != 0) {
-		DRM_ERROR("%s head not reset to zero "
+		DRM_DEBUG_KMS("%s head not reset to zero "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				ring->name,
 				I915_READ_CTL(ring),
 				I915_READ_HEAD(ring),
 				I915_READ_TAIL(ring),
 				I915_READ_START(ring));
 
 		I915_WRITE_HEAD(ring, 0);
 
-		DRM_ERROR("%s head forced to zero "
-				"ctl %08x head %08x tail %08x start %08x\n",
-				ring->name,
-				I915_READ_CTL(ring),
-				I915_READ_HEAD(ring),
-				I915_READ_TAIL(ring),
-				I915_READ_START(ring));
+		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+			DRM_ERROR("failed to set %s head to zero "
+				  "ctl %08x head %08x tail %08x start %08x\n",
+				  ring->name,
+				  I915_READ_CTL(ring),
+				  I915_READ_HEAD(ring),
+				  I915_READ_TAIL(ring),
+				  I915_READ_START(ring));
+		}
 	}
 
 	I915_WRITE_CTL(ring,
 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_NO_REPORT | RING_VALID);
+			| RING_REPORT_64K | RING_VALID);
 
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
@@ -654,6 +656,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
 	cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +694,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 {
 	unsigned long end;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 head;
+
+	head = intel_read_status_page(ring, 4);
+	if (head) {
+		ring->head = head & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+		if (ring->space >= n)
+			return 0;
+	}
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
@@ -854,19 +871,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
 	/* do nothing */
 }
 
+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+	return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		struct drm_i915_gem_object *obj;
+		u32 __iomem *ptr;
+		int ret;
+
+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		if (obj == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(&obj->base, 4096);
+		if (ret) {
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ptr = kmap(obj->pages[0]);
+		iowrite32(MI_BATCH_BUFFER_END, ptr);
+		iowrite32(MI_NOOP, ptr+1);
+		kunmap(obj->pages[0]);
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		if (ret) {
+			i915_gem_object_unpin(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   int num_dwords)
+{
+	if (ring->private) {
+		intel_ring_begin(dev, ring, num_dwords+2);
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+	} else
+		intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
+{
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno = i915_gem_get_seqno(dev);
+
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	i915_gem_object_unpin(ring->private);
+	drm_gem_object_unreference(ring->private);
+	ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
 	.name			= "blt ring",
 	.id			= RING_BLT,
 	.mmio_base		= BLT_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.init			= init_ring_common,
+	.init			= blt_ring_init,
 	.write_tail		= ring_write_tail,
-	.flush			= gen6_ring_flush,
-	.add_request		= ring_add_request,
+	.flush			= blt_ring_flush,
+	.add_request		= blt_ring_add_request,
 	.get_seqno		= ring_status_page_get_seqno,
 	.user_irq_get		= blt_ring_get_user_irq,
 	.user_irq_put		= blt_ring_put_user_irq,
 	.dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+	.cleanup		= blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
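
Note on the SNB BLT workaround above (a sketch for review, not part of the
patch): on affected steppings (GEN6 with pdev->revision < 8) the command
parser requires that the first command it sees after each BLT ring tail move
is MI_BATCH_BUFFER_START. blt_ring_init() therefore pins a 4K page holding a
stub batch, and blt_ring_begin() prefixes every emission with a jump into it.
Assuming ring->private is set, the dwords reaching the ring for one
blt_ring_flush() are:

	/* stub batch, written once into the pinned page by blt_ring_init() */
	MI_BATCH_BUFFER_END, MI_NOOP

	/* ring contents emitted by blt_ring_flush() */
	MI_BATCH_BUFFER_START			/* workaround prefix */
	to_blt_workaround(ring)->gtt_offset	/* address of the stub batch */
	MI_FLUSH_DW				/* the real flush command */
	0
	0
	0

The parser enters the stub batch, hits MI_BATCH_BUFFER_END immediately, and
resumes parsing the ring at the real commands, so the hardware requirement is
satisfied at the cost of two extra dwords per emission.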