path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	260
1 file changed, 187 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 56bc95c056dd..f6b9baa6a63d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,7 +48,7 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 	return seqno;
 }
 
-static void
+static int
 render_ring_flush(struct intel_ring_buffer *ring,
 		  u32 invalidate_domains,
 		  u32 flush_domains)
@@ -56,6 +56,7 @@ render_ring_flush(struct intel_ring_buffer *ring,
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
+	int ret;
 
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
@@ -116,12 +117,16 @@ render_ring_flush(struct intel_ring_buffer *ring,
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		if (intel_ring_begin(ring, 2) == 0) {
-			intel_ring_emit(ring, cmd);
-			intel_ring_emit(ring, MI_NOOP);
-			intel_ring_advance(ring);
-		}
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, cmd);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 	}
+
+	return 0;
 }
 
 static void ring_write_tail(struct intel_ring_buffer *ring,
@@ -480,26 +485,56 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
 	return pc->cpu_page[0];
 }
 
+static void
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask &= ~mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask |= mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask &= ~mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask |= mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
 static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv,
+			ironlake_enable_irq(dev_priv,
 					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
@@ -508,20 +543,18 @@ static void
 render_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv,
+			ironlake_disable_irq(dev_priv,
 					     GT_USER_INTERRUPT |
 					     GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&ring->irq_lock);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -534,19 +567,24 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 	POSTING_READ(mmio);
 }
 
-static void
+static int
 bsd_ring_flush(struct intel_ring_buffer *ring,
 	       u32 invalidate_domains,
 	       u32 flush_domains)
 {
+	int ret;
+
 	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
+		return 0;
 
-	if (intel_ring_begin(ring, 2) == 0) {
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static int
@@ -577,18 +615,15 @@ static bool
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_enable_graphics_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
@@ -597,15 +632,47 @@ static void
 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+}
 
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_disable_graphics_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
 	}
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
 }
 
 static bool
@@ -748,6 +815,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
+	spin_lock_init(&ring->irq_lock);
+	ring->irq_mask = ~0;
+
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -785,6 +855,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	if (ret)
 		goto err_unmap;
 
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
 	return 0;
 
 err_unmap:
@@ -827,8 +905,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
-	int rem;
-	rem = ring->size - ring->tail;
+	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
 		int ret = intel_wait_ring_buffer(ring, rem);
@@ -851,6 +928,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 
 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
+	int reread = 0;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
@@ -863,9 +941,8 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 	 * fallback to the slow and accurate path.
 	 */
 	head = intel_read_status_page(ring, 4);
-	if (head < ring->actual_head)
+	if (reread)
 		head = I915_READ_HEAD(ring);
-	ring->actual_head = head;
 	ring->head = head & HEAD_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
@@ -884,6 +961,7 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 		msleep(1);
 		if (atomic_read(&dev_priv->mm.wedged))
 			return -EAGAIN;
+		reread = 1;
 	} while (!time_after(jiffies, end));
 	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
@@ -895,7 +973,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	int n = 4*num_dwords;
 	int ret;
 
-	if (unlikely(ring->tail + n > ring->size)) {
+	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
@@ -973,20 +1051,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
 
-static void gen6_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
 			    u32 invalidate_domains,
 			    u32 flush_domains)
 {
+	int ret;
+
 	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
+		return 0;
 
-	if (intel_ring_begin(ring, 4) == 0) {
-		intel_ring_emit(ring, MI_FLUSH_DW);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
-	}
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static int
@@ -1008,15 +1091,35 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 }
 
 static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 
 static void
 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_put_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 
 /* ring buffer for Video Codec for Gen6+ */
@@ -1040,13 +1143,17 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 static bool
 blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_BLT_USER_INTERRUPT,
+				 GEN6_BLITTER_USER_INTERRUPT);
 }
 
 static void
 blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
+	gen6_ring_put_irq(ring,
+			  GT_BLT_USER_INTERRUPT,
+			  GEN6_BLITTER_USER_INTERRUPT);
 }
 
 
@@ -1115,20 +1222,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
 	return intel_ring_begin(ring, 4);
 }
 
-static void blt_ring_flush(struct intel_ring_buffer *ring,
+static int blt_ring_flush(struct intel_ring_buffer *ring,
 			   u32 invalidate_domains,
 			   u32 flush_domains)
 {
+	int ret;
+
 	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
+		return 0;
 
-	if (blt_ring_begin(ring, 4) == 0) {
-		intel_ring_emit(ring, MI_FLUSH_DW);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
-	}
+	ret = blt_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -1165,6 +1277,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;