Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  255
1 file changed, 184 insertions, 71 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 56bc95c056dd..03e337072517 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,7 +48,7 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 	return seqno;
 }
 
-static void
+static int
 render_ring_flush(struct intel_ring_buffer *ring,
 		  u32 invalidate_domains,
 		  u32 flush_domains)
@@ -56,6 +56,7 @@ render_ring_flush(struct intel_ring_buffer *ring,
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
+	int ret;
 
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
@@ -116,12 +117,16 @@ render_ring_flush(struct intel_ring_buffer *ring,
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		if (intel_ring_begin(ring, 2) == 0) {
-			intel_ring_emit(ring, cmd);
-			intel_ring_emit(ring, MI_NOOP);
-			intel_ring_advance(ring);
-		}
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, cmd);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 	}
+
+	return 0;
 }
 
 static void ring_write_tail(struct intel_ring_buffer *ring,
@@ -480,26 +485,56 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
 	return pc->cpu_page[0];
 }
 
+static void
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask &= ~mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask |= mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask &= ~mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask |= mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
 static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv,
-						     GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
+			ironlake_enable_irq(dev_priv,
+					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
@@ -508,20 +543,18 @@ static void
 render_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv,
-						      GT_USER_INTERRUPT |
-						      GT_PIPE_NOTIFY);
+			ironlake_disable_irq(dev_priv,
+					     GT_USER_INTERRUPT |
+					     GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&ring->irq_lock);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -534,19 +567,24 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 	POSTING_READ(mmio);
 }
 
-static void
+static int
 bsd_ring_flush(struct intel_ring_buffer *ring,
 	       u32 invalidate_domains,
 	       u32 flush_domains)
 {
+	int ret;
+
 	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
+		return 0;
 
-	if (intel_ring_begin(ring, 2) == 0) {
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static int
@@ -577,18 +615,15 @@ static bool
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_enable_graphics_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
@@ -597,15 +632,47 @@ static void
 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+}
 
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_disable_graphics_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
 	}
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
 }
 
 static bool
@@ -748,6 +815,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
+	spin_lock_init(&ring->irq_lock);
+	ring->irq_mask = ~0;
+
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -785,6 +855,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	if (ret)
 		goto err_unmap;
 
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
 	return 0;
 
 err_unmap:
@@ -827,8 +905,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
-	int rem;
-	rem = ring->size - ring->tail;
+	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
 		int ret = intel_wait_ring_buffer(ring, rem);
@@ -895,7 +972,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	int n = 4*num_dwords;
 	int ret;
 
-	if (unlikely(ring->tail + n > ring->size)) {
+	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
@@ -973,20 +1050,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 		       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
 
-static void gen6_ring_flush(struct intel_ring_buffer *ring,
-			    u32 invalidate_domains,
-			    u32 flush_domains)
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
 {
+	int ret;
+
 	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
+		return 0;
 
-	if (intel_ring_begin(ring, 4) == 0) {
-		intel_ring_emit(ring, MI_FLUSH_DW);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
-	}
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static int
@@ -1008,15 +1090,35 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 }
 
 static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 
 static void
 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_put_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 
 /* ring buffer for Video Codec for Gen6+ */
@@ -1040,13 +1142,17 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 static bool
 blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_BLT_USER_INTERRUPT,
+				 GEN6_BLITTER_USER_INTERRUPT);
 }
 
 static void
 blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
+	gen6_ring_put_irq(ring,
+			  GT_BLT_USER_INTERRUPT,
+			  GEN6_BLITTER_USER_INTERRUPT);
 }
 
 
@@ -1115,20 +1221,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
 	return intel_ring_begin(ring, 4);
 }
 
-static void blt_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate_domains,
-			   u32 flush_domains)
+static int blt_ring_flush(struct intel_ring_buffer *ring,
+			  u32 invalidate_domains,
+			  u32 flush_domains)
 {
+	int ret;
+
 	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
+		return 0;
 
-	if (blt_ring_begin(ring, 4) == 0) {
-		intel_ring_emit(ring, MI_FLUSH_DW);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
-	}
+	ret = blt_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -1165,6 +1276,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;