path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  725
1 file changed, 334 insertions(+), 391 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 62892a826ede..b59b6d5b7583 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,9 +53,35 @@ static inline int ring_space(struct intel_ring_buffer *ring)
 }
 
 static int
-render_ring_flush(struct intel_ring_buffer *ring,
-                  u32 invalidate_domains,
-                  u32 flush_domains)
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+                       u32 invalidate_domains,
+                       u32 flush_domains)
+{
+        u32 cmd;
+        int ret;
+
+        cmd = MI_FLUSH;
+        if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+                cmd |= MI_NO_WRITE_FLUSH;
+
+        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+                cmd |= MI_READ_FLUSH;
+
+        ret = intel_ring_begin(ring, 2);
+        if (ret)
+                return ret;
+
+        intel_ring_emit(ring, cmd);
+        intel_ring_emit(ring, MI_NOOP);
+        intel_ring_advance(ring);
+
+        return 0;
+}
+
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+                       u32 invalidate_domains,
+                       u32 flush_domains)
 {
         struct drm_device *dev = ring->dev;
         u32 cmd;
@@ -90,17 +116,8 @@ render_ring_flush(struct intel_ring_buffer *ring,
          */
 
         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-        if ((invalidate_domains|flush_domains) &
-            I915_GEM_DOMAIN_RENDER)
+        if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
                 cmd &= ~MI_NO_WRITE_FLUSH;
-        if (INTEL_INFO(dev)->gen < 4) {
-                /*
-                 * On the 965, the sampler cache always gets flushed
-                 * and this bit is reserved.
-                 */
-                if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-                        cmd |= MI_READ_FLUSH;
-        }
         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                 cmd |= MI_EXE_FLUSH;
 
@@ -290,9 +307,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                         | RING_VALID);
 
         /* If the head is still not zero, the ring is dead */
-        if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
-            I915_READ_START(ring) != obj->gtt_offset ||
-            (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+                     I915_READ_START(ring) == obj->gtt_offset &&
+                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                 DRM_ERROR("%s initialization failed "
                           "ctl %08x head %08x tail %08x start %08x\n",
                           ring->name,
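
The hunk above stops treating a still-moving HEAD as an immediate failure and instead polls the ring registers for up to 50 ms via wait_for(). As a rough, hedged sketch only (the real macro lives in intel_drv.h; its sleep granularity and return convention may differ), wait_for(COND, MS) behaves like a bounded poll:

#define wait_for_sketch(COND, MS) ({                                    \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);      \
        int ret__ = 0;                                                  \
        /* poll COND until it holds or MS milliseconds elapse */       \
        while (!(COND)) {                                               \
                if (time_after(jiffies, timeout__)) {                   \
                        ret__ = -ETIMEDOUT;                             \
                        break;                                          \
                }                                                       \
                msleep(1);                                              \
        }                                                               \
        ret__;                                                          \
})

so the DRM_ERROR branch above fires only if the ring still looks dead after the timeout expires.
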
@@ -384,12 +401,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
         int ret = init_ring_common(ring);
 
         if (INTEL_INFO(dev)->gen > 3) {
-                int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-                I915_WRITE(MI_MODE, mode);
+                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
                 if (IS_GEN7(dev))
                         I915_WRITE(GFX_MODE_GEN7,
-                                   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
-                                   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+                                   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+                                   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
         }
 
         if (INTEL_INFO(dev)->gen >= 5) {
@@ -398,7 +414,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
                         return ret;
         }
 
-
         if (IS_GEN6(dev)) {
                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
                  * "If this bit is set, STCunit will have LRA as replacement
@@ -406,13 +421,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
                  * policy is not supported."
                  */
                 I915_WRITE(CACHE_MODE_0,
-                           CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
         }
 
-        if (INTEL_INFO(dev)->gen >= 6) {
-                I915_WRITE(INSTPM,
-                           INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
-        }
+        if (INTEL_INFO(dev)->gen >= 6)
+                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
         return ret;
 }
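
The register writes above switch from open-coded "bit << 16 | bit" updates to the masked-bit helpers. Judging only from the old code they replace (e.g. INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING, and CM0_STC_EVICT_DISABLE_LRA_SNB shifted by CM0_MASK_SHIFT), the helpers presumably expand to something like the sketch below; the authoritative definitions live in i915_reg.h:

/* Masked registers only latch the low-word bits whose companion mask bit
 * in the high word is also set, so enable/disable carry the mask in 31:16. */
#define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)  ((a) << 16)

This lets a single 32-bit write flip one bit without a read-modify-write of the whole register.
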
@@ -483,21 +496,30 @@ gen6_add_request(struct intel_ring_buffer *ring,
  * @seqno - seqno which the waiter will block on
  */
 static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
-                struct intel_ring_buffer *signaller,
-                int ring,
-                u32 seqno)
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+               struct intel_ring_buffer *signaller,
+               u32 seqno)
 {
         int ret;
         u32 dw1 = MI_SEMAPHORE_MBOX |
                   MI_SEMAPHORE_COMPARE |
                   MI_SEMAPHORE_REGISTER;
 
+        /* Throughout all of the GEM code, seqno passed implies our current
+         * seqno is >= the last seqno executed. However for hardware the
+         * comparison is strictly greater than.
+         */
+        seqno -= 1;
+
+        WARN_ON(signaller->semaphore_register[waiter->id] ==
+                MI_SEMAPHORE_SYNC_INVALID);
+
         ret = intel_ring_begin(waiter, 4);
         if (ret)
                 return ret;
 
-        intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+        intel_ring_emit(waiter,
+                        dw1 | signaller->semaphore_register[waiter->id]);
         intel_ring_emit(waiter, seqno);
         intel_ring_emit(waiter, 0);
         intel_ring_emit(waiter, MI_NOOP);
@@ -506,47 +528,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter,
         return 0;
 }
 
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
-                    struct intel_ring_buffer *signaller,
-                    u32 seqno)
-{
-        WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
-        return intel_ring_sync(waiter,
-                               signaller,
-                               RCS,
-                               seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
-                      struct intel_ring_buffer *signaller,
-                      u32 seqno)
-{
-        WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
-        return intel_ring_sync(waiter,
-                               signaller,
-                               VCS,
-                               seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
-                      struct intel_ring_buffer *signaller,
-                      u32 seqno)
-{
-        WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
-        return intel_ring_sync(waiter,
-                               signaller,
-                               BCS,
-                               seqno);
-}
-
-
-
 #define PIPE_CONTROL_FLUSH(ring__, addr__) \
 do { \
         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
@@ -608,27 +589,6 @@ pc_render_add_request(struct intel_ring_buffer *ring,
         return 0;
 }
 
-static int
-render_ring_add_request(struct intel_ring_buffer *ring,
-                        u32 *result)
-{
-        u32 seqno = i915_gem_next_request_seqno(ring);
-        int ret;
-
-        ret = intel_ring_begin(ring, 4);
-        if (ret)
-                return ret;
-
-        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-        intel_ring_emit(ring, seqno);
-        intel_ring_emit(ring, MI_USER_INTERRUPT);
-        intel_ring_advance(ring);
-
-        *result = seqno;
-        return 0;
-}
-
 static u32
 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
 {
@@ -655,76 +615,115 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
         return pc->cpu_page[0];
 }
 
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
-        dev_priv->gt_irq_mask &= ~mask;
-        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-        POSTING_READ(GTIMR);
+        struct drm_device *dev = ring->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned long flags;
+
+        if (!dev->irq_enabled)
+                return false;
+
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
+        if (ring->irq_refcount++ == 0) {
+                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+                POSTING_READ(GTIMR);
+        }
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+        return true;
 }
 
 static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
 {
-        dev_priv->gt_irq_mask |= mask;
-        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-        POSTING_READ(GTIMR);
+        struct drm_device *dev = ring->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned long flags;
+
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
+        if (--ring->irq_refcount == 0) {
+                dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+                POSTING_READ(GTIMR);
+        }
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
-        dev_priv->irq_mask &= ~mask;
-        I915_WRITE(IMR, dev_priv->irq_mask);
-        POSTING_READ(IMR);
+        struct drm_device *dev = ring->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned long flags;
+
+        if (!dev->irq_enabled)
+                return false;
+
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
+        if (ring->irq_refcount++ == 0) {
+                dev_priv->irq_mask &= ~ring->irq_enable_mask;
+                I915_WRITE(IMR, dev_priv->irq_mask);
+                POSTING_READ(IMR);
+        }
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+        return true;
 }
 
 static void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
-        dev_priv->irq_mask |= mask;
-        I915_WRITE(IMR, dev_priv->irq_mask);
-        POSTING_READ(IMR);
+        struct drm_device *dev = ring->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned long flags;
+
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
+        if (--ring->irq_refcount == 0) {
+                dev_priv->irq_mask |= ring->irq_enable_mask;
+                I915_WRITE(IMR, dev_priv->irq_mask);
+                POSTING_READ(IMR);
+        }
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned long flags;
 
         if (!dev->irq_enabled)
                 return false;
 
-        spin_lock(&ring->irq_lock);
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (ring->irq_refcount++ == 0) {
-                if (HAS_PCH_SPLIT(dev))
-                        ironlake_enable_irq(dev_priv,
-                                            GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
-                else
-                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+                dev_priv->irq_mask &= ~ring->irq_enable_mask;
+                I915_WRITE16(IMR, dev_priv->irq_mask);
+                POSTING_READ16(IMR);
         }
-        spin_unlock(&ring->irq_lock);
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
         return true;
 }
 
 static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned long flags;
 
-        spin_lock(&ring->irq_lock);
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (--ring->irq_refcount == 0) {
-                if (HAS_PCH_SPLIT(dev))
-                        ironlake_disable_irq(dev_priv,
-                                             GT_USER_INTERRUPT |
-                                             GT_PIPE_NOTIFY);
-                else
-                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+                dev_priv->irq_mask |= ring->irq_enable_mask;
+                I915_WRITE16(IMR, dev_priv->irq_mask);
+                POSTING_READ16(IMR);
         }
-        spin_unlock(&ring->irq_lock);
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -776,7 +775,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-ring_add_request(struct intel_ring_buffer *ring,
+i9xx_add_request(struct intel_ring_buffer *ring,
                  u32 *result)
 {
         u32 seqno;
@@ -799,10 +798,11 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned long flags;
 
         if (!dev->irq_enabled)
                 return false;
@@ -812,120 +812,87 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
          * blt/bsd rings on ivb. */
         gen6_gt_force_wake_get(dev_priv);
 
-        spin_lock(&ring->irq_lock);
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (ring->irq_refcount++ == 0) {
-                ring->irq_mask &= ~rflag;
-                I915_WRITE_IMR(ring, ring->irq_mask);
-                ironlake_enable_irq(dev_priv, gflag);
+                I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+                POSTING_READ(GTIMR);
         }
-        spin_unlock(&ring->irq_lock);
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
         return true;
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned long flags;
 
-        spin_lock(&ring->irq_lock);
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (--ring->irq_refcount == 0) {
-                ring->irq_mask |= rflag;
-                I915_WRITE_IMR(ring, ring->irq_mask);
-                ironlake_disable_irq(dev_priv, gflag);
+                I915_WRITE_IMR(ring, ~0);
+                dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+                POSTING_READ(GTIMR);
         }
-        spin_unlock(&ring->irq_lock);
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
         gen6_gt_force_wake_put(dev_priv);
 }
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
-        struct drm_device *dev = ring->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
-
-        if (!dev->irq_enabled)
-                return false;
+        int ret;
 
-        spin_lock(&ring->irq_lock);
-        if (ring->irq_refcount++ == 0) {
-                if (IS_G4X(dev))
-                        i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-                else
-                        ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-        }
-        spin_unlock(&ring->irq_lock);
+        ret = intel_ring_begin(ring, 2);
+        if (ret)
+                return ret;
 
-        return true;
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-        struct drm_device *dev = ring->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
+        intel_ring_emit(ring,
+                        MI_BATCH_BUFFER_START |
+                        MI_BATCH_GTT |
+                        MI_BATCH_NON_SECURE_I965);
+        intel_ring_emit(ring, offset);
+        intel_ring_advance(ring);
 
-        spin_lock(&ring->irq_lock);
-        if (--ring->irq_refcount == 0) {
-                if (IS_G4X(dev))
-                        i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-                else
-                        ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-        }
-        spin_unlock(&ring->irq_lock);
+        return 0;
 }
 
 static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                         u32 offset, u32 len)
 {
         int ret;
 
-        ret = intel_ring_begin(ring, 2);
+        ret = intel_ring_begin(ring, 4);
         if (ret)
                 return ret;
 
-        intel_ring_emit(ring,
-                        MI_BATCH_BUFFER_START | (2 << 6) |
-                        MI_BATCH_NON_SECURE_I965);
-        intel_ring_emit(ring, offset);
+        intel_ring_emit(ring, MI_BATCH_BUFFER);
+        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+        intel_ring_emit(ring, offset + len - 8);
+        intel_ring_emit(ring, 0);
         intel_ring_advance(ring);
 
         return 0;
 }
 
 static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
                          u32 offset, u32 len)
 {
-        struct drm_device *dev = ring->dev;
         int ret;
 
-        if (IS_I830(dev) || IS_845G(dev)) {
-                ret = intel_ring_begin(ring, 4);
-                if (ret)
-                        return ret;
-
-                intel_ring_emit(ring, MI_BATCH_BUFFER);
-                intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-                intel_ring_emit(ring, offset + len - 8);
-                intel_ring_emit(ring, 0);
-        } else {
-                ret = intel_ring_begin(ring, 2);
-                if (ret)
-                        return ret;
+        ret = intel_ring_begin(ring, 2);
+        if (ret)
+                return ret;
 
-                if (INTEL_INFO(dev)->gen >= 4) {
-                        intel_ring_emit(ring,
-                                        MI_BATCH_BUFFER_START | (2 << 6) |
-                                        MI_BATCH_NON_SECURE_I965);
-                        intel_ring_emit(ring, offset);
-                } else {
-                        intel_ring_emit(ring,
-                                        MI_BATCH_BUFFER_START | (2 << 6));
-                        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
-                }
-        }
+        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
         intel_ring_advance(ring);
 
         return 0;
@@ -933,7 +900,6 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-        drm_i915_private_t *dev_priv = ring->dev->dev_private;
         struct drm_i915_gem_object *obj;
 
         obj = ring->status_page.obj;
@@ -944,14 +910,11 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
         i915_gem_object_unpin(obj);
         drm_gem_object_unreference(&obj->base);
         ring->status_page.obj = NULL;
-
-        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
 static int init_status_page(struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = ring->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj;
         int ret;
 
@@ -972,7 +935,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
         ring->status_page.gfx_addr = obj->gtt_offset;
         ring->status_page.page_addr = kmap(obj->pages[0]);
         if (ring->status_page.page_addr == NULL) {
-                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                 goto err_unpin;
         }
         ring->status_page.obj = obj;
@@ -992,8 +954,8 @@ err:
         return ret;
 }
 
-int intel_init_ring_buffer(struct drm_device *dev,
-                           struct intel_ring_buffer *ring)
+static int intel_init_ring_buffer(struct drm_device *dev,
+                                  struct intel_ring_buffer *ring)
 {
         struct drm_i915_gem_object *obj;
         int ret;
@@ -1002,10 +964,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
         INIT_LIST_HEAD(&ring->active_list);
         INIT_LIST_HEAD(&ring->request_list);
         INIT_LIST_HEAD(&ring->gpu_write_list);
+        ring->size = 32 * PAGE_SIZE;
 
         init_waitqueue_head(&ring->irq_queue);
-        spin_lock_init(&ring->irq_lock);
-        ring->irq_mask = ~0;
 
         if (I915_NEED_GFX_HWS(dev)) {
                 ret = init_status_page(ring);
@@ -1026,20 +987,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
         if (ret)
                 goto err_unref;
 
-        ring->map.size = ring->size;
-        ring->map.offset = dev->agp->base + obj->gtt_offset;
-        ring->map.type = 0;
-        ring->map.flags = 0;
-        ring->map.mtrr = 0;
-
-        drm_core_ioremap_wc(&ring->map, dev);
-        if (ring->map.handle == NULL) {
+        ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+                                         ring->size);
+        if (ring->virtual_start == NULL) {
                 DRM_ERROR("Failed to map ringbuffer.\n");
                 ret = -EINVAL;
                 goto err_unpin;
         }
 
-        ring->virtual_start = ring->map.handle;
         ret = ring->init(ring);
         if (ret)
                 goto err_unmap;
@@ -1055,7 +1010,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
         return 0;
 
 err_unmap:
-        drm_core_ioremapfree(&ring->map, dev);
+        iounmap(ring->virtual_start);
 err_unpin:
         i915_gem_object_unpin(obj);
 err_unref:
@@ -1083,7 +1038,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
         I915_WRITE_CTL(ring, 0);
 
-        drm_core_ioremapfree(&ring->map, ring->dev);
+        iounmap(ring->virtual_start);
 
         i915_gem_object_unpin(ring->obj);
         drm_gem_object_unreference(&ring->obj->base);
@@ -1097,7 +1052,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
-        unsigned int *virt;
+        uint32_t __iomem *virt;
         int rem = ring->size - ring->tail;
 
         if (ring->space < rem) {
@@ -1106,12 +1061,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
                         return ret;
         }
 
-        virt = (unsigned int *)(ring->virtual_start + ring->tail);
-        rem /= 8;
-        while (rem--) {
-                *virt++ = MI_NOOP;
-                *virt++ = MI_NOOP;
-        }
+        virt = ring->virtual_start + ring->tail;
+        rem /= 4;
+        while (rem--)
+                iowrite32(MI_NOOP, virt++);
 
         ring->tail = 0;
         ring->space = ring_space(ring);
@@ -1132,9 +1085,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
         was_interruptible = dev_priv->mm.interruptible;
         dev_priv->mm.interruptible = false;
 
-        ret = i915_wait_request(ring, seqno, true);
+        ret = i915_wait_request(ring, seqno);
 
         dev_priv->mm.interruptible = was_interruptible;
+        if (!ret)
+                i915_gem_retire_requests_ring(ring);
 
         return ret;
 }
@@ -1208,15 +1163,12 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
                 return ret;
 
         trace_i915_ring_wait_begin(ring);
-        if (drm_core_check_feature(dev, DRIVER_GEM))
-                /* With GEM the hangcheck timer should kick us out of the loop,
-                 * leaving it early runs the risk of corrupting GEM state (due
-                 * to running on almost untested codepaths). But on resume
-                 * timers don't work yet, so prevent a complete hang in that
-                 * case by choosing an insanely large timeout. */
-                end = jiffies + 60 * HZ;
-        else
-                end = jiffies + 3 * HZ;
+        /* With GEM the hangcheck timer should kick us out of the loop,
+         * leaving it early runs the risk of corrupting GEM state (due
+         * to running on almost untested codepaths). But on resume
+         * timers don't work yet, so prevent a complete hang in that
+         * case by choosing an insanely large timeout. */
+        end = jiffies + 60 * HZ;
 
         do {
                 ring->head = I915_READ_HEAD(ring);
@@ -1268,48 +1220,14 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
 {
+        struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
         ring->tail &= ring->size - 1;
+        if (dev_priv->stop_rings & intel_ring_flag(ring))
+                return;
         ring->write_tail(ring, ring->tail);
 }
 
-static const struct intel_ring_buffer render_ring = {
-        .name = "render ring",
-        .id = RCS,
-        .mmio_base = RENDER_RING_BASE,
-        .size = 32 * PAGE_SIZE,
-        .init = init_render_ring,
-        .write_tail = ring_write_tail,
-        .flush = render_ring_flush,
-        .add_request = render_ring_add_request,
-        .get_seqno = ring_get_seqno,
-        .irq_get = render_ring_get_irq,
-        .irq_put = render_ring_put_irq,
-        .dispatch_execbuffer = render_ring_dispatch_execbuffer,
-        .cleanup = render_ring_cleanup,
-        .sync_to = render_ring_sync_to,
-        .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
-                               MI_SEMAPHORE_SYNC_RV,
-                               MI_SEMAPHORE_SYNC_RB},
-        .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
-};
-
-/* ring buffer for bit-stream decoder */
-
-static const struct intel_ring_buffer bsd_ring = {
-        .name = "bsd ring",
-        .id = VCS,
-        .mmio_base = BSD_RING_BASE,
-        .size = 32 * PAGE_SIZE,
-        .init = init_ring_common,
-        .write_tail = ring_write_tail,
-        .flush = bsd_ring_flush,
-        .add_request = ring_add_request,
-        .get_seqno = ring_get_seqno,
-        .irq_get = bsd_ring_get_irq,
-        .irq_put = bsd_ring_put_irq,
-        .dispatch_execbuffer = ring_dispatch_execbuffer,
-};
-
 
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                      u32 value)
@@ -1372,77 +1290,8 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
         return 0;
 }
 
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_get_irq(ring,
-                                 GT_USER_INTERRUPT,
-                                 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_put_irq(ring,
-                                 GT_USER_INTERRUPT,
-                                 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_get_irq(ring,
-                                 GT_GEN6_BSD_USER_INTERRUPT,
-                                 GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_put_irq(ring,
-                                 GT_GEN6_BSD_USER_INTERRUPT,
-                                 GEN6_BSD_USER_INTERRUPT);
-}
-
-/* ring buffer for Video Codec for Gen6+ */
-static const struct intel_ring_buffer gen6_bsd_ring = {
-        .name = "gen6 bsd ring",
-        .id = VCS,
-        .mmio_base = GEN6_BSD_RING_BASE,
-        .size = 32 * PAGE_SIZE,
-        .init = init_ring_common,
-        .write_tail = gen6_bsd_ring_write_tail,
-        .flush = gen6_ring_flush,
-        .add_request = gen6_add_request,
-        .get_seqno = gen6_ring_get_seqno,
-        .irq_get = gen6_bsd_ring_get_irq,
-        .irq_put = gen6_bsd_ring_put_irq,
-        .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
-        .sync_to = gen6_bsd_ring_sync_to,
-        .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
-                               MI_SEMAPHORE_SYNC_INVALID,
-                               MI_SEMAPHORE_SYNC_VB},
-        .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
-};
-
 /* Blitter support (SandyBridge+) */
 
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_get_irq(ring,
-                                 GT_BLT_USER_INTERRUPT,
-                                 GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
-        gen6_ring_put_irq(ring,
-                          GT_BLT_USER_INTERRUPT,
-                          GEN6_BLITTER_USER_INTERRUPT);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
                           u32 invalidate, u32 flush)
 {
@@ -1464,42 +1313,63 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
         return 0;
 }
 
-static const struct intel_ring_buffer gen6_blt_ring = {
-        .name = "blt ring",
-        .id = BCS,
-        .mmio_base = BLT_RING_BASE,
-        .size = 32 * PAGE_SIZE,
-        .init = init_ring_common,
-        .write_tail = ring_write_tail,
-        .flush = blt_ring_flush,
-        .add_request = gen6_add_request,
-        .get_seqno = gen6_ring_get_seqno,
-        .irq_get = blt_ring_get_irq,
-        .irq_put = blt_ring_put_irq,
-        .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
-        .sync_to = gen6_blt_ring_sync_to,
-        .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
-                               MI_SEMAPHORE_SYNC_BV,
-                               MI_SEMAPHORE_SYNC_INVALID},
-        .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
-};
-
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-        *ring = render_ring;
+        ring->name = "render ring";
+        ring->id = RCS;
+        ring->mmio_base = RENDER_RING_BASE;
+
         if (INTEL_INFO(dev)->gen >= 6) {
                 ring->add_request = gen6_add_request;
                 ring->flush = gen6_render_ring_flush;
-                ring->irq_get = gen6_render_ring_get_irq;
-                ring->irq_put = gen6_render_ring_put_irq;
+                ring->irq_get = gen6_ring_get_irq;
+                ring->irq_put = gen6_ring_put_irq;
+                ring->irq_enable_mask = GT_USER_INTERRUPT;
                 ring->get_seqno = gen6_ring_get_seqno;
+                ring->sync_to = gen6_ring_sync;
+                ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+                ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+                ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+                ring->signal_mbox[0] = GEN6_VRSYNC;
+                ring->signal_mbox[1] = GEN6_BRSYNC;
         } else if (IS_GEN5(dev)) {
                 ring->add_request = pc_render_add_request;
+                ring->flush = gen4_render_ring_flush;
                 ring->get_seqno = pc_render_get_seqno;
+                ring->irq_get = gen5_ring_get_irq;
+                ring->irq_put = gen5_ring_put_irq;
+                ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+        } else {
+                ring->add_request = i9xx_add_request;
+                if (INTEL_INFO(dev)->gen < 4)
+                        ring->flush = gen2_render_ring_flush;
+                else
+                        ring->flush = gen4_render_ring_flush;
+                ring->get_seqno = ring_get_seqno;
+                if (IS_GEN2(dev)) {
+                        ring->irq_get = i8xx_ring_get_irq;
+                        ring->irq_put = i8xx_ring_put_irq;
+                } else {
+                        ring->irq_get = i9xx_ring_get_irq;
+                        ring->irq_put = i9xx_ring_put_irq;
+                }
+                ring->irq_enable_mask = I915_USER_INTERRUPT;
         }
+        ring->write_tail = ring_write_tail;
+        if (INTEL_INFO(dev)->gen >= 6)
+                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+        else if (INTEL_INFO(dev)->gen >= 4)
+                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+        else if (IS_I830(dev) || IS_845G(dev))
+                ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+        else
+                ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+        ring->init = init_render_ring;
+        ring->cleanup = render_ring_cleanup;
+
 
         if (!I915_NEED_GFX_HWS(dev)) {
                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1514,15 +1384,41 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-        *ring = render_ring;
+        ring->name = "render ring";
+        ring->id = RCS;
+        ring->mmio_base = RENDER_RING_BASE;
+
         if (INTEL_INFO(dev)->gen >= 6) {
-                ring->add_request = gen6_add_request;
-                ring->irq_get = gen6_render_ring_get_irq;
-                ring->irq_put = gen6_render_ring_put_irq;
-        } else if (IS_GEN5(dev)) {
-                ring->add_request = pc_render_add_request;
-                ring->get_seqno = pc_render_get_seqno;
+                /* non-kms not supported on gen6+ */
+                return -ENODEV;
+        }
+
+        /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+         * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+         * the special gen5 functions. */
+        ring->add_request = i9xx_add_request;
+        if (INTEL_INFO(dev)->gen < 4)
+                ring->flush = gen2_render_ring_flush;
+        else
+                ring->flush = gen4_render_ring_flush;
+        ring->get_seqno = ring_get_seqno;
+        if (IS_GEN2(dev)) {
+                ring->irq_get = i8xx_ring_get_irq;
+                ring->irq_put = i8xx_ring_put_irq;
+        } else {
+                ring->irq_get = i9xx_ring_get_irq;
+                ring->irq_put = i9xx_ring_put_irq;
         }
+        ring->irq_enable_mask = I915_USER_INTERRUPT;
+        ring->write_tail = ring_write_tail;
+        if (INTEL_INFO(dev)->gen >= 4)
+                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+        else if (IS_I830(dev) || IS_845G(dev))
+                ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+        else
+                ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+        ring->init = init_render_ring;
+        ring->cleanup = render_ring_cleanup;
 
         if (!I915_NEED_GFX_HWS(dev))
                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1537,20 +1433,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
         if (IS_I830(ring->dev))
                 ring->effective_size -= 128;
 
-        ring->map.offset = start;
-        ring->map.size = size;
-        ring->map.type = 0;
-        ring->map.flags = 0;
-        ring->map.mtrr = 0;
-
-        drm_core_ioremap_wc(&ring->map, dev);
-        if (ring->map.handle == NULL) {
+        ring->virtual_start = ioremap_wc(start, size);
+        if (ring->virtual_start == NULL) {
                 DRM_ERROR("can not ioremap virtual address for"
                           " ring buffer\n");
                 return -ENOMEM;
         }
 
-        ring->virtual_start = (void __force __iomem *)ring->map.handle;
         return 0;
 }
 
@@ -1559,10 +1448,46 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
-        if (IS_GEN6(dev) || IS_GEN7(dev))
-                *ring = gen6_bsd_ring;
-        else
-                *ring = bsd_ring;
+        ring->name = "bsd ring";
+        ring->id = VCS;
+
+        ring->write_tail = ring_write_tail;
+        if (IS_GEN6(dev) || IS_GEN7(dev)) {
+                ring->mmio_base = GEN6_BSD_RING_BASE;
+                /* gen6 bsd needs a special wa for tail updates */
+                if (IS_GEN6(dev))
+                        ring->write_tail = gen6_bsd_ring_write_tail;
+                ring->flush = gen6_ring_flush;
+                ring->add_request = gen6_add_request;
+                ring->get_seqno = gen6_ring_get_seqno;
+                ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+                ring->irq_get = gen6_ring_get_irq;
+                ring->irq_put = gen6_ring_put_irq;
+                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+                ring->sync_to = gen6_ring_sync;
+                ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+                ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+                ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+                ring->signal_mbox[0] = GEN6_RVSYNC;
+                ring->signal_mbox[1] = GEN6_BVSYNC;
+        } else {
+                ring->mmio_base = BSD_RING_BASE;
+                ring->flush = bsd_ring_flush;
+                ring->add_request = i9xx_add_request;
+                ring->get_seqno = ring_get_seqno;
+                if (IS_GEN5(dev)) {
+                        ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+                        ring->irq_get = gen5_ring_get_irq;
+                        ring->irq_put = gen5_ring_put_irq;
+                } else {
+                        ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+                        ring->irq_get = i9xx_ring_get_irq;
+                        ring->irq_put = i9xx_ring_put_irq;
+                }
+                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+        }
+        ring->init = init_ring_common;
+
 
         return intel_init_ring_buffer(dev, ring);
 }
@@ -1572,7 +1497,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 
-        *ring = gen6_blt_ring;
+        ring->name = "blitter ring";
+        ring->id = BCS;
+
+        ring->mmio_base = BLT_RING_BASE;
+        ring->write_tail = ring_write_tail;
+        ring->flush = blt_ring_flush;
+        ring->add_request = gen6_add_request;
+        ring->get_seqno = gen6_ring_get_seqno;
+        ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+        ring->irq_get = gen6_ring_get_irq;
+        ring->irq_put = gen6_ring_put_irq;
+        ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+        ring->sync_to = gen6_ring_sync;
+        ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+        ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+        ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+        ring->signal_mbox[0] = GEN6_RBSYNC;
+        ring->signal_mbox[1] = GEN6_VBSYNC;
+        ring->init = init_ring_common;
 
         return intel_init_ring_buffer(dev, ring);
 }