 Documentation/DocBook/drm.tmpl             |   8
 drivers/gpu/drm/i915/Makefile              |   1
 drivers/gpu/drm/i915/i915_debugfs.c        | 289
 drivers/gpu/drm/i915/i915_dma.c            |  10
 drivers/gpu/drm/i915/i915_drv.c            |  61
 drivers/gpu/drm/i915/i915_drv.h            |  40
 drivers/gpu/drm/i915/i915_gem.c            |  68
 drivers/gpu/drm/i915/i915_gem_context.c    |  14
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   9
 drivers/gpu/drm/i915/i915_irq.c            | 248
 drivers/gpu/drm/i915/i915_reg.h            | 262
 drivers/gpu/drm/i915/i915_sysfs.c          |   2
 drivers/gpu/drm/i915/intel_crt.c           |  27
 drivers/gpu/drm/i915/intel_ddi.c           |  37
 drivers/gpu/drm/i915/intel_display.c       | 498
 drivers/gpu/drm/i915/intel_dp.c            | 145
 drivers/gpu/drm/i915/intel_drv.h           |  26
 drivers/gpu/drm/i915/intel_dvo.c           |  24
 drivers/gpu/drm/i915/intel_hdmi.c          |  69
 drivers/gpu/drm/i915/intel_lvds.c          |  26
 drivers/gpu/drm/i915/intel_overlay.c       |  13
 drivers/gpu/drm/i915/intel_panel.c         |   2
 drivers/gpu/drm/i915/intel_pm.c            | 698
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 201
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  22
 drivers/gpu/drm/i915/intel_sdvo.c          |  45
 drivers/gpu/drm/i915/intel_sideband.c      | 177
 drivers/gpu/drm/i915/intel_sprite.c        |   8
 include/drm/drm_rect.h                     |   9
 include/uapi/drm/i915_drm.h                |   3
 30 files changed, 2146 insertions(+), 896 deletions(-)
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 7c7af25b330c..91ee107d5d0e 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1653,8 +1653,6 @@ void intel_crt_init(struct drm_device *dev)
     <sect2>
       <title>KMS API Functions</title>
 !Edrivers/gpu/drm/drm_crtc.c
-!Edrivers/gpu/drm/drm_rect.c
-!Finclude/drm/drm_rect.h
     </sect2>
   </sect1>
 
@@ -2163,6 +2161,12 @@ void intel_crt_init(struct drm_device *dev)
       <title>EDID Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_edid.c
     </sect2>
+    <sect2>
+      <title>Rectangle Utilities Reference</title>
+!Pinclude/drm/drm_rect.h rect utils
+!Iinclude/drm/drm_rect.h
+!Edrivers/gpu/drm/drm_rect.c
+    </sect2>
   </sect1>
 
   <!-- Internals: vertical blanking -->
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91f3ac6cef35..40034ecefd3b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -36,6 +36,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  intel_overlay.o \
 	  intel_sprite.o \
 	  intel_opregion.o \
+	  intel_sideband.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a55630a80f83..76255a69752a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -570,6 +570,7 @@ static const char *ring_str(int ring)
 	case RCS: return "render";
 	case VCS: return "bsd";
 	case BCS: return "blt";
+	case VECS: return "vebox";
 	default: return "";
 	}
 }
@@ -604,15 +605,80 @@ static const char *purgeable_flag(int purgeable)
 	return purgeable ? " purgeable" : "";
 }
 
-static void print_error_buffers(struct seq_file *m,
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+			       const char *f, va_list args)
+{
+	unsigned len;
+
+	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+		e->err = -ENOSPC;
+		return;
+	}
+
+	if (e->bytes == e->size - 1 || e->err)
+		return;
+
+	/* Seek the first printf which is hits start position */
+	if (e->pos < e->start) {
+		len = vsnprintf(NULL, 0, f, args);
+		if (e->pos + len <= e->start) {
+			e->pos += len;
+			return;
+		}
+
+		/* First vsnprintf needs to fit in full for memmove*/
+		if (len >= e->size) {
+			e->err = -EIO;
+			return;
+		}
+	}
+
+	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+	if (len >= e->size - e->bytes)
+		len = e->size - e->bytes - 1;
+
+	/* If this is first printf in this window, adjust it so that
+	 * start position matches start of the buffer
+	 */
+	if (e->pos < e->start) {
+		const size_t off = e->start - e->pos;
+
+		/* Should not happen but be paranoid */
+		if (off > len || e->bytes) {
+			e->err = -EIO;
+			return;
+		}
+
+		memmove(e->buf, e->buf + off, len - off);
+		e->bytes = len - off;
+		e->pos = e->start;
+		return;
+	}
+
+	e->bytes += len;
+	e->pos += len;
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+	va_list args;
+
+	va_start(args, f);
+	i915_error_vprintf(e, f, args);
+	va_end(args);
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
 				const char *name,
 				struct drm_i915_error_buffer *err,
 				int count)
 {
-	seq_printf(m, "%s [%d]:\n", name, count);
+	err_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
+		err_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
@@ -627,50 +693,50 @@ static void print_error_buffers(struct seq_file *m,
 			   cache_level_str(err->cache_level));
 
 		if (err->name)
-			seq_printf(m, " (name: %d)", err->name);
+			err_printf(m, " (name: %d)", err->name);
 		if (err->fence_reg != I915_FENCE_REG_NONE)
-			seq_printf(m, " (fence: %d)", err->fence_reg);
+			err_printf(m, " (fence: %d)", err->fence_reg);
 
-		seq_printf(m, "\n");
+		err_printf(m, "\n");
 		err++;
 	}
 }
 
-static void i915_ring_error_state(struct seq_file *m,
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 				  struct drm_device *dev,
 				  struct drm_i915_error_state *error,
 				  unsigned ring)
 {
 	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
-	seq_printf(m, "%s command stream:\n", ring_str(ring));
-	seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
-	seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
-	seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
-	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
-	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
-	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
-	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+	err_printf(m, "%s command stream:\n", ring_str(ring));
+	err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+	err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+	err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
+	err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
+	err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
+	err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
+	err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
 	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-		seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
+		err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
 
 	if (INTEL_INFO(dev)->gen >= 4)
-		seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
-	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
-	seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+		err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+	err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+	err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
 	if (INTEL_INFO(dev)->gen >= 6) {
-		seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
-		seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-		seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+		err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
+		err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
 			   error->semaphore_mboxes[ring][0],
 			   error->semaphore_seqno[ring][0]);
-		seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
 			   error->semaphore_mboxes[ring][1],
 			   error->semaphore_seqno[ring][1]);
 	}
-	seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
-	seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
-	seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
-	seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+	err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+	err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
+	err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+	err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
 }
 
 struct i915_error_state_file_priv {
@@ -678,9 +744,11 @@ struct i915_error_state_file_priv {
 	struct drm_i915_error_state *error;
 };
 
-static int i915_error_state(struct seq_file *m, void *unused)
+
+static int i915_error_state(struct i915_error_state_file_priv *error_priv,
+			    struct drm_i915_error_state_buf *m)
+
 {
-	struct i915_error_state_file_priv *error_priv = m->private;
 	struct drm_device *dev = error_priv->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error = error_priv->error;
@@ -688,34 +756,35 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	int i, j, page, offset, elt;
 
 	if (!error) {
-		seq_printf(m, "no error state collected\n");
+		err_printf(m, "no error state collected\n");
 		return 0;
 	}
 
-	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
-	seq_printf(m, "Kernel: " UTS_RELEASE "\n");
-	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
-	seq_printf(m, "EIR: 0x%08x\n", error->eir);
-	seq_printf(m, "IER: 0x%08x\n", error->ier);
-	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-	seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
-	seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
-	seq_printf(m, "CCID: 0x%08x\n", error->ccid);
+	err_printf(m, "Kernel: " UTS_RELEASE "\n");
+	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+	err_printf(m, "EIR: 0x%08x\n", error->eir);
+	err_printf(m, "IER: 0x%08x\n", error->ier);
+	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+	err_printf(m, "CCID: 0x%08x\n", error->ccid);
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
-		seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
 
 	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
-		seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
+		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
+			   error->extra_instdone[i]);
 
 	if (INTEL_INFO(dev)->gen >= 6) {
-		seq_printf(m, "ERROR: 0x%08x\n", error->error);
-		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+		err_printf(m, "ERROR: 0x%08x\n", error->error);
+		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
 	if (INTEL_INFO(dev)->gen == 7)
-		seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
 	for_each_ring(ring, dev_priv, i)
 		i915_ring_error_state(m, dev, error, i);
@@ -734,24 +803,25 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		struct drm_i915_error_object *obj;
 
 		if ((obj = error->ring[i].batchbuffer)) {
-			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
 				   dev_priv->ring[i].name,
 				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+					err_printf(m, "%08x : %08x\n", offset,
+						   obj->pages[page][elt]);
 					offset += 4;
 				}
 			}
 		}
 
 		if (error->ring[i].num_requests) {
-			seq_printf(m, "%s --- %d requests\n",
+			err_printf(m, "%s --- %d requests\n",
 				   dev_priv->ring[i].name,
 				   error->ring[i].num_requests);
 			for (j = 0; j < error->ring[i].num_requests; j++) {
-				seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
 					   error->ring[i].requests[j].seqno,
 					   error->ring[i].requests[j].jiffies,
 					   error->ring[i].requests[j].tail);
@@ -759,13 +829,13 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		}
 
 		if ((obj = error->ring[i].ringbuffer)) {
-			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
 				   dev_priv->ring[i].name,
 				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-					seq_printf(m, "%08x : %08x\n",
+					err_printf(m, "%08x : %08x\n",
 						   offset,
 						   obj->pages[page][elt]);
 					offset += 4;
@@ -775,12 +845,12 @@ static int i915_error_state(struct seq_file *m, void *unused)
 
 		obj = error->ring[i].ctx;
 		if (obj) {
-			seq_printf(m, "%s --- HW Context = 0x%08x\n",
+			err_printf(m, "%s --- HW Context = 0x%08x\n",
 				   dev_priv->ring[i].name,
 				   obj->gtt_offset);
 			offset = 0;
 			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-				seq_printf(m, "[%04x] %08x %08x %08x %08x\n",
+				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
 					   offset,
 					   obj->pages[0][elt],
 					   obj->pages[0][elt+1],
@@ -806,8 +876,7 @@ i915_error_state_write(struct file *filp,
 			   size_t cnt,
 			   loff_t *ppos)
 {
-	struct seq_file *m = filp->private_data;
-	struct i915_error_state_file_priv *error_priv = m->private;
+	struct i915_error_state_file_priv *error_priv = filp->private_data;
 	struct drm_device *dev = error_priv->dev;
 	int ret;
 
@@ -842,25 +911,81 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
 	kref_get(&error_priv->error->ref);
 	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
-	return single_open(file, i915_error_state, error_priv);
+	file->private_data = error_priv;
+
+	return 0;
 }
 
 static int i915_error_state_release(struct inode *inode, struct file *file)
 {
-	struct seq_file *m = file->private_data;
-	struct i915_error_state_file_priv *error_priv = m->private;
+	struct i915_error_state_file_priv *error_priv = file->private_data;
 
 	if (error_priv->error)
 		kref_put(&error_priv->error->ref, i915_error_state_free);
 	kfree(error_priv);
 
-	return single_release(inode, file);
+	return 0;
+}
+
+static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
+				     size_t count, loff_t *pos)
+{
+	struct i915_error_state_file_priv *error_priv = file->private_data;
+	struct drm_i915_error_state_buf error_str;
+	loff_t tmp_pos = 0;
+	ssize_t ret_count = 0;
+	int ret = 0;
+
+	memset(&error_str, 0, sizeof(error_str));
+
+	/* We need to have enough room to store any i915_error_state printf
+	 * so that we can move it to start position.
+	 */
+	error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+	error_str.buf = kmalloc(error_str.size,
+				GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+	if (error_str.buf == NULL) {
+		error_str.size = PAGE_SIZE;
+		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
+	}
+
+	if (error_str.buf == NULL) {
+		error_str.size = 128;
+		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
+	}
+
+	if (error_str.buf == NULL)
+		return -ENOMEM;
+
+	error_str.start = *pos;
+
+	ret = i915_error_state(error_priv, &error_str);
+	if (ret)
+		goto out;
+
+	if (error_str.bytes == 0 && error_str.err) {
+		ret = error_str.err;
+		goto out;
+	}
+
+	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
+					    error_str.buf,
+					    error_str.bytes);
+
+	if (ret_count < 0)
+		ret = ret_count;
+	else
+		*pos = error_str.start + ret_count;
+out:
+	kfree(error_str.buf);
+	return ret ?: ret_count;
 }
 
 static const struct file_operations i915_error_state_fops = {
 	.owner = THIS_MODULE,
 	.open = i915_error_state_open,
-	.read = seq_read,
+	.read = i915_error_state_read,
 	.write = i915_error_state_write,
 	.llseek = default_llseek,
 	.release = i915_error_state_release,
@@ -1013,16 +1138,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		u32 freq_sts, val;
 
 		mutex_lock(&dev_priv->rps.hw_lock);
-		valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS,
-				      &freq_sts);
+		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
-		valleyview_punit_read(dev_priv, PUNIT_FUSE_BUS1, &val);
+		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
 		seq_printf(m, "max GPU freq: %d MHz\n",
 			   vlv_gpu_freq(dev_priv->mem_freq, val));
 
-		valleyview_punit_read(dev_priv, PUNIT_REG_GPU_LFM, &val);
+		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
 		seq_printf(m, "min GPU freq: %d MHz\n",
 			   vlv_gpu_freq(dev_priv->mem_freq, val));
 
@@ -1311,6 +1435,25 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_ULT(dev)) {
+		seq_puts(m, "not supported\n");
+		return 0;
+	}
+
+	if (I915_READ(IPS_CTL) & IPS_ENABLE)
+		seq_puts(m, "enabled\n");
+	else
+		seq_puts(m, "disabled\n");
+
+	return 0;
+}
+
 static int i915_sr_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1663,27 +1806,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
 	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
+		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
 	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_DIV_B));
+		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));
 
 	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
+		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
 	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
+		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
 
 	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
 	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
 
 	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
+		   vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
 	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
+		   vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
 
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
@@ -2099,6 +2242,7 @@ static struct drm_info_list i915_debugfs_list[] = {
2099 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 2242 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
2100 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 2243 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
2101 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 2244 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
2245 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
2102 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 2246 {"i915_rstdby_delays", i915_rstdby_delays, 0},
2103 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 2247 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
2104 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 2248 {"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -2108,6 +2252,7 @@ static struct drm_info_list i915_debugfs_list[] = {
2108 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 2252 {"i915_ring_freq_table", i915_ring_freq_table, 0},
2109 {"i915_gfxec", i915_gfxec, 0}, 2253 {"i915_gfxec", i915_gfxec, 0},
2110 {"i915_fbc_status", i915_fbc_status, 0}, 2254 {"i915_fbc_status", i915_fbc_status, 0},
2255 {"i915_ips_status", i915_ips_status, 0},
2111 {"i915_sr_status", i915_sr_status, 0}, 2256 {"i915_sr_status", i915_sr_status, 0},
2112 {"i915_opregion", i915_opregion, 0}, 2257 {"i915_opregion", i915_opregion, 0},
2113 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 2258 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b76da1470e71..c52d866dfdb0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -955,6 +955,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_BLT:
 		value = intel_ring_initialized(&dev_priv->ring[BCS]);
 		break;
+	case I915_PARAM_HAS_VEBOX:
+		value = intel_ring_initialized(&dev_priv->ring[VECS]);
+		break;
 	case I915_PARAM_HAS_RELAXED_FENCING:
 		value = 1;
 		break;
@@ -1358,8 +1361,10 @@ static int i915_load_modeset_init(struct drm_device *dev)
 cleanup_gem:
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
+	drm_mm_takedown(&dev_priv->mm.gtt_space);
 cleanup_irq:
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1407,7 +1412,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 		return;
 
 	ap->ranges[0].base = dev_priv->gtt.mappable_base;
-	ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
+	ap->ranges[0].size = dev_priv->gtt.mappable_end;
 
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1744,6 +1749,7 @@ int i915_driver_unload(struct drm_device *dev)
 		i915_free_hws(dev);
 	}
 
+	drm_mm_takedown(&dev_priv->mm.gtt_space);
 	if (dev_priv->regs != NULL)
 		pci_iounmap(dev->pdev, dev_priv->regs);
 
@@ -1753,6 +1759,8 @@ int i915_driver_unload(struct drm_device *dev)
 	destroy_workqueue(dev_priv->wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
 
+	dev_priv->gtt.gtt_remove(dev);
+
 	if (dev_priv->slab)
 		kmem_cache_destroy(dev_priv->slab);
 
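With I915_PARAM_HAS_VEBOX wired into i915_getparam() above, userspace can
probe for the video enhancement ring before trying to use it. A sketch of the
probe using the standard libdrm ioctl wrapper (the fd is assumed to be an open
i915 DRM node; error handling trimmed):

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int has_vebox(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_VEBOX;
	gp.value = &value;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* old kernel: no VEBOX */
	return value;
}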
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b7c3b98f7858..59ff7456bd70 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -128,6 +128,10 @@ module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
 MODULE_PARM_DESC(disable_power_well,
 		 "Disable the power well when possible (default: false)");
 
+int i915_enable_ips __read_mostly = 1;
+module_param_named(enable_ips, i915_enable_ips, int, 0600);
+MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -311,6 +315,7 @@ static const struct intel_device_info intel_haswell_d_info = {
 	.is_haswell = 1,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
+	.has_vebox_ring = 1,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -320,6 +325,7 @@ static const struct intel_device_info intel_haswell_m_info = {
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.has_fbc = 1,
+	.has_vebox_ring = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {	/* aka */
@@ -863,37 +869,14 @@ static int gen6_do_reset(struct drm_device *dev)
 
 int intel_gpu_reset(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = -ENODEV;
-
 	switch (INTEL_INFO(dev)->gen) {
 	case 7:
-	case 6:
-		ret = gen6_do_reset(dev);
-		break;
-	case 5:
-		ret = ironlake_do_reset(dev);
-		break;
-	case 4:
-		ret = i965_do_reset(dev);
-		break;
-	case 2:
-		ret = i8xx_do_reset(dev);
-		break;
+	case 6: return gen6_do_reset(dev);
+	case 5: return ironlake_do_reset(dev);
+	case 4: return i965_do_reset(dev);
+	case 2: return i8xx_do_reset(dev);
+	default: return -ENODEV;
 	}
-
-	/* Also reset the gpu hangman. */
-	if (dev_priv->gpu_error.stop_rings) {
-		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-		dev_priv->gpu_error.stop_rings = 0;
-		if (ret == -ENODEV) {
-			DRM_ERROR("Reset not implemented, but ignoring "
-				  "error for simulated gpu hangs\n");
-			ret = 0;
-		}
-	}
-
-	return ret;
 }
 
 /**
@@ -914,6 +897,7 @@ int intel_gpu_reset(struct drm_device *dev)
 int i915_reset(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool simulated;
 	int ret;
 
 	if (!i915_try_reset)
@@ -923,13 +907,26 @@ int i915_reset(struct drm_device *dev)
 
 	i915_gem_reset(dev);
 
-	ret = -ENODEV;
-	if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
+	simulated = dev_priv->gpu_error.stop_rings != 0;
+
+	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-	else
+		ret = -ENODEV;
+	} else {
 		ret = intel_gpu_reset(dev);
 
-	dev_priv->gpu_error.last_reset = get_seconds();
+		/* Also reset the gpu hangman. */
+		if (simulated) {
+			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+			dev_priv->gpu_error.stop_rings = 0;
+			if (ret == -ENODEV) {
+				DRM_ERROR("Reset not implemented, but ignoring "
+					  "error for simulated gpu hangs\n");
+				ret = 0;
+			}
+		} else
+			dev_priv->gpu_error.last_reset = get_seconds();
+	}
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dfb5ade15149..359a2003086b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -315,9 +315,8 @@ struct drm_i915_display_funcs {
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
-				 uint32_t sprite_width, int pixel_size);
-	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
-				   struct drm_display_mode *mode);
+				 uint32_t sprite_width, int pixel_size,
+				 bool enable);
 	void (*modeset_global_resources)(struct drm_device *dev);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
@@ -375,6 +374,7 @@ struct drm_i915_gt_funcs {
 	func(supports_tv) sep \
 	func(has_bsd_ring) sep \
 	func(has_blt_ring) sep \
+	func(has_vebox_ring) sep \
 	func(has_llc) sep \
 	func(has_ddi) sep \
 	func(has_fpga_dbg)
@@ -828,14 +828,21 @@ struct i915_gem_mm {
 	u32 object_count;
 };
 
+struct drm_i915_error_state_buf {
+	unsigned bytes;
+	unsigned size;
+	int err;
+	u8 *buf;
+	loff_t start;
+	loff_t pos;
+};
+
 struct i915_gpu_error {
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
-	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
 	/* For reset and error_state handling. */
 	spinlock_t lock;
@@ -1367,6 +1374,7 @@ struct drm_i915_file_private {
 
 #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
 #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
@@ -1462,6 +1470,7 @@ extern bool i915_enable_hangcheck __read_mostly;
 extern int i915_enable_ppgtt __read_mostly;
 extern unsigned int i915_preliminary_hw_support __read_mostly;
 extern int i915_disable_power_well __read_mostly;
+extern int i915_enable_ips __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1820,6 +1829,8 @@ void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+__printf(2, 3)
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
@@ -1901,10 +1912,11 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
-extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
+					    struct intel_overlay_error_state *error);
 
 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
-extern void intel_display_print_error_state(struct seq_file *m,
+extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct drm_device *dev,
 					    struct intel_display_error_state *error);
 #endif
@@ -1919,9 +1931,17 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
-int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
-int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
-int valleyview_nc_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
+
+/* intel_sideband.c */
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
+void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
+u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+		   enum intel_sbi_destination destination);
+void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+		     enum intel_sbi_destination destination);
 
 int vlv_gpu_freq(int ddr_freq, int val);
 int vlv_freq_opcode(int ddr_freq, int val);
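The sideband declarations above also change calling convention: reads now
return the value instead of filling an out-parameter, which is what lets the
i915_debugfs.c hunks collapse read-then-print pairs into single expressions.
A sketch of the before/after style (hypothetical helper; callers are expected
to hold the relevant lock, e.g. rps.hw_lock for Punit access):

#include "i915_drv.h"

static u32 punit_read_then_write(struct drm_i915_private *dev_priv)
{
	u32 val;

	/* old style, removed by this patch:
	 * valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &val);
	 */
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_STS, val);

	return val;
}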
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2b51fa7e3477..c605097bf598 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2693,18 +2693,33 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
 	return fence - dev_priv->fence_regs;
 }
 
+struct write_fence {
+	struct drm_device *dev;
+	struct drm_i915_gem_object *obj;
+	int fence;
+};
+
 static void i915_gem_write_fence__ipi(void *data)
 {
+	struct write_fence *args = data;
+
+	/* Required for SNB+ with LLC */
 	wbinvd();
+
+	/* Required for VLV */
+	i915_gem_write_fence(args->dev, args->fence, args->obj);
 }
 
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fence_reg = fence_number(dev_priv, fence);
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct write_fence args = {
+		.dev = obj->base.dev,
+		.fence = fence_number(dev_priv, fence),
+		.obj = enable ? obj : NULL,
+	};
 
 	/* In order to fully serialize access to the fenced region and
 	 * the update to the fence register we need to take extreme
@@ -2715,13 +2730,19 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 	 * SNB+ we need to take a step further and emit an explicit wbinvd()
 	 * on each processor in order to manually flush all memory
 	 * transactions before updating the fence register.
+	 *
+	 * However, Valleyview complicates matter. There the wbinvd is
+	 * insufficient and unlike SNB/IVB requires the serialising
+	 * register write. (Note that that register write by itself is
+	 * conversely not sufficient for SNB+.) To compromise, we do both.
 	 */
-	if (HAS_LLC(obj->base.dev))
-		on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
-	i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
+	if (INTEL_INFO(args.dev)->gen >= 6)
+		on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
+	else
+		i915_gem_write_fence(args.dev, args.fence, args.obj);
 
 	if (enable) {
-		obj->fence_reg = fence_reg;
+		obj->fence_reg = args.fence;
 		fence->obj = obj;
 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
 	} else {
@@ -2947,6 +2968,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	struct drm_mm_node *node;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
+	size_t gtt_max = map_and_fenceable ?
+		dev_priv->gtt.mappable_end : dev_priv->gtt.total;
 	int ret;
 
 	fence_size = i915_gem_get_gtt_size(dev,
@@ -2973,9 +2996,11 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size >
-	    (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
-		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+	if (obj->base.size > gtt_max) {
+		DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%ld\n",
+			  obj->base.size,
+			  map_and_fenceable ? "mappable" : "total",
+			  gtt_max);
 		return -E2BIG;
 	}
 
@@ -2991,14 +3016,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		return -ENOMEM;
 	}
 
- search_free:
-	if (map_and_fenceable)
-		ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
-							  size, alignment, obj->cache_level,
-							  0, dev_priv->gtt.mappable_end);
-	else
-		ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
-						 size, alignment, obj->cache_level);
+search_free:
+	ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+						  size, alignment,
+						  obj->cache_level, 0, gtt_max);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
@@ -3992,12 +4013,21 @@ static int i915_gem_init_rings(struct drm_device *dev)
 			goto cleanup_bsd_ring;
 	}
 
+	if (HAS_VEBOX(dev)) {
+		ret = intel_init_vebox_ring_buffer(dev);
+		if (ret)
+			goto cleanup_blt_ring;
+	}
+
+
 	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
 	if (ret)
-		goto cleanup_blt_ring;
+		goto cleanup_vebox_ring;
 
 	return 0;
 
+cleanup_vebox_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
 	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
 cleanup_bsd_ring:
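The write_fence bundle in the i915_gem.c hunks above exists because
on_each_cpu() hands the IPI callback a single void *info pointer; packing
dev/fence/obj into one struct is what lets the VLV-required fence register
write run in the same cross-CPU function as the wbinvd. A minimal sketch of
that kernel pattern (illustrative names, not part of the patch):

#include <linux/smp.h>
#include <linux/types.h>

struct ipi_args {
	int reg;
	u32 val;
};

static void do_on_each_cpu(void *info)
{
	struct ipi_args *args = info;

	/* per-CPU work using args->reg / args->val goes here */
}

static void run_everywhere(int reg, u32 val)
{
	struct ipi_args args = { .reg = reg, .val = val };

	/* wait == 1: returns only after every CPU ran the callback */
	on_each_cpu(do_on_each_cpu, &args, 1);
}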
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 64cb1909a0ce..39bcc087db96 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -156,7 +156,8 @@ create_hw_context(struct drm_device *dev,
 	if (INTEL_INFO(dev)->gen >= 7) {
 		ret = i915_gem_object_set_cache_level(ctx->obj,
 						      I915_CACHE_LLC_MLC);
-		if (ret)
+		/* Failure shouldn't ever happen this early */
+		if (WARN_ON(ret))
 			goto err_out;
 	}
 
@@ -214,12 +215,16 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 	 */
 	dev_priv->ring[RCS].default_context = ctx;
 	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
-	if (ret)
+	if (ret) {
+		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 		goto err_destroy;
+	}
 
 	ret = do_switch(ctx);
-	if (ret)
+	if (ret) {
+		DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
 		goto err_unpin;
+	}
 
 	DRM_DEBUG_DRIVER("Default HW context loaded\n");
 	return 0;
@@ -237,6 +242,7 @@ void i915_gem_context_init(struct drm_device *dev)
 
 	if (!HAS_HW_CONTEXTS(dev)) {
 		dev_priv->hw_contexts_disabled = true;
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
 		return;
 	}
 
@@ -249,11 +255,13 @@ void i915_gem_context_init(struct drm_device *dev)
 
 	if (dev_priv->hw_context_size > (1<<20)) {
 		dev_priv->hw_contexts_disabled = true;
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
 		return;
 	}
 
 	if (create_default_context(dev_priv)) {
 		dev_priv->hw_contexts_disabled = true;
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
 		return;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 117ce3813681..a8bb62ca8756 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -885,6 +885,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			return -EPERM;
 		}
 		break;
+	case I915_EXEC_VEBOX:
+		ring = &dev_priv->ring[VECS];
+		if (ctx_id != 0) {
+			DRM_DEBUG("Ring %s doesn't support contexts\n",
+				  ring->name);
+			return -EPERM;
+		}
+		break;
+
 	default:
 		DRM_DEBUG("execbuf with unknown ring: %d\n",
 			  (int)(args->flags & I915_EXEC_RING_MASK));
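Userspace selects the new ring by putting I915_EXEC_VEBOX into the
execbuffer2 flags; as the check above shows, the kernel rejects any non-zero
context id on this ring. A sketch of a submission (buffer setup, relocations
and the batch object itself are elided; only the ring select and the
default-context requirement are the point here):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int exec_on_vebox(int fd, struct drm_i915_gem_exec_object2 *objs,
			 int nobjs, uint32_t batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = nobjs;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_VEBOX;	/* ring select */
	/* execbuf.rsvd1 (context id) stays 0: no contexts on VEBOX */

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}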
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 879c4ccb00db..e17bbe201195 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -381,14 +381,16 @@ static int
 i915_pipe_enabled(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
 
-	if (!intel_display_power_enabled(dev,
-				POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
-		return false;
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Locking is horribly broken here, but whatever. */
+		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
+		return intel_crtc->active;
+	} else {
+		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+	}
 }
 
 /* Called from drm generic code, passed a 'crtc', which
@@ -698,10 +700,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	pm_iir = dev_priv->rps.pm_iir;
 	dev_priv->rps.pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
-	I915_WRITE(GEN6_PMIMR, 0);
+	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
+	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
 	spin_unlock_irq(&dev_priv->rps.lock);
 
-	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
+	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
 		return;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
@@ -777,7 +780,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -809,7 +812,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
 		return;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -821,25 +824,26 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 			       u32 gt_iir)
 {
 
-	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
-		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+	if (gt_iir &
+	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
 		notify_ring(dev, &dev_priv->ring[RCS]);
-	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+	if (gt_iir & GT_BSD_USER_INTERRUPT)
 		notify_ring(dev, &dev_priv->ring[VCS]);
-	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+	if (gt_iir & GT_BLT_USER_INTERRUPT)
 		notify_ring(dev, &dev_priv->ring[BCS]);
 
-	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
-		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
-		      GT_RENDER_CS_ERROR_INTERRUPT)) {
+	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+		      GT_BSD_CS_ERROR_INTERRUPT |
+		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
 		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
 		i915_handle_error(dev, false);
 	}
 
-	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
+	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
 		ivybridge_handle_parity_error(dev);
 }
 
+/* Legacy way of handling PM interrupts */
 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 				u32 pm_iir)
 {
@@ -919,6 +923,38 @@ static void dp_aux_irq_handler(struct drm_device *dev)
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+ * we must be able to deal with other PM interrupts. This is complicated because
+ * of the way in which we use the masks to defer the RPS work (which for
+ * posterity is necessary because of forcewake).
+ */
+static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
+			       u32 pm_iir)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
+	if (dev_priv->rps.pm_iir) {
+		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+		/* never want to mask useful interrupts. (also posting read) */
+		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+		/* TODO: if queue_work is slow, move it out of the spinlock */
+		queue_work(dev_priv->wq, &dev_priv->rps.work);
+	}
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
+		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+
+		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
+			i915_handle_error(dev_priv->dev, false);
+		}
+	}
+}
+
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -990,7 +1026,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
 			gmbus_irq_handler(dev);
 
-		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+		if (pm_iir & GEN6_PM_RPS_EVENTS)
 			gen6_queue_rps_work(dev_priv, pm_iir);
 
 		I915_WRITE(GTIIR, gt_iir);
@@ -1229,7 +1265,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
 	pm_iir = I915_READ(GEN6_PMIIR);
 	if (pm_iir) {
-		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+		if (IS_HASWELL(dev))
+			hsw_pm_irq_handler(dev_priv, pm_iir);
+		else if (pm_iir & GEN6_PM_RPS_EVENTS)
 			gen6_queue_rps_work(dev_priv, pm_iir);
 		I915_WRITE(GEN6_PMIIR, pm_iir);
 		ret = IRQ_HANDLED;
@@ -1252,9 +1290,10 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
1252 struct drm_i915_private *dev_priv, 1290 struct drm_i915_private *dev_priv,
1253 u32 gt_iir) 1291 u32 gt_iir)
1254{ 1292{
1255 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 1293 if (gt_iir &
1294 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1256 notify_ring(dev, &dev_priv->ring[RCS]); 1295 notify_ring(dev, &dev_priv->ring[RCS]);
1257 if (gt_iir & GT_BSD_USER_INTERRUPT) 1296 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1258 notify_ring(dev, &dev_priv->ring[VCS]); 1297 notify_ring(dev, &dev_priv->ring[VCS]);
1259} 1298}
1260 1299
@@ -1344,7 +1383,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1344 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1383 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1345 ironlake_handle_rps_change(dev); 1384 ironlake_handle_rps_change(dev);
1346 1385
1347 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 1386 if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
1348 gen6_queue_rps_work(dev_priv, pm_iir); 1387 gen6_queue_rps_work(dev_priv, pm_iir);
1349 1388
1350 I915_WRITE(GTIIR, gt_iir); 1389 I915_WRITE(GTIIR, gt_iir);
@@ -1564,11 +1603,13 @@ i915_error_state_free(struct kref *error_ref)
1564 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 1603 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1565 i915_error_object_free(error->ring[i].batchbuffer); 1604 i915_error_object_free(error->ring[i].batchbuffer);
1566 i915_error_object_free(error->ring[i].ringbuffer); 1605 i915_error_object_free(error->ring[i].ringbuffer);
1606 i915_error_object_free(error->ring[i].ctx);
1567 kfree(error->ring[i].requests); 1607 kfree(error->ring[i].requests);
1568 } 1608 }
1569 1609
1570 kfree(error->active_bo); 1610 kfree(error->active_bo);
1571 kfree(error->overlay); 1611 kfree(error->overlay);
1612 kfree(error->display);
1572 kfree(error); 1613 kfree(error);
1573} 1614}
1574static void capture_bo(struct drm_i915_error_buffer *err, 1615static void capture_bo(struct drm_i915_error_buffer *err,
@@ -2274,11 +2315,11 @@ ring_last_seqno(struct intel_ring_buffer *ring)
2274 struct drm_i915_gem_request, list)->seqno; 2315 struct drm_i915_gem_request, list)->seqno;
2275} 2316}
2276 2317
2277static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 2318static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring,
2319 u32 ring_seqno, bool *err)
2278{ 2320{
2279 if (list_empty(&ring->request_list) || 2321 if (list_empty(&ring->request_list) ||
2280 i915_seqno_passed(ring->get_seqno(ring, false), 2322 i915_seqno_passed(ring_seqno, ring_last_seqno(ring))) {
2281 ring_last_seqno(ring))) {
2282 /* Issue a wake-up to catch stuck h/w. */ 2323 /* Issue a wake-up to catch stuck h/w. */
2283 if (waitqueue_active(&ring->irq_queue)) { 2324 if (waitqueue_active(&ring->irq_queue)) {
2284 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2325 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -2345,28 +2386,33 @@ static bool kick_ring(struct intel_ring_buffer *ring)
2345 return false; 2386 return false;
2346} 2387}
2347 2388
2389static bool i915_hangcheck_ring_hung(struct intel_ring_buffer *ring)
2390{
2391 if (IS_GEN2(ring->dev))
2392 return false;
2393
2394 /* Is the chip hanging on a WAIT_FOR_EVENT?
2395 * If so we can simply poke the RB_WAIT bit
2396 * and break the hang. This should work on
2397 * all but the second generation chipsets.
2398 */
2399 return !kick_ring(ring);
2400}
2401
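For reference, the poke that kick_ring() performs amounts to the following, condensed from the existing helper in this file (per-chipset details omitted):

	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n", ring->name);
		I915_WRITE_CTL(ring, tmp);	/* rewriting CTL clears RB_WAIT */
		return true;			/* the ring was kicked loose */
	}
	return false;				/* genuinely hung */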
2348static bool i915_hangcheck_hung(struct drm_device *dev) 2402static bool i915_hangcheck_hung(struct drm_device *dev)
2349{ 2403{
2350 drm_i915_private_t *dev_priv = dev->dev_private; 2404 drm_i915_private_t *dev_priv = dev->dev_private;
2351 2405
2352 if (dev_priv->gpu_error.hangcheck_count++ > 1) { 2406 if (dev_priv->gpu_error.hangcheck_count++ > 1) {
2353 bool hung = true; 2407 bool hung = true;
2408 struct intel_ring_buffer *ring;
2409 int i;
2354 2410
2355 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 2411 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
2356 i915_handle_error(dev, true); 2412 i915_handle_error(dev, true);
2357 2413
2358 if (!IS_GEN2(dev)) { 2414 for_each_ring(ring, dev_priv, i)
2359 struct intel_ring_buffer *ring; 2415 hung &= i915_hangcheck_ring_hung(ring);
2360 int i;
2361
2362 /* Is the chip hanging on a WAIT_FOR_EVENT?
2363 * If so we can simply poke the RB_WAIT bit
2364 * and break the hang. This should work on
2365 * all but the second generation chipsets.
2366 */
2367 for_each_ring(ring, dev_priv, i)
2368 hung &= !kick_ring(ring);
2369 }
2370 2416
2371 return hung; 2417 return hung;
2372 } 2418 }
@@ -2384,19 +2430,19 @@ void i915_hangcheck_elapsed(unsigned long data)
2384{ 2430{
2385 struct drm_device *dev = (struct drm_device *)data; 2431 struct drm_device *dev = (struct drm_device *)data;
2386 drm_i915_private_t *dev_priv = dev->dev_private; 2432 drm_i915_private_t *dev_priv = dev->dev_private;
2387 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
2388 struct intel_ring_buffer *ring; 2433 struct intel_ring_buffer *ring;
2389 bool err = false, idle; 2434 bool err = false, idle;
2390 int i; 2435 int i;
2436 u32 seqno[I915_NUM_RINGS];
2437 bool work_done;
2391 2438
2392 if (!i915_enable_hangcheck) 2439 if (!i915_enable_hangcheck)
2393 return; 2440 return;
2394 2441
2395 memset(acthd, 0, sizeof(acthd));
2396 idle = true; 2442 idle = true;
2397 for_each_ring(ring, dev_priv, i) { 2443 for_each_ring(ring, dev_priv, i) {
2398 idle &= i915_hangcheck_ring_idle(ring, &err); 2444 seqno[i] = ring->get_seqno(ring, false);
2399 acthd[i] = intel_ring_get_active_head(ring); 2445 idle &= i915_hangcheck_ring_idle(ring, seqno[i], &err);
2400 } 2446 }
2401 2447
2402 /* If all work is done then ACTHD clearly hasn't advanced. */ 2448 /* If all work is done then ACTHD clearly hasn't advanced. */
@@ -2412,20 +2458,19 @@ void i915_hangcheck_elapsed(unsigned long data)
2412 return; 2458 return;
2413 } 2459 }
2414 2460
2415 i915_get_extra_instdone(dev, instdone); 2461 work_done = false;
2416 if (memcmp(dev_priv->gpu_error.last_acthd, acthd, 2462 for_each_ring(ring, dev_priv, i) {
2417 sizeof(acthd)) == 0 && 2463 if (ring->hangcheck.seqno != seqno[i]) {
2418 memcmp(dev_priv->gpu_error.prev_instdone, instdone, 2464 work_done = true;
2419 sizeof(instdone)) == 0) { 2465 ring->hangcheck.seqno = seqno[i];
2466 }
2467 }
2468
2469 if (!work_done) {
2420 if (i915_hangcheck_hung(dev)) 2470 if (i915_hangcheck_hung(dev))
2421 return; 2471 return;
2422 } else { 2472 } else {
2423 dev_priv->gpu_error.hangcheck_count = 0; 2473 dev_priv->gpu_error.hangcheck_count = 0;
2424
2425 memcpy(dev_priv->gpu_error.last_acthd, acthd,
2426 sizeof(acthd));
2427 memcpy(dev_priv->gpu_error.prev_instdone, instdone,
2428 sizeof(instdone));
2429 } 2474 }
2430 2475
2431repeat: 2476repeat:
@@ -2455,6 +2500,42 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
2455 I915_WRITE(GTIER, 0x0); 2500 I915_WRITE(GTIER, 0x0);
2456 POSTING_READ(GTIER); 2501 POSTING_READ(GTIER);
2457 2502
2503 /* south display irq */
2504 I915_WRITE(SDEIMR, 0xffffffff);
2505 /*
2506 * SDEIER is also touched by the interrupt handler to work around missed
2507 * PCH interrupts. Hence we can't update it after the interrupt handler
2508 * is enabled - instead we unconditionally enable all PCH interrupt
2509 * sources here, but then only unmask them as needed with SDEIMR.
2510 */
2511 I915_WRITE(SDEIER, 0xffffffff);
2512 POSTING_READ(SDEIER);
2513}
2514
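The workaround referred to above lives in the interrupt handler itself; condensed, and with the actual IIR processing elided, it looks roughly like this (see ironlake_irq_handler() for the real sequence):

	/* Disable south interrupts while we service DEIIR, so a PCH event
	 * arriving mid-handler is held back and re-asserts the CPU
	 * interrupt once SDEIER is restored, instead of being lost. */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	/* ... read and handle DEIIR / GTIIR / SDEIIR ... */

	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);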
2515static void ivybridge_irq_preinstall(struct drm_device *dev)
2516{
2517 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2518
2519 atomic_set(&dev_priv->irq_received, 0);
2520
2521 I915_WRITE(HWSTAM, 0xeffe);
2522
2523 /* XXX hotplug from PCH */
2524
2525 I915_WRITE(DEIMR, 0xffffffff);
2526 I915_WRITE(DEIER, 0x0);
2527 POSTING_READ(DEIER);
2528
2529 /* and GT */
2530 I915_WRITE(GTIMR, 0xffffffff);
2531 I915_WRITE(GTIER, 0x0);
2532 POSTING_READ(GTIER);
2533
2534 /* Power management */
2535 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2536 I915_WRITE(GEN6_PMIER, 0x0);
2537 POSTING_READ(GEN6_PMIER);
2538
2458 if (HAS_PCH_NOP(dev)) 2539 if (HAS_PCH_NOP(dev))
2459 return; 2540 return;
2460 2541
@@ -2543,6 +2624,9 @@ static void ibx_irq_postinstall(struct drm_device *dev)
2543 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2544 u32 mask; 2625 u32 mask;
2545 2626
2627 if (HAS_PCH_NOP(dev))
2628 return;
2629
2546 if (HAS_PCH_IBX(dev)) { 2630 if (HAS_PCH_IBX(dev)) {
2547 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2631 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2548 SDE_TRANSA_FIFO_UNDER | SDE_POISON; 2632 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
@@ -2552,9 +2636,6 @@ static void ibx_irq_postinstall(struct drm_device *dev)
2552 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2636 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2553 } 2637 }
2554 2638
2555 if (HAS_PCH_NOP(dev))
2556 return;
2557
2558 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2639 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2559 I915_WRITE(SDEIMR, ~mask); 2640 I915_WRITE(SDEIMR, ~mask);
2560} 2641}
@@ -2567,7 +2648,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2567 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2648 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2568 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 2649 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2569 DE_PIPEA_FIFO_UNDERRUN | DE_POISON; 2650 DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
2570 u32 render_irqs; 2651 u32 gt_irqs;
2571 2652
2572 dev_priv->irq_mask = ~display_mask; 2653 dev_priv->irq_mask = ~display_mask;
2573 2654
@@ -2582,17 +2663,15 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2582 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2663 I915_WRITE(GTIIR, I915_READ(GTIIR));
2583 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2664 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2584 2665
2666 gt_irqs = GT_RENDER_USER_INTERRUPT;
2667
2585 if (IS_GEN6(dev)) 2668 if (IS_GEN6(dev))
2586 render_irqs = 2669 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2587 GT_USER_INTERRUPT |
2588 GEN6_BSD_USER_INTERRUPT |
2589 GEN6_BLITTER_USER_INTERRUPT;
2590 else 2670 else
2591 render_irqs = 2671 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2592 GT_USER_INTERRUPT | 2672 ILK_BSD_USER_INTERRUPT;
2593 GT_PIPE_NOTIFY | 2673
2594 GT_BSD_USER_INTERRUPT; 2674 I915_WRITE(GTIER, gt_irqs);
2595 I915_WRITE(GTIER, render_irqs);
2596 POSTING_READ(GTIER); 2675 POSTING_READ(GTIER);
2597 2676
2598 ibx_irq_postinstall(dev); 2677 ibx_irq_postinstall(dev);
@@ -2618,7 +2697,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
2618 DE_PLANEA_FLIP_DONE_IVB | 2697 DE_PLANEA_FLIP_DONE_IVB |
2619 DE_AUX_CHANNEL_A_IVB | 2698 DE_AUX_CHANNEL_A_IVB |
2620 DE_ERR_INT_IVB; 2699 DE_ERR_INT_IVB;
2621 u32 render_irqs; 2700 u32 pm_irqs = GEN6_PM_RPS_EVENTS;
2701 u32 gt_irqs;
2622 2702
2623 dev_priv->irq_mask = ~display_mask; 2703 dev_priv->irq_mask = ~display_mask;
2624 2704
@@ -2633,16 +2713,32 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
2633 DE_PIPEA_VBLANK_IVB); 2713 DE_PIPEA_VBLANK_IVB);
2634 POSTING_READ(DEIER); 2714 POSTING_READ(DEIER);
2635 2715
2636 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 2716 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2637 2717
2638 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2718 I915_WRITE(GTIIR, I915_READ(GTIIR));
2639 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2719 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2640 2720
2641 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2721 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2642 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 2722 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2643 I915_WRITE(GTIER, render_irqs); 2723 I915_WRITE(GTIER, gt_irqs);
2644 POSTING_READ(GTIER); 2724 POSTING_READ(GTIER);
2645 2725
2726 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2727 if (HAS_VEBOX(dev))
2728 pm_irqs |= PM_VEBOX_USER_INTERRUPT |
2729 PM_VEBOX_CS_ERROR_INTERRUPT;
2730
2731 /* Our enable/disable rps functions may touch these registers so
2732 * make sure to set a known state for only the non-RPS bits.
2733 * The RMW is extra paranoia since this should be called after being set
2734 * to a known state in preinstall.
 2735 */
2736 I915_WRITE(GEN6_PMIMR,
2737 (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2738 I915_WRITE(GEN6_PMIER,
2739 (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2740 POSTING_READ(GEN6_PMIER);
2741
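To make the read-modify-write concrete: pm_irqs always contains GEN6_PM_RPS_EVENTS, so for any given bit the two expressions collapse to fixed values:

	PMIMR, bit in pm_irqs:     (old | ~RPS_EVENTS) & ~pm_irqs = 0  (unmasked)
	PMIMR, bit not in pm_irqs: (old | 1) & 1 = 1                   (masked)
	PMIER, bit in pm_irqs:     (old & RPS_EVENTS) | 1 = 1          (enabled)
	PMIER, bit not in pm_irqs: (old & RPS_EVENTS) | 0 = 0          (disabled)

In other words the writes amount to PMIMR = ~pm_irqs and PMIER = pm_irqs regardless of the old register contents, which is why the comment calls the RMW extra paranoia.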
2646 ibx_irq_postinstall(dev); 2742 ibx_irq_postinstall(dev);
2647 2743
2648 return 0; 2744 return 0;
@@ -2651,9 +2747,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
2651static int valleyview_irq_postinstall(struct drm_device *dev) 2747static int valleyview_irq_postinstall(struct drm_device *dev)
2652{ 2748{
2653 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2749 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2750 u32 gt_irqs;
2654 u32 enable_mask; 2751 u32 enable_mask;
2655 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2752 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2656 u32 render_irqs;
2657 2753
2658 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2754 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2659 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2755 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2689,9 +2785,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2689 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2785 I915_WRITE(GTIIR, I915_READ(GTIIR));
2690 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2786 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2691 2787
2692 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2788 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2693 GEN6_BLITTER_USER_INTERRUPT; 2789 GT_BLT_USER_INTERRUPT;
2694 I915_WRITE(GTIER, render_irqs); 2790 I915_WRITE(GTIER, gt_irqs);
2695 POSTING_READ(GTIER); 2791 POSTING_READ(GTIER);
2696 2792
2697 /* ack & enable invalid PTE error interrupts */ 2793 /* ack & enable invalid PTE error interrupts */
@@ -3458,9 +3554,9 @@ void intel_irq_init(struct drm_device *dev)
3458 dev->driver->disable_vblank = valleyview_disable_vblank; 3554 dev->driver->disable_vblank = valleyview_disable_vblank;
3459 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3555 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3460 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 3556 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3461 /* Share pre & uninstall handlers with ILK/SNB */ 3557 /* Share uninstall handlers with ILK/SNB */
3462 dev->driver->irq_handler = ivybridge_irq_handler; 3558 dev->driver->irq_handler = ivybridge_irq_handler;
3463 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3559 dev->driver->irq_preinstall = ivybridge_irq_preinstall;
3464 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 3560 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3465 dev->driver->irq_uninstall = ironlake_irq_uninstall; 3561 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3466 dev->driver->enable_vblank = ivybridge_enable_vblank; 3562 dev->driver->enable_vblank = ivybridge_enable_vblank;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e4cf382f0b75..5a593d20036c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -265,13 +265,19 @@
265#define MI_SEMAPHORE_UPDATE (1<<21) 265#define MI_SEMAPHORE_UPDATE (1<<21)
266#define MI_SEMAPHORE_COMPARE (1<<20) 266#define MI_SEMAPHORE_COMPARE (1<<20)
267#define MI_SEMAPHORE_REGISTER (1<<18) 267#define MI_SEMAPHORE_REGISTER (1<<18)
268#define MI_SEMAPHORE_SYNC_RV (2<<16) 268#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
269#define MI_SEMAPHORE_SYNC_RB (0<<16) 269#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
270#define MI_SEMAPHORE_SYNC_VR (0<<16) 270#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
271#define MI_SEMAPHORE_SYNC_VB (2<<16) 271#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
272#define MI_SEMAPHORE_SYNC_BR (2<<16) 272#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
273#define MI_SEMAPHORE_SYNC_BV (0<<16) 273#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
274#define MI_SEMAPHORE_SYNC_INVALID (1<<0) 274#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
275#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
276#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
277#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
278#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
279#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
280#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
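To show how a waiter consumes these selects, a condensed sketch of the gen6_ring_sync()-style MI_SEMAPHORE_MBOX wait from intel_ringbuffer.c (illustrative; the intel_ring_begin()/intel_ring_advance() bookkeeping is elided):

	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* RCS waiting on VCS: the VR select points the render ring at the
	 * mailbox VCS signals into (RVSYNC). The hardware comparison is
	 * strictly greater than, hence seqno - 1. */
	intel_ring_emit(waiter, dw1 | MI_SEMAPHORE_SYNC_VR);
	intel_ring_emit(waiter, seqno - 1);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);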
275/* 281/*
276 * 3D instructions used by the kernel 282 * 3D instructions used by the kernel
277 */ 283 */
@@ -342,27 +348,54 @@
342#define DEBUG_RESET_DISPLAY (1<<9) 348#define DEBUG_RESET_DISPLAY (1<<9)
343 349
344/* 350/*
345 * DPIO - a special bus for various display related registers to hide behind: 351 * IOSF sideband
346 * 0x800c: m1, m2, n, p1, p2, k dividers 352 */
347 * 0x8014: REF and SFR select 353#define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100)
348 * 0x8014: N divider, VCO select 354#define IOSF_DEVFN_SHIFT 24
349 * 0x801c/3c: core clock bits 355#define IOSF_OPCODE_SHIFT 16
350 * 0x8048/68: low pass filter coefficients 356#define IOSF_PORT_SHIFT 8
351 * 0x8100: fast clock controls 357#define IOSF_BYTE_ENABLES_SHIFT 4
358#define IOSF_BAR_SHIFT 1
359#define IOSF_SB_BUSY (1<<0)
360#define IOSF_PORT_PUNIT 0x4
361#define IOSF_PORT_NC 0x11
362#define IOSF_PORT_DPIO 0x12
363#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
364#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
365
366#define PUNIT_OPCODE_REG_READ 6
367#define PUNIT_OPCODE_REG_WRITE 7
368
369#define PUNIT_REG_GPU_LFM 0xd3
370#define PUNIT_REG_GPU_FREQ_REQ 0xd4
371#define PUNIT_REG_GPU_FREQ_STS 0xd8
372#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
373
374#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
375#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
376
377#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
378#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
379#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
380#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11
381#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800
382#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34
383#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007
384#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30
385#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
386#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
387
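These fields pack into a single doorbell command dword; a sketch of the access sequence the new intel_sideband.c helpers are built around (devfn, opcode, port and the busy-wait timeout are stand-in values here; error handling and the write path, which loads VLV_IOSF_DATA first, are elided):

	u32 cmd = (devfn << IOSF_DEVFN_SHIFT) |		/* originating device */
		  (opcode << IOSF_OPCODE_SHIFT) |	/* e.g. PUNIT_OPCODE_REG_READ */
		  (port << IOSF_PORT_SHIFT) |		/* e.g. IOSF_PORT_PUNIT */
		  (0xf << IOSF_BYTE_ENABLES_SHIFT) |	/* all four bytes */
		  (0 << IOSF_BAR_SHIFT);

	I915_WRITE(VLV_IOSF_ADDR, addr);
	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5))
		DRM_ERROR("IOSF sideband access timed out\n");
	else
		val = I915_READ(VLV_IOSF_DATA);	/* for a read opcode */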
388/*
389 * DPIO - a special bus for various display related registers to hide behind
352 * 390 *
353 * DPIO is VLV only. 391 * DPIO is VLV only.
354 * 392 *
 355 * Note: digital port B is DDI0, digital port C is DDI1 393 * Note: digital port B is DDI0, digital port C is DDI1
356 */ 394 */
357#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100) 395#define DPIO_DEVFN 0
358#define DPIO_RID (0<<24) 396#define DPIO_OPCODE_REG_WRITE 1
359#define DPIO_OP_WRITE (1<<16) 397#define DPIO_OPCODE_REG_READ 0
360#define DPIO_OP_READ (0<<16) 398
361#define DPIO_PORTID (0x12<<8)
362#define DPIO_BYTE (0xf<<4)
363#define DPIO_BUSY (1<<0) /* status only */
364#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104)
365#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108)
366#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110) 399#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
367#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ 400#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
368#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ 401#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
@@ -554,6 +587,7 @@
554#define RENDER_RING_BASE 0x02000 587#define RENDER_RING_BASE 0x02000
555#define BSD_RING_BASE 0x04000 588#define BSD_RING_BASE 0x04000
556#define GEN6_BSD_RING_BASE 0x12000 589#define GEN6_BSD_RING_BASE 0x12000
590#define VEBOX_RING_BASE 0x1a000
557#define BLT_RING_BASE 0x22000 591#define BLT_RING_BASE 0x22000
558#define RING_TAIL(base) ((base)+0x30) 592#define RING_TAIL(base) ((base)+0x30)
559#define RING_HEAD(base) ((base)+0x34) 593#define RING_HEAD(base) ((base)+0x34)
@@ -561,12 +595,20 @@
561#define RING_CTL(base) ((base)+0x3c) 595#define RING_CTL(base) ((base)+0x3c)
562#define RING_SYNC_0(base) ((base)+0x40) 596#define RING_SYNC_0(base) ((base)+0x40)
563#define RING_SYNC_1(base) ((base)+0x44) 597#define RING_SYNC_1(base) ((base)+0x44)
564#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) 598#define RING_SYNC_2(base) ((base)+0x48)
565#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) 599#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
566#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) 600#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
567#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE)) 601#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
568#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) 602#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
569#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) 603#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
604#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE))
605#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
606#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
607#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE))
608#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE))
609#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
610#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
611#define GEN6_NOSYNC 0
570#define RING_MAX_IDLE(base) ((base)+0x54) 612#define RING_MAX_IDLE(base) ((base)+0x54)
571#define RING_HWS_PGA(base) ((base)+0x80) 613#define RING_HWS_PGA(base) ((base)+0x80)
572#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 614#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -578,6 +620,7 @@
578#define DONE_REG 0x40b0 620#define DONE_REG 0x40b0
579#define BSD_HWS_PGA_GEN7 (0x04180) 621#define BSD_HWS_PGA_GEN7 (0x04180)
580#define BLT_HWS_PGA_GEN7 (0x04280) 622#define BLT_HWS_PGA_GEN7 (0x04280)
623#define VEBOX_HWS_PGA_GEN7 (0x04380)
581#define RING_ACTHD(base) ((base)+0x74) 624#define RING_ACTHD(base) ((base)+0x74)
582#define RING_NOPID(base) ((base)+0x94) 625#define RING_NOPID(base) ((base)+0x94)
583#define RING_IMR(base) ((base)+0xa8) 626#define RING_IMR(base) ((base)+0xa8)
@@ -699,24 +742,6 @@
699#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) 742#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
700#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) 743#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
701#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120) 744#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
702#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
703#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
704#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
705#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
706#define I915_HWB_OOM_INTERRUPT (1<<13)
707#define I915_SYNC_STATUS_INTERRUPT (1<<12)
708#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
709#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
710#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
711#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
712#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
713#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
714#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
715#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
716#define I915_DEBUG_INTERRUPT (1<<2)
717#define I915_USER_INTERRUPT (1<<1)
718#define I915_ASLE_INTERRUPT (1<<0)
719#define I915_BSD_USER_INTERRUPT (1<<25)
720#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ 745#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
721#define EIR 0x020b0 746#define EIR 0x020b0
722#define EMR 0x020b4 747#define EMR 0x020b4
@@ -828,28 +853,6 @@
828#define CACHE_MODE_1 0x7004 /* IVB+ */ 853#define CACHE_MODE_1 0x7004 /* IVB+ */
829#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) 854#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
830 855
831/* GEN6 interrupt control
832 * Note that the per-ring interrupt bits do alias with the global interrupt bits
833 * in GTIMR. */
834#define GEN6_RENDER_HWSTAM 0x2098
835#define GEN6_RENDER_IMR 0x20a8
836#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
837#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
838#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
839#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
840#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
841#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
842#define GEN6_RENDER_SYNC_STATUS (1 << 2)
843#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1)
844#define GEN6_RENDER_USER_INTERRUPT (1 << 0)
845
846#define GEN6_BLITTER_HWSTAM 0x22098
847#define GEN6_BLITTER_IMR 0x220a8
848#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26)
849#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
850#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
851#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
852
853#define GEN6_BLITTER_ECOSKPD 0x221d0 856#define GEN6_BLITTER_ECOSKPD 0x221d0
854#define GEN6_BLITTER_LOCK_SHIFT 16 857#define GEN6_BLITTER_LOCK_SHIFT 16
855#define GEN6_BLITTER_FBC_NOTIFY (1<<3) 858#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
@@ -860,9 +863,52 @@
860#define GEN6_BSD_SLEEP_INDICATOR (1 << 3) 863#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
861#define GEN6_BSD_GO_INDICATOR (1 << 4) 864#define GEN6_BSD_GO_INDICATOR (1 << 4)
862 865
863#define GEN6_BSD_HWSTAM 0x12098 866/* On modern GEN architectures interrupt control consists of two sets
864#define GEN6_BSD_IMR 0x120a8 867 * of registers. The first set pertains to the ring generating the
 865#define GEN6_BSD_USER_INTERRUPT (1 << 12) 868 * interrupt. The second set is for the functional block generating the
869 * interrupt. These are PM, GT, DE, etc.
870 *
871 * Luckily *knocks on wood* all the ring interrupt bits match up with the
872 * GT interrupt bits, so we don't need to duplicate the defines.
873 *
 874 * These defines should cover us well from SNB->HSW; with minor
 875 * exceptions they can also work on ILK.
876 */
877#define GT_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
878#define GT_BLT_CS_ERROR_INTERRUPT (1 << 25)
879#define GT_BLT_USER_INTERRUPT (1 << 22)
880#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
881#define GT_BSD_USER_INTERRUPT (1 << 12)
882#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
883#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
884#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
885#define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2)
886#define GT_RENDER_DEBUG_INTERRUPT (1 << 1)
887#define GT_RENDER_USER_INTERRUPT (1 << 0)
888
889#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
890#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
891
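The aliasing called out above is what lets a single mask value serve both register sets; condensed from the gen6_ring_get_irq() pattern in intel_ringbuffer.c (illustrative):

	/* Unmasking e.g. GT_BSD_USER_INTERRUPT means clearing the same bit
	 * in the ring's own IMR and in the functional-block GTIMR. */
	I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
	dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);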
892/* These are all the "old" interrupts */
893#define ILK_BSD_USER_INTERRUPT (1<<5)
894#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
895#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
896#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
897#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
898#define I915_HWB_OOM_INTERRUPT (1<<13)
899#define I915_SYNC_STATUS_INTERRUPT (1<<12)
900#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
901#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
902#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
903#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
904#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
905#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
906#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
907#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
908#define I915_DEBUG_INTERRUPT (1<<2)
909#define I915_USER_INTERRUPT (1<<1)
910#define I915_ASLE_INTERRUPT (1<<0)
 911#define I915_BSD_USER_INTERRUPT (1<<25)
866 912
867#define GEN6_BSD_RNCID 0x12198 913#define GEN6_BSD_RNCID 0x12198
868 914
@@ -977,6 +1023,8 @@
977/* Framebuffer compression for Ivybridge */ 1023/* Framebuffer compression for Ivybridge */
978#define IVB_FBC_RT_BASE 0x7020 1024#define IVB_FBC_RT_BASE 0x7020
979 1025
1026#define IPS_CTL 0x43408
1027#define IPS_ENABLE (1 << 31)
980 1028
981#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0 1029#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
982#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4 1030#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
@@ -3057,6 +3105,10 @@
3057#define WM3S_LP_IVB 0x45128 3105#define WM3S_LP_IVB 0x45128
3058#define WM1S_LP_EN (1<<31) 3106#define WM1S_LP_EN (1<<31)
3059 3107
3108#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
3109 (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
3110 ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
3111
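A hypothetical use of the helper; the field values are invented and the latency/fbc/pri/cur argument semantics are an assumption based on the shift names, with only the packing taken from the WM1_LP_* definitions nearby:

	/* Enable an LP watermark level: latency 12, 2 FBC lines,
	 * primary value 0x4b, cursor value 8. */
	I915_WRITE(WM1_LP_ILK, HSW_WM_LP_VAL(12, 2, 0x4b, 8));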
3060/* Memory latency timer register */ 3112/* Memory latency timer register */
3061#define MLTR_ILK 0x11222 3113#define MLTR_ILK 0x11222
3062#define MLTR_WM1_SHIFT 0 3114#define MLTR_WM1_SHIFT 0
@@ -3616,6 +3668,15 @@
3616#define _LGC_PALETTE_B 0x4a800 3668#define _LGC_PALETTE_B 0x4a800
3617#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) 3669#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
3618 3670
3671#define _GAMMA_MODE_A 0x4a480
3672#define _GAMMA_MODE_B 0x4ac80
3673#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
3674#define GAMMA_MODE_MODE_MASK (3 << 0)
3675#define GAMMA_MODE_MODE_8bit (0 << 0)
3676#define GAMMA_MODE_MODE_10bit (1 << 0)
3677#define GAMMA_MODE_MODE_12bit (2 << 0)
3678#define GAMMA_MODE_MODE_SPLIT (3 << 0)
3679
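A minimal, hypothetical programming example for the new field, masking before setting so the remaining register bits are preserved:

	u32 val = I915_READ(GAMMA_MODE(pipe)) & ~GAMMA_MODE_MODE_MASK;
	I915_WRITE(GAMMA_MODE(pipe), val | GAMMA_MODE_MODE_8bit);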
3619/* interrupts */ 3680/* interrupts */
3620#define DE_MASTER_IRQ_CONTROL (1 << 31) 3681#define DE_MASTER_IRQ_CONTROL (1 << 31)
3621#define DE_SPRITEB_FLIP_DONE (1 << 29) 3682#define DE_SPRITEB_FLIP_DONE (1 << 29)
@@ -3667,21 +3728,6 @@
3667#define DEIIR 0x44008 3728#define DEIIR 0x44008
3668#define DEIER 0x4400c 3729#define DEIER 0x4400c
3669 3730
3670/* GT interrupt.
3671 * Note that for gen6+ the ring-specific interrupt bits do alias with the
3672 * corresponding bits in the per-ring interrupt control registers. */
3673#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
3674#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
3675#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
3676#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
3677#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
3678#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
3679#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
3680#define GT_PIPE_NOTIFY (1 << 4)
3681#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
3682#define GT_SYNC_STATUS (1 << 2)
3683#define GT_USER_INTERRUPT (1 << 0)
3684
3685#define GTISR 0x44010 3731#define GTISR 0x44010
3686#define GTIMR 0x44014 3732#define GTIMR 0x44014
3687#define GTIIR 0x44018 3733#define GTIIR 0x44018
@@ -3711,6 +3757,9 @@
3711# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) 3757# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
3712# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) 3758# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
3713 3759
3760#define CHICKEN_PAR1_1 0x42080
3761#define FORCE_ARB_IDLE_PLANES (1 << 14)
3762
3714#define DISP_ARB_CTL 0x45000 3763#define DISP_ARB_CTL 0x45000
3715#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 3764#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
3716#define DISP_FBC_WM_DIS (1<<15) 3765#define DISP_FBC_WM_DIS (1<<15)
@@ -4516,7 +4565,7 @@
4516#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) 4565#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
4517#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) 4566#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
4518#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) 4567#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
4519#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ 4568#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
4520 GEN6_PM_RP_DOWN_THRESHOLD | \ 4569 GEN6_PM_RP_DOWN_THRESHOLD | \
4521 GEN6_PM_RP_DOWN_TIMEOUT) 4570 GEN6_PM_RP_DOWN_TIMEOUT)
4522 4571
@@ -4538,40 +4587,6 @@
4538#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 4587#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
4539#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 4588#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
4540 4589
4541#define VLV_IOSF_DOORBELL_REQ 0x182100
4542#define IOSF_DEVFN_SHIFT 24
4543#define IOSF_OPCODE_SHIFT 16
4544#define IOSF_PORT_SHIFT 8
4545#define IOSF_BYTE_ENABLES_SHIFT 4
4546#define IOSF_BAR_SHIFT 1
4547#define IOSF_SB_BUSY (1<<0)
4548#define IOSF_PORT_PUNIT 0x4
4549#define IOSF_PORT_NC 0x11
4550#define VLV_IOSF_DATA 0x182104
4551#define VLV_IOSF_ADDR 0x182108
4552
4553#define PUNIT_OPCODE_REG_READ 6
4554#define PUNIT_OPCODE_REG_WRITE 7
4555
4556#define PUNIT_REG_GPU_LFM 0xd3
4557#define PUNIT_REG_GPU_FREQ_REQ 0xd4
4558#define PUNIT_REG_GPU_FREQ_STS 0xd8
4559#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
4560
4561#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
4562#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
4563
4564#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
4565#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
4566#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
4567#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11
4568#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800
4569#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34
4570#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007
4571#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30
4572#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
4573#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
4574
4575#define GEN6_GT_CORE_STATUS 0x138060 4590#define GEN6_GT_CORE_STATUS 0x138060
4576#define GEN6_CORE_CPD_STATE_MASK (7<<4) 4591#define GEN6_CORE_CPD_STATE_MASK (7<<4)
4577#define GEN6_RCn_MASK 7 4592#define GEN6_RCn_MASK 7
@@ -4935,6 +4950,9 @@
4935#define SFUSE_STRAP_DDIC_DETECTED (1<<1) 4950#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
4936#define SFUSE_STRAP_DDID_DETECTED (1<<0) 4951#define SFUSE_STRAP_DDID_DETECTED (1<<0)
4937 4952
4953#define WM_MISC 0x45260
4954#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
4955
4938#define WM_DBG 0x45280 4956#define WM_DBG 0x45280
4939#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0) 4957#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
4940#define WM_DBG_DISALLOW_MAXFIFO (1<<1) 4958#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c0d7875b475c..6875b5654c63 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -214,7 +214,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
214 mutex_lock(&dev_priv->rps.hw_lock); 214 mutex_lock(&dev_priv->rps.hw_lock);
215 if (IS_VALLEYVIEW(dev_priv->dev)) { 215 if (IS_VALLEYVIEW(dev_priv->dev)) {
216 u32 freq; 216 u32 freq;
217 valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &freq); 217 freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
218 ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff); 218 ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
219 } else { 219 } else {
220 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; 220 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 66a0c6f0bb81..3acec8c48166 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -84,6 +84,28 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
84 return true; 84 return true;
85} 85}
86 86
87static void intel_crt_get_config(struct intel_encoder *encoder,
88 struct intel_crtc_config *pipe_config)
89{
90 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
91 struct intel_crt *crt = intel_encoder_to_crt(encoder);
92 u32 tmp, flags = 0;
93
94 tmp = I915_READ(crt->adpa_reg);
95
96 if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
97 flags |= DRM_MODE_FLAG_PHSYNC;
98 else
99 flags |= DRM_MODE_FLAG_NHSYNC;
100
101 if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
102 flags |= DRM_MODE_FLAG_PVSYNC;
103 else
104 flags |= DRM_MODE_FLAG_NVSYNC;
105
106 pipe_config->adjusted_mode.flags |= flags;
107}
108
87/* Note: The caller is required to filter out dpms modes not supported by the 109/* Note: The caller is required to filter out dpms modes not supported by the
88 * platform. */ 110 * platform. */
89static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) 111static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -127,7 +149,7 @@ static void intel_enable_crt(struct intel_encoder *encoder)
127 intel_crt_set_dpms(encoder, crt->connector->base.dpms); 149 intel_crt_set_dpms(encoder, crt->connector->base.dpms);
128} 150}
129 151
130 152/* Special dpms function to support cloning between dvo/sdvo/crt. */
131static void intel_crt_dpms(struct drm_connector *connector, int mode) 153static void intel_crt_dpms(struct drm_connector *connector, int mode)
132{ 154{
133 struct drm_device *dev = connector->dev; 155 struct drm_device *dev = connector->dev;
@@ -158,6 +180,8 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
158 else 180 else
159 encoder->connectors_active = true; 181 encoder->connectors_active = true;
160 182
183 /* We call connector dpms manually below in case pipe dpms doesn't
184 * change due to cloning. */
161 if (mode < old_dpms) { 185 if (mode < old_dpms) {
162 /* From off to on, enable the pipe first. */ 186 /* From off to on, enable the pipe first. */
163 intel_crtc_update_dpms(crtc); 187 intel_crtc_update_dpms(crtc);
@@ -778,6 +802,7 @@ void intel_crt_init(struct drm_device *dev)
778 crt->base.compute_config = intel_crt_compute_config; 802 crt->base.compute_config = intel_crt_compute_config;
779 crt->base.disable = intel_disable_crt; 803 crt->base.disable = intel_disable_crt;
780 crt->base.enable = intel_enable_crt; 804 crt->base.enable = intel_enable_crt;
805 crt->base.get_config = intel_crt_get_config;
781 if (I915_HAS_HOTPLUG(dev)) 806 if (I915_HAS_HOTPLUG(dev))
782 crt->base.hpd_pin = HPD_CRT; 807 crt->base.hpd_pin = HPD_CRT;
783 if (HAS_DDI(dev)) 808 if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 062de679f38f..9649df806079 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1153,14 +1153,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1153int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) 1153int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1154{ 1154{
1155 if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) 1155 if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
1156 return 450; 1156 return 450000;
1157 else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == 1157 else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
1158 LCPLL_CLK_FREQ_450) 1158 LCPLL_CLK_FREQ_450)
1159 return 450; 1159 return 450000;
1160 else if (IS_ULT(dev_priv->dev)) 1160 else if (IS_ULT(dev_priv->dev))
1161 return 338; 1161 return 337500;
1162 else 1162 else
1163 return 540; 1163 return 540000;
1164} 1164}
1165 1165
1166void intel_ddi_pll_init(struct drm_device *dev) 1166void intel_ddi_pll_init(struct drm_device *dev)
@@ -1173,7 +1173,7 @@ void intel_ddi_pll_init(struct drm_device *dev)
1173 * Don't even try to turn it on. 1173 * Don't even try to turn it on.
1174 */ 1174 */
1175 1175
 1176 DRM_DEBUG_KMS("CDCLK running at %dMHz\n", 1176 DRM_DEBUG_KMS("CDCLK running at %dkHz\n",
1177 intel_ddi_get_cdclk_freq(dev_priv)); 1177 intel_ddi_get_cdclk_freq(dev_priv));
1178 1178
1179 if (val & LCPLL_CD_SOURCE_FCLK) 1179 if (val & LCPLL_CD_SOURCE_FCLK)
@@ -1259,6 +1259,28 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
1259 intel_dp_check_link_status(intel_dp); 1259 intel_dp_check_link_status(intel_dp);
1260} 1260}
1261 1261
1262static void intel_ddi_get_config(struct intel_encoder *encoder,
1263 struct intel_crtc_config *pipe_config)
1264{
1265 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1266 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1267 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
1268 u32 temp, flags = 0;
1269
1270 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1271 if (temp & TRANS_DDI_PHSYNC)
1272 flags |= DRM_MODE_FLAG_PHSYNC;
1273 else
1274 flags |= DRM_MODE_FLAG_NHSYNC;
1275 if (temp & TRANS_DDI_PVSYNC)
1276 flags |= DRM_MODE_FLAG_PVSYNC;
1277 else
1278 flags |= DRM_MODE_FLAG_NVSYNC;
1279
1280 pipe_config->adjusted_mode.flags |= flags;
1281 pipe_config->pixel_multiplier = 1;
1282}
1283
1262static void intel_ddi_destroy(struct drm_encoder *encoder) 1284static void intel_ddi_destroy(struct drm_encoder *encoder)
1263{ 1285{
1264 /* HDMI has nothing special to destroy, so we can go with this. */ 1286 /* HDMI has nothing special to destroy, so we can go with this. */
@@ -1269,9 +1291,13 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
1269 struct intel_crtc_config *pipe_config) 1291 struct intel_crtc_config *pipe_config)
1270{ 1292{
1271 int type = encoder->type; 1293 int type = encoder->type;
1294 int port = intel_ddi_get_encoder_port(encoder);
1272 1295
1273 WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); 1296 WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
1274 1297
1298 if (port == PORT_A)
1299 pipe_config->cpu_transcoder = TRANSCODER_EDP;
1300
1275 if (type == INTEL_OUTPUT_HDMI) 1301 if (type == INTEL_OUTPUT_HDMI)
1276 return intel_hdmi_compute_config(encoder, pipe_config); 1302 return intel_hdmi_compute_config(encoder, pipe_config);
1277 else 1303 else
@@ -1318,6 +1344,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1318 intel_encoder->disable = intel_disable_ddi; 1344 intel_encoder->disable = intel_disable_ddi;
1319 intel_encoder->post_disable = intel_ddi_post_disable; 1345 intel_encoder->post_disable = intel_ddi_post_disable;
1320 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 1346 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
1347 intel_encoder->get_config = intel_ddi_get_config;
1321 1348
1322 intel_dig_port->port = port; 1349 intel_dig_port->port = port;
1323 intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & 1350 intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a4c9f56afb38..dff9d4e5b92b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -381,43 +381,6 @@ static const intel_limit_t intel_limits_vlv_dp = {
381 .find_pll = intel_vlv_find_best_pll, 381 .find_pll = intel_vlv_find_best_pll,
382}; 382};
383 383
384u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
385{
386 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
387
388 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
389 DRM_ERROR("DPIO idle wait timed out\n");
390 return 0;
391 }
392
393 I915_WRITE(DPIO_REG, reg);
394 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
395 DPIO_BYTE);
396 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
397 DRM_ERROR("DPIO read wait timed out\n");
398 return 0;
399 }
400
401 return I915_READ(DPIO_DATA);
402}
403
404void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
405{
406 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
407
408 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
409 DRM_ERROR("DPIO idle wait timed out\n");
410 return;
411 }
412
413 I915_WRITE(DPIO_DATA, val);
414 I915_WRITE(DPIO_REG, reg);
415 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
416 DPIO_BYTE);
417 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
418 DRM_ERROR("DPIO write wait timed out\n");
419}
420
421static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 384static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
422 int refclk) 385 int refclk)
423{ 386{
@@ -1404,67 +1367,6 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1404 POSTING_READ(reg); 1367 POSTING_READ(reg);
1405} 1368}
1406 1369
1407/* SBI access */
1408static void
1409intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1410 enum intel_sbi_destination destination)
1411{
1412 u32 tmp;
1413
1414 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1415
1416 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1417 100)) {
1418 DRM_ERROR("timeout waiting for SBI to become ready\n");
1419 return;
1420 }
1421
1422 I915_WRITE(SBI_ADDR, (reg << 16));
1423 I915_WRITE(SBI_DATA, value);
1424
1425 if (destination == SBI_ICLK)
1426 tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
1427 else
1428 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
1429 I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
1430
1431 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1432 100)) {
1433 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1434 return;
1435 }
1436}
1437
1438static u32
1439intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1440 enum intel_sbi_destination destination)
1441{
1442 u32 value = 0;
1443 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1444
1445 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1446 100)) {
1447 DRM_ERROR("timeout waiting for SBI to become ready\n");
1448 return 0;
1449 }
1450
1451 I915_WRITE(SBI_ADDR, (reg << 16));
1452
1453 if (destination == SBI_ICLK)
1454 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
1455 else
1456 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
1457 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
1458
1459 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1460 100)) {
1461 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1462 return 0;
1463 }
1464
1465 return I915_READ(SBI_DATA);
1466}
1467
1468void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) 1370void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
1469{ 1371{
1470 u32 port_mask; 1372 u32 port_mask;
@@ -3340,6 +3242,42 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3340 intel_wait_for_vblank(dev, intel_crtc->pipe); 3242 intel_wait_for_vblank(dev, intel_crtc->pipe);
3341} 3243}
3342 3244
3245/* IPS only exists on ULT machines and is tied to pipe A. */
3246static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3247{
3248 return IS_ULT(crtc->base.dev) && crtc->pipe == PIPE_A;
3249}
3250
3251static void hsw_enable_ips(struct intel_crtc *crtc)
3252{
3253 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3254
3255 if (!crtc->config.ips_enabled)
3256 return;
3257
3258 /* We can only enable IPS after we enable a plane and wait for a vblank.
 3259 * We guarantee that the plane is enabled by calling hsw_enable_ips
3260 * only after intel_enable_plane. And intel_enable_plane already waits
3261 * for a vblank, so all we need to do here is to enable the IPS bit. */
3262 assert_plane_enabled(dev_priv, crtc->plane);
3263 I915_WRITE(IPS_CTL, IPS_ENABLE);
3264}
3265
3266static void hsw_disable_ips(struct intel_crtc *crtc)
3267{
3268 struct drm_device *dev = crtc->base.dev;
3269 struct drm_i915_private *dev_priv = dev->dev_private;
3270
3271 if (!crtc->config.ips_enabled)
3272 return;
3273
3274 assert_plane_enabled(dev_priv, crtc->plane);
3275 I915_WRITE(IPS_CTL, 0);
3276
3277 /* We need to wait for a vblank before we can disable the plane. */
3278 intel_wait_for_vblank(dev, crtc->pipe);
3279}
3280
3343static void haswell_crtc_enable(struct drm_crtc *crtc) 3281static void haswell_crtc_enable(struct drm_crtc *crtc)
3344{ 3282{
3345 struct drm_device *dev = crtc->dev; 3283 struct drm_device *dev = crtc->dev;
@@ -3387,6 +3325,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3387 intel_crtc->config.has_pch_encoder); 3325 intel_crtc->config.has_pch_encoder);
3388 intel_enable_plane(dev_priv, plane, pipe); 3326 intel_enable_plane(dev_priv, plane, pipe);
3389 3327
3328 hsw_enable_ips(intel_crtc);
3329
3390 if (intel_crtc->config.has_pch_encoder) 3330 if (intel_crtc->config.has_pch_encoder)
3391 lpt_pch_enable(crtc); 3331 lpt_pch_enable(crtc);
3392 3332
@@ -3529,6 +3469,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3529 if (dev_priv->cfb_plane == plane) 3469 if (dev_priv->cfb_plane == plane)
3530 intel_disable_fbc(dev); 3470 intel_disable_fbc(dev);
3531 3471
3472 hsw_disable_ips(intel_crtc);
3473
3532 intel_disable_plane(dev_priv, plane, pipe); 3474 intel_disable_plane(dev_priv, plane, pipe);
3533 3475
3534 if (intel_crtc->config.has_pch_encoder) 3476 if (intel_crtc->config.has_pch_encoder)
@@ -3567,12 +3509,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc)
3567 3509
3568static void haswell_crtc_off(struct drm_crtc *crtc) 3510static void haswell_crtc_off(struct drm_crtc *crtc)
3569{ 3511{
3570 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3571
3572 /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
3573 * start using it. */
3574 intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
3575
3576 intel_ddi_put_crtc_pll(crtc); 3512 intel_ddi_put_crtc_pll(crtc);
3577} 3513}
3578 3514
@@ -3627,18 +3563,12 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
3627 if (!crtc->config.gmch_pfit.control) 3563 if (!crtc->config.gmch_pfit.control)
3628 return; 3564 return;
3629 3565
3630 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
3631 assert_pipe_disabled(dev_priv, crtc->pipe);
3632
3633 /* 3566 /*
3634 * Enable automatic panel scaling so that non-native modes 3567 * The panel fitter should only be adjusted whilst the pipe is disabled,
3635 * fill the screen. The panel fitter should only be 3568 * according to register description and PRM.
3636 * adjusted whilst the pipe is disabled, according to
3637 * register description and PRM.
3638 */ 3569 */
3639 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", 3570 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
3640 pipe_config->gmch_pfit.control, 3571 assert_pipe_disabled(dev_priv, crtc->pipe);
3641 pipe_config->gmch_pfit.pgm_ratios);
3642 3572
3643 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); 3573 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
3644 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); 3574 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
@@ -4101,11 +4031,20 @@ retry:
4101 return setup_ok ? 0 : -EINVAL; 4031 return setup_ok ? 0 : -EINVAL;
4102} 4032}
4103 4033
4034static void hsw_compute_ips_config(struct intel_crtc *crtc,
4035 struct intel_crtc_config *pipe_config)
4036{
4037 pipe_config->ips_enabled = i915_enable_ips &&
4038 hsw_crtc_supports_ips(crtc) &&
4039 pipe_config->pipe_bpp == 24;
4040}
4041
4104static int intel_crtc_compute_config(struct drm_crtc *crtc, 4042static int intel_crtc_compute_config(struct drm_crtc *crtc,
4105 struct intel_crtc_config *pipe_config) 4043 struct intel_crtc_config *pipe_config)
4106{ 4044{
4107 struct drm_device *dev = crtc->dev; 4045 struct drm_device *dev = crtc->dev;
4108 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 4046 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4047 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4109 4048
4110 if (HAS_PCH_SPLIT(dev)) { 4049 if (HAS_PCH_SPLIT(dev)) {
4111 /* FDI link clock is fixed at 2.7G */ 4050 /* FDI link clock is fixed at 2.7G */
@@ -4135,8 +4074,11 @@ static int intel_crtc_compute_config(struct drm_crtc *crtc,
4135 pipe_config->pipe_bpp = 8*3; 4074 pipe_config->pipe_bpp = 8*3;
4136 } 4075 }
4137 4076
4077 if (IS_HASWELL(dev))
4078 hsw_compute_ips_config(intel_crtc, pipe_config);
4079
4138 if (pipe_config->has_pch_encoder) 4080 if (pipe_config->has_pch_encoder)
4139 return ironlake_fdi_compute_config(to_intel_crtc(crtc), pipe_config); 4081 return ironlake_fdi_compute_config(intel_crtc, pipe_config);
4140 4082
4141 return 0; 4083 return 0;
4142} 4084}
@@ -4343,24 +4285,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
4343 * PLLB opamp always calibrates to max value of 0x3f, force enable it 4285 * PLLB opamp always calibrates to max value of 0x3f, force enable it
4344 * and set it to a reasonable value instead. 4286 * and set it to a reasonable value instead.
4345 */ 4287 */
4346 reg_val = intel_dpio_read(dev_priv, DPIO_IREF(1)); 4288 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
4347 reg_val &= 0xffffff00; 4289 reg_val &= 0xffffff00;
4348 reg_val |= 0x00000030; 4290 reg_val |= 0x00000030;
4349 intel_dpio_write(dev_priv, DPIO_IREF(1), reg_val); 4291 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
4350 4292
4351 reg_val = intel_dpio_read(dev_priv, DPIO_CALIBRATION); 4293 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
4352 reg_val &= 0x8cffffff; 4294 reg_val &= 0x8cffffff;
4353 reg_val = 0x8c000000; 4295 reg_val = 0x8c000000;
4354 intel_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); 4296 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
4355 4297
4356 reg_val = intel_dpio_read(dev_priv, DPIO_IREF(1)); 4298 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
4357 reg_val &= 0xffffff00; 4299 reg_val &= 0xffffff00;
4358 intel_dpio_write(dev_priv, DPIO_IREF(1), reg_val); 4300 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
4359 4301
4360 reg_val = intel_dpio_read(dev_priv, DPIO_CALIBRATION); 4302 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
4361 reg_val &= 0x00ffffff; 4303 reg_val &= 0x00ffffff;
4362 reg_val |= 0xb0000000; 4304 reg_val |= 0xb0000000;
4363 intel_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); 4305 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
4364} 4306}
4365 4307
4366static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 4308static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4435,15 +4377,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4435 vlv_pllb_recal_opamp(dev_priv); 4377 vlv_pllb_recal_opamp(dev_priv);
4436 4378
4437 /* Set up Tx target for periodic Rcomp update */ 4379 /* Set up Tx target for periodic Rcomp update */
4438 intel_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f); 4380 vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
4439 4381
4440 /* Disable target IRef on PLL */ 4382 /* Disable target IRef on PLL */
4441 reg_val = intel_dpio_read(dev_priv, DPIO_IREF_CTL(pipe)); 4383 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
4442 reg_val &= 0x00ffffff; 4384 reg_val &= 0x00ffffff;
4443 intel_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val); 4385 vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
4444 4386
4445 /* Disable fast lock */ 4387 /* Disable fast lock */
4446 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610); 4388 vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
4447 4389
4448 /* Set idtafcrecal before PLL is enabled */ 4390 /* Set idtafcrecal before PLL is enabled */
4449 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 4391 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4457,47 +4399,47 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4457 * Note: don't use the DAC post divider as it seems unstable. 4399 * Note: don't use the DAC post divider as it seems unstable.
4458 */ 4400 */
4459 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 4401 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4460 intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 4402 vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
4461 4403
4462 mdiv |= DPIO_ENABLE_CALIBRATION; 4404 mdiv |= DPIO_ENABLE_CALIBRATION;
4463 intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 4405 vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
4464 4406
4465 /* Set HBR and RBR LPF coefficients */ 4407 /* Set HBR and RBR LPF coefficients */
4466 if (adjusted_mode->clock == 162000 || 4408 if (adjusted_mode->clock == 162000 ||
4467 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 4409 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4468 intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 4410 vlv_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe),
4469 0x005f0021); 4411 0x005f0021);
4470 else 4412 else
4471 intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 4413 vlv_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe),
4472 0x00d0000f); 4414 0x00d0000f);
4473 4415
4474 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 4416 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
4475 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { 4417 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
4476 /* Use SSC source */ 4418 /* Use SSC source */
4477 if (!pipe) 4419 if (!pipe)
4478 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4420 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
4479 0x0df40000); 4421 0x0df40000);
4480 else 4422 else
4481 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4423 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
4482 0x0df70000); 4424 0x0df70000);
4483 } else { /* HDMI or VGA */ 4425 } else { /* HDMI or VGA */
4484 /* Use bend source */ 4426 /* Use bend source */
4485 if (!pipe) 4427 if (!pipe)
4486 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4428 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
4487 0x0df70000); 4429 0x0df70000);
4488 else 4430 else
4489 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4431 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
4490 0x0df40000); 4432 0x0df40000);
4491 } 4433 }
4492 4434
4493 coreclk = intel_dpio_read(dev_priv, DPIO_CORE_CLK(pipe)); 4435 coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
4494 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 4436 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
4495 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || 4437 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
4496 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) 4438 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
4497 coreclk |= 0x01000000; 4439 coreclk |= 0x01000000;
4498 intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk); 4440 vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
4499 4441
4500 intel_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); 4442 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
4501 4443
4502 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 4444 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4503 if (encoder->pre_pll_enable) 4445 if (encoder->pre_pll_enable)
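
[note] The mechanical rename from intel_dpio_* to vlv_dpio_* matches the accessors moving into the new intel_sideband.c. A minimal sketch of their likely shape, assuming a common vlv_sideband_rw() helper that drives the IOSF sideband mailbox (the opcode and port defines are assumed from i915_reg.h and are not shown in this diff):

	/* Sketch only: the real implementations live in intel_sideband.c. */
	u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
	{
		u32 val = 0;

		vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
				DPIO_OPCODE_REG_READ, reg, &val);
		return val;
	}

	void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
	{
		vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
				DPIO_OPCODE_REG_WRITE, reg, &val);
	}

The signatures themselves are confirmed by every call site in this diff: the read takes (dev_priv, reg), the write takes (dev_priv, reg, val).
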
@@ -4961,9 +4903,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4961 dspcntr |= DISPPLANE_SEL_PIPE_B; 4903 dspcntr |= DISPPLANE_SEL_PIPE_B;
4962 } 4904 }
4963 4905
4964 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe_name(pipe));
4965 drm_mode_debug_printmodeline(mode);
4966
4967 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 4906 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
4968 4907
4969 /* pipesrc and dspsize control the size that is scaled from, 4908 /* pipesrc and dspsize control the size that is scaled from,
@@ -5023,6 +4962,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5023 struct drm_i915_private *dev_priv = dev->dev_private; 4962 struct drm_i915_private *dev_priv = dev->dev_private;
5024 uint32_t tmp; 4963 uint32_t tmp;
5025 4964
4965 pipe_config->cpu_transcoder = crtc->pipe;
4966
5026 tmp = I915_READ(PIPECONF(crtc->pipe)); 4967 tmp = I915_READ(PIPECONF(crtc->pipe));
5027 if (!(tmp & PIPECONF_ENABLE)) 4968 if (!(tmp & PIPECONF_ENABLE))
5028 return false; 4969 return false;
@@ -5745,8 +5686,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5745 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), 5686 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5746 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); 5687 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
5747 5688
5748 intel_crtc->config.cpu_transcoder = pipe;
5749
5750 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, 5689 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5751 &has_reduced_clock, &reduced_clock); 5690 &has_reduced_clock, &reduced_clock);
5752 if (!ok) { 5691 if (!ok) {
@@ -5765,9 +5704,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5765 /* Ensure that the cursor is valid for the new mode before changing... */ 5704 /* Ensure that the cursor is valid for the new mode before changing... */
5766 intel_crtc_update_cursor(crtc, true); 5705 intel_crtc_update_cursor(crtc, true);
5767 5706
5768 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe_name(pipe));
5769 drm_mode_debug_printmodeline(mode);
5770
5771 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 5707 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5772 if (intel_crtc->config.has_pch_encoder) { 5708 if (intel_crtc->config.has_pch_encoder) {
5773 struct intel_pch_pll *pll; 5709 struct intel_pch_pll *pll;
@@ -5841,8 +5777,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5841 5777
5842 intel_update_watermarks(dev); 5778 intel_update_watermarks(dev);
5843 5779
5844 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5845
5846 return ret; 5780 return ret;
5847} 5781}
5848 5782
@@ -5884,6 +5818,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5884 struct drm_i915_private *dev_priv = dev->dev_private; 5818 struct drm_i915_private *dev_priv = dev->dev_private;
5885 uint32_t tmp; 5819 uint32_t tmp;
5886 5820
5821 pipe_config->cpu_transcoder = crtc->pipe;
5822
5887 tmp = I915_READ(PIPECONF(crtc->pipe)); 5823 tmp = I915_READ(PIPECONF(crtc->pipe));
5888 if (!(tmp & PIPECONF_ENABLE)) 5824 if (!(tmp & PIPECONF_ENABLE))
5889 return false; 5825 return false;
@@ -5909,23 +5845,13 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
5909{ 5845{
5910 bool enable = false; 5846 bool enable = false;
5911 struct intel_crtc *crtc; 5847 struct intel_crtc *crtc;
5912 struct intel_encoder *encoder;
5913 5848
5914 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 5849 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
5915 if (crtc->pipe != PIPE_A && crtc->base.enabled) 5850 if (!crtc->base.enabled)
5916 enable = true; 5851 continue;
5917 /* XXX: Should check for edp transcoder here, but thanks to init
5918 * sequence that's not yet available. Just in case desktop eDP
5919 * on PORT D is possible on haswell, too. */
5920 /* Even the eDP panel fitter is outside the always-on well. */
5921 if (crtc->config.pch_pfit.size && crtc->base.enabled)
5922 enable = true;
5923 }
5924 5852
5925 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 5853 if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
5926 base.head) { 5854 crtc->config.cpu_transcoder != TRANSCODER_EDP)
5927 if (encoder->type != INTEL_OUTPUT_EDP &&
5928 encoder->connectors_active)
5929 enable = true; 5855 enable = true;
5930 } 5856 }
5931 5857
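
[note] The rewritten loop boils the power-well requirement down to one predicate per enabled pipe. Restated in isolation (same logic as above; crtc_needs_power_well() is a hypothetical name, not driver code):

	/* The always-on well covers only pipe A on the eDP transcoder with
	 * the PCH panel fitter unused; anything else needs the power well. */
	static bool crtc_needs_power_well(struct intel_crtc *crtc)
	{
		return crtc->pipe != PIPE_A ||
		       crtc->config.pch_pfit.size ||
		       crtc->config.cpu_transcoder != TRANSCODER_EDP;
	}

This simplification is possible because cpu_transcoder is now tracked in the pipe config, so the separate encoder walk for eDP is no longer needed.
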
@@ -5960,32 +5886,15 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5960 num_connectors++; 5886 num_connectors++;
5961 } 5887 }
5962 5888
5963 if (is_cpu_edp)
5964 intel_crtc->config.cpu_transcoder = TRANSCODER_EDP;
5965 else
5966 intel_crtc->config.cpu_transcoder = pipe;
5967
5968 /* We are not sure yet this won't happen. */
5969 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5970 INTEL_PCH_TYPE(dev));
5971
5972 WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", 5889 WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
5973 num_connectors, pipe_name(pipe)); 5890 num_connectors, pipe_name(pipe));
5974 5891
5975 WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) &
5976 (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
5977
5978 WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
5979
5980 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) 5892 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
5981 return -EINVAL; 5893 return -EINVAL;
5982 5894
5983 /* Ensure that the cursor is valid for the new mode before changing... */ 5895 /* Ensure that the cursor is valid for the new mode before changing... */
5984 intel_crtc_update_cursor(crtc, true); 5896 intel_crtc_update_cursor(crtc, true);
5985 5897
5986 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe_name(pipe));
5987 drm_mode_debug_printmodeline(mode);
5988
5989 if (intel_crtc->config.has_dp_encoder) 5898 if (intel_crtc->config.has_dp_encoder)
5990 intel_dp_set_m_n(intel_crtc); 5899 intel_dp_set_m_n(intel_crtc);
5991 5900
@@ -6010,8 +5919,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6010 5919
6011 intel_update_watermarks(dev); 5920 intel_update_watermarks(dev);
6012 5921
6013 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
6014
6015 return ret; 5922 return ret;
6016} 5923}
6017 5924
@@ -6020,15 +5927,37 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6020{ 5927{
6021 struct drm_device *dev = crtc->base.dev; 5928 struct drm_device *dev = crtc->base.dev;
6022 struct drm_i915_private *dev_priv = dev->dev_private; 5929 struct drm_i915_private *dev_priv = dev->dev_private;
6023 enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
6024 enum intel_display_power_domain pfit_domain; 5930 enum intel_display_power_domain pfit_domain;
6025 uint32_t tmp; 5931 uint32_t tmp;
6026 5932
5933 pipe_config->cpu_transcoder = crtc->pipe;
5934 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
5935 if (tmp & TRANS_DDI_FUNC_ENABLE) {
5936 enum pipe trans_edp_pipe;
5937 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
5938 default:
5939 WARN(1, "unknown pipe linked to edp transcoder\n");
5940 case TRANS_DDI_EDP_INPUT_A_ONOFF:
5941 case TRANS_DDI_EDP_INPUT_A_ON:
5942 trans_edp_pipe = PIPE_A;
5943 break;
5944 case TRANS_DDI_EDP_INPUT_B_ONOFF:
5945 trans_edp_pipe = PIPE_B;
5946 break;
5947 case TRANS_DDI_EDP_INPUT_C_ONOFF:
5948 trans_edp_pipe = PIPE_C;
5949 break;
5950 }
5951
5952 if (trans_edp_pipe == crtc->pipe)
5953 pipe_config->cpu_transcoder = TRANSCODER_EDP;
5954 }
5955
6027 if (!intel_display_power_enabled(dev, 5956 if (!intel_display_power_enabled(dev,
6028 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 5957 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6029 return false; 5958 return false;
6030 5959
6031 tmp = I915_READ(PIPECONF(cpu_transcoder)); 5960 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
6032 if (!(tmp & PIPECONF_ENABLE)) 5961 if (!(tmp & PIPECONF_ENABLE))
6033 return false; 5962 return false;
6034 5963
@@ -6037,7 +5966,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6037 * DDI E. So just check whether this pipe is wired to DDI E and whether 5966 * DDI E. So just check whether this pipe is wired to DDI E and whether
6038 * the PCH transcoder is on. 5967 * the PCH transcoder is on.
6039 */ 5968 */
6040 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 5969 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
6041 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) && 5970 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
6042 I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 5971 I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
6043 pipe_config->has_pch_encoder = true; 5972 pipe_config->has_pch_encoder = true;
@@ -6055,6 +5984,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6055 if (intel_display_power_enabled(dev, pfit_domain)) 5984 if (intel_display_power_enabled(dev, pfit_domain))
6056 ironlake_get_pfit_config(crtc, pipe_config); 5985 ironlake_get_pfit_config(crtc, pipe_config);
6057 5986
5987 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
5988 (I915_READ(IPS_CTL) & IPS_ENABLE);
5989
6058 return true; 5990 return true;
6059} 5991}
6060 5992
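
[note] With the transcoder determined from TRANS_DDI_FUNC_CTL(TRANSCODER_EDP) at readout time, callers no longer need to seed pipe_config->cpu_transcoder before calling the hook. A usage sketch, built only from helpers already visible in this diff:

	/* Usage sketch: hardware readout now determines the transcoder. */
	struct intel_crtc_config tmp;

	memset(&tmp, 0, sizeof(tmp));
	if (dev_priv->display.get_pipe_config(crtc, &tmp))
		DRM_DEBUG_KMS("pipe %c uses transcoder %c\n",
			      pipe_name(crtc->pipe),
			      transcoder_name(tmp.cpu_transcoder));
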
@@ -6359,8 +6291,10 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
6359 struct drm_device *dev = crtc->dev; 6291 struct drm_device *dev = crtc->dev;
6360 struct drm_i915_private *dev_priv = dev->dev_private; 6292 struct drm_i915_private *dev_priv = dev->dev_private;
6361 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6293 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6362 int palreg = PALETTE(intel_crtc->pipe); 6294 enum pipe pipe = intel_crtc->pipe;
6295 int palreg = PALETTE(pipe);
6363 int i; 6296 int i;
6297 bool reenable_ips = false;
6364 6298
6365 /* The clocks have to be on to load the palette. */ 6299 /* The clocks have to be on to load the palette. */
6366 if (!crtc->enabled || !intel_crtc->active) 6300 if (!crtc->enabled || !intel_crtc->active)
@@ -6368,7 +6302,17 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
6368 6302
6369 /* use legacy palette for Ironlake */ 6303 /* use legacy palette for Ironlake */
6370 if (HAS_PCH_SPLIT(dev)) 6304 if (HAS_PCH_SPLIT(dev))
6371 palreg = LGC_PALETTE(intel_crtc->pipe); 6305 palreg = LGC_PALETTE(pipe);
6306
 6307 /* Workaround: Do not read or write the pipe palette/gamma data while
6308 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6309 */
6310 if (intel_crtc->config.ips_enabled &&
6311 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
6312 GAMMA_MODE_MODE_SPLIT)) {
6313 hsw_disable_ips(intel_crtc);
6314 reenable_ips = true;
6315 }
6372 6316
6373 for (i = 0; i < 256; i++) { 6317 for (i = 0; i < 256; i++) {
6374 I915_WRITE(palreg + 4 * i, 6318 I915_WRITE(palreg + 4 * i,
@@ -6376,6 +6320,9 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
6376 (intel_crtc->lut_g[i] << 8) | 6320 (intel_crtc->lut_g[i] << 8) |
6377 intel_crtc->lut_b[i]); 6321 intel_crtc->lut_b[i]);
6378 } 6322 }
6323
6324 if (reenable_ips)
6325 hsw_enable_ips(intel_crtc);
6379} 6326}
6380 6327
6381static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 6328static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
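
[note] hsw_disable_ips()/hsw_enable_ips() bracket the palette write per the split-gamma workaround. The bracket pattern in isolation (the predicate and palette writer below are placeholders for the checks and register loop above, not driver functions):

	bool reenable_ips = false;

	/* ips_active_with_split_gamma() stands in for the
	 * GAMMA_MODE/IPS_CTL check above. */
	if (ips_active_with_split_gamma(intel_crtc)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	write_legacy_palette(intel_crtc);	/* placeholder for the loop */

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
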
@@ -6622,7 +6569,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6622 intel_crtc->cursor_width = width; 6569 intel_crtc->cursor_width = width;
6623 intel_crtc->cursor_height = height; 6570 intel_crtc->cursor_height = height;
6624 6571
6625 intel_crtc_update_cursor(crtc, true); 6572 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
6626 6573
6627 return 0; 6574 return 0;
6628fail_unpin: 6575fail_unpin:
@@ -6641,7 +6588,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6641 intel_crtc->cursor_x = x; 6588 intel_crtc->cursor_x = x;
6642 intel_crtc->cursor_y = y; 6589 intel_crtc->cursor_y = y;
6643 6590
6644 intel_crtc_update_cursor(crtc, true); 6591 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
6645 6592
6646 return 0; 6593 return 0;
6647} 6594}
@@ -7155,6 +7102,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
7155 kfree(work); 7102 kfree(work);
7156 } 7103 }
7157 7104
7105 intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
7106
7158 drm_crtc_cleanup(crtc); 7107 drm_crtc_cleanup(crtc);
7159 7108
7160 kfree(intel_crtc); 7109 kfree(intel_crtc);
@@ -7774,6 +7723,36 @@ pipe_config_set_bpp(struct drm_crtc *crtc,
7774 return bpp; 7723 return bpp;
7775} 7724}
7776 7725
7726static void intel_dump_pipe_config(struct intel_crtc *crtc,
7727 struct intel_crtc_config *pipe_config,
7728 const char *context)
7729{
7730 DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
7731 context, pipe_name(crtc->pipe));
7732
7733 DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
7734 DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
7735 pipe_config->pipe_bpp, pipe_config->dither);
7736 DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7737 pipe_config->has_pch_encoder,
7738 pipe_config->fdi_lanes,
7739 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
7740 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
7741 pipe_config->fdi_m_n.tu);
7742 DRM_DEBUG_KMS("requested mode:\n");
7743 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
7744 DRM_DEBUG_KMS("adjusted mode:\n");
7745 drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
7746 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
7747 pipe_config->gmch_pfit.control,
7748 pipe_config->gmch_pfit.pgm_ratios,
7749 pipe_config->gmch_pfit.lvds_border_bits);
7750 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
7751 pipe_config->pch_pfit.pos,
7752 pipe_config->pch_pfit.size);
7753 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
7754}
7755
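
[note] intel_dump_pipe_config() is then reused at three call sites later in this diff, distinguished by the context string:

	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, "[modeset]");
	intel_dump_pipe_config(crtc, &pipe_config, "[hw state]");
	intel_dump_pipe_config(crtc, &crtc->config, "[sw state]");

plus the "[setup_hw_state]" dump emitted after sanitizing each crtc.
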
7777static struct intel_crtc_config * 7756static struct intel_crtc_config *
7778intel_modeset_pipe_config(struct drm_crtc *crtc, 7757intel_modeset_pipe_config(struct drm_crtc *crtc,
7779 struct drm_framebuffer *fb, 7758 struct drm_framebuffer *fb,
@@ -7792,6 +7771,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
7792 7771
7793 drm_mode_copy(&pipe_config->adjusted_mode, mode); 7772 drm_mode_copy(&pipe_config->adjusted_mode, mode);
7794 drm_mode_copy(&pipe_config->requested_mode, mode); 7773 drm_mode_copy(&pipe_config->requested_mode, mode);
7774 pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
7795 7775
7796 plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config); 7776 plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config);
7797 if (plane_bpp < 0) 7777 if (plane_bpp < 0)
@@ -7843,8 +7823,6 @@ encoder_retry:
7843 goto encoder_retry; 7823 goto encoder_retry;
7844 } 7824 }
7845 7825
7846 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
7847
7848 pipe_config->dither = pipe_config->pipe_bpp != plane_bpp; 7826 pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
7849 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", 7827 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
7850 plane_bpp, pipe_config->pipe_bpp, pipe_config->dither); 7828 plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
@@ -8042,6 +8020,8 @@ intel_pipe_config_compare(struct drm_device *dev,
8042 return false; \ 8020 return false; \
8043 } 8021 }
8044 8022
8023 PIPE_CONF_CHECK_I(cpu_transcoder);
8024
8045 PIPE_CONF_CHECK_I(has_pch_encoder); 8025 PIPE_CONF_CHECK_I(has_pch_encoder);
8046 PIPE_CONF_CHECK_I(fdi_lanes); 8026 PIPE_CONF_CHECK_I(fdi_lanes);
8047 PIPE_CONF_CHECK_I(fdi_m_n.gmch_m); 8027 PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
@@ -8067,6 +8047,15 @@ intel_pipe_config_compare(struct drm_device *dev,
8067 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, 8047 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8068 DRM_MODE_FLAG_INTERLACE); 8048 DRM_MODE_FLAG_INTERLACE);
8069 8049
8050 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8051 DRM_MODE_FLAG_PHSYNC);
8052 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8053 DRM_MODE_FLAG_NHSYNC);
8054 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8055 DRM_MODE_FLAG_PVSYNC);
8056 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8057 DRM_MODE_FLAG_NVSYNC);
8058
8070 PIPE_CONF_CHECK_I(requested_mode.hdisplay); 8059 PIPE_CONF_CHECK_I(requested_mode.hdisplay);
8071 PIPE_CONF_CHECK_I(requested_mode.vdisplay); 8060 PIPE_CONF_CHECK_I(requested_mode.vdisplay);
8072 8061
@@ -8078,6 +8067,8 @@ intel_pipe_config_compare(struct drm_device *dev,
8078 PIPE_CONF_CHECK_I(pch_pfit.pos); 8067 PIPE_CONF_CHECK_I(pch_pfit.pos);
8079 PIPE_CONF_CHECK_I(pch_pfit.size); 8068 PIPE_CONF_CHECK_I(pch_pfit.size);
8080 8069
8070 PIPE_CONF_CHECK_I(ips_enabled);
8071
8081#undef PIPE_CONF_CHECK_I 8072#undef PIPE_CONF_CHECK_I
8082#undef PIPE_CONF_CHECK_FLAGS 8073#undef PIPE_CONF_CHECK_FLAGS
8083 8074
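
[note] The comparison macros are defined just above this hunk; only their tail is visible at its top. Their shape, sketched from that tail and the checks added here (the exact argument names are assumptions):

	#define PIPE_CONF_CHECK_I(name) \
		if (current_config->name != pipe_config->name) { \
			DRM_ERROR("mismatch in " #name " " \
				  "(expected %i, found %i)\n", \
				  current_config->name, \
				  pipe_config->name); \
			return false; \
		}

A PIPE_CONF_CHECK_FLAGS variant presumably masks both sides with the given flag before comparing, which is what makes the per-polarity sync checks above cheap to add.
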
@@ -8159,6 +8150,8 @@ intel_modeset_check_state(struct drm_device *dev)
8159 bool enabled = false; 8150 bool enabled = false;
8160 bool active = false; 8151 bool active = false;
8161 8152
8153 memset(&pipe_config, 0, sizeof(pipe_config));
8154
8162 DRM_DEBUG_KMS("[CRTC:%d]\n", 8155 DRM_DEBUG_KMS("[CRTC:%d]\n",
8163 crtc->base.base.id); 8156 crtc->base.base.id);
8164 8157
@@ -8172,6 +8165,8 @@ intel_modeset_check_state(struct drm_device *dev)
8172 enabled = true; 8165 enabled = true;
8173 if (encoder->connectors_active) 8166 if (encoder->connectors_active)
8174 active = true; 8167 active = true;
8168 if (encoder->get_config)
8169 encoder->get_config(encoder, &pipe_config);
8175 } 8170 }
8176 WARN(active != crtc->active, 8171 WARN(active != crtc->active,
8177 "crtc's computed active state doesn't match tracked active state " 8172 "crtc's computed active state doesn't match tracked active state "
@@ -8180,17 +8175,20 @@ intel_modeset_check_state(struct drm_device *dev)
8180 "crtc's computed enabled state doesn't match tracked enabled state " 8175 "crtc's computed enabled state doesn't match tracked enabled state "
8181 "(expected %i, found %i)\n", enabled, crtc->base.enabled); 8176 "(expected %i, found %i)\n", enabled, crtc->base.enabled);
8182 8177
8183 memset(&pipe_config, 0, sizeof(pipe_config));
8184 pipe_config.cpu_transcoder = crtc->config.cpu_transcoder;
8185 active = dev_priv->display.get_pipe_config(crtc, 8178 active = dev_priv->display.get_pipe_config(crtc,
8186 &pipe_config); 8179 &pipe_config);
8187 WARN(crtc->active != active, 8180 WARN(crtc->active != active,
8188 "crtc active state doesn't match with hw state " 8181 "crtc active state doesn't match with hw state "
8189 "(expected %i, found %i)\n", crtc->active, active); 8182 "(expected %i, found %i)\n", crtc->active, active);
8190 8183
8191 WARN(active && 8184 if (active &&
8192 !intel_pipe_config_compare(dev, &crtc->config, &pipe_config), 8185 !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
8193 "pipe state doesn't match!\n"); 8186 WARN(1, "pipe state doesn't match!\n");
8187 intel_dump_pipe_config(crtc, &pipe_config,
8188 "[hw state]");
8189 intel_dump_pipe_config(crtc, &crtc->config,
8190 "[sw state]");
8191 }
8194 } 8192 }
8195} 8193}
8196 8194
@@ -8230,6 +8228,8 @@ static int __intel_set_mode(struct drm_crtc *crtc,
8230 8228
8231 goto out; 8229 goto out;
8232 } 8230 }
8231 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
8232 "[modeset]");
8233 } 8233 }
8234 8234
8235 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) 8235 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
@@ -8244,12 +8244,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 8244 * to set it here already, even though we pass it down the call chain. 8244
8245 */ 8245 */
8246 if (modeset_pipes) { 8246 if (modeset_pipes) {
8247 enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder;
8248 crtc->mode = *mode; 8247 crtc->mode = *mode;
8249 /* mode_set/enable/disable functions rely on a correct pipe 8248 /* mode_set/enable/disable functions rely on a correct pipe
8250 * config. */ 8249 * config. */
8251 to_intel_crtc(crtc)->config = *pipe_config; 8250 to_intel_crtc(crtc)->config = *pipe_config;
8252 to_intel_crtc(crtc)->config.cpu_transcoder = tmp;
8253 } 8251 }
8254 8252
8255 /* Only after disabling all output pipelines that will be changed can we 8253 /* Only after disabling all output pipelines that will be changed can we
@@ -8588,12 +8586,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
8588 goto fail; 8586 goto fail;
8589 8587
8590 if (config->mode_changed) { 8588 if (config->mode_changed) {
8591 if (set->mode) {
8592 DRM_DEBUG_KMS("attempting to set mode from"
8593 " userspace\n");
8594 drm_mode_debug_printmodeline(set->mode);
8595 }
8596
8597 ret = intel_set_mode(set->crtc, set->mode, 8589 ret = intel_set_mode(set->crtc, set->mode,
8598 set->x, set->y, set->fb); 8590 set->x, set->y, set->fb);
8599 } else if (config->fb_changed) { 8591 } else if (config->fb_changed) {
@@ -8675,7 +8667,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
8675 /* Swap pipes & planes for FBC on pre-965 */ 8667 /* Swap pipes & planes for FBC on pre-965 */
8676 intel_crtc->pipe = pipe; 8668 intel_crtc->pipe = pipe;
8677 intel_crtc->plane = pipe; 8669 intel_crtc->plane = pipe;
8678 intel_crtc->config.cpu_transcoder = pipe;
8679 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 8670 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
8680 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 8671 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
8681 intel_crtc->plane = !pipe; 8672 intel_crtc->plane = !pipe;
@@ -9545,50 +9536,14 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9545{ 9536{
9546 struct drm_i915_private *dev_priv = dev->dev_private; 9537 struct drm_i915_private *dev_priv = dev->dev_private;
9547 enum pipe pipe; 9538 enum pipe pipe;
9548 u32 tmp;
9549 struct drm_plane *plane; 9539 struct drm_plane *plane;
9550 struct intel_crtc *crtc; 9540 struct intel_crtc *crtc;
9551 struct intel_encoder *encoder; 9541 struct intel_encoder *encoder;
9552 struct intel_connector *connector; 9542 struct intel_connector *connector;
9553 9543
9554 if (HAS_DDI(dev)) {
9555 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9556
9557 if (tmp & TRANS_DDI_FUNC_ENABLE) {
9558 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9559 case TRANS_DDI_EDP_INPUT_A_ON:
9560 case TRANS_DDI_EDP_INPUT_A_ONOFF:
9561 pipe = PIPE_A;
9562 break;
9563 case TRANS_DDI_EDP_INPUT_B_ONOFF:
9564 pipe = PIPE_B;
9565 break;
9566 case TRANS_DDI_EDP_INPUT_C_ONOFF:
9567 pipe = PIPE_C;
9568 break;
9569 default:
9570 /* A bogus value has been programmed, disable
9571 * the transcoder */
9572 WARN(1, "Bogus eDP source %08x\n", tmp);
9573 intel_ddi_disable_transcoder_func(dev_priv,
9574 TRANSCODER_EDP);
9575 goto setup_pipes;
9576 }
9577
9578 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9579 crtc->config.cpu_transcoder = TRANSCODER_EDP;
9580
9581 DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
9582 pipe_name(pipe));
9583 }
9584 }
9585
9586setup_pipes:
9587 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 9544 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9588 base.head) { 9545 base.head) {
9589 enum transcoder tmp = crtc->config.cpu_transcoder;
9590 memset(&crtc->config, 0, sizeof(crtc->config)); 9546 memset(&crtc->config, 0, sizeof(crtc->config));
9591 crtc->config.cpu_transcoder = tmp;
9592 9547
9593 crtc->active = dev_priv->display.get_pipe_config(crtc, 9548 crtc->active = dev_priv->display.get_pipe_config(crtc,
9594 &crtc->config); 9549 &crtc->config);
@@ -9608,8 +9563,10 @@ setup_pipes:
9608 pipe = 0; 9563 pipe = 0;
9609 9564
9610 if (encoder->get_hw_state(encoder, &pipe)) { 9565 if (encoder->get_hw_state(encoder, &pipe)) {
9611 encoder->base.crtc = 9566 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9612 dev_priv->pipe_to_crtc_mapping[pipe]; 9567 encoder->base.crtc = &crtc->base;
9568 if (encoder->get_config)
9569 encoder->get_config(encoder, &crtc->config);
9613 } else { 9570 } else {
9614 encoder->base.crtc = NULL; 9571 encoder->base.crtc = NULL;
9615 } 9572 }
@@ -9647,6 +9604,7 @@ setup_pipes:
9647 for_each_pipe(pipe) { 9604 for_each_pipe(pipe) {
9648 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9605 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9649 intel_sanitize_crtc(crtc); 9606 intel_sanitize_crtc(crtc);
9607 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
9650 } 9608 }
9651 9609
9652 if (force_restore) { 9610 if (force_restore) {
@@ -9867,48 +9825,50 @@ intel_display_capture_error_state(struct drm_device *dev)
9867 return error; 9825 return error;
9868} 9826}
9869 9827
9828#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
9829
9870void 9830void
9871intel_display_print_error_state(struct seq_file *m, 9831intel_display_print_error_state(struct drm_i915_error_state_buf *m,
9872 struct drm_device *dev, 9832 struct drm_device *dev,
9873 struct intel_display_error_state *error) 9833 struct intel_display_error_state *error)
9874{ 9834{
9875 int i; 9835 int i;
9876 9836
9877 seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); 9837 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
9878 if (HAS_POWER_WELL(dev)) 9838 if (HAS_POWER_WELL(dev))
9879 seq_printf(m, "PWR_WELL_CTL2: %08x\n", 9839 err_printf(m, "PWR_WELL_CTL2: %08x\n",
9880 error->power_well_driver); 9840 error->power_well_driver);
9881 for_each_pipe(i) { 9841 for_each_pipe(i) {
9882 seq_printf(m, "Pipe [%d]:\n", i); 9842 err_printf(m, "Pipe [%d]:\n", i);
9883 seq_printf(m, " CPU transcoder: %c\n", 9843 err_printf(m, " CPU transcoder: %c\n",
9884 transcoder_name(error->pipe[i].cpu_transcoder)); 9844 transcoder_name(error->pipe[i].cpu_transcoder));
9885 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); 9845 err_printf(m, " CONF: %08x\n", error->pipe[i].conf);
9886 seq_printf(m, " SRC: %08x\n", error->pipe[i].source); 9846 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
9887 seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); 9847 err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
9888 seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); 9848 err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
9889 seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); 9849 err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
9890 seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); 9850 err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
9891 seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); 9851 err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
9892 seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); 9852 err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
9893 9853
9894 seq_printf(m, "Plane [%d]:\n", i); 9854 err_printf(m, "Plane [%d]:\n", i);
9895 seq_printf(m, " CNTR: %08x\n", error->plane[i].control); 9855 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
9896 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 9856 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
9897 if (INTEL_INFO(dev)->gen <= 3) { 9857 if (INTEL_INFO(dev)->gen <= 3) {
9898 seq_printf(m, " SIZE: %08x\n", error->plane[i].size); 9858 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
9899 seq_printf(m, " POS: %08x\n", error->plane[i].pos); 9859 err_printf(m, " POS: %08x\n", error->plane[i].pos);
9900 } 9860 }
9901 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 9861 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
9902 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); 9862 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
9903 if (INTEL_INFO(dev)->gen >= 4) { 9863 if (INTEL_INFO(dev)->gen >= 4) {
9904 seq_printf(m, " SURF: %08x\n", error->plane[i].surface); 9864 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
9905 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 9865 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
9906 } 9866 }
9907 9867
9908 seq_printf(m, "Cursor [%d]:\n", i); 9868 err_printf(m, "Cursor [%d]:\n", i);
9909 seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); 9869 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
9910 seq_printf(m, " POS: %08x\n", error->cursor[i].position); 9870 err_printf(m, " POS: %08x\n", error->cursor[i].position);
9911 seq_printf(m, " BASE: %08x\n", error->cursor[i].base); 9871 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
9912 } 9872 }
9913} 9873}
9914#endif 9874#endif
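
[note] The err_printf() macro defined at the top of this hunk routes the whole dump through the error-state string buffer instead of a seq_file, so the body converts by a one-for-one seq_printf -> err_printf substitution. The assumed shape of the target (defined with the error-state code, not in this diff):

	/* Assumption: printf-style append into the error-state buffer. */
	void i915_error_printf(struct drm_i915_error_state_buf *e,
			       const char *f, ...);
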
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index bfc8664be8dc..91a31b3b9829 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,22 +59,6 @@ static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
59 return intel_dig_port->base.base.dev; 59 return intel_dig_port->base.base.dev;
60} 60}
61 61
62/**
63 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
64 * @intel_dp: DP struct
65 *
66 * Returns true if the given DP struct corresponds to a CPU eDP port.
67 */
68static bool is_cpu_edp(struct intel_dp *intel_dp)
69{
70 struct drm_device *dev = intel_dp_to_dev(intel_dp);
71 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
72 enum port port = intel_dig_port->port;
73
74 return is_edp(intel_dp) &&
75 (port == PORT_A || (port == PORT_C && IS_VALLEYVIEW(dev)));
76}
77
78static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 62static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
79{ 63{
80 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 64 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
@@ -317,11 +301,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
317 * Note that PCH attached eDP panels should use a 125MHz input 301 * Note that PCH attached eDP panels should use a 125MHz input
318 * clock divider. 302 * clock divider.
319 */ 303 */
320 if (is_cpu_edp(intel_dp)) { 304 if (IS_VALLEYVIEW(dev)) {
305 aux_clock_divider = 100;
306 } else if (intel_dig_port->port == PORT_A) {
321 if (HAS_DDI(dev)) 307 if (HAS_DDI(dev))
322 aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; 308 aux_clock_divider = DIV_ROUND_CLOSEST(
323 else if (IS_VALLEYVIEW(dev)) 309 intel_ddi_get_cdclk_freq(dev_priv), 2000);
324 aux_clock_divider = 100;
325 else if (IS_GEN6(dev) || IS_GEN7(dev)) 310 else if (IS_GEN6(dev) || IS_GEN7(dev))
326 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ 311 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
327 else 312 else
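
[note] The DDI branch now derives the AUX clock divider by rounding rather than a blind shift: the AUX channel wants a roughly 2 MHz tick, so, assuming intel_ddi_get_cdclk_freq() reports kHz, the divider is cdclk / 2000 rounded to nearest. A worked example under that assumption:

	/* Assuming a 337,500 kHz cdclk:
	 * DIV_ROUND_CLOSEST(337500, 2000) == 169, where truncating
	 * division would give 168. */
	u32 aux_div = DIV_ROUND_CLOSEST(337500, 2000);
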
@@ -684,6 +669,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
684 struct drm_i915_private *dev_priv = dev->dev_private; 669 struct drm_i915_private *dev_priv = dev->dev_private;
685 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 670 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
686 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 671 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
672 enum port port = dp_to_dig_port(intel_dp)->port;
687 struct intel_crtc *intel_crtc = encoder->new_crtc; 673 struct intel_crtc *intel_crtc = encoder->new_crtc;
688 struct intel_connector *intel_connector = intel_dp->attached_connector; 674 struct intel_connector *intel_connector = intel_dp->attached_connector;
689 int lane_count, clock; 675 int lane_count, clock;
@@ -693,7 +679,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
693 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 679 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
694 int target_clock, link_avail, link_clock; 680 int target_clock, link_avail, link_clock;
695 681
696 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp)) 682 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
697 pipe_config->has_pch_encoder = true; 683 pipe_config->has_pch_encoder = true;
698 684
699 pipe_config->has_dp_encoder = true; 685 pipe_config->has_dp_encoder = true;
@@ -827,6 +813,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
827 struct drm_device *dev = encoder->dev; 813 struct drm_device *dev = encoder->dev;
828 struct drm_i915_private *dev_priv = dev->dev_private; 814 struct drm_i915_private *dev_priv = dev->dev_private;
829 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 815 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
816 enum port port = dp_to_dig_port(intel_dp)->port;
830 struct drm_crtc *crtc = encoder->crtc; 817 struct drm_crtc *crtc = encoder->crtc;
831 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 818 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
832 819
@@ -867,7 +854,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
867 854
868 /* Split out the IBX/CPU vs CPT settings */ 855 /* Split out the IBX/CPU vs CPT settings */
869 856
870 if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 857 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
871 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 858 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
872 intel_dp->DP |= DP_SYNC_HS_HIGH; 859 intel_dp->DP |= DP_SYNC_HS_HIGH;
873 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 860 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -884,7 +871,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
884 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 871 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
885 else 872 else
886 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 873 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
887 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 874 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
888 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) 875 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
889 intel_dp->DP |= intel_dp->color_range; 876 intel_dp->DP |= intel_dp->color_range;
890 877
@@ -900,7 +887,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
900 if (intel_crtc->pipe == 1) 887 if (intel_crtc->pipe == 1)
901 intel_dp->DP |= DP_PIPEB_SELECT; 888 intel_dp->DP |= DP_PIPEB_SELECT;
902 889
903 if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 890 if (port == PORT_A && !IS_VALLEYVIEW(dev)) {
 904 /* don't miss out on the required eDP settings */ 891
905 if (adjusted_mode->clock < 200000) 892 if (adjusted_mode->clock < 200000)
906 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 893 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
@@ -911,7 +898,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
911 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 898 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
912 } 899 }
913 900
914 if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) 901 if (port == PORT_A && !IS_VALLEYVIEW(dev))
915 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 902 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
916} 903}
917 904
@@ -1301,6 +1288,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1301 enum pipe *pipe) 1288 enum pipe *pipe)
1302{ 1289{
1303 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1290 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1291 enum port port = dp_to_dig_port(intel_dp)->port;
1304 struct drm_device *dev = encoder->base.dev; 1292 struct drm_device *dev = encoder->base.dev;
1305 struct drm_i915_private *dev_priv = dev->dev_private; 1293 struct drm_i915_private *dev_priv = dev->dev_private;
1306 u32 tmp = I915_READ(intel_dp->output_reg); 1294 u32 tmp = I915_READ(intel_dp->output_reg);
@@ -1308,9 +1296,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1308 if (!(tmp & DP_PORT_EN)) 1296 if (!(tmp & DP_PORT_EN))
1309 return false; 1297 return false;
1310 1298
1311 if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 1299 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1312 *pipe = PORT_TO_PIPE_CPT(tmp); 1300 *pipe = PORT_TO_PIPE_CPT(tmp);
1313 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 1301 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1314 *pipe = PORT_TO_PIPE(tmp); 1302 *pipe = PORT_TO_PIPE(tmp);
1315 } else { 1303 } else {
1316 u32 trans_sel; 1304 u32 trans_sel;
@@ -1346,9 +1334,33 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1346 return true; 1334 return true;
1347} 1335}
1348 1336
1337static void intel_dp_get_config(struct intel_encoder *encoder,
1338 struct intel_crtc_config *pipe_config)
1339{
1340 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1341 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1342 u32 tmp, flags = 0;
1343
1344 tmp = I915_READ(intel_dp->output_reg);
1345
1346 if (tmp & DP_SYNC_HS_HIGH)
1347 flags |= DRM_MODE_FLAG_PHSYNC;
1348 else
1349 flags |= DRM_MODE_FLAG_NHSYNC;
1350
1351 if (tmp & DP_SYNC_VS_HIGH)
1352 flags |= DRM_MODE_FLAG_PVSYNC;
1353 else
1354 flags |= DRM_MODE_FLAG_NVSYNC;
1355
1356 pipe_config->adjusted_mode.flags |= flags;
1357}
1358
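
[note] intel_dp_get_config() reconstructs only the sync-polarity flags for now; the same decode pattern repeats in the DVO hook later in this diff. Factored as a hypothetical helper (not in the driver, shown only to make the shared shape explicit):

	/* Hypothetical: map a port register's polarity bits to mode flags. */
	static u32 decode_sync_flags(u32 reg, u32 hs_high_bit, u32 vs_high_bit)
	{
		u32 flags = 0;

		flags |= (reg & hs_high_bit) ? DRM_MODE_FLAG_PHSYNC :
					       DRM_MODE_FLAG_NHSYNC;
		flags |= (reg & vs_high_bit) ? DRM_MODE_FLAG_PVSYNC :
					       DRM_MODE_FLAG_NVSYNC;
		return flags;
	}

These are exactly the flags the new PIPE_CONF_CHECK_FLAGS entries in intel_pipe_config_compare() cross-check against the software state.
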
1349static void intel_disable_dp(struct intel_encoder *encoder) 1359static void intel_disable_dp(struct intel_encoder *encoder)
1350{ 1360{
1351 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1361 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1362 enum port port = dp_to_dig_port(intel_dp)->port;
1363 struct drm_device *dev = encoder->base.dev;
1352 1364
1353 /* Make sure the panel is off before trying to change the mode. But also 1365 /* Make sure the panel is off before trying to change the mode. But also
1354 * ensure that we have vdd while we switch off the panel. */ 1366 * ensure that we have vdd while we switch off the panel. */
@@ -1358,16 +1370,17 @@ static void intel_disable_dp(struct intel_encoder *encoder)
1358 ironlake_edp_panel_off(intel_dp); 1370 ironlake_edp_panel_off(intel_dp);
1359 1371
 1360 /* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */ 1372
1361 if (!is_cpu_edp(intel_dp)) 1373 if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
1362 intel_dp_link_down(intel_dp); 1374 intel_dp_link_down(intel_dp);
1363} 1375}
1364 1376
1365static void intel_post_disable_dp(struct intel_encoder *encoder) 1377static void intel_post_disable_dp(struct intel_encoder *encoder)
1366{ 1378{
1367 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1379 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1380 enum port port = dp_to_dig_port(intel_dp)->port;
1368 struct drm_device *dev = encoder->base.dev; 1381 struct drm_device *dev = encoder->base.dev;
1369 1382
1370 if (is_cpu_edp(intel_dp)) { 1383 if (port == PORT_A || IS_VALLEYVIEW(dev)) {
1371 intel_dp_link_down(intel_dp); 1384 intel_dp_link_down(intel_dp);
1372 if (!IS_VALLEYVIEW(dev)) 1385 if (!IS_VALLEYVIEW(dev))
1373 ironlake_edp_pll_off(intel_dp); 1386 ironlake_edp_pll_off(intel_dp);
@@ -1405,34 +1418,32 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1405static void intel_pre_enable_dp(struct intel_encoder *encoder) 1418static void intel_pre_enable_dp(struct intel_encoder *encoder)
1406{ 1419{
1407 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1420 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1421 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1408 struct drm_device *dev = encoder->base.dev; 1422 struct drm_device *dev = encoder->base.dev;
1409 struct drm_i915_private *dev_priv = dev->dev_private; 1423 struct drm_i915_private *dev_priv = dev->dev_private;
1410 1424
1411 if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) 1425 if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
1412 ironlake_edp_pll_on(intel_dp); 1426 ironlake_edp_pll_on(intel_dp);
1413 1427
1414 if (IS_VALLEYVIEW(dev)) { 1428 if (IS_VALLEYVIEW(dev)) {
1415 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1416 struct intel_crtc *intel_crtc = 1429 struct intel_crtc *intel_crtc =
1417 to_intel_crtc(encoder->base.crtc); 1430 to_intel_crtc(encoder->base.crtc);
1418 int port = vlv_dport_to_channel(dport); 1431 int port = vlv_dport_to_channel(dport);
1419 int pipe = intel_crtc->pipe; 1432 int pipe = intel_crtc->pipe;
1420 u32 val; 1433 u32 val;
1421 1434
1422 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); 1435 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
1423
1424 val = intel_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
1425 val = 0; 1436 val = 0;
1426 if (pipe) 1437 if (pipe)
1427 val |= (1<<21); 1438 val |= (1<<21);
1428 else 1439 else
1429 val &= ~(1<<21); 1440 val &= ~(1<<21);
1430 val |= 0x001000c4; 1441 val |= 0x001000c4;
1431 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); 1442 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
1432 1443
1433 intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 1444 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
1434 0x00760018); 1445 0x00760018);
1435 intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 1446 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
1436 0x00400888); 1447 0x00400888);
1437 } 1448 }
1438} 1449}
@@ -1447,22 +1458,20 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
1447 if (!IS_VALLEYVIEW(dev)) 1458 if (!IS_VALLEYVIEW(dev))
1448 return; 1459 return;
1449 1460
1450 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1451
1452 /* Program Tx lane resets to default */ 1461 /* Program Tx lane resets to default */
1453 intel_dpio_write(dev_priv, DPIO_PCS_TX(port), 1462 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
1454 DPIO_PCS_TX_LANE2_RESET | 1463 DPIO_PCS_TX_LANE2_RESET |
1455 DPIO_PCS_TX_LANE1_RESET); 1464 DPIO_PCS_TX_LANE1_RESET);
1456 intel_dpio_write(dev_priv, DPIO_PCS_CLK(port), 1465 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
1457 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1466 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1458 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1467 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1459 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1468 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1460 DPIO_PCS_CLK_SOFT_RESET); 1469 DPIO_PCS_CLK_SOFT_RESET);
1461 1470
1462 /* Fix up inter-pair skew failure */ 1471 /* Fix up inter-pair skew failure */
1463 intel_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); 1472 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
1464 intel_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); 1473 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
1465 intel_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); 1474 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
1466} 1475}
1467 1476
1468/* 1477/*
@@ -1524,12 +1533,13 @@ static uint8_t
1524intel_dp_voltage_max(struct intel_dp *intel_dp) 1533intel_dp_voltage_max(struct intel_dp *intel_dp)
1525{ 1534{
1526 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1535 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1536 enum port port = dp_to_dig_port(intel_dp)->port;
1527 1537
1528 if (IS_VALLEYVIEW(dev)) 1538 if (IS_VALLEYVIEW(dev))
1529 return DP_TRAIN_VOLTAGE_SWING_1200; 1539 return DP_TRAIN_VOLTAGE_SWING_1200;
1530 else if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) 1540 else if (IS_GEN7(dev) && port == PORT_A)
1531 return DP_TRAIN_VOLTAGE_SWING_800; 1541 return DP_TRAIN_VOLTAGE_SWING_800;
1532 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1542 else if (HAS_PCH_CPT(dev) && port != PORT_A)
1533 return DP_TRAIN_VOLTAGE_SWING_1200; 1543 return DP_TRAIN_VOLTAGE_SWING_1200;
1534 else 1544 else
1535 return DP_TRAIN_VOLTAGE_SWING_800; 1545 return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1539,6 +1549,7 @@ static uint8_t
1539intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 1549intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1540{ 1550{
1541 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1551 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1552 enum port port = dp_to_dig_port(intel_dp)->port;
1542 1553
1543 if (HAS_DDI(dev)) { 1554 if (HAS_DDI(dev)) {
1544 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1555 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
@@ -1564,7 +1575,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1564 default: 1575 default:
1565 return DP_TRAIN_PRE_EMPHASIS_0; 1576 return DP_TRAIN_PRE_EMPHASIS_0;
1566 } 1577 }
1567 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1578 } else if (IS_GEN7(dev) && port == PORT_A) {
1568 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1579 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1569 case DP_TRAIN_VOLTAGE_SWING_400: 1580 case DP_TRAIN_VOLTAGE_SWING_400:
1570 return DP_TRAIN_PRE_EMPHASIS_6; 1581 return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1599,8 +1610,6 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1599 uint8_t train_set = intel_dp->train_set[0]; 1610 uint8_t train_set = intel_dp->train_set[0];
1600 int port = vlv_dport_to_channel(dport); 1611 int port = vlv_dport_to_channel(dport);
1601 1612
1602 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1603
1604 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 1613 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1605 case DP_TRAIN_PRE_EMPHASIS_0: 1614 case DP_TRAIN_PRE_EMPHASIS_0:
1606 preemph_reg_value = 0x0004000; 1615 preemph_reg_value = 0x0004000;
@@ -1674,14 +1683,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1674 return 0; 1683 return 0;
1675 } 1684 }
1676 1685
1677 intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000); 1686 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
1678 intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value); 1687 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
1679 intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), 1688 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
1680 uniqtranscale_reg_value); 1689 uniqtranscale_reg_value);
1681 intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040); 1690 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
1682 intel_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); 1691 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
1683 intel_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); 1692 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
1684 intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000); 1693 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
1685 1694
1686 return 0; 1695 return 0;
1687} 1696}
@@ -1853,6 +1862,7 @@ static void
1853intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) 1862intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
1854{ 1863{
1855 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1864 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1865 enum port port = intel_dig_port->port;
1856 struct drm_device *dev = intel_dig_port->base.base.dev; 1866 struct drm_device *dev = intel_dig_port->base.base.dev;
1857 uint32_t signal_levels, mask; 1867 uint32_t signal_levels, mask;
1858 uint8_t train_set = intel_dp->train_set[0]; 1868 uint8_t train_set = intel_dp->train_set[0];
@@ -1863,10 +1873,10 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
1863 } else if (IS_VALLEYVIEW(dev)) { 1873 } else if (IS_VALLEYVIEW(dev)) {
1864 signal_levels = intel_vlv_signal_levels(intel_dp); 1874 signal_levels = intel_vlv_signal_levels(intel_dp);
1865 mask = 0; 1875 mask = 0;
1866 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1876 } else if (IS_GEN7(dev) && port == PORT_A) {
1867 signal_levels = intel_gen7_edp_signal_levels(train_set); 1877 signal_levels = intel_gen7_edp_signal_levels(train_set);
1868 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 1878 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
1869 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1879 } else if (IS_GEN6(dev) && port == PORT_A) {
1870 signal_levels = intel_gen6_edp_signal_levels(train_set); 1880 signal_levels = intel_gen6_edp_signal_levels(train_set);
1871 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 1881 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
1872 } else { 1882 } else {
@@ -1916,8 +1926,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1916 } 1926 }
1917 I915_WRITE(DP_TP_CTL(port), temp); 1927 I915_WRITE(DP_TP_CTL(port), temp);
1918 1928
1919 } else if (HAS_PCH_CPT(dev) && 1929 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
1920 (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1921 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 1930 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
1922 1931
1923 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1932 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -2168,6 +2177,7 @@ static void
2168intel_dp_link_down(struct intel_dp *intel_dp) 2177intel_dp_link_down(struct intel_dp *intel_dp)
2169{ 2178{
2170 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2179 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2180 enum port port = intel_dig_port->port;
2171 struct drm_device *dev = intel_dig_port->base.base.dev; 2181 struct drm_device *dev = intel_dig_port->base.base.dev;
2172 struct drm_i915_private *dev_priv = dev->dev_private; 2182 struct drm_i915_private *dev_priv = dev->dev_private;
2173 struct intel_crtc *intel_crtc = 2183 struct intel_crtc *intel_crtc =
@@ -2197,7 +2207,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2197 2207
2198 DRM_DEBUG_KMS("\n"); 2208 DRM_DEBUG_KMS("\n");
2199 2209
2200 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 2210 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2201 DP &= ~DP_LINK_TRAIN_MASK_CPT; 2211 DP &= ~DP_LINK_TRAIN_MASK_CPT;
2202 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 2212 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
2203 } else { 2213 } else {
@@ -2488,11 +2498,10 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2488 return NULL; 2498 return NULL;
2489 2499
2490 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; 2500 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2491 edid = kmalloc(size, GFP_KERNEL); 2501 edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
2492 if (!edid) 2502 if (!edid)
2493 return NULL; 2503 return NULL;
2494 2504
2495 memcpy(edid, intel_connector->edid, size);
2496 return edid; 2505 return edid;
2497 } 2506 }
2498 2507
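
[note] kmemdup() replaces the open-coded allocate-and-copy pair; it is the stock helper from <linux/string.h>:

	/* Duplicate src into a freshly allocated buffer, or return NULL. */
	void *kmemdup(const void *src, size_t len, gfp_t gfp);

Behavior is unchanged, including the NULL return on allocation failure.
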
@@ -2925,9 +2934,6 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
2925 pp_div_reg = PIPEA_PP_DIVISOR; 2934 pp_div_reg = PIPEA_PP_DIVISOR;
2926 } 2935 }
2927 2936
2928 if (IS_VALLEYVIEW(dev))
2929 port_sel = I915_READ(pp_on_reg) & 0xc0000000;
2930
2931 /* And finally store the new values in the power sequencer. */ 2937 /* And finally store the new values in the power sequencer. */
2932 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 2938 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
2933 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 2939 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
@@ -2941,8 +2947,10 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
2941 2947
2942 /* Haswell doesn't have any port selection bits for the panel 2948 /* Haswell doesn't have any port selection bits for the panel
2943 * power sequencer any more. */ 2949 * power sequencer any more. */
2944 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 2950 if (IS_VALLEYVIEW(dev)) {
2945 if (is_cpu_edp(intel_dp)) 2951 port_sel = I915_READ(pp_on_reg) & 0xc0000000;
2952 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
2953 if (dp_to_dig_port(intel_dp)->port == PORT_A)
2946 port_sel = PANEL_POWER_PORT_DP_A; 2954 port_sel = PANEL_POWER_PORT_DP_A;
2947 else 2955 else
2948 port_sel = PANEL_POWER_PORT_DP_D; 2956 port_sel = PANEL_POWER_PORT_DP_D;
@@ -3184,6 +3192,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3184 intel_encoder->disable = intel_disable_dp; 3192 intel_encoder->disable = intel_disable_dp;
3185 intel_encoder->post_disable = intel_post_disable_dp; 3193 intel_encoder->post_disable = intel_post_disable_dp;
3186 intel_encoder->get_hw_state = intel_dp_get_hw_state; 3194 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3195 intel_encoder->get_config = intel_dp_get_config;
3187 if (IS_VALLEYVIEW(dev)) 3196 if (IS_VALLEYVIEW(dev))
3188 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable; 3197 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
3189 3198
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9b0af7e27c82..fdf6303be0a9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -139,6 +139,10 @@ struct intel_encoder {
139 * the encoder is active. If the encoder is enabled it also set the pipe 139 * the encoder is active. If the encoder is enabled it also set the pipe
140 * it is connected to in the pipe parameter. */ 140 * it is connected to in the pipe parameter. */
141 bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); 141 bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
142 /* Reconstructs the equivalent mode flags for the current hardware
143 * state. */
144 void (*get_config)(struct intel_encoder *,
145 struct intel_crtc_config *pipe_config);
142 int crtc_mask; 146 int crtc_mask;
143 enum hpd_pin hpd_pin; 147 enum hpd_pin hpd_pin;
144}; 148};
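
[note] Encoders opt in by filling the new vtable slot next to get_hw_state, as the DP and DVO hunks in this diff do:

	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;

Encoders without a hook are simply skipped by the `if (encoder->get_config)` checks in the state checker and in setup_hw_state.
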
@@ -264,6 +268,8 @@ struct intel_crtc_config {
264 /* FDI configuration, only valid if has_pch_encoder is set. */ 268 /* FDI configuration, only valid if has_pch_encoder is set. */
265 int fdi_lanes; 269 int fdi_lanes;
266 struct intel_link_m_n fdi_m_n; 270 struct intel_link_m_n fdi_m_n;
271
272 bool ips_enabled;
267}; 273};
268 274
269struct intel_crtc { 275struct intel_crtc {
@@ -322,6 +328,18 @@ struct intel_plane {
322 unsigned int crtc_w, crtc_h; 328 unsigned int crtc_w, crtc_h;
323 uint32_t src_x, src_y; 329 uint32_t src_x, src_y;
324 uint32_t src_w, src_h; 330 uint32_t src_w, src_h;
331
332 /* Since we need to change the watermarks before/after
333 * enabling/disabling the planes, we need to store the parameters here
334 * as the other pieces of the struct may not reflect the values we want
335 * for the watermark calculations. Currently only Haswell uses this.
336 */
337 struct {
338 bool enable;
339 uint8_t bytes_per_pixel;
340 uint32_t horiz_pixels;
341 } wm;
342
325 void (*update_plane)(struct drm_plane *plane, 343 void (*update_plane)(struct drm_plane *plane,
326 struct drm_framebuffer *fb, 344 struct drm_framebuffer *fb,
327 struct drm_i915_gem_object *obj, 345 struct drm_i915_gem_object *obj,
@@ -727,9 +745,7 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port);
727extern void intel_update_watermarks(struct drm_device *dev); 745extern void intel_update_watermarks(struct drm_device *dev);
728extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 746extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
729 uint32_t sprite_width, 747 uint32_t sprite_width,
730 int pixel_size); 748 int pixel_size, bool enable);
731extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
732 struct drm_display_mode *mode);
733 749
734extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, 750extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
735 unsigned int tiling_mode, 751 unsigned int tiling_mode,
@@ -741,10 +757,6 @@ extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
741extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 757extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
742 struct drm_file *file_priv); 758 struct drm_file *file_priv);
743 759
744extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
745extern void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
746 u32 val);
747
748/* Power-related functions, located in intel_pm.c */ 760/* Power-related functions, located in intel_pm.c */
749extern void intel_init_pm(struct drm_device *dev); 761extern void intel_init_pm(struct drm_device *dev);
750/* FBC */ 762/* FBC */
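The new get_config hook reconstructs mode flags from the hardware so the driver can cross-check decoded hardware state against its software state. The consumer side is not part of this excerpt; a minimal sketch of how a state checker would presumably invoke it, with illustrative names only:

	static void check_encoder_state(struct intel_encoder *encoder,
					struct intel_crtc_config *sw_config)
	{
		struct intel_crtc_config hw_config = { };

		if (!encoder->get_config)
			return;

		encoder->get_config(encoder, &hw_config);
		WARN(hw_config.adjusted_mode.flags != sw_config->adjusted_mode.flags,
		     "encoder flags mismatch: hw 0x%x, sw 0x%x\n",
		     hw_config.adjusted_mode.flags,
		     sw_config->adjusted_mode.flags);
	}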
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 2c0be924e9a9..eb2020eb2b7e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -136,6 +136,26 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
136 return true; 136 return true;
137} 137}
138 138
139static void intel_dvo_get_config(struct intel_encoder *encoder,
140 struct intel_crtc_config *pipe_config)
141{
142 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
143 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
144 u32 tmp, flags = 0;
145
146 tmp = I915_READ(intel_dvo->dev.dvo_reg);
147 if (tmp & DVO_HSYNC_ACTIVE_HIGH)
148 flags |= DRM_MODE_FLAG_PHSYNC;
149 else
150 flags |= DRM_MODE_FLAG_NHSYNC;
151 if (tmp & DVO_VSYNC_ACTIVE_HIGH)
152 flags |= DRM_MODE_FLAG_PVSYNC;
153 else
154 flags |= DRM_MODE_FLAG_NVSYNC;
155
156 pipe_config->adjusted_mode.flags |= flags;
157}
158
139static void intel_disable_dvo(struct intel_encoder *encoder) 159static void intel_disable_dvo(struct intel_encoder *encoder)
140{ 160{
141 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 161 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -160,6 +180,7 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
160 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 180 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
161} 181}
162 182
183/* Special dpms function to support cloning between dvo/sdvo/crt. */
163static void intel_dvo_dpms(struct drm_connector *connector, int mode) 184static void intel_dvo_dpms(struct drm_connector *connector, int mode)
164{ 185{
165 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 186 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
@@ -181,6 +202,8 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
181 return; 202 return;
182 } 203 }
183 204
205 /* We call connector dpms manually below in case pipe dpms doesn't
206 * change due to cloning. */
184 if (mode == DRM_MODE_DPMS_ON) { 207 if (mode == DRM_MODE_DPMS_ON) {
185 intel_dvo->base.connectors_active = true; 208 intel_dvo->base.connectors_active = true;
186 209
@@ -447,6 +470,7 @@ void intel_dvo_init(struct drm_device *dev)
447 intel_encoder->disable = intel_disable_dvo; 470 intel_encoder->disable = intel_disable_dvo;
448 intel_encoder->enable = intel_enable_dvo; 471 intel_encoder->enable = intel_enable_dvo;
449 intel_encoder->get_hw_state = intel_dvo_get_hw_state; 472 intel_encoder->get_hw_state = intel_dvo_get_hw_state;
473 intel_encoder->get_config = intel_dvo_get_config;
450 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; 474 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
451 475
452 /* Now, try to find a controller */ 476 /* Now, try to find a controller */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2b727f0d201f..8062a92e6e80 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -658,6 +658,28 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
658 return true; 658 return true;
659} 659}
660 660
661static void intel_hdmi_get_config(struct intel_encoder *encoder,
662 struct intel_crtc_config *pipe_config)
663{
664 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
665 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
666 u32 tmp, flags = 0;
667
668 tmp = I915_READ(intel_hdmi->hdmi_reg);
669
670 if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
671 flags |= DRM_MODE_FLAG_PHSYNC;
672 else
673 flags |= DRM_MODE_FLAG_NHSYNC;
674
675 if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
676 flags |= DRM_MODE_FLAG_PVSYNC;
677 else
678 flags |= DRM_MODE_FLAG_NVSYNC;
679
680 pipe_config->adjusted_mode.flags |= flags;
681}
682
661static void intel_enable_hdmi(struct intel_encoder *encoder) 683static void intel_enable_hdmi(struct intel_encoder *encoder)
662{ 684{
663 struct drm_device *dev = encoder->base.dev; 685 struct drm_device *dev = encoder->base.dev;
@@ -996,38 +1018,36 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
996 if (!IS_VALLEYVIEW(dev)) 1018 if (!IS_VALLEYVIEW(dev))
997 return; 1019 return;
998 1020
999 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1000
1001 /* Enable clock channels for this port */ 1021 /* Enable clock channels for this port */
1002 val = intel_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); 1022 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
1003 val = 0; 1023 val = 0;
1004 if (pipe) 1024 if (pipe)
1005 val |= (1<<21); 1025 val |= (1<<21);
1006 else 1026 else
1007 val &= ~(1<<21); 1027 val &= ~(1<<21);
1008 val |= 0x001000c4; 1028 val |= 0x001000c4;
1009 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); 1029 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
1010 1030
1011 /* HDMI 1.0V-2dB */ 1031 /* HDMI 1.0V-2dB */
1012 intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0); 1032 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
1013 intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), 1033 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
1014 0x2b245f5f); 1034 0x2b245f5f);
1015 intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), 1035 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
1016 0x5578b83a); 1036 0x5578b83a);
1017 intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 1037 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
1018 0x0c782040); 1038 0x0c782040);
1019 intel_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port), 1039 vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
1020 0x2b247878); 1040 0x2b247878);
1021 intel_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); 1041 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
1022 intel_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), 1042 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
1023 0x00002000); 1043 0x00002000);
1024 intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 1044 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
1025 DPIO_TX_OCALINIT_EN); 1045 DPIO_TX_OCALINIT_EN);
1026 1046
1027 /* Program lane clock */ 1047 /* Program lane clock */
1028 intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 1048 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
1029 0x00760018); 1049 0x00760018);
1030 intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 1050 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
1031 0x00400888); 1051 0x00400888);
1032} 1052}
1033 1053
@@ -1041,26 +1061,24 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1041 if (!IS_VALLEYVIEW(dev)) 1061 if (!IS_VALLEYVIEW(dev))
1042 return; 1062 return;
1043 1063
1044 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1045
1046 /* Program Tx lane resets to default */ 1064 /* Program Tx lane resets to default */
1047 intel_dpio_write(dev_priv, DPIO_PCS_TX(port), 1065 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
1048 DPIO_PCS_TX_LANE2_RESET | 1066 DPIO_PCS_TX_LANE2_RESET |
1049 DPIO_PCS_TX_LANE1_RESET); 1067 DPIO_PCS_TX_LANE1_RESET);
1050 intel_dpio_write(dev_priv, DPIO_PCS_CLK(port), 1068 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
1051 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1069 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1052 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1070 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1053 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1071 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1054 DPIO_PCS_CLK_SOFT_RESET); 1072 DPIO_PCS_CLK_SOFT_RESET);
1055 1073
1056 /* Fix up inter-pair skew failure */ 1074 /* Fix up inter-pair skew failure */
1057 intel_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); 1075 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
1058 intel_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); 1076 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
1059 intel_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); 1077 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
1060 1078
1061 intel_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), 1079 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
1062 0x00002000); 1080 0x00002000);
1063 intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 1081 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
1064 DPIO_TX_OCALINIT_EN); 1082 DPIO_TX_OCALINIT_EN);
1065} 1083}
1066 1084
@@ -1072,8 +1090,8 @@ static void intel_hdmi_post_disable(struct intel_encoder *encoder)
1072 1090
1073 /* Reset lanes to avoid HDMI flicker (VLV w/a) */ 1091 /* Reset lanes to avoid HDMI flicker (VLV w/a) */
1074 mutex_lock(&dev_priv->dpio_lock); 1092 mutex_lock(&dev_priv->dpio_lock);
1075 intel_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000); 1093 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
1076 intel_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060); 1094 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
1077 mutex_unlock(&dev_priv->dpio_lock); 1095 mutex_unlock(&dev_priv->dpio_lock);
1078} 1096}
1079 1097
@@ -1216,6 +1234,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1216 intel_encoder->enable = intel_enable_hdmi; 1234 intel_encoder->enable = intel_enable_hdmi;
1217 intel_encoder->disable = intel_disable_hdmi; 1235 intel_encoder->disable = intel_disable_hdmi;
1218 intel_encoder->get_hw_state = intel_hdmi_get_hw_state; 1236 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1237 intel_encoder->get_config = intel_hdmi_get_config;
1219 if (IS_VALLEYVIEW(dev)) { 1238 if (IS_VALLEYVIEW(dev)) {
1220 intel_encoder->pre_enable = intel_hdmi_pre_enable; 1239 intel_encoder->pre_enable = intel_hdmi_pre_enable;
1221 intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable; 1240 intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 36fe291172ee..655486099b76 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -86,6 +86,31 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
86 return true; 86 return true;
87} 87}
88 88
89static void intel_lvds_get_config(struct intel_encoder *encoder,
90 struct intel_crtc_config *pipe_config)
91{
92 struct drm_device *dev = encoder->base.dev;
93 struct drm_i915_private *dev_priv = dev->dev_private;
94 u32 lvds_reg, tmp, flags = 0;
95
96 if (HAS_PCH_SPLIT(dev))
97 lvds_reg = PCH_LVDS;
98 else
99 lvds_reg = LVDS;
100
101 tmp = I915_READ(lvds_reg);
102 if (tmp & LVDS_HSYNC_POLARITY)
103 flags |= DRM_MODE_FLAG_NHSYNC;
104 else
105 flags |= DRM_MODE_FLAG_PHSYNC;
106 if (tmp & LVDS_VSYNC_POLARITY)
107 flags |= DRM_MODE_FLAG_NVSYNC;
108 else
109 flags |= DRM_MODE_FLAG_PVSYNC;
110
111 pipe_config->adjusted_mode.flags |= flags;
112}
113
89/* The LVDS pin pair needs to be on before the DPLLs are enabled. 114/* The LVDS pin pair needs to be on before the DPLLs are enabled.
90 * This is an exception to the general rule that mode_set doesn't turn 115 * This is an exception to the general rule that mode_set doesn't turn
91 * things on. 116 * things on.
@@ -921,6 +946,7 @@ bool intel_lvds_init(struct drm_device *dev)
921 intel_encoder->compute_config = intel_lvds_compute_config; 946 intel_encoder->compute_config = intel_lvds_compute_config;
922 intel_encoder->disable = intel_disable_lvds; 947 intel_encoder->disable = intel_disable_lvds;
923 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 948 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
949 intel_encoder->get_config = intel_lvds_get_config;
924 intel_connector->get_hw_state = intel_connector_get_hw_state; 950 intel_connector->get_hw_state = intel_connector_get_hw_state;
925 951
926 intel_connector_attach_encoder(intel_connector, intel_encoder); 952 intel_connector_attach_encoder(intel_connector, intel_encoder);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 67a2501d519d..836794b68fc6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1485,14 +1485,15 @@ err:
1485} 1485}
1486 1486
1487void 1487void
1488intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error) 1488intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
1489 struct intel_overlay_error_state *error)
1489{ 1490{
1490 seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", 1491 i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
1491 error->dovsta, error->isr); 1492 error->dovsta, error->isr);
1492 seq_printf(m, " Register file at 0x%08lx:\n", 1493 i915_error_printf(m, " Register file at 0x%08lx:\n",
1493 error->base); 1494 error->base);
1494 1495
1495#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x) 1496#define P(x) i915_error_printf(m, " " #x ": 0x%08x\n", error->regs.x)
1496 P(OBUF_0Y); 1497 P(OBUF_0Y);
1497 P(OBUF_1Y); 1498 P(OBUF_1Y);
1498 P(OBUF_0U); 1499 P(OBUF_0U);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 56f17b2382fc..80bea1d3209f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -332,7 +332,7 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
332 struct drm_i915_private *dev_priv = dev->dev_private; 332 struct drm_i915_private *dev_priv = dev->dev_private;
333 u32 val; 333 u32 val;
334 334
335 WARN_ON(!spin_is_locked(&dev_priv->backlight.lock)); 335 WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
336 336
337 /* Restore the CTL value if it was lost, e.g. after a GPU reset */ 337 /* Restore the CTL value if it was lost, e.g. after a GPU reset */
338 338
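The WARN_ON_SMP() switch matters because spin_is_locked() is hardwired to return 0 on uniprocessor builds, so the plain WARN_ON() would trip on every call there. Simplified (the real asm-generic/bug.h also has a CONFIG_DEBUG_SPINLOCK case), the macro behaves like:

	#ifdef CONFIG_SMP
	# define WARN_ON_SMP(x)	WARN_ON(x)
	#else
	# define WARN_ON_SMP(x)	({ 0; })
	#endif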
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e2255ed97894..49a188718f9d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2072,31 +2072,561 @@ static void ivybridge_update_wm(struct drm_device *dev)
2072 cursor_wm); 2072 cursor_wm);
2073} 2073}
2074 2074
2075static void 2075static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
2076haswell_update_linetime_wm(struct drm_device *dev, int pipe, 2076 struct drm_crtc *crtc)
2077 struct drm_display_mode *mode) 2077{
2078 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2079 uint32_t pixel_rate, pfit_size;
2080
2081 if (intel_crtc->config.pixel_target_clock)
2082 pixel_rate = intel_crtc->config.pixel_target_clock;
2083 else
2084 pixel_rate = intel_crtc->config.adjusted_mode.clock;
2085
2086 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2087 * adjust the pixel_rate here. */
2088
2089 pfit_size = intel_crtc->config.pch_pfit.size;
2090 if (pfit_size) {
2091 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2092
2093 pipe_w = intel_crtc->config.requested_mode.hdisplay;
2094 pipe_h = intel_crtc->config.requested_mode.vdisplay;
2095 pfit_w = (pfit_size >> 16) & 0xFFFF;
2096 pfit_h = pfit_size & 0xFFFF;
2097 if (pipe_w < pfit_w)
2098 pipe_w = pfit_w;
2099 if (pipe_h < pfit_h)
2100 pipe_h = pfit_h;
2101
2102 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
2103 pfit_w * pfit_h);
2104 }
2105
2106 return pixel_rate;
2107}
2108
2109static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2110 uint32_t latency)
2111{
2112 uint64_t ret;
2113
2114 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
2115 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
2116
2117 return ret;
2118}
2119
2120static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2121 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2122 uint32_t latency)
2123{
2124 uint32_t ret;
2125
2126 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
2127 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
2128 ret = DIV_ROUND_UP(ret, 64) + 2;
2129 return ret;
2130}
2131
2132static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2133 uint8_t bytes_per_pixel)
2134{
2135 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2136}
2137
2138struct hsw_pipe_wm_parameters {
2139 bool active;
2140 bool sprite_enabled;
2141 uint8_t pri_bytes_per_pixel;
2142 uint8_t spr_bytes_per_pixel;
2143 uint8_t cur_bytes_per_pixel;
2144 uint32_t pri_horiz_pixels;
2145 uint32_t spr_horiz_pixels;
2146 uint32_t cur_horiz_pixels;
2147 uint32_t pipe_htotal;
2148 uint32_t pixel_rate;
2149};
2150
2151struct hsw_wm_maximums {
2152 uint16_t pri;
2153 uint16_t spr;
2154 uint16_t cur;
2155 uint16_t fbc;
2156};
2157
2158struct hsw_lp_wm_result {
2159 bool enable;
2160 bool fbc_enable;
2161 uint32_t pri_val;
2162 uint32_t spr_val;
2163 uint32_t cur_val;
2164 uint32_t fbc_val;
2165};
2166
2167struct hsw_wm_values {
2168 uint32_t wm_pipe[3];
2169 uint32_t wm_lp[3];
2170 uint32_t wm_lp_spr[3];
2171 uint32_t wm_linetime[3];
2172 bool enable_fbc_wm;
2173};
2174
2175enum hsw_data_buf_partitioning {
2176 HSW_DATA_BUF_PART_1_2,
2177 HSW_DATA_BUF_PART_5_6,
2178};
2179
2180/* For both WM_PIPE and WM_LP. */
2181static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
2182 uint32_t mem_value,
2183 bool is_lp)
2184{
2185 uint32_t method1, method2;
2186
2187 /* TODO: for now, assume the primary plane is always enabled. */
2188 if (!params->active)
2189 return 0;
2190
2191 method1 = hsw_wm_method1(params->pixel_rate,
2192 params->pri_bytes_per_pixel,
2193 mem_value);
2194
2195 if (!is_lp)
2196 return method1;
2197
2198 method2 = hsw_wm_method2(params->pixel_rate,
2199 params->pipe_htotal,
2200 params->pri_horiz_pixels,
2201 params->pri_bytes_per_pixel,
2202 mem_value);
2203
2204 return min(method1, method2);
2205}
2206
2207/* For both WM_PIPE and WM_LP. */
2208static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
2209 uint32_t mem_value)
2210{
2211 uint32_t method1, method2;
2212
2213 if (!params->active || !params->sprite_enabled)
2214 return 0;
2215
2216 method1 = hsw_wm_method1(params->pixel_rate,
2217 params->spr_bytes_per_pixel,
2218 mem_value);
2219 method2 = hsw_wm_method2(params->pixel_rate,
2220 params->pipe_htotal,
2221 params->spr_horiz_pixels,
2222 params->spr_bytes_per_pixel,
2223 mem_value);
2224 return min(method1, method2);
2225}
2226
2227/* For both WM_PIPE and WM_LP. */
2228static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
2229 uint32_t mem_value)
2230{
2231 if (!params->active)
2232 return 0;
2233
2234 return hsw_wm_method2(params->pixel_rate,
2235 params->pipe_htotal,
2236 params->cur_horiz_pixels,
2237 params->cur_bytes_per_pixel,
2238 mem_value);
2239}
2240
2241/* Only for WM_LP. */
2242static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
2243 uint32_t pri_val,
2244 uint32_t mem_value)
2245{
2246 if (!params->active)
2247 return 0;
2248
2249 return hsw_wm_fbc(pri_val,
2250 params->pri_horiz_pixels,
2251 params->pri_bytes_per_pixel);
2252}
2253
2254static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
2255 struct hsw_pipe_wm_parameters *params,
2256 struct hsw_lp_wm_result *result)
2257{
2258 enum pipe pipe;
2259 uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
2260
2261 for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
2262 struct hsw_pipe_wm_parameters *p = &params[pipe];
2263
2264 pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
2265 spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
2266 cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
2267 fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
2268 }
2269
2270 result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
2271 result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
2272 result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
2273 result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
2274
2275 if (result->fbc_val > max->fbc) {
2276 result->fbc_enable = false;
2277 result->fbc_val = 0;
2278 } else {
2279 result->fbc_enable = true;
2280 }
2281
2282 result->enable = result->pri_val <= max->pri &&
2283 result->spr_val <= max->spr &&
2284 result->cur_val <= max->cur;
2285 return result->enable;
2286}
2287
2288static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
2289 uint32_t mem_value, enum pipe pipe,
2290 struct hsw_pipe_wm_parameters *params)
2291{
2292 uint32_t pri_val, cur_val, spr_val;
2293
2294 pri_val = hsw_compute_pri_wm(params, mem_value, false);
2295 spr_val = hsw_compute_spr_wm(params, mem_value);
2296 cur_val = hsw_compute_cur_wm(params, mem_value);
2297
2298 WARN(pri_val > 127,
2299 "Primary WM error, mode not supported for pipe %c\n",
2300 pipe_name(pipe));
2301 WARN(spr_val > 127,
2302 "Sprite WM error, mode not supported for pipe %c\n",
2303 pipe_name(pipe));
2304 WARN(cur_val > 63,
2305 "Cursor WM error, mode not supported for pipe %c\n",
2306 pipe_name(pipe));
2307
2308 return (pri_val << WM0_PIPE_PLANE_SHIFT) |
2309 (spr_val << WM0_PIPE_SPRITE_SHIFT) |
2310 cur_val;
2311}
2312
2313static uint32_t
2314hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2078{ 2315{
2079 struct drm_i915_private *dev_priv = dev->dev_private; 2316 struct drm_i915_private *dev_priv = dev->dev_private;
2080 u32 temp; 2317 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2318 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2319 u32 linetime, ips_linetime;
2081 2320
2082 temp = I915_READ(PIPE_WM_LINETIME(pipe)); 2321 if (!intel_crtc_active(crtc))
2083 temp &= ~PIPE_WM_LINETIME_MASK; 2322 return 0;
2084 2323
2085 /* The WMs are computed based on how long it takes to fill a single 2324 /* The WMs are computed based on how long it takes to fill a single
2086 * row at the given clock rate, multiplied by 8. 2325 * row at the given clock rate, multiplied by 8.
2087 */ 2326 */
2088 temp |= PIPE_WM_LINETIME_TIME( 2327 linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
2089 ((mode->crtc_hdisplay * 1000) / mode->clock) * 8); 2328 ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
2329 intel_ddi_get_cdclk_freq(dev_priv));
2090 2330
2091 /* IPS watermarks are only used by pipe A, and are ignored by 2331 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2092 * pipes B and C. They are calculated similarly to the common 2332 PIPE_WM_LINETIME_TIME(linetime);
2093 * linetime values, except that we are using CD clock frequency 2333}
2094 * in MHz instead of pixel rate for the division. 2334
2095 * 2335static void hsw_compute_wm_parameters(struct drm_device *dev,
2096 * This is a placeholder for the IPS watermark calculation code. 2336 struct hsw_pipe_wm_parameters *params,
2097 */ 2337 uint32_t *wm,
2338 struct hsw_wm_maximums *lp_max_1_2,
2339 struct hsw_wm_maximums *lp_max_5_6)
2340{
2341 struct drm_i915_private *dev_priv = dev->dev_private;
2342 struct drm_crtc *crtc;
2343 struct drm_plane *plane;
2344 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2345 enum pipe pipe;
2346 int pipes_active = 0, sprites_enabled = 0;
2347
2348 if ((sskpd >> 56) & 0xFF)
2349 wm[0] = (sskpd >> 56) & 0xFF;
2350 else
2351 wm[0] = sskpd & 0xF;
2352 wm[1] = ((sskpd >> 4) & 0xFF) * 5;
2353 wm[2] = ((sskpd >> 12) & 0xFF) * 5;
2354 wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
2355 wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
2356
2357 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2358 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2359 struct hsw_pipe_wm_parameters *p;
2360
2361 pipe = intel_crtc->pipe;
2362 p = &params[pipe];
2363
2364 p->active = intel_crtc_active(crtc);
2365 if (!p->active)
2366 continue;
2098 2367
2099 I915_WRITE(PIPE_WM_LINETIME(pipe), temp); 2368 pipes_active++;
2369
2370 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
2371 p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
2372 p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2373 p->cur_bytes_per_pixel = 4;
2374 p->pri_horiz_pixels =
2375 intel_crtc->config.requested_mode.hdisplay;
2376 p->cur_horiz_pixels = 64;
2377 }
2378
2379 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2380 struct intel_plane *intel_plane = to_intel_plane(plane);
2381 struct hsw_pipe_wm_parameters *p;
2382
2383 pipe = intel_plane->pipe;
2384 p = &params[pipe];
2385
2386 p->sprite_enabled = intel_plane->wm.enable;
2387 p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
2388 p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
2389
2390 if (p->sprite_enabled)
2391 sprites_enabled++;
2392 }
2393
2394 if (pipes_active > 1) {
2395 lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
2396 lp_max_1_2->spr = lp_max_5_6->spr = 128;
2397 lp_max_1_2->cur = lp_max_5_6->cur = 64;
2398 } else {
2399 lp_max_1_2->pri = sprites_enabled ? 384 : 768;
2400 lp_max_5_6->pri = sprites_enabled ? 128 : 768;
2401 lp_max_1_2->spr = 384;
2402 lp_max_5_6->spr = 640;
2403 lp_max_1_2->cur = lp_max_5_6->cur = 255;
2404 }
2405 lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
2406}
2407
2408static void hsw_compute_wm_results(struct drm_device *dev,
2409 struct hsw_pipe_wm_parameters *params,
2410 uint32_t *wm,
2411 struct hsw_wm_maximums *lp_maximums,
2412 struct hsw_wm_values *results)
2413{
2414 struct drm_i915_private *dev_priv = dev->dev_private;
2415 struct drm_crtc *crtc;
2416 struct hsw_lp_wm_result lp_results[4] = {};
2417 enum pipe pipe;
2418 int level, max_level, wm_lp;
2419
2420 for (level = 1; level <= 4; level++)
2421 if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
2422 &lp_results[level - 1]))
2423 break;
2424 max_level = level - 1;
2425
2426 memset(results, 0, sizeof(*results));
2427
2428 /* The spec says it is preferred to disable FBC WMs instead of disabling
2429 * a WM level; this must run after the memset above. */
2430 results->enable_fbc_wm = true;
2431 for (level = 1; level <= max_level; level++) {
2432 if (!lp_results[level - 1].fbc_enable) {
2433 results->enable_fbc_wm = false;
2434 break;
2435 }
2436 }
2437 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2438 const struct hsw_lp_wm_result *r;
2439
2440 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
2441 if (level > max_level)
2442 break;
2443
2444 r = &lp_results[level - 1];
2445 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
2446 r->fbc_val,
2447 r->pri_val,
2448 r->cur_val);
2449 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2450 }
2451
2452 for_each_pipe(pipe)
2453 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
2454 pipe,
2455 &params[pipe]);
2456
2457 for_each_pipe(pipe) {
2458 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2459 results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
2460 }
2461}
2462
2463/* Find the result with the highest level enabled. On a tie, prefer the one
2464 * with FBC WMs enabled, falling back to r1. */
2465struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
2466 struct hsw_wm_values *r2)
2467{
2468 int i, val_r1 = 0, val_r2 = 0;
2469
2470 for (i = 0; i < 3; i++) {
2471 if (r1->wm_lp[i] & WM3_LP_EN)
2472 val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
2473 if (r2->wm_lp[i] & WM3_LP_EN)
2474 val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
2475 }
2476
2477 if (val_r1 == val_r2) {
2478 if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
2479 return r2;
2480 else
2481 return r1;
2482 } else if (val_r1 > val_r2) {
2483 return r1;
2484 } else {
2485 return r2;
2486 }
2487}
2488
2489/*
2490 * The spec says we shouldn't write when we don't need, because every write
2491 * causes WMs to be re-evaluated, expending some power.
2492 */
2493static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2494 struct hsw_wm_values *results,
2495 enum hsw_data_buf_partitioning partitioning)
2496{
2497 struct hsw_wm_values previous;
2498 uint32_t val;
2499 enum hsw_data_buf_partitioning prev_partitioning;
2500 bool prev_enable_fbc_wm;
2501
2502 previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2503 previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2504 previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2505 previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2506 previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2507 previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2508 previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2509 previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2510 previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2511 previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2512 previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2513 previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2514
2515 prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2516 HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
2517
2518 prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2519
2520 if (memcmp(results->wm_pipe, previous.wm_pipe,
2521 sizeof(results->wm_pipe)) == 0 &&
2522 memcmp(results->wm_lp, previous.wm_lp,
2523 sizeof(results->wm_lp)) == 0 &&
2524 memcmp(results->wm_lp_spr, previous.wm_lp_spr,
2525 sizeof(results->wm_lp_spr)) == 0 &&
2526 memcmp(results->wm_linetime, previous.wm_linetime,
2527 sizeof(results->wm_linetime)) == 0 &&
2528 partitioning == prev_partitioning &&
2529 results->enable_fbc_wm == prev_enable_fbc_wm)
2530 return;
2531
2532 if (previous.wm_lp[2] != 0)
2533 I915_WRITE(WM3_LP_ILK, 0);
2534 if (previous.wm_lp[1] != 0)
2535 I915_WRITE(WM2_LP_ILK, 0);
2536 if (previous.wm_lp[0] != 0)
2537 I915_WRITE(WM1_LP_ILK, 0);
2538
2539 if (previous.wm_pipe[0] != results->wm_pipe[0])
2540 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2541 if (previous.wm_pipe[1] != results->wm_pipe[1])
2542 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2543 if (previous.wm_pipe[2] != results->wm_pipe[2])
2544 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2545
2546 if (previous.wm_linetime[0] != results->wm_linetime[0])
2547 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2548 if (previous.wm_linetime[1] != results->wm_linetime[1])
2549 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2550 if (previous.wm_linetime[2] != results->wm_linetime[2])
2551 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2552
2553 if (prev_partitioning != partitioning) {
2554 val = I915_READ(WM_MISC);
2555 if (partitioning == HSW_DATA_BUF_PART_1_2)
2556 val &= ~WM_MISC_DATA_PARTITION_5_6;
2557 else
2558 val |= WM_MISC_DATA_PARTITION_5_6;
2559 I915_WRITE(WM_MISC, val);
2560 }
2561
2562 if (prev_enable_fbc_wm != results->enable_fbc_wm) {
2563 val = I915_READ(DISP_ARB_CTL);
2564 if (results->enable_fbc_wm)
2565 val &= ~DISP_FBC_WM_DIS;
2566 else
2567 val |= DISP_FBC_WM_DIS;
2568 I915_WRITE(DISP_ARB_CTL, val);
2569 }
2570
2571 if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
2572 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2573 if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
2574 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2575 if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
2576 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2577
2578 if (results->wm_lp[0] != 0)
2579 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2580 if (results->wm_lp[1] != 0)
2581 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2582 if (results->wm_lp[2] != 0)
2583 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2584}
2585
2586static void haswell_update_wm(struct drm_device *dev)
2587{
2588 struct drm_i915_private *dev_priv = dev->dev_private;
2589 struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
2590 struct hsw_pipe_wm_parameters params[3];
2591 struct hsw_wm_values results_1_2, results_5_6, *best_results;
2592 uint32_t wm[5];
2593 enum hsw_data_buf_partitioning partitioning;
2594
2595 hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
2596
2597 hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
2598 if (lp_max_1_2.pri != lp_max_5_6.pri) {
2599 hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
2600 &results_5_6);
2601 best_results = hsw_find_best_result(&results_1_2, &results_5_6);
2602 } else {
2603 best_results = &results_1_2;
2604 }
2605
2606 partitioning = (best_results == &results_1_2) ?
2607 HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
2608
2609 hsw_write_wm_values(dev_priv, best_results, partitioning);
2610}
2611
2612static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
2613 uint32_t sprite_width, int pixel_size,
2614 bool enable)
2615{
2616 struct drm_plane *plane;
2617
2618 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2619 struct intel_plane *intel_plane = to_intel_plane(plane);
2620
2621 if (intel_plane->pipe == pipe) {
2622 intel_plane->wm.enable = enable;
2623 intel_plane->wm.horiz_pixels = sprite_width + 1;
2624 intel_plane->wm.bytes_per_pixel = pixel_size;
2625 break;
2626 }
2627 }
2628
2629 haswell_update_wm(dev);
2100} 2630}
2101 2631
2102static bool 2632static bool
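To make the two watermark methods defined earlier in this hunk concrete, assuming pixel rates in kHz and latencies in 0.1us units (which the /10000 factors imply): for a 1920x1080 pipe at 148500 kHz with htotal 2200, 4 bytes per pixel on the primary plane, and a 2us (20 unit) latency:

	method1 = DIV_ROUND_UP(148500 * 4 * 20, 64 * 10000) + 2
	        = DIV_ROUND_UP(11880000, 640000) + 2 = 19 + 2 = 21

	lines   = (20 * 148500) / (2200 * 10000) + 1 = 0 + 1 = 1
	method2 = DIV_ROUND_UP(1 * 1920 * 4, 64) + 2 = 120 + 2 = 122

The LP levels use min(method1, method2), here 21: method1 wins whenever the latency window is shorter than a scanline.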
@@ -2176,7 +2706,8 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2176} 2706}
2177 2707
2178static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, 2708static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2179 uint32_t sprite_width, int pixel_size) 2709 uint32_t sprite_width, int pixel_size,
2710 bool enable)
2180{ 2711{
2181 struct drm_i915_private *dev_priv = dev->dev_private; 2712 struct drm_i915_private *dev_priv = dev->dev_private;
2182 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ 2713 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
@@ -2184,6 +2715,9 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2184 int sprite_wm, reg; 2715 int sprite_wm, reg;
2185 int ret; 2716 int ret;
2186 2717
2718 if (!enable)
2719 return;
2720
2187 switch (pipe) { 2721 switch (pipe) {
2188 case 0: 2722 case 0:
2189 reg = WM0_PIPEA_ILK; 2723 reg = WM0_PIPEA_ILK;
@@ -2294,23 +2828,15 @@ void intel_update_watermarks(struct drm_device *dev)
2294 dev_priv->display.update_wm(dev); 2828 dev_priv->display.update_wm(dev);
2295} 2829}
2296 2830
2297void intel_update_linetime_watermarks(struct drm_device *dev,
2298 int pipe, struct drm_display_mode *mode)
2299{
2300 struct drm_i915_private *dev_priv = dev->dev_private;
2301
2302 if (dev_priv->display.update_linetime_wm)
2303 dev_priv->display.update_linetime_wm(dev, pipe, mode);
2304}
2305
2306void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 2831void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2307 uint32_t sprite_width, int pixel_size) 2832 uint32_t sprite_width, int pixel_size,
2833 bool enable)
2308{ 2834{
2309 struct drm_i915_private *dev_priv = dev->dev_private; 2835 struct drm_i915_private *dev_priv = dev->dev_private;
2310 2836
2311 if (dev_priv->display.update_sprite_wm) 2837 if (dev_priv->display.update_sprite_wm)
2312 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, 2838 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2313 pixel_size); 2839 pixel_size, enable);
2314} 2840}
2315 2841
2316static struct drm_i915_gem_object * 2842static struct drm_i915_gem_object *
@@ -2556,10 +3082,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
2556 if (val == dev_priv->rps.cur_delay) 3082 if (val == dev_priv->rps.cur_delay)
2557 return; 3083 return;
2558 3084
2559 valleyview_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 3085 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
2560 3086
2561 do { 3087 do {
2562 valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &pval); 3088 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
2563 if (time_after(jiffies, timeout)) { 3089 if (time_after(jiffies, timeout)) {
2564 DRM_DEBUG_DRIVER("timed out waiting for Punit\n"); 3090 DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
2565 break; 3091 break;
@@ -2567,7 +3093,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
2567 udelay(10); 3093 udelay(10);
2568 } while (pval & 1); 3094 } while (pval & 1);
2569 3095
2570 valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &pval); 3096 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
2571 if ((pval >> 8) != val) 3097 if ((pval >> 8) != val)
2572 DRM_DEBUG_DRIVER("punit overrode freq: %d requested, but got %d\n", 3098 DRM_DEBUG_DRIVER("punit overrode freq: %d requested, but got %d\n",
2573 val, pval >> 8); 3099 val, pval >> 8);
@@ -2590,7 +3116,7 @@ static void gen6_disable_rps(struct drm_device *dev)
2590 I915_WRITE(GEN6_RC_CONTROL, 0); 3116 I915_WRITE(GEN6_RC_CONTROL, 0);
2591 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 3117 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2592 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 3118 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2593 I915_WRITE(GEN6_PMIER, 0); 3119 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
2594 /* Complete PM interrupt masking here doesn't race with the rps work 3120 /* Complete PM interrupt masking here doesn't race with the rps work
2595 * item again unmasking PM interrupts because that is using a different 3121 * item again unmasking PM interrupts because that is using a different
2596 * register (PMIMR) to mask PM interrupts. The only risk is in leaving 3122 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
@@ -2600,7 +3126,7 @@ static void gen6_disable_rps(struct drm_device *dev)
2600 dev_priv->rps.pm_iir = 0; 3126 dev_priv->rps.pm_iir = 0;
2601 spin_unlock_irq(&dev_priv->rps.lock); 3127 spin_unlock_irq(&dev_priv->rps.lock);
2602 3128
2603 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 3129 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
2604} 3130}
2605 3131
2606static void valleyview_disable_rps(struct drm_device *dev) 3132static void valleyview_disable_rps(struct drm_device *dev)
@@ -2781,12 +3307,15 @@ static void gen6_enable_rps(struct drm_device *dev)
2781 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); 3307 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
2782 3308
2783 /* requires MSI enabled */ 3309 /* requires MSI enabled */
2784 I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); 3310 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
2785 spin_lock_irq(&dev_priv->rps.lock); 3311 spin_lock_irq(&dev_priv->rps.lock);
2786 WARN_ON(dev_priv->rps.pm_iir != 0); 3312 /* FIXME: Our interrupt enabling sequence is bonghits.
2787 I915_WRITE(GEN6_PMIMR, 0); 3313 * dev_priv->rps.pm_iir really should be 0 here. */
3314 dev_priv->rps.pm_iir = 0;
3315 I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
3316 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
2788 spin_unlock_irq(&dev_priv->rps.lock); 3317 spin_unlock_irq(&dev_priv->rps.lock);
2789 /* enable all PM interrupts */ 3318 /* unmask all PM interrupts */
2790 I915_WRITE(GEN6_PMINTRMSK, 0); 3319 I915_WRITE(GEN6_PMINTRMSK, 0);
2791 3320
2792 rc6vids = 0; 3321 rc6vids = 0;
@@ -2872,7 +3401,7 @@ int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
2872{ 3401{
2873 u32 val, rp0; 3402 u32 val, rp0;
2874 3403
2875 valleyview_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE, &val); 3404 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
2876 3405
2877 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 3406 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
2878 /* Clamp to max */ 3407 /* Clamp to max */
@@ -2885,9 +3414,9 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
2885{ 3414{
2886 u32 val, rpe; 3415 u32 val, rpe;
2887 3416
2888 valleyview_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO, &val); 3417 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
2889 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 3418 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
2890 valleyview_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI, &val); 3419 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
2891 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 3420 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
2892 3421
2893 return rpe; 3422 return rpe;
@@ -2895,11 +3424,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
2895 3424
2896int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) 3425int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
2897{ 3426{
2898 u32 val; 3427 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
2899
2900 valleyview_punit_read(dev_priv, PUNIT_REG_GPU_LFM, &val);
2901
2902 return val & 0xff;
2903} 3428}
2904 3429
2905static void vlv_rps_timer_work(struct work_struct *work) 3430static void vlv_rps_timer_work(struct work_struct *work)
@@ -3008,7 +3533,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
3008 I915_WRITE(GEN6_RC_CONTROL, 3533 I915_WRITE(GEN6_RC_CONTROL,
3009 GEN7_RC_CTL_TO_MODE); 3534 GEN7_RC_CTL_TO_MODE);
3010 3535
3011 valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &val); 3536 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3012 switch ((val >> 6) & 3) { 3537 switch ((val >> 6) & 3) {
3013 case 0: 3538 case 0:
3014 case 1: 3539 case 1:
@@ -3053,7 +3578,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
3053 valleyview_set_rps(dev_priv->dev, rpe); 3578 valleyview_set_rps(dev_priv->dev, rpe);
3054 3579
3055 /* requires MSI enabled */ 3580 /* requires MSI enabled */
3056 I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); 3581 I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
3057 spin_lock_irq(&dev_priv->rps.lock); 3582 spin_lock_irq(&dev_priv->rps.lock);
3058 WARN_ON(dev_priv->rps.pm_iir != 0); 3583 WARN_ON(dev_priv->rps.pm_iir != 0);
3059 I915_WRITE(GEN6_PMIMR, 0); 3584 I915_WRITE(GEN6_PMIMR, 0);
@@ -4162,14 +4687,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4162 /* WaSwitchSolVfFArbitrationPriority:hsw */ 4687 /* WaSwitchSolVfFArbitrationPriority:hsw */
4163 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 4688 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4164 4689
4165 /* XXX: This is a workaround for early silicon revisions and should be 4690 /* WaRsPkgCStateDisplayPMReq:hsw */
4166 * removed later. 4691 I915_WRITE(CHICKEN_PAR1_1,
4167 */ 4692 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
4168 I915_WRITE(WM_DBG,
4169 I915_READ(WM_DBG) |
4170 WM_DBG_DISALLOW_MULTIPLE_LP |
4171 WM_DBG_DISALLOW_SPRITE |
4172 WM_DBG_DISALLOW_MAXFIFO);
4173 4693
4174 lpt_init_clock_gating(dev); 4694 lpt_init_clock_gating(dev);
4175} 4695}
@@ -4623,10 +5143,10 @@ void intel_init_pm(struct drm_device *dev)
4623 } 5143 }
4624 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 5144 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
4625 } else if (IS_HASWELL(dev)) { 5145 } else if (IS_HASWELL(dev)) {
4626 if (SNB_READ_WM0_LATENCY()) { 5146 if (I915_READ64(MCH_SSKPD)) {
4627 dev_priv->display.update_wm = sandybridge_update_wm; 5147 dev_priv->display.update_wm = haswell_update_wm;
4628 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 5148 dev_priv->display.update_sprite_wm =
4629 dev_priv->display.update_linetime_wm = haswell_update_linetime_wm; 5149 haswell_update_sprite_wm;
4630 } else { 5150 } else {
4631 DRM_DEBUG_KMS("Failed to read display plane latency. " 5151 DRM_DEBUG_KMS("Failed to read display plane latency. "
4632 "Disable CxSR\n"); 5152 "Disable CxSR\n");
@@ -4952,66 +5472,6 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
4952 return 0; 5472 return 0;
4953} 5473}
4954 5474
4955static int vlv_punit_rw(struct drm_i915_private *dev_priv, u32 port, u8 opcode,
4956 u8 addr, u32 *val)
4957{
4958 u32 cmd, devfn, be, bar;
4959
4960 bar = 0;
4961 be = 0xf;
4962 devfn = PCI_DEVFN(2, 0);
4963
4964 cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
4965 (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
4966 (bar << IOSF_BAR_SHIFT);
4967
4968 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4969
4970 if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) {
4971 DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n",
4972 opcode == PUNIT_OPCODE_REG_READ ?
4973 "read" : "write");
4974 return -EAGAIN;
4975 }
4976
4977 I915_WRITE(VLV_IOSF_ADDR, addr);
4978 if (opcode == PUNIT_OPCODE_REG_WRITE)
4979 I915_WRITE(VLV_IOSF_DATA, *val);
4980 I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
4981
4982 if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0,
4983 5)) {
4984 DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n",
4985 opcode == PUNIT_OPCODE_REG_READ ? "read" : "write",
4986 addr);
4987 return -ETIMEDOUT;
4988 }
4989
4990 if (opcode == PUNIT_OPCODE_REG_READ)
4991 *val = I915_READ(VLV_IOSF_DATA);
4992 I915_WRITE(VLV_IOSF_DATA, 0);
4993
4994 return 0;
4995}
4996
4997int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
4998{
4999 return vlv_punit_rw(dev_priv, IOSF_PORT_PUNIT, PUNIT_OPCODE_REG_READ,
5000 addr, val);
5001}
5002
5003int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
5004{
5005 return vlv_punit_rw(dev_priv, IOSF_PORT_PUNIT, PUNIT_OPCODE_REG_WRITE,
5006 addr, &val);
5007}
5008
5009int valleyview_nc_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
5010{
5011 return vlv_punit_rw(dev_priv, IOSF_PORT_NC, PUNIT_OPCODE_REG_READ,
5012 addr, val);
5013}
5014
5015int vlv_gpu_freq(int ddr_freq, int val) 5475int vlv_gpu_freq(int ddr_freq, int val)
5016{ 5476{
5017 int mult, base; 5477 int mult, base;
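The deleted valleyview_punit_*/valleyview_nc_read() helpers move to the new intel_sideband.c, and the read side changes shape from an out-parameter plus error code to returning the value directly, which is what the callsite churn earlier in this file reflects:

	/* before */
	u32 val;
	valleyview_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS, &val);

	/* after */
	u32 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);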
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3d2c236e15ab..0e72da6ad0fa 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -464,9 +464,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
464 goto err_unref; 464 goto err_unref;
465 465
466 pc->gtt_offset = obj->gtt_offset; 466 pc->gtt_offset = obj->gtt_offset;
467 pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 467 pc->cpu_page = kmap(sg_page(obj->pages->sgl));
468 if (pc->cpu_page == NULL) 468 if (pc->cpu_page == NULL) {
469 ret = -ENOMEM;
469 goto err_unpin; 470 goto err_unpin;
471 }
470 472
471 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 473 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
472 ring->name, pc->gtt_offset); 474 ring->name, pc->gtt_offset);
@@ -558,7 +560,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
558 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 560 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
559 561
560 if (HAS_L3_GPU_CACHE(dev)) 562 if (HAS_L3_GPU_CACHE(dev))
561 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); 563 I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
562 564
563 return ret; 565 return ret;
564} 566}
@@ -580,9 +582,16 @@ static void
580update_mboxes(struct intel_ring_buffer *ring, 582update_mboxes(struct intel_ring_buffer *ring,
581 u32 mmio_offset) 583 u32 mmio_offset)
582{ 584{
585/* NB: In order to be able to do semaphore MBOX updates for a varying number
586 * of rings, it's easiest if we round up each individual update to a
587 * multiple of 2 (since ring updates must always be a multiple of 2)
588 * even though the actual update only requires 3 dwords.
589 */
590#define MBOX_UPDATE_DWORDS 4
583 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 591 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
584 intel_ring_emit(ring, mmio_offset); 592 intel_ring_emit(ring, mmio_offset);
585 intel_ring_emit(ring, ring->outstanding_lazy_request); 593 intel_ring_emit(ring, ring->outstanding_lazy_request);
594 intel_ring_emit(ring, MI_NOOP);
586} 595}
587 596
588/** 597/**
@@ -597,19 +606,24 @@ update_mboxes(struct intel_ring_buffer *ring,
597static int 606static int
598gen6_add_request(struct intel_ring_buffer *ring) 607gen6_add_request(struct intel_ring_buffer *ring)
599{ 608{
600 u32 mbox1_reg; 609 struct drm_device *dev = ring->dev;
601 u32 mbox2_reg; 610 struct drm_i915_private *dev_priv = dev->dev_private;
602 int ret; 611 struct intel_ring_buffer *useless;
612 int i, ret;
603 613
604 ret = intel_ring_begin(ring, 10); 614 ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
615 MBOX_UPDATE_DWORDS) +
616 4);
605 if (ret) 617 if (ret)
606 return ret; 618 return ret;
619#undef MBOX_UPDATE_DWORDS
607 620
608 mbox1_reg = ring->signal_mbox[0]; 621 for_each_ring(useless, dev_priv, i) {
609 mbox2_reg = ring->signal_mbox[1]; 622 u32 mbox_reg = ring->signal_mbox[i];
623 if (mbox_reg != GEN6_NOSYNC)
624 update_mboxes(ring, mbox_reg);
625 }
610 626
611 update_mboxes(ring, mbox1_reg);
612 update_mboxes(ring, mbox2_reg);
613 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 627 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
614 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 628 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
615 intel_ring_emit(ring, ring->outstanding_lazy_request); 629 intel_ring_emit(ring, ring->outstanding_lazy_request);
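The intel_ring_begin() size works out as follows, assuming I915_NUM_RINGS is 4 once the vebox ring added elsewhere in this series is counted: one MBOX_UPDATE_DWORDS (4 dword) update per other ring, plus the 4-dword store/interrupt tail that the old intel_ring_begin(ring, 10) already accounted for (2 * 3 + 4):

	dwords = (I915_NUM_RINGS - 1) * MBOX_UPDATE_DWORDS + 4
	       = 3 * 4 + 4 = 16

A ring's slot for itself is GEN6_NOSYNC and is skipped, so exactly I915_NUM_RINGS - 1 updates get emitted.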
@@ -781,7 +795,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
781 return false; 795 return false;
782 796
783 spin_lock_irqsave(&dev_priv->irq_lock, flags); 797 spin_lock_irqsave(&dev_priv->irq_lock, flags);
784 if (ring->irq_refcount++ == 0) { 798 if (ring->irq_refcount.gt++ == 0) {
785 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 799 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
786 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 800 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
787 POSTING_READ(GTIMR); 801 POSTING_READ(GTIMR);
@@ -799,7 +813,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
799 unsigned long flags; 813 unsigned long flags;
800 814
801 spin_lock_irqsave(&dev_priv->irq_lock, flags); 815 spin_lock_irqsave(&dev_priv->irq_lock, flags);
802 if (--ring->irq_refcount == 0) { 816 if (--ring->irq_refcount.gt == 0) {
803 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 817 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
804 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 818 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
805 POSTING_READ(GTIMR); 819 POSTING_READ(GTIMR);
@@ -818,7 +832,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
818 return false; 832 return false;
819 833
820 spin_lock_irqsave(&dev_priv->irq_lock, flags); 834 spin_lock_irqsave(&dev_priv->irq_lock, flags);
821 if (ring->irq_refcount++ == 0) { 835 if (ring->irq_refcount.gt++ == 0) {
822 dev_priv->irq_mask &= ~ring->irq_enable_mask; 836 dev_priv->irq_mask &= ~ring->irq_enable_mask;
823 I915_WRITE(IMR, dev_priv->irq_mask); 837 I915_WRITE(IMR, dev_priv->irq_mask);
824 POSTING_READ(IMR); 838 POSTING_READ(IMR);
@@ -836,7 +850,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
836 unsigned long flags; 850 unsigned long flags;
837 851
838 spin_lock_irqsave(&dev_priv->irq_lock, flags); 852 spin_lock_irqsave(&dev_priv->irq_lock, flags);
839 if (--ring->irq_refcount == 0) { 853 if (--ring->irq_refcount.gt == 0) {
840 dev_priv->irq_mask |= ring->irq_enable_mask; 854 dev_priv->irq_mask |= ring->irq_enable_mask;
841 I915_WRITE(IMR, dev_priv->irq_mask); 855 I915_WRITE(IMR, dev_priv->irq_mask);
842 POSTING_READ(IMR); 856 POSTING_READ(IMR);
@@ -855,7 +869,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
855 return false; 869 return false;
856 870
857 spin_lock_irqsave(&dev_priv->irq_lock, flags); 871 spin_lock_irqsave(&dev_priv->irq_lock, flags);
858 if (ring->irq_refcount++ == 0) { 872 if (ring->irq_refcount.gt++ == 0) {
859 dev_priv->irq_mask &= ~ring->irq_enable_mask; 873 dev_priv->irq_mask &= ~ring->irq_enable_mask;
860 I915_WRITE16(IMR, dev_priv->irq_mask); 874 I915_WRITE16(IMR, dev_priv->irq_mask);
861 POSTING_READ16(IMR); 875 POSTING_READ16(IMR);
@@ -873,7 +887,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
873 unsigned long flags; 887 unsigned long flags;
874 888
875 spin_lock_irqsave(&dev_priv->irq_lock, flags); 889 spin_lock_irqsave(&dev_priv->irq_lock, flags);
876 if (--ring->irq_refcount == 0) { 890 if (--ring->irq_refcount.gt == 0) {
877 dev_priv->irq_mask |= ring->irq_enable_mask; 891 dev_priv->irq_mask |= ring->irq_enable_mask;
878 I915_WRITE16(IMR, dev_priv->irq_mask); 892 I915_WRITE16(IMR, dev_priv->irq_mask);
879 POSTING_READ16(IMR); 893 POSTING_READ16(IMR);
@@ -901,6 +915,9 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
901 case VCS: 915 case VCS:
902 mmio = BSD_HWS_PGA_GEN7; 916 mmio = BSD_HWS_PGA_GEN7;
903 break; 917 break;
918 case VECS:
919 mmio = VEBOX_HWS_PGA_GEN7;
920 break;
904 } 921 }
905 } else if (IS_GEN6(ring->dev)) { 922 } else if (IS_GEN6(ring->dev)) {
906 mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 923 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
@@ -963,10 +980,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
963 gen6_gt_force_wake_get(dev_priv); 980 gen6_gt_force_wake_get(dev_priv);
964 981
965 spin_lock_irqsave(&dev_priv->irq_lock, flags); 982 spin_lock_irqsave(&dev_priv->irq_lock, flags);
966 if (ring->irq_refcount++ == 0) { 983 if (ring->irq_refcount.gt++ == 0) {
967 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 984 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
968 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | 985 I915_WRITE_IMR(ring,
969 GEN6_RENDER_L3_PARITY_ERROR)); 986 ~(ring->irq_enable_mask |
987 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
970 else 988 else
971 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 989 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
972 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 990 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
@@ -986,9 +1004,10 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
986 unsigned long flags; 1004 unsigned long flags;
987 1005
988 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1006 spin_lock_irqsave(&dev_priv->irq_lock, flags);
989 if (--ring->irq_refcount == 0) { 1007 if (--ring->irq_refcount.gt == 0) {
990 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1008 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
991 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); 1009 I915_WRITE_IMR(ring,
1010 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
992 else 1011 else
993 I915_WRITE_IMR(ring, ~0); 1012 I915_WRITE_IMR(ring, ~0);
994 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 1013 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
@@ -1000,6 +1019,48 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
1000 gen6_gt_force_wake_put(dev_priv); 1019 gen6_gt_force_wake_put(dev_priv);
1001} 1020}
1002 1021
1022static bool
1023hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1024{
1025 struct drm_device *dev = ring->dev;
1026 struct drm_i915_private *dev_priv = dev->dev_private;
1027 unsigned long flags;
1028
1029 if (!dev->irq_enabled)
1030 return false;
1031
1032 spin_lock_irqsave(&dev_priv->rps.lock, flags);
1033 if (ring->irq_refcount.pm++ == 0) {
1034 u32 pm_imr = I915_READ(GEN6_PMIMR);
1035 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1036 I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
1037 POSTING_READ(GEN6_PMIMR);
1038 }
1039 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
1040
1041 return true;
1042}
1043
1044static void
1045hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1046{
1047 struct drm_device *dev = ring->dev;
1048 struct drm_i915_private *dev_priv = dev->dev_private;
1049 unsigned long flags;
1050
1051 if (!dev->irq_enabled)
1052 return;
1053
1054 spin_lock_irqsave(&dev_priv->rps.lock, flags);
1055 if (--ring->irq_refcount.pm == 0) {
1056 u32 pm_imr = I915_READ(GEN6_PMIMR);
1057 I915_WRITE_IMR(ring, ~0);
1058 I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
1059 POSTING_READ(GEN6_PMIMR);
1060 }
1061 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
1062}
1063
1003static int 1064static int
1004i965_dispatch_execbuffer(struct intel_ring_buffer *ring, 1065i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1005 u32 offset, u32 length, 1066 u32 offset, u32 length,
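Unlike the GT rings, the vebox user interrupt is routed through the PM interrupt registers, so hsw_vebox_get_irq()/hsw_vebox_put_irq() serialize on rps.lock and mask via GEN6_PMIMR rather than irq_lock and GTIMR. That split is also why irq_refcount becomes a pair of counters; its presumable shape, matching the .gt/.pm accesses in this file (the intel_ringbuffer.h hunk is not shown here):

	struct {
		u32 gt;	/* GT-ring interrupts, under dev_priv->irq_lock */
		u32 pm;	/* PM interrupts, under dev_priv->rps.lock */
	} irq_refcount;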
@@ -1502,6 +1563,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1502 } 1563 }
1503 1564
1504 ring->set_seqno(ring, seqno); 1565 ring->set_seqno(ring, seqno);
1566 ring->hangcheck.seqno = seqno;
1505} 1567}
1506 1568
1507void intel_ring_advance(struct intel_ring_buffer *ring) 1569void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1548,8 +1610,8 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1548 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1610 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1549} 1611}
1550 1612
1551static int gen6_ring_flush(struct intel_ring_buffer *ring, 1613static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1552 u32 invalidate, u32 flush) 1614 u32 invalidate, u32 flush)
1553{ 1615{
1554 uint32_t cmd; 1616 uint32_t cmd;
1555 int ret; 1617 int ret;
@@ -1620,8 +1682,8 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1620 1682
1621/* Blitter support (SandyBridge+) */ 1683/* Blitter support (SandyBridge+) */
1622 1684
1623static int blt_ring_flush(struct intel_ring_buffer *ring, 1685static int gen6_ring_flush(struct intel_ring_buffer *ring,
1624 u32 invalidate, u32 flush) 1686 u32 invalidate, u32 flush)
1625{ 1687{
1626 uint32_t cmd; 1688 uint32_t cmd;
1627 int ret; 1689 int ret;
@@ -1664,15 +1726,18 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1664 ring->flush = gen6_render_ring_flush; 1726 ring->flush = gen6_render_ring_flush;
1665 ring->irq_get = gen6_ring_get_irq; 1727 ring->irq_get = gen6_ring_get_irq;
1666 ring->irq_put = gen6_ring_put_irq; 1728 ring->irq_put = gen6_ring_put_irq;
1667 ring->irq_enable_mask = GT_USER_INTERRUPT; 1729 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1668 ring->get_seqno = gen6_ring_get_seqno; 1730 ring->get_seqno = gen6_ring_get_seqno;
1669 ring->set_seqno = ring_set_seqno; 1731 ring->set_seqno = ring_set_seqno;
1670 ring->sync_to = gen6_ring_sync; 1732 ring->sync_to = gen6_ring_sync;
1671 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; 1733 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1672 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; 1734 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
1673 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; 1735 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
1674 ring->signal_mbox[0] = GEN6_VRSYNC; 1736 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
1675 ring->signal_mbox[1] = GEN6_BRSYNC; 1737 ring->signal_mbox[RCS] = GEN6_NOSYNC;
1738 ring->signal_mbox[VCS] = GEN6_VRSYNC;
1739 ring->signal_mbox[BCS] = GEN6_BRSYNC;
1740 ring->signal_mbox[VECS] = GEN6_VERSYNC;
1676 } else if (IS_GEN5(dev)) { 1741 } else if (IS_GEN5(dev)) {
1677 ring->add_request = pc_render_add_request; 1742 ring->add_request = pc_render_add_request;
1678 ring->flush = gen4_render_ring_flush; 1743 ring->flush = gen4_render_ring_flush;
@@ -1680,7 +1745,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1680 ring->set_seqno = pc_render_set_seqno; 1745 ring->set_seqno = pc_render_set_seqno;
1681 ring->irq_get = gen5_ring_get_irq; 1746 ring->irq_get = gen5_ring_get_irq;
1682 ring->irq_put = gen5_ring_put_irq; 1747 ring->irq_put = gen5_ring_put_irq;
1683 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; 1748 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
1749 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
1684 } else { 1750 } else {
1685 ring->add_request = i9xx_add_request; 1751 ring->add_request = i9xx_add_request;
1686 if (INTEL_INFO(dev)->gen < 4) 1752 if (INTEL_INFO(dev)->gen < 4)
@@ -1818,20 +1884,23 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1818 /* gen6 bsd needs a special wa for tail updates */ 1884 /* gen6 bsd needs a special wa for tail updates */
1819 if (IS_GEN6(dev)) 1885 if (IS_GEN6(dev))
1820 ring->write_tail = gen6_bsd_ring_write_tail; 1886 ring->write_tail = gen6_bsd_ring_write_tail;
1821 ring->flush = gen6_ring_flush; 1887 ring->flush = gen6_bsd_ring_flush;
1822 ring->add_request = gen6_add_request; 1888 ring->add_request = gen6_add_request;
1823 ring->get_seqno = gen6_ring_get_seqno; 1889 ring->get_seqno = gen6_ring_get_seqno;
1824 ring->set_seqno = ring_set_seqno; 1890 ring->set_seqno = ring_set_seqno;
1825 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; 1891 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1826 ring->irq_get = gen6_ring_get_irq; 1892 ring->irq_get = gen6_ring_get_irq;
1827 ring->irq_put = gen6_ring_put_irq; 1893 ring->irq_put = gen6_ring_put_irq;
1828 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1894 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1829 ring->sync_to = gen6_ring_sync; 1895 ring->sync_to = gen6_ring_sync;
1830 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; 1896 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
1831 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; 1897 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
1832 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; 1898 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
1833 ring->signal_mbox[0] = GEN6_RVSYNC; 1899 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
1834 ring->signal_mbox[1] = GEN6_BVSYNC; 1900 ring->signal_mbox[RCS] = GEN6_RVSYNC;
1901 ring->signal_mbox[VCS] = GEN6_NOSYNC;
1902 ring->signal_mbox[BCS] = GEN6_BVSYNC;
1903 ring->signal_mbox[VECS] = GEN6_VEVSYNC;
1835 } else { 1904 } else {
1836 ring->mmio_base = BSD_RING_BASE; 1905 ring->mmio_base = BSD_RING_BASE;
1837 ring->flush = bsd_ring_flush; 1906 ring->flush = bsd_ring_flush;
@@ -1839,7 +1908,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1839 ring->get_seqno = ring_get_seqno; 1908 ring->get_seqno = ring_get_seqno;
1840 ring->set_seqno = ring_set_seqno; 1909 ring->set_seqno = ring_set_seqno;
1841 if (IS_GEN5(dev)) { 1910 if (IS_GEN5(dev)) {
1842 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 1911 ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
1843 ring->irq_get = gen5_ring_get_irq; 1912 ring->irq_get = gen5_ring_get_irq;
1844 ring->irq_put = gen5_ring_put_irq; 1913 ring->irq_put = gen5_ring_put_irq;
1845 } else { 1914 } else {
@@ -1864,20 +1933,56 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1864 1933
1865 ring->mmio_base = BLT_RING_BASE; 1934 ring->mmio_base = BLT_RING_BASE;
1866 ring->write_tail = ring_write_tail; 1935 ring->write_tail = ring_write_tail;
1867 ring->flush = blt_ring_flush; 1936 ring->flush = gen6_ring_flush;
1868 ring->add_request = gen6_add_request; 1937 ring->add_request = gen6_add_request;
1869 ring->get_seqno = gen6_ring_get_seqno; 1938 ring->get_seqno = gen6_ring_get_seqno;
1870 ring->set_seqno = ring_set_seqno; 1939 ring->set_seqno = ring_set_seqno;
1871 ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; 1940 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
1872 ring->irq_get = gen6_ring_get_irq; 1941 ring->irq_get = gen6_ring_get_irq;
1873 ring->irq_put = gen6_ring_put_irq; 1942 ring->irq_put = gen6_ring_put_irq;
1874 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1943 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1875 ring->sync_to = gen6_ring_sync; 1944 ring->sync_to = gen6_ring_sync;
1876 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; 1945 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
1877 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; 1946 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
1878 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; 1947 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
1879 ring->signal_mbox[0] = GEN6_RBSYNC; 1948 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
1880 ring->signal_mbox[1] = GEN6_VBSYNC; 1949 ring->signal_mbox[RCS] = GEN6_RBSYNC;
1950 ring->signal_mbox[VCS] = GEN6_VBSYNC;
1951 ring->signal_mbox[BCS] = GEN6_NOSYNC;
1952 ring->signal_mbox[VECS] = GEN6_VEBSYNC;
1953 ring->init = init_ring_common;
1954
1955 return intel_init_ring_buffer(dev, ring);
1956}
1957
1958int intel_init_vebox_ring_buffer(struct drm_device *dev)
1959{
1960 drm_i915_private_t *dev_priv = dev->dev_private;
1961 struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
1962
1963 ring->name = "video enhancement ring";
1964 ring->id = VECS;
1965
1966 ring->mmio_base = VEBOX_RING_BASE;
1967 ring->write_tail = ring_write_tail;
1968 ring->flush = gen6_ring_flush;
1969 ring->add_request = gen6_add_request;
1970 ring->get_seqno = gen6_ring_get_seqno;
1971 ring->set_seqno = ring_set_seqno;
1972 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
1973 PM_VEBOX_CS_ERROR_INTERRUPT;
1974 ring->irq_get = hsw_vebox_get_irq;
1975 ring->irq_put = hsw_vebox_put_irq;
1976 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1977 ring->sync_to = gen6_ring_sync;
1978 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
1979 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
1980 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
1981 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
1982 ring->signal_mbox[RCS] = GEN6_RVESYNC;
1983 ring->signal_mbox[VCS] = GEN6_VVESYNC;
1984 ring->signal_mbox[BCS] = GEN6_BVESYNC;
1985 ring->signal_mbox[VECS] = GEN6_NOSYNC;
1881 ring->init = init_ring_common; 1986 ring->init = init_ring_common;
1882 1987
1883 return intel_init_ring_buffer(dev, ring); 1988 return intel_init_ring_buffer(dev, ring);
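
With the vebox in place, semaphore_register[] and signal_mbox[] are indexed by intel_ring_id across all four init functions, and each ring's own slot holds a sentinel, since a ring never semaphore-waits on or signals itself. A hedged, purely illustrative self-check of that invariant (not part of the patch):

static void check_semaphore_tables(struct drm_i915_private *dev_priv)
{
        int i;

        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_ring_buffer *ring = &dev_priv->ring[i];

                /* self-wait and self-signal slots must stay sentinels */
                WARN_ON(ring->semaphore_register[i] != MI_SEMAPHORE_SYNC_INVALID);
                WARN_ON(ring->signal_mbox[i] != GEN6_NOSYNC);
        }
}
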
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index dac1614a1bca..022d07e43d12 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -37,14 +37,19 @@ struct intel_hw_status_page {
37#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) 37#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
38#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) 38#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
39 39
40struct intel_ring_hangcheck {
41 u32 seqno;
42};
43
40struct intel_ring_buffer { 44struct intel_ring_buffer {
41 const char *name; 45 const char *name;
42 enum intel_ring_id { 46 enum intel_ring_id {
43 RCS = 0x0, 47 RCS = 0x0,
44 VCS, 48 VCS,
45 BCS, 49 BCS,
50 VECS,
46 } id; 51 } id;
47#define I915_NUM_RINGS 3 52#define I915_NUM_RINGS 4
48 u32 mmio_base; 53 u32 mmio_base;
49 void __iomem *virtual_start; 54 void __iomem *virtual_start;
50 struct drm_device *dev; 55 struct drm_device *dev;
@@ -67,7 +72,10 @@ struct intel_ring_buffer {
67 */ 72 */
68 u32 last_retired_head; 73 u32 last_retired_head;
69 74
70 u32 irq_refcount; /* protected by dev_priv->irq_lock */ 75 struct {
76 u32 gt; /* protected by dev_priv->irq_lock */
77 u32 pm; /* protected by dev_priv->rps.lock (sucks) */
78 } irq_refcount;
71 u32 irq_enable_mask; /* bitmask to enable ring interrupt */ 79 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
72 u32 trace_irq_seqno; 80 u32 trace_irq_seqno;
73 u32 sync_seqno[I915_NUM_RINGS-1]; 81 u32 sync_seqno[I915_NUM_RINGS-1];
@@ -102,8 +110,11 @@ struct intel_ring_buffer {
102 struct intel_ring_buffer *to, 110 struct intel_ring_buffer *to,
103 u32 seqno); 111 u32 seqno);
104 112
105 u32 semaphore_register[3]; /*our mbox written by others */ 113 /* our mbox written by others */
106 u32 signal_mbox[2]; /* mboxes this ring signals to */ 114 u32 semaphore_register[I915_NUM_RINGS];
115 /* mboxes this ring signals to */
116 u32 signal_mbox[I915_NUM_RINGS];
117
107 /** 118 /**
108 * List of objects currently involved in rendering from the 119 * List of objects currently involved in rendering from the
109 * ringbuffer. 120 * ringbuffer.
@@ -137,6 +148,8 @@ struct intel_ring_buffer {
137 struct i915_hw_context *default_context; 148 struct i915_hw_context *default_context;
138 struct i915_hw_context *last_context; 149 struct i915_hw_context *last_context;
139 150
151 struct intel_ring_hangcheck hangcheck;
152
140 void *private; 153 void *private;
141}; 154};
142 155
@@ -224,6 +237,7 @@ int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
224int intel_init_render_ring_buffer(struct drm_device *dev); 237int intel_init_render_ring_buffer(struct drm_device *dev);
225int intel_init_bsd_ring_buffer(struct drm_device *dev); 238int intel_init_bsd_ring_buffer(struct drm_device *dev);
226int intel_init_blt_ring_buffer(struct drm_device *dev); 239int intel_init_blt_ring_buffer(struct drm_device *dev);
240int intel_init_vebox_ring_buffer(struct drm_device *dev);
227 241
228u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 242u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
229void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 243void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
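
The new struct intel_ring_hangcheck gives each ring a seqno snapshot, seeded by intel_ring_init_seqno() in the ringbuffer.c hunk above, so a periodic hangcheck can tell whether the ring advanced between samples. A hedged sketch of that sampling pattern (the real consumer lives in i915_irq.c):

static bool ring_made_progress(struct intel_ring_buffer *ring)
{
        u32 seqno = ring->get_seqno(ring, false);
        bool progressed = (seqno != ring->hangcheck.seqno);

        ring->hangcheck.seqno = seqno;  /* remember for the next sample */
        return progressed;
}
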
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 78f0631b1c43..7068195376ef 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -712,6 +712,13 @@ static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
712 intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2)); 712 intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
713} 713}
714 714
715static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
716 struct intel_sdvo_dtd *dtd)
717{
718 return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
719 intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
720}
721
715static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo, 722static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
716 struct intel_sdvo_dtd *dtd) 723 struct intel_sdvo_dtd *dtd)
717{ 724{
@@ -726,6 +733,13 @@ static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
726 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); 733 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
727} 734}
728 735
736static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
737 struct intel_sdvo_dtd *dtd)
738{
739 return intel_sdvo_get_timing(intel_sdvo,
740 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
741}
742
729static bool 743static bool
730intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, 744intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
731 uint16_t clock, 745 uint16_t clock,
@@ -1295,6 +1309,33 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1295 return true; 1309 return true;
1296} 1310}
1297 1311
1312static void intel_sdvo_get_config(struct intel_encoder *encoder,
1313 struct intel_crtc_config *pipe_config)
1314{
1315 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1316 struct intel_sdvo_dtd dtd;
1317 u32 flags = 0;
1318 bool ret;
1319
1320 ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
1321 if (!ret) {
1322 DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
1323 return;
1324 }
1325
1326 if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
1327 flags |= DRM_MODE_FLAG_PHSYNC;
1328 else
1329 flags |= DRM_MODE_FLAG_NHSYNC;
1330
1331 if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
1332 flags |= DRM_MODE_FLAG_PVSYNC;
1333 else
1334 flags |= DRM_MODE_FLAG_NVSYNC;
1335
1336 pipe_config->adjusted_mode.flags |= flags;
1337}
1338
1298static void intel_disable_sdvo(struct intel_encoder *encoder) 1339static void intel_disable_sdvo(struct intel_encoder *encoder)
1299{ 1340{
1300 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1341 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -1375,6 +1416,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
1375 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); 1416 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
1376} 1417}
1377 1418
1419/* Special dpms function to support cloning between dvo/sdvo/crt. */
1378static void intel_sdvo_dpms(struct drm_connector *connector, int mode) 1420static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
1379{ 1421{
1380 struct drm_crtc *crtc; 1422 struct drm_crtc *crtc;
@@ -1396,6 +1438,8 @@ static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
1396 return; 1438 return;
1397 } 1439 }
1398 1440
1441 /* We set active outputs manually below in case pipe dpms doesn't change
1442 * due to cloning. */
1399 if (mode != DRM_MODE_DPMS_ON) { 1443 if (mode != DRM_MODE_DPMS_ON) {
1400 intel_sdvo_set_active_outputs(intel_sdvo, 0); 1444 intel_sdvo_set_active_outputs(intel_sdvo, 0);
1401 if (0) 1445 if (0)
@@ -2827,6 +2871,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2827 intel_encoder->mode_set = intel_sdvo_mode_set; 2871 intel_encoder->mode_set = intel_sdvo_mode_set;
2828 intel_encoder->enable = intel_enable_sdvo; 2872 intel_encoder->enable = intel_enable_sdvo;
2829 intel_encoder->get_hw_state = intel_sdvo_get_hw_state; 2873 intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
2874 intel_encoder->get_config = intel_sdvo_get_config;
2830 2875
2831 /* In default case sdvo lvds is false */ 2876 /* In default case sdvo lvds is false */
2832 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) 2877 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
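
intel_sdvo_get_config() reads the sync polarities back out of the hardware DTD so the modeset state checker can compare hardware and software pipe config. A hedged sketch of how a checker might consume the new hook; the struct layout is assumed from the usage above, and the real cross-check lives in intel_display.c:

static void check_encoder_flags(struct intel_encoder *encoder,
                                const struct intel_crtc_config *sw)
{
        struct intel_crtc_config hw = { };

        encoder->get_config(encoder, &hw);      /* e.g. intel_sdvo_get_config() */
        WARN(hw.adjusted_mode.flags != sw->adjusted_mode.flags,
             "encoder flags mismatch: hw 0x%x, sw 0x%x\n",
             hw.adjusted_mode.flags, sw->adjusted_mode.flags);
}
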
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
new file mode 100644
index 000000000000..9a0e6c5ea540
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "i915_drv.h"
26#include "intel_drv.h"
27
28/* IOSF sideband */
29static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
30 u32 port, u32 opcode, u32 addr, u32 *val)
31{
32 u32 cmd, be = 0xf, bar = 0;
33 bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
34 opcode == DPIO_OPCODE_REG_READ);
35
36 cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
37 (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
38 (bar << IOSF_BAR_SHIFT);
39
40 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
41
42 if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
43 DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
44 is_read ? "read" : "write");
45 return -EAGAIN;
46 }
47
48 I915_WRITE(VLV_IOSF_ADDR, addr);
49 if (!is_read)
50 I915_WRITE(VLV_IOSF_DATA, *val);
51 I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
52
53 if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
54 DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
55 is_read ? "read" : "write");
56 return -ETIMEDOUT;
57 }
58
59 if (is_read)
60 *val = I915_READ(VLV_IOSF_DATA);
61 I915_WRITE(VLV_IOSF_DATA, 0);
62
63 return 0;
64}
65
66u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
67{
68 u32 val = 0;
69
70 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
71
72 mutex_lock(&dev_priv->dpio_lock);
73 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
74 PUNIT_OPCODE_REG_READ, addr, &val);
75 mutex_unlock(&dev_priv->dpio_lock);
76
77 return val;
78}
79
80void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
81{
82 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
83
84 mutex_lock(&dev_priv->dpio_lock);
85 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
86 PUNIT_OPCODE_REG_WRITE, addr, &val);
87 mutex_unlock(&dev_priv->dpio_lock);
88}
89
90u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
91{
92 u32 val = 0;
93
94 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
95
96 mutex_lock(&dev_priv->dpio_lock);
97 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
98 PUNIT_OPCODE_REG_READ, addr, &val);
99 mutex_unlock(&dev_priv->dpio_lock);
100
101 return val;
102}
103
104u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
105{
106 u32 val = 0;
107
108 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
109 DPIO_OPCODE_REG_READ, reg, &val);
110
111 return val;
112}
113
114void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
115{
116 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
117 DPIO_OPCODE_REG_WRITE, reg, &val);
118}
119
120/* SBI access */
121u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
122 enum intel_sbi_destination destination)
123{
124 u32 value = 0;
125 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
126
127 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
128 100)) {
129 DRM_ERROR("timeout waiting for SBI to become ready\n");
130 return 0;
131 }
132
133 I915_WRITE(SBI_ADDR, (reg << 16));
134
135 if (destination == SBI_ICLK)
136 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
137 else
138 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
139 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
140
141 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
142 100)) {
143 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
144 return 0;
145 }
146
147 return I915_READ(SBI_DATA);
148}
149
150void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
151 enum intel_sbi_destination destination)
152{
153 u32 tmp;
154
155 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
156
157 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
158 100)) {
159 DRM_ERROR("timeout waiting for SBI to become ready\n");
160 return;
161 }
162
163 I915_WRITE(SBI_ADDR, (reg << 16));
164 I915_WRITE(SBI_DATA, value);
165
166 if (destination == SBI_ICLK)
167 tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
168 else
169 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
170 I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
171
172 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
173 100)) {
174 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
175 return;
176 }
177}
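
Note the locking split: the Punit/NC helpers assert rps.hw_lock and take dpio_lock around the IOSF transaction themselves, so callers only hold the former. A hedged usage sketch; PUNIT_REG_GPU_FREQ_STS is assumed to be a Punit register offset defined in i915_reg.h:

        u32 freq_sts;

        mutex_lock(&dev_priv->rps.hw_lock);
        freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
        mutex_unlock(&dev_priv->rps.hw_lock);
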
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 19b9cb961b5a..04d38d4d811a 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -114,7 +114,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
114 crtc_w--; 114 crtc_w--;
115 crtc_h--; 115 crtc_h--;
116 116
117 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); 117 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
118 118
119 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); 119 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
120 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); 120 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@@ -268,7 +268,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
268 crtc_w--; 268 crtc_w--;
269 crtc_h--; 269 crtc_h--;
270 270
271 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); 271 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
272 272
273 /* 273 /*
274 * IVB workaround: must disable low power watermarks for at least 274 * IVB workaround: must disable low power watermarks for at least
@@ -335,6 +335,8 @@ ivb_disable_plane(struct drm_plane *plane)
335 335
336 dev_priv->sprite_scaling_enabled &= ~(1 << pipe); 336 dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
337 337
338 intel_update_sprite_watermarks(dev, pipe, 0, 0, false);
339
338 /* potentially re-enable LP watermarks */ 340 /* potentially re-enable LP watermarks */
339 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 341 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
340 intel_update_watermarks(dev); 342 intel_update_watermarks(dev);
@@ -453,7 +455,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
453 crtc_w--; 455 crtc_w--;
454 crtc_h--; 456 crtc_h--;
455 457
456 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); 458 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
457 459
458 dvsscale = 0; 460 dvsscale = 0;
459 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) 461 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 64fa265c6ffb..d1286297567b 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -25,7 +25,14 @@
25#define DRM_RECT_H 25#define DRM_RECT_H
26 26
27/** 27/**
28 * drm_rect - two dimensional rectangle 28 * DOC: rect utils
29 *
30 * Utility functions to help manage rectangular areas for
31 * clipping, scaling, etc. calculations.
32 */
33
34/**
35 * struct drm_rect - two dimensional rectangle
29 * @x1: horizontal starting coordinate (inclusive) 36 * @x1: horizontal starting coordinate (inclusive)
30 * @x2: horizontal ending coordinate (exclusive) 37 * @x2: horizontal ending coordinate (exclusive)
31 * @y1: vertical starting coordinate (inclusive) 38 * @y1: vertical starting coordinate (inclusive)
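
The DOC: block lets the new DocBook section pull in both the inline helpers and drm_rect.c. A minimal usage sketch, assuming the drm_rect_intersect() helper exported from drm_rect.c, which clips r1 to r2 in place and returns whether anything remains visible:

        #include <drm/drm_rect.h>

        struct drm_rect dst = { .x1 = -16, .y1 = 0, .x2 = 1024, .y2 = 768 };
        const struct drm_rect clip = { .x1 = 0, .y1 = 0, .x2 = 1920, .y2 = 1080 };
        bool visible = drm_rect_intersect(&dst, &clip);
        /* dst is now { 0, 0, 1024, 768 } and visible is true */
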
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 07d59419fe6b..923ed7fe5775 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -305,7 +305,7 @@ typedef struct drm_i915_irq_wait {
305#define I915_PARAM_HAS_WAIT_TIMEOUT 19 305#define I915_PARAM_HAS_WAIT_TIMEOUT 19
306#define I915_PARAM_HAS_SEMAPHORES 20 306#define I915_PARAM_HAS_SEMAPHORES 20
307#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 307#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
308#define I915_PARAM_RSVD_FOR_FUTURE_USE 22 308#define I915_PARAM_HAS_VEBOX 22
309#define I915_PARAM_HAS_SECURE_BATCHES 23 309#define I915_PARAM_HAS_SECURE_BATCHES 23
310#define I915_PARAM_HAS_PINNED_BATCHES 24 310#define I915_PARAM_HAS_PINNED_BATCHES 24
311#define I915_PARAM_HAS_EXEC_NO_RELOC 25 311#define I915_PARAM_HAS_EXEC_NO_RELOC 25
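
Repurposing the reserved getparam slot lets userspace probe for the vebox before using the I915_EXEC_VEBOX ring selector added in the next hunk. A hedged libdrm-style sketch (fd setup and buffer plumbing elided, error handling omitted):

#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

int has_vebox = 0;
struct drm_i915_getparam gp = {
        .param = I915_PARAM_HAS_VEBOX,
        .value = &has_vebox,
};
drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);

if (has_vebox) {
        struct drm_i915_gem_execbuffer2 execbuf;

        memset(&execbuf, 0, sizeof(execbuf));
        /* buffers_ptr, buffer_count, batch_len filled in as usual */
        execbuf.flags = I915_EXEC_VEBOX;        /* ring selector, like I915_EXEC_BSD */
        drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
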
@@ -660,6 +660,7 @@ struct drm_i915_gem_execbuffer2 {
660#define I915_EXEC_RENDER (1<<0) 660#define I915_EXEC_RENDER (1<<0)
661#define I915_EXEC_BSD (2<<0) 661#define I915_EXEC_BSD (2<<0)
662#define I915_EXEC_BLT (3<<0) 662#define I915_EXEC_BLT (3<<0)
663#define I915_EXEC_VEBOX (4<<0)
663 664
664/* Used for switching the constants addressing mode on gen4+ RENDER ring. 665/* Used for switching the constants addressing mode on gen4+ RENDER ring.
665 * Gen6+ only supports relative addressing to dynamic state (default) and 666 * Gen6+ only supports relative addressing to dynamic state (default) and