aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2013-07-03 20:46:17 -0400
committerDave Airlie <airlied@redhat.com>2013-07-03 20:46:17 -0400
commitd0aaa2836aa3f12b4ebf3e21811616a083c8c91b (patch)
tree7702a9d1b553890f40075d7e35dc2f6bd2670799 /drivers
parent1586ba727f08293f1b48f40683b59746f9f71d0b (diff)
parent446f8d81ca2d9cefb614e87f2fabcc996a9e4e7e (diff)
Merge tag 'drm-intel-fixes-2013-07-03' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Pile of fixes for 3.11. A bit large in patch count, but that's simply due to two fixes being split up into really small parts. Also I've included a few more vlv patches than I'd have included for other platforms. But since vlv is officially supported for the first time only in 3.11 that shouldn't result in unbearable risks. Highlights: - ghost eDP fixes for hsw from Paulo - fix PCH detection in virtualized environments (Rui Guo) - duct-tape dma sg construction when swiotlb is in use (Konrad), dupe with a patch in your drm-fixes branch - fix sdvo hotplug on i965g - tune down a bunch of dmesg ERRORs which can be hit under normal conditions - detect invalid pitches for tiled scanout buffers (Chris) - a pile of vlv fixes from Ville: rps improvements, fixes for the dpll LPF, fixup the sprite mmio offsets - fix context size on hsw (Ben) - locking fixes for the hotplug code, specifically the storm handling - fix get_config on CPT (Xiong Zhang) - Fix the domain tracking when an unlocked seqno wait was interrupted (Chris), this seems to explain tons of little corruption bugs in the ddx. Chris also added a nice igt to exercise this. 
- work around stack-corrupting vsnprintf in our error state dumper * tag 'drm-intel-fixes-2013-07-03' of git://people.freedesktop.org/~danvet/drm-intel: (39 commits) drm/i915: Don't try to tear down the stolen drm_mm if it's not there drm/i915: Break up the large vsnprintf() in print_error_buffers() drm/i915: Refactor the wait_rendering completion into a common routine drm/i915: Only clear write-domains after a successful wait-seqno drm/i915: correct intel_dp_get_config() function for DevCPT drm/i915: fix hpd interrupt register locking drm/i915: fold the no-irq check into intel_hpd_irq_handler drm/i915: fold the queue_work into intel_hpd_irq_handler drm/i915: fold the hpd_irq_setup call into intel_hpd_irq_handler drm/i915: s/hotplug_irq_storm_detect/intel_hpd_irq_handler/ drm/i915: close tiny race in the ilk pcu even interrupt setup drm/i915: fix locking around ironlake_enable|disable_display_irq drm/i915: Fix context sizes on HSW drm/i915: Fix VLV sprite register offsets Revert "drm/i915: Don't use the HDMI port color range bit on Valleyview" drm/i915: s/LFP/LPF in DPIO PLL register names drm/i915: Fix VLV PLL LPF coefficients for DAC drm/i915: Jump to at least RPe on VLV when increasing the GPU frequency drm/i915: Don't increase the GPU frequency from the delayed VLV rps timer drm/i915: GEN6_RP_INTERRUPT_LIMITS doesn't seem to exist on VLV ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c119
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c18
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c64
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c9
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c137
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h84
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c7
-rw-r--r--drivers/gpu/drm/i915/intel_display.c38
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c215
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h5
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c8
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c110
15 files changed, 498 insertions, 322 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d4e78b64ca87..47d6c748057e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -647,41 +647,44 @@ static const char *purgeable_flag(int purgeable)
647 return purgeable ? " purgeable" : ""; 647 return purgeable ? " purgeable" : "";
648} 648}
649 649
650static void i915_error_vprintf(struct drm_i915_error_state_buf *e, 650static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
651 const char *f, va_list args)
652{ 651{
653 unsigned len;
654 652
655 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) { 653 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
656 e->err = -ENOSPC; 654 e->err = -ENOSPC;
657 return; 655 return false;
658 } 656 }
659 657
660 if (e->bytes == e->size - 1 || e->err) 658 if (e->bytes == e->size - 1 || e->err)
661 return; 659 return false;
662 660
663 /* Seek the first printf which is hits start position */ 661 return true;
664 if (e->pos < e->start) { 662}
665 len = vsnprintf(NULL, 0, f, args);
666 if (e->pos + len <= e->start) {
667 e->pos += len;
668 return;
669 }
670 663
671 /* First vsnprintf needs to fit in full for memmove*/ 664static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
672 if (len >= e->size) { 665 unsigned len)
673 e->err = -EIO; 666{
674 return; 667 if (e->pos + len <= e->start) {
675 } 668 e->pos += len;
669 return false;
676 } 670 }
677 671
678 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); 672 /* First vsnprintf needs to fit in its entirety for memmove */
679 if (len >= e->size - e->bytes) 673 if (len >= e->size) {
680 len = e->size - e->bytes - 1; 674 e->err = -EIO;
675 return false;
676 }
681 677
678 return true;
679}
680
681static void __i915_error_advance(struct drm_i915_error_state_buf *e,
682 unsigned len)
683{
682 /* If this is first printf in this window, adjust it so that 684 /* If this is first printf in this window, adjust it so that
683 * start position matches start of the buffer 685 * start position matches start of the buffer
684 */ 686 */
687
685 if (e->pos < e->start) { 688 if (e->pos < e->start) {
686 const size_t off = e->start - e->pos; 689 const size_t off = e->start - e->pos;
687 690
@@ -701,6 +704,51 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
701 e->pos += len; 704 e->pos += len;
702} 705}
703 706
707static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
708 const char *f, va_list args)
709{
710 unsigned len;
711
712 if (!__i915_error_ok(e))
713 return;
714
715 /* Seek the first printf which is hits start position */
716 if (e->pos < e->start) {
717 len = vsnprintf(NULL, 0, f, args);
718 if (!__i915_error_seek(e, len))
719 return;
720 }
721
722 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
723 if (len >= e->size - e->bytes)
724 len = e->size - e->bytes - 1;
725
726 __i915_error_advance(e, len);
727}
728
729static void i915_error_puts(struct drm_i915_error_state_buf *e,
730 const char *str)
731{
732 unsigned len;
733
734 if (!__i915_error_ok(e))
735 return;
736
737 len = strlen(str);
738
739 /* Seek the first printf which is hits start position */
740 if (e->pos < e->start) {
741 if (!__i915_error_seek(e, len))
742 return;
743 }
744
745 if (len >= e->size - e->bytes)
746 len = e->size - e->bytes - 1;
747 memcpy(e->buf + e->bytes, str, len);
748
749 __i915_error_advance(e, len);
750}
751
704void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 752void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
705{ 753{
706 va_list args; 754 va_list args;
@@ -711,6 +759,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
711} 759}
712 760
713#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 761#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
762#define err_puts(e, s) i915_error_puts(e, s)
714 763
715static void print_error_buffers(struct drm_i915_error_state_buf *m, 764static void print_error_buffers(struct drm_i915_error_state_buf *m,
716 const char *name, 765 const char *name,
@@ -720,26 +769,26 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
720 err_printf(m, "%s [%d]:\n", name, count); 769 err_printf(m, "%s [%d]:\n", name, count);
721 770
722 while (count--) { 771 while (count--) {
723 err_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s", 772 err_printf(m, " %08x %8u %02x %02x %x %x",
724 err->gtt_offset, 773 err->gtt_offset,
725 err->size, 774 err->size,
726 err->read_domains, 775 err->read_domains,
727 err->write_domain, 776 err->write_domain,
728 err->rseqno, err->wseqno, 777 err->rseqno, err->wseqno);
729 pin_flag(err->pinned), 778 err_puts(m, pin_flag(err->pinned));
730 tiling_flag(err->tiling), 779 err_puts(m, tiling_flag(err->tiling));
731 dirty_flag(err->dirty), 780 err_puts(m, dirty_flag(err->dirty));
732 purgeable_flag(err->purgeable), 781 err_puts(m, purgeable_flag(err->purgeable));
733 err->ring != -1 ? " " : "", 782 err_puts(m, err->ring != -1 ? " " : "");
734 ring_str(err->ring), 783 err_puts(m, ring_str(err->ring));
735 cache_level_str(err->cache_level)); 784 err_puts(m, cache_level_str(err->cache_level));
736 785
737 if (err->name) 786 if (err->name)
738 err_printf(m, " (name: %d)", err->name); 787 err_printf(m, " (name: %d)", err->name);
739 if (err->fence_reg != I915_FENCE_REG_NONE) 788 if (err->fence_reg != I915_FENCE_REG_NONE)
740 err_printf(m, " (fence: %d)", err->fence_reg); 789 err_printf(m, " (fence: %d)", err->fence_reg);
741 790
742 err_printf(m, "\n"); 791 err_puts(m, "\n");
743 err++; 792 err++;
744 } 793 }
745} 794}
@@ -1483,7 +1532,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
1483 struct drm_device *dev = node->minor->dev; 1532 struct drm_device *dev = node->minor->dev;
1484 struct drm_i915_private *dev_priv = dev->dev_private; 1533 struct drm_i915_private *dev_priv = dev->dev_private;
1485 1534
1486 if (!IS_ULT(dev)) { 1535 if (!HAS_IPS(dev)) {
1487 seq_puts(m, "not supported\n"); 1536 seq_puts(m, "not supported\n");
1488 return 0; 1537 return 0;
1489 } 1538 }
@@ -1862,10 +1911,10 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1862 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1911 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1863 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1912 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
1864 1913
1865 seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", 1914 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
1866 vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); 1915 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
1867 seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", 1916 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
1868 vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); 1917 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
1869 1918
1870 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1919 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1871 vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1920 vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index deaa32e8113b..062cbda1bf4a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -465,9 +465,15 @@ void intel_detect_pch(struct drm_device *dev)
465 * make graphics device passthrough work easy for VMM, that only 465 * make graphics device passthrough work easy for VMM, that only
466 * need to expose ISA bridge to let driver know the real hardware 466 * need to expose ISA bridge to let driver know the real hardware
467 * underneath. This is a requirement from virtualization team. 467 * underneath. This is a requirement from virtualization team.
468 *
469 * In some virtualized environments (e.g. XEN), there is irrelevant
470 * ISA bridge in the system. To work reliably, we should scan trhough
471 * all the ISA bridge devices and check for the first match, instead
472 * of only checking the first one.
468 */ 473 */
469 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); 474 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
470 if (pch) { 475 while (pch) {
476 struct pci_dev *curr = pch;
471 if (pch->vendor == PCI_VENDOR_ID_INTEL) { 477 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
472 unsigned short id; 478 unsigned short id;
473 id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 479 id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
@@ -496,10 +502,18 @@ void intel_detect_pch(struct drm_device *dev)
496 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 502 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
497 WARN_ON(!IS_HASWELL(dev)); 503 WARN_ON(!IS_HASWELL(dev));
498 WARN_ON(!IS_ULT(dev)); 504 WARN_ON(!IS_ULT(dev));
505 } else {
506 goto check_next;
499 } 507 }
508 pci_dev_put(pch);
509 break;
500 } 510 }
501 pci_dev_put(pch); 511check_next:
512 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
513 pci_dev_put(curr);
502 } 514 }
515 if (!pch)
516 DRM_DEBUG_KMS("No PCH found?\n");
503} 517}
504 518
505bool i915_semaphore_is_enabled(struct drm_device *dev) 519bool i915_semaphore_is_enabled(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9e1bf6dcbb2a..cc1d6056ab70 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1474,6 +1474,8 @@ struct drm_i915_file_private {
1474#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1474#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1475#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1475#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1476 1476
1477#define HAS_IPS(dev) (IS_ULT(dev))
1478
1477#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1479#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
1478 1480
1479#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 1481#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a6178baccb56..769f75262feb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1087,6 +1087,25 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1087 interruptible, NULL); 1087 interruptible, NULL);
1088} 1088}
1089 1089
1090static int
1091i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1092 struct intel_ring_buffer *ring)
1093{
1094 i915_gem_retire_requests_ring(ring);
1095
1096 /* Manually manage the write flush as we may have not yet
1097 * retired the buffer.
1098 *
1099 * Note that the last_write_seqno is always the earlier of
1100 * the two (read/write) seqno, so if we haved successfully waited,
1101 * we know we have passed the last write.
1102 */
1103 obj->last_write_seqno = 0;
1104 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1105
1106 return 0;
1107}
1108
1090/** 1109/**
1091 * Ensures that all rendering to the object has completed and the object is 1110 * Ensures that all rendering to the object has completed and the object is
1092 * safe to unbind from the GTT or access from the CPU. 1111 * safe to unbind from the GTT or access from the CPU.
@@ -1107,18 +1126,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1107 if (ret) 1126 if (ret)
1108 return ret; 1127 return ret;
1109 1128
1110 i915_gem_retire_requests_ring(ring); 1129 return i915_gem_object_wait_rendering__tail(obj, ring);
1111
1112 /* Manually manage the write flush as we may have not yet
1113 * retired the buffer.
1114 */
1115 if (obj->last_write_seqno &&
1116 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1117 obj->last_write_seqno = 0;
1118 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1119 }
1120
1121 return 0;
1122} 1130}
1123 1131
1124/* A nonblocking variant of the above wait. This is a highly dangerous routine 1132/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1154,19 +1162,10 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1154 mutex_unlock(&dev->struct_mutex); 1162 mutex_unlock(&dev->struct_mutex);
1155 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); 1163 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1156 mutex_lock(&dev->struct_mutex); 1164 mutex_lock(&dev->struct_mutex);
1165 if (ret)
1166 return ret;
1157 1167
1158 i915_gem_retire_requests_ring(ring); 1168 return i915_gem_object_wait_rendering__tail(obj, ring);
1159
1160 /* Manually manage the write flush as we may have not yet
1161 * retired the buffer.
1162 */
1163 if (obj->last_write_seqno &&
1164 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1165 obj->last_write_seqno = 0;
1166 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1167 }
1168
1169 return ret;
1170} 1169}
1171 1170
1172/** 1171/**
@@ -1802,7 +1801,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1802 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; 1801 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1803 gfp &= ~(__GFP_IO | __GFP_WAIT); 1802 gfp &= ~(__GFP_IO | __GFP_WAIT);
1804 } 1803 }
1805 1804#ifdef CONFIG_SWIOTLB
1805 if (swiotlb_nr_tbl()) {
1806 st->nents++;
1807 sg_set_page(sg, page, PAGE_SIZE, 0);
1808 sg = sg_next(sg);
1809 continue;
1810 }
1811#endif
1806 if (!i || page_to_pfn(page) != last_pfn + 1) { 1812 if (!i || page_to_pfn(page) != last_pfn + 1) {
1807 if (i) 1813 if (i)
1808 sg = sg_next(sg); 1814 sg = sg_next(sg);
@@ -1813,8 +1819,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1813 } 1819 }
1814 last_pfn = page_to_pfn(page); 1820 last_pfn = page_to_pfn(page);
1815 } 1821 }
1816 1822#ifdef CONFIG_SWIOTLB
1817 sg_mark_end(sg); 1823 if (!swiotlb_nr_tbl())
1824#endif
1825 sg_mark_end(sg);
1818 obj->pages = st; 1826 obj->pages = st;
1819 1827
1820 if (i915_gem_object_needs_bit17_swizzle(obj)) 1828 if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -3103,7 +3111,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3103 * before evicting everything in a vain attempt to find space. 3111 * before evicting everything in a vain attempt to find space.
3104 */ 3112 */
3105 if (obj->base.size > gtt_max) { 3113 if (obj->base.size > gtt_max) {
3106 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%ld\n", 3114 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3107 obj->base.size, 3115 obj->base.size,
3108 map_and_fenceable ? "mappable" : "total", 3116 map_and_fenceable ? "mappable" : "total",
3109 gtt_max); 3117 gtt_max);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index ff471454968d..51b7a2171cae 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -113,7 +113,7 @@ static int get_context_size(struct drm_device *dev)
113 case 7: 113 case 7:
114 reg = I915_READ(GEN7_CXT_SIZE); 114 reg = I915_READ(GEN7_CXT_SIZE);
115 if (IS_HASWELL(dev)) 115 if (IS_HASWELL(dev))
116 ret = HSW_CXT_TOTAL_SIZE(reg) * 64; 116 ret = HSW_CXT_TOTAL_SIZE;
117 else 117 else
118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; 118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
119 break; 119 break;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f713294618fe..982d4732cecf 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -147,7 +147,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
147{ 147{
148 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
149 149
150 if (dev_priv->mm.stolen_base == 0) 150 if (!drm_mm_initialized(&dev_priv->mm.stolen))
151 return -ENODEV; 151 return -ENODEV;
152 152
153 if (size < dev_priv->cfb_size) 153 if (size < dev_priv->cfb_size)
@@ -179,6 +179,9 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
179{ 179{
180 struct drm_i915_private *dev_priv = dev->dev_private; 180 struct drm_i915_private *dev_priv = dev->dev_private;
181 181
182 if (!drm_mm_initialized(&dev_priv->mm.stolen))
183 return;
184
182 i915_gem_stolen_cleanup_compression(dev); 185 i915_gem_stolen_cleanup_compression(dev);
183 drm_mm_takedown(&dev_priv->mm.stolen); 186 drm_mm_takedown(&dev_priv->mm.stolen);
184} 187}
@@ -300,7 +303,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
300 struct drm_i915_gem_object *obj; 303 struct drm_i915_gem_object *obj;
301 struct drm_mm_node *stolen; 304 struct drm_mm_node *stolen;
302 305
303 if (dev_priv->mm.stolen_base == 0) 306 if (!drm_mm_initialized(&dev_priv->mm.stolen))
304 return NULL; 307 return NULL;
305 308
306 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size); 309 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
@@ -331,7 +334,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
331 struct drm_i915_gem_object *obj; 334 struct drm_i915_gem_object *obj;
332 struct drm_mm_node *stolen; 335 struct drm_mm_node *stolen;
333 336
334 if (dev_priv->mm.stolen_base == 0) 337 if (!drm_mm_initialized(&dev_priv->mm.stolen))
335 return NULL; 338 return NULL;
336 339
337 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", 340 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7857430943ec..3d92a7cef154 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -70,15 +70,6 @@ static const u32 hpd_status_gen4[] = {
70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71}; 71};
72 72
73static const u32 hpd_status_i965[] = {
74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
76 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
77 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80};
81
82static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 73static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
83 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
@@ -88,13 +79,12 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
88 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89}; 80};
90 81
91static void ibx_hpd_irq_setup(struct drm_device *dev);
92static void i915_hpd_irq_setup(struct drm_device *dev);
93
94/* For display hotplug interrupt */ 82/* For display hotplug interrupt */
95static void 83static void
96ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 84ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
97{ 85{
86 assert_spin_locked(&dev_priv->irq_lock);
87
98 if ((dev_priv->irq_mask & mask) != 0) { 88 if ((dev_priv->irq_mask & mask) != 0) {
99 dev_priv->irq_mask &= ~mask; 89 dev_priv->irq_mask &= ~mask;
100 I915_WRITE(DEIMR, dev_priv->irq_mask); 90 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -105,6 +95,8 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
105static void 95static void
106ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 96ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
107{ 97{
98 assert_spin_locked(&dev_priv->irq_lock);
99
108 if ((dev_priv->irq_mask & mask) != mask) { 100 if ((dev_priv->irq_mask & mask) != mask) {
109 dev_priv->irq_mask |= mask; 101 dev_priv->irq_mask |= mask;
110 I915_WRITE(DEIMR, dev_priv->irq_mask); 102 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -118,6 +110,8 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
118 struct intel_crtc *crtc; 110 struct intel_crtc *crtc;
119 enum pipe pipe; 111 enum pipe pipe;
120 112
113 assert_spin_locked(&dev_priv->irq_lock);
114
121 for_each_pipe(pipe) { 115 for_each_pipe(pipe) {
122 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 116 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
123 117
@@ -708,16 +702,24 @@ static void gen6_pm_rps_work(struct work_struct *work)
708 702
709 mutex_lock(&dev_priv->rps.hw_lock); 703 mutex_lock(&dev_priv->rps.hw_lock);
710 704
711 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 705 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
712 new_delay = dev_priv->rps.cur_delay + 1; 706 new_delay = dev_priv->rps.cur_delay + 1;
713 else 707
708 /*
709 * For better performance, jump directly
710 * to RPe if we're below it.
711 */
712 if (IS_VALLEYVIEW(dev_priv->dev) &&
713 dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
714 new_delay = dev_priv->rps.rpe_delay;
715 } else
714 new_delay = dev_priv->rps.cur_delay - 1; 716 new_delay = dev_priv->rps.cur_delay - 1;
715 717
716 /* sysfs frequency interfaces may have snuck in while servicing the 718 /* sysfs frequency interfaces may have snuck in while servicing the
717 * interrupt 719 * interrupt
718 */ 720 */
719 if (!(new_delay > dev_priv->rps.max_delay || 721 if (new_delay >= dev_priv->rps.min_delay &&
720 new_delay < dev_priv->rps.min_delay)) { 722 new_delay <= dev_priv->rps.max_delay) {
721 if (IS_VALLEYVIEW(dev_priv->dev)) 723 if (IS_VALLEYVIEW(dev_priv->dev))
722 valleyview_set_rps(dev_priv->dev, new_delay); 724 valleyview_set_rps(dev_priv->dev, new_delay);
723 else 725 else
@@ -870,17 +872,18 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
870#define HPD_STORM_DETECT_PERIOD 1000 872#define HPD_STORM_DETECT_PERIOD 1000
871#define HPD_STORM_THRESHOLD 5 873#define HPD_STORM_THRESHOLD 5
872 874
873static inline bool hotplug_irq_storm_detect(struct drm_device *dev, 875static inline void intel_hpd_irq_handler(struct drm_device *dev,
874 u32 hotplug_trigger, 876 u32 hotplug_trigger,
875 const u32 *hpd) 877 const u32 *hpd)
876{ 878{
877 drm_i915_private_t *dev_priv = dev->dev_private; 879 drm_i915_private_t *dev_priv = dev->dev_private;
878 unsigned long irqflags;
879 int i; 880 int i;
880 bool ret = false; 881 bool storm_detected = false;
881 882
882 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 883 if (!hotplug_trigger)
884 return;
883 885
886 spin_lock(&dev_priv->irq_lock);
884 for (i = 1; i < HPD_NUM_PINS; i++) { 887 for (i = 1; i < HPD_NUM_PINS; i++) {
885 888
886 if (!(hpd[i] & hotplug_trigger) || 889 if (!(hpd[i] & hotplug_trigger) ||
@@ -897,15 +900,18 @@ static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
897 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 900 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
898 dev_priv->hpd_event_bits &= ~(1 << i); 901 dev_priv->hpd_event_bits &= ~(1 << i);
899 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 902 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
900 ret = true; 903 storm_detected = true;
901 } else { 904 } else {
902 dev_priv->hpd_stats[i].hpd_cnt++; 905 dev_priv->hpd_stats[i].hpd_cnt++;
903 } 906 }
904 } 907 }
905 908
906 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 909 if (storm_detected)
910 dev_priv->display.hpd_irq_setup(dev);
911 spin_unlock(&dev_priv->irq_lock);
907 912
908 return ret; 913 queue_work(dev_priv->wq,
914 &dev_priv->hotplug_work);
909} 915}
910 916
911static void gmbus_irq_handler(struct drm_device *dev) 917static void gmbus_irq_handler(struct drm_device *dev)
@@ -1012,12 +1018,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1012 1018
1013 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1019 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1014 hotplug_status); 1020 hotplug_status);
1015 if (hotplug_trigger) { 1021
1016 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) 1022 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1017 i915_hpd_irq_setup(dev); 1023
1018 queue_work(dev_priv->wq,
1019 &dev_priv->hotplug_work);
1020 }
1021 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1024 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1022 I915_READ(PORT_HOTPLUG_STAT); 1025 I915_READ(PORT_HOTPLUG_STAT);
1023 } 1026 }
@@ -1043,11 +1046,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1043 int pipe; 1046 int pipe;
1044 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1047 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1045 1048
1046 if (hotplug_trigger) { 1049 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1047 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx)) 1050
1048 ibx_hpd_irq_setup(dev);
1049 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
1050 }
1051 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1051 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1052 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1052 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1053 SDE_AUDIO_POWER_SHIFT); 1053 SDE_AUDIO_POWER_SHIFT);
@@ -1148,11 +1148,8 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1148 int pipe; 1148 int pipe;
1149 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1149 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1150 1150
1151 if (hotplug_trigger) { 1151 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1152 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt)) 1152
1153 ibx_hpd_irq_setup(dev);
1154 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
1155 }
1156 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1153 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1157 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1154 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1158 SDE_AUDIO_POWER_SHIFT_CPT); 1155 SDE_AUDIO_POWER_SHIFT_CPT);
@@ -1218,8 +1215,11 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
1218 /* On Haswell, also mask ERR_INT because we don't want to risk 1215 /* On Haswell, also mask ERR_INT because we don't want to risk
1219 * generating "unclaimed register" interrupts from inside the interrupt 1216 * generating "unclaimed register" interrupts from inside the interrupt
1220 * handler. */ 1217 * handler. */
1221 if (IS_HASWELL(dev)) 1218 if (IS_HASWELL(dev)) {
1219 spin_lock(&dev_priv->irq_lock);
1222 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 1220 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1221 spin_unlock(&dev_priv->irq_lock);
1222 }
1223 1223
1224 gt_iir = I915_READ(GTIIR); 1224 gt_iir = I915_READ(GTIIR);
1225 if (gt_iir) { 1225 if (gt_iir) {
@@ -1272,8 +1272,12 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
1272 ret = IRQ_HANDLED; 1272 ret = IRQ_HANDLED;
1273 } 1273 }
1274 1274
1275 if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev)) 1275 if (IS_HASWELL(dev)) {
1276 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 1276 spin_lock(&dev_priv->irq_lock);
1277 if (ivb_can_enable_err_int(dev))
1278 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1279 spin_unlock(&dev_priv->irq_lock);
1280 }
1277 1281
1278 I915_WRITE(DEIER, de_ier); 1282 I915_WRITE(DEIER, de_ier);
1279 POSTING_READ(DEIER); 1283 POSTING_READ(DEIER);
@@ -2698,6 +2702,8 @@ static void ibx_irq_postinstall(struct drm_device *dev)
2698 2702
2699static int ironlake_irq_postinstall(struct drm_device *dev) 2703static int ironlake_irq_postinstall(struct drm_device *dev)
2700{ 2704{
2705 unsigned long irqflags;
2706
2701 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2707 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2702 /* enable kind of interrupts always enabled */ 2708 /* enable kind of interrupts always enabled */
2703 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2709 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
@@ -2711,7 +2717,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2711 /* should always can generate irq */ 2717 /* should always can generate irq */
2712 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2718 I915_WRITE(DEIIR, I915_READ(DEIIR));
2713 I915_WRITE(DEIMR, dev_priv->irq_mask); 2719 I915_WRITE(DEIMR, dev_priv->irq_mask);
2714 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 2720 I915_WRITE(DEIER, display_mask |
2721 DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
2715 POSTING_READ(DEIER); 2722 POSTING_READ(DEIER);
2716 2723
2717 dev_priv->gt_irq_mask = ~0; 2724 dev_priv->gt_irq_mask = ~0;
@@ -2733,10 +2740,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2733 ibx_irq_postinstall(dev); 2740 ibx_irq_postinstall(dev);
2734 2741
2735 if (IS_IRONLAKE_M(dev)) { 2742 if (IS_IRONLAKE_M(dev)) {
2736 /* Clear & enable PCU event interrupts */ 2743 /* Enable PCU event interrupts
2737 I915_WRITE(DEIIR, DE_PCU_EVENT); 2744 *
2738 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 2745 * spinlocking not required here for correctness since interrupt
2746 * setup is guaranteed to run in single-threaded context. But we
2747 * need it to make the assert_spin_locked happy. */
2748 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2739 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2749 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2750 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2740 } 2751 }
2741 2752
2742 return 0; 2753 return 0;
@@ -3212,12 +3223,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3212 3223
3213 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3224 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3214 hotplug_status); 3225 hotplug_status);
3215 if (hotplug_trigger) { 3226
3216 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) 3227 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3217 i915_hpd_irq_setup(dev); 3228
3218 queue_work(dev_priv->wq,
3219 &dev_priv->hotplug_work);
3220 }
3221 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3229 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3222 POSTING_READ(PORT_HOTPLUG_STAT); 3230 POSTING_READ(PORT_HOTPLUG_STAT);
3223 } 3231 }
@@ -3369,6 +3377,8 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
3369 struct intel_encoder *intel_encoder; 3377 struct intel_encoder *intel_encoder;
3370 u32 hotplug_en; 3378 u32 hotplug_en;
3371 3379
3380 assert_spin_locked(&dev_priv->irq_lock);
3381
3372 if (I915_HAS_HOTPLUG(dev)) { 3382 if (I915_HAS_HOTPLUG(dev)) {
3373 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3383 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3374 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3384 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
@@ -3449,17 +3459,14 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3449 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3459 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3450 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3460 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3451 HOTPLUG_INT_STATUS_G4X : 3461 HOTPLUG_INT_STATUS_G4X :
3452 HOTPLUG_INT_STATUS_I965); 3462 HOTPLUG_INT_STATUS_I915);
3453 3463
3454 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3464 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3455 hotplug_status); 3465 hotplug_status);
3456 if (hotplug_trigger) { 3466
3457 if (hotplug_irq_storm_detect(dev, hotplug_trigger, 3467 intel_hpd_irq_handler(dev, hotplug_trigger,
3458 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965)) 3468 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
3459 i915_hpd_irq_setup(dev); 3469
3460 queue_work(dev_priv->wq,
3461 &dev_priv->hotplug_work);
3462 }
3463 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3470 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3464 I915_READ(PORT_HOTPLUG_STAT); 3471 I915_READ(PORT_HOTPLUG_STAT);
3465 } 3472 }
@@ -3655,6 +3662,7 @@ void intel_hpd_init(struct drm_device *dev)
3655 struct drm_i915_private *dev_priv = dev->dev_private; 3662 struct drm_i915_private *dev_priv = dev->dev_private;
3656 struct drm_mode_config *mode_config = &dev->mode_config; 3663 struct drm_mode_config *mode_config = &dev->mode_config;
3657 struct drm_connector *connector; 3664 struct drm_connector *connector;
3665 unsigned long irqflags;
3658 int i; 3666 int i;
3659 3667
3660 for (i = 1; i < HPD_NUM_PINS; i++) { 3668 for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -3667,6 +3675,11 @@ void intel_hpd_init(struct drm_device *dev)
3667 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3675 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3668 connector->polled = DRM_CONNECTOR_POLL_HPD; 3676 connector->polled = DRM_CONNECTOR_POLL_HPD;
3669 } 3677 }
3678
3679 /* Interrupt setup is already guaranteed to be single-threaded, this is
3680 * just to make the assert_spin_locked checks happy. */
3681 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3670 if (dev_priv->display.hpd_irq_setup) 3682 if (dev_priv->display.hpd_irq_setup)
3671 dev_priv->display.hpd_irq_setup(dev); 3683 dev_priv->display.hpd_irq_setup(dev);
3684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3672} 3685}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2102ff32ee20..f2326fc60ac9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -448,9 +448,9 @@
448#define _DPIO_PLL_CML_B 0x806c 448#define _DPIO_PLL_CML_B 0x806c
449#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B) 449#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
450 450
451#define _DPIO_LFP_COEFF_A 0x8048 451#define _DPIO_LPF_COEFF_A 0x8048
452#define _DPIO_LFP_COEFF_B 0x8068 452#define _DPIO_LPF_COEFF_B 0x8068
453#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B) 453#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
454 454
455#define DPIO_CALIBRATION 0x80ac 455#define DPIO_CALIBRATION 0x80ac
456 456
@@ -1718,14 +1718,13 @@
1718 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ 1718 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1719 GEN7_CXT_GT1_SIZE(ctx_reg) + \ 1719 GEN7_CXT_GT1_SIZE(ctx_reg) + \
1720 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1720 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1721#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f) 1721/* Haswell does have the CXT_SIZE register however it does not appear to be
1722#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7) 1722 * valid. Now, docs explain in dwords what is in the context object. The full
1723#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff) 1723 * size is 70720 bytes, however, the power context and execlist context will
1724#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \ 1724 * never be saved (power context is stored elsewhere, and execlists don't work
1725 HSW_CXT_RING_SIZE(ctx_reg) + \ 1725 * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
1726 HSW_CXT_RENDER_SIZE(ctx_reg) + \ 1726 */
1727 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1727#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
1728
1729 1728
1730/* 1729/*
1731 * Overlay regs 1730 * Overlay regs
@@ -1874,6 +1873,12 @@
1874/* SDVO is different across gen3/4 */ 1873/* SDVO is different across gen3/4 */
1875#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) 1874#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
1876#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) 1875#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
1876/*
1877 * Bspec seems to be seriously misleaded about the SDVO hpd bits on i965g/gm,
1878 * since reality corrobates that they're the same as on gen3. But keep these
1879 * bits here (and the comment!) to help any other lost wanderers back onto the
1880 * right tracks.
1881 */
1877#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4) 1882#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
1878#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2) 1883#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
1879#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) 1884#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
@@ -1885,13 +1890,6 @@
1885 PORTC_HOTPLUG_INT_STATUS | \ 1890 PORTC_HOTPLUG_INT_STATUS | \
1886 PORTD_HOTPLUG_INT_STATUS) 1891 PORTD_HOTPLUG_INT_STATUS)
1887 1892
1888#define HOTPLUG_INT_STATUS_I965 (CRT_HOTPLUG_INT_STATUS | \
1889 SDVOB_HOTPLUG_INT_STATUS_I965 | \
1890 SDVOC_HOTPLUG_INT_STATUS_I965 | \
1891 PORTB_HOTPLUG_INT_STATUS | \
1892 PORTC_HOTPLUG_INT_STATUS | \
1893 PORTD_HOTPLUG_INT_STATUS)
1894
1895#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \ 1893#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
1896 SDVOB_HOTPLUG_INT_STATUS_I915 | \ 1894 SDVOB_HOTPLUG_INT_STATUS_I915 | \
1897 SDVOC_HOTPLUG_INT_STATUS_I915 | \ 1895 SDVOC_HOTPLUG_INT_STATUS_I915 | \
@@ -3488,7 +3486,7 @@
3488#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) 3486#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
3489#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) 3487#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
3490 3488
3491#define _SPACNTR 0x72180 3489#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
3492#define SP_ENABLE (1<<31) 3490#define SP_ENABLE (1<<31)
3493#define SP_GEAMMA_ENABLE (1<<30) 3491#define SP_GEAMMA_ENABLE (1<<30)
3494#define SP_PIXFORMAT_MASK (0xf<<26) 3492#define SP_PIXFORMAT_MASK (0xf<<26)
@@ -3507,30 +3505,30 @@
3507#define SP_YUV_ORDER_YVYU (2<<16) 3505#define SP_YUV_ORDER_YVYU (2<<16)
3508#define SP_YUV_ORDER_VYUY (3<<16) 3506#define SP_YUV_ORDER_VYUY (3<<16)
3509#define SP_TILED (1<<10) 3507#define SP_TILED (1<<10)
3510#define _SPALINOFF 0x72184 3508#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
3511#define _SPASTRIDE 0x72188 3509#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
3512#define _SPAPOS 0x7218c 3510#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
3513#define _SPASIZE 0x72190 3511#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
3514#define _SPAKEYMINVAL 0x72194 3512#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
3515#define _SPAKEYMSK 0x72198 3513#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
3516#define _SPASURF 0x7219c 3514#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
3517#define _SPAKEYMAXVAL 0x721a0 3515#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
3518#define _SPATILEOFF 0x721a4 3516#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
3519#define _SPACONSTALPHA 0x721a8 3517#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
3520#define _SPAGAMC 0x721f4 3518#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
3521 3519
3522#define _SPBCNTR 0x72280 3520#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
3523#define _SPBLINOFF 0x72284 3521#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
3524#define _SPBSTRIDE 0x72288 3522#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
3525#define _SPBPOS 0x7228c 3523#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
3526#define _SPBSIZE 0x72290 3524#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
3527#define _SPBKEYMINVAL 0x72294 3525#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
3528#define _SPBKEYMSK 0x72298 3526#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
3529#define _SPBSURF 0x7229c 3527#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
3530#define _SPBKEYMAXVAL 0x722a0 3528#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
3531#define _SPBTILEOFF 0x722a4 3529#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
3532#define _SPBCONSTALPHA 0x722a8 3530#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
3533#define _SPBGAMC 0x722f4 3531#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
3534 3532
3535#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR) 3533#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
3536#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF) 3534#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 224ce25129ce..324211ac9c55 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1356,7 +1356,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1356 intel_encoder->cloneable = false; 1356 intel_encoder->cloneable = false;
1357 intel_encoder->hot_plug = intel_ddi_hot_plug; 1357 intel_encoder->hot_plug = intel_ddi_hot_plug;
1358 1358
1359 intel_dp_init_connector(intel_dig_port, dp_connector); 1359 if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
1360 drm_encoder_cleanup(encoder);
1361 kfree(intel_dig_port);
1362 kfree(dp_connector);
1363 return;
1364 }
1360 1365
1361 if (intel_encoder->type != INTEL_OUTPUT_EDP) { 1366 if (intel_encoder->type != INTEL_OUTPUT_EDP) {
1362 hdmi_connector = kzalloc(sizeof(struct intel_connector), 1367 hdmi_connector = kzalloc(sizeof(struct intel_connector),
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b08d1f9ce0de..85f3eb74d2b7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3250,7 +3250,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3250/* IPS only exists on ULT machines and is tied to pipe A. */ 3250/* IPS only exists on ULT machines and is tied to pipe A. */
3251static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 3251static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3252{ 3252{
3253 return IS_ULT(crtc->base.dev) && crtc->pipe == PIPE_A; 3253 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3254} 3254}
3255 3255
3256static void hsw_enable_ips(struct intel_crtc *crtc) 3256static void hsw_enable_ips(struct intel_crtc *crtc)
@@ -4069,7 +4069,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
4069 pipe_config->pipe_bpp = 8*3; 4069 pipe_config->pipe_bpp = 8*3;
4070 } 4070 }
4071 4071
4072 if (IS_HASWELL(dev)) 4072 if (HAS_IPS(dev))
4073 hsw_compute_ips_config(crtc, pipe_config); 4073 hsw_compute_ips_config(crtc, pipe_config);
4074 4074
4075 /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old 4075 /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
@@ -4404,11 +4404,12 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4404 4404
4405 /* Set HBR and RBR LPF coefficients */ 4405 /* Set HBR and RBR LPF coefficients */
4406 if (crtc->config.port_clock == 162000 || 4406 if (crtc->config.port_clock == 162000 ||
4407 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4407 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 4408 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4408 vlv_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 4409 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4409 0x005f0021); 4410 0x005f0021);
4410 else 4411 else
4411 vlv_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 4412 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4412 0x00d0000f); 4413 0x00d0000f);
4413 4414
4414 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 4415 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
@@ -8753,8 +8754,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
8753 } 8754 }
8754 8755
8755 if (ret) { 8756 if (ret) {
8756 DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", 8757 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
8757 set->crtc->base.id, ret); 8758 set->crtc->base.id, ret);
8758fail: 8759fail:
8759 intel_set_config_restore_state(dev, config); 8760 intel_set_config_restore_state(dev, config);
8760 8761
@@ -9121,6 +9122,7 @@ int intel_framebuffer_init(struct drm_device *dev,
9121 struct drm_mode_fb_cmd2 *mode_cmd, 9122 struct drm_mode_fb_cmd2 *mode_cmd,
9122 struct drm_i915_gem_object *obj) 9123 struct drm_i915_gem_object *obj)
9123{ 9124{
9125 int pitch_limit;
9124 int ret; 9126 int ret;
9125 9127
9126 if (obj->tiling_mode == I915_TILING_Y) { 9128 if (obj->tiling_mode == I915_TILING_Y) {
@@ -9134,10 +9136,26 @@ int intel_framebuffer_init(struct drm_device *dev,
9134 return -EINVAL; 9136 return -EINVAL;
9135 } 9137 }
9136 9138
9137 /* FIXME <= Gen4 stride limits are bit unclear */ 9139 if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
9138 if (mode_cmd->pitches[0] > 32768) { 9140 pitch_limit = 32*1024;
9139 DRM_DEBUG("pitch (%d) must be at less than 32768\n", 9141 } else if (INTEL_INFO(dev)->gen >= 4) {
9140 mode_cmd->pitches[0]); 9142 if (obj->tiling_mode)
9143 pitch_limit = 16*1024;
9144 else
9145 pitch_limit = 32*1024;
9146 } else if (INTEL_INFO(dev)->gen >= 3) {
9147 if (obj->tiling_mode)
9148 pitch_limit = 8*1024;
9149 else
9150 pitch_limit = 16*1024;
9151 } else
9152 /* XXX DSPC is limited to 4k tiled */
9153 pitch_limit = 8*1024;
9154
9155 if (mode_cmd->pitches[0] > pitch_limit) {
9156 DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
9157 obj->tiling_mode ? "tiled" : "linear",
9158 mode_cmd->pitches[0], pitch_limit);
9141 return -EINVAL; 9159 return -EINVAL;
9142 } 9160 }
9143 9161
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 98686005dcf6..b73971234013 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1324,20 +1324,35 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1324 struct intel_crtc_config *pipe_config) 1324 struct intel_crtc_config *pipe_config)
1325{ 1325{
1326 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1326 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1327 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1328 u32 tmp, flags = 0; 1327 u32 tmp, flags = 0;
1328 struct drm_device *dev = encoder->base.dev;
1329 struct drm_i915_private *dev_priv = dev->dev_private;
1330 enum port port = dp_to_dig_port(intel_dp)->port;
1331 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1329 1332
1330 tmp = I915_READ(intel_dp->output_reg); 1333 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
1334 tmp = I915_READ(intel_dp->output_reg);
1335 if (tmp & DP_SYNC_HS_HIGH)
1336 flags |= DRM_MODE_FLAG_PHSYNC;
1337 else
1338 flags |= DRM_MODE_FLAG_NHSYNC;
1331 1339
1332 if (tmp & DP_SYNC_HS_HIGH) 1340 if (tmp & DP_SYNC_VS_HIGH)
1333 flags |= DRM_MODE_FLAG_PHSYNC; 1341 flags |= DRM_MODE_FLAG_PVSYNC;
1334 else 1342 else
1335 flags |= DRM_MODE_FLAG_NHSYNC; 1343 flags |= DRM_MODE_FLAG_NVSYNC;
1344 } else {
1345 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1346 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
1347 flags |= DRM_MODE_FLAG_PHSYNC;
1348 else
1349 flags |= DRM_MODE_FLAG_NHSYNC;
1336 1350
1337 if (tmp & DP_SYNC_VS_HIGH) 1351 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
1338 flags |= DRM_MODE_FLAG_PVSYNC; 1352 flags |= DRM_MODE_FLAG_PVSYNC;
1339 else 1353 else
1340 flags |= DRM_MODE_FLAG_NVSYNC; 1354 flags |= DRM_MODE_FLAG_NVSYNC;
1355 }
1341 1356
1342 pipe_config->adjusted_mode.flags |= flags; 1357 pipe_config->adjusted_mode.flags |= flags;
1343} 1358}
@@ -2681,15 +2696,16 @@ done:
2681} 2696}
2682 2697
2683static void 2698static void
2684intel_dp_destroy(struct drm_connector *connector) 2699intel_dp_connector_destroy(struct drm_connector *connector)
2685{ 2700{
2686 struct intel_dp *intel_dp = intel_attached_dp(connector);
2687 struct intel_connector *intel_connector = to_intel_connector(connector); 2701 struct intel_connector *intel_connector = to_intel_connector(connector);
2688 2702
2689 if (!IS_ERR_OR_NULL(intel_connector->edid)) 2703 if (!IS_ERR_OR_NULL(intel_connector->edid))
2690 kfree(intel_connector->edid); 2704 kfree(intel_connector->edid);
2691 2705
2692 if (is_edp(intel_dp)) 2706 /* Can't call is_edp() since the encoder may have been destroyed
2707 * already. */
2708 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
2693 intel_panel_fini(&intel_connector->panel); 2709 intel_panel_fini(&intel_connector->panel);
2694 2710
2695 drm_sysfs_connector_remove(connector); 2711 drm_sysfs_connector_remove(connector);
@@ -2723,7 +2739,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
2723 .detect = intel_dp_detect, 2739 .detect = intel_dp_detect,
2724 .fill_modes = drm_helper_probe_single_connector_modes, 2740 .fill_modes = drm_helper_probe_single_connector_modes,
2725 .set_property = intel_dp_set_property, 2741 .set_property = intel_dp_set_property,
2726 .destroy = intel_dp_destroy, 2742 .destroy = intel_dp_connector_destroy,
2727}; 2743};
2728 2744
2729static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 2745static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -2954,7 +2970,85 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
2954 I915_READ(pp_div_reg)); 2970 I915_READ(pp_div_reg));
2955} 2971}
2956 2972
2957void 2973static bool intel_edp_init_connector(struct intel_dp *intel_dp,
2974 struct intel_connector *intel_connector)
2975{
2976 struct drm_connector *connector = &intel_connector->base;
2977 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2978 struct drm_device *dev = intel_dig_port->base.base.dev;
2979 struct drm_i915_private *dev_priv = dev->dev_private;
2980 struct drm_display_mode *fixed_mode = NULL;
2981 struct edp_power_seq power_seq = { 0 };
2982 bool has_dpcd;
2983 struct drm_display_mode *scan;
2984 struct edid *edid;
2985
2986 if (!is_edp(intel_dp))
2987 return true;
2988
2989 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2990
2991 /* Cache DPCD and EDID for edp. */
2992 ironlake_edp_panel_vdd_on(intel_dp);
2993 has_dpcd = intel_dp_get_dpcd(intel_dp);
2994 ironlake_edp_panel_vdd_off(intel_dp, false);
2995
2996 if (has_dpcd) {
2997 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2998 dev_priv->no_aux_handshake =
2999 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3000 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3001 } else {
3002 /* if this fails, presume the device is a ghost */
3003 DRM_INFO("failed to retrieve link info, disabling eDP\n");
3004 return false;
3005 }
3006
3007 /* We now know it's not a ghost, init power sequence regs. */
3008 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3009 &power_seq);
3010
3011 ironlake_edp_panel_vdd_on(intel_dp);
3012 edid = drm_get_edid(connector, &intel_dp->adapter);
3013 if (edid) {
3014 if (drm_add_edid_modes(connector, edid)) {
3015 drm_mode_connector_update_edid_property(connector,
3016 edid);
3017 drm_edid_to_eld(connector, edid);
3018 } else {
3019 kfree(edid);
3020 edid = ERR_PTR(-EINVAL);
3021 }
3022 } else {
3023 edid = ERR_PTR(-ENOENT);
3024 }
3025 intel_connector->edid = edid;
3026
3027 /* prefer fixed mode from EDID if available */
3028 list_for_each_entry(scan, &connector->probed_modes, head) {
3029 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
3030 fixed_mode = drm_mode_duplicate(dev, scan);
3031 break;
3032 }
3033 }
3034
3035 /* fallback to VBT if available for eDP */
3036 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
3037 fixed_mode = drm_mode_duplicate(dev,
3038 dev_priv->vbt.lfp_lvds_vbt_mode);
3039 if (fixed_mode)
3040 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3041 }
3042
3043 ironlake_edp_panel_vdd_off(intel_dp, false);
3044
3045 intel_panel_init(&intel_connector->panel, fixed_mode);
3046 intel_panel_setup_backlight(connector);
3047
3048 return true;
3049}
3050
3051bool
2958intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 3052intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2959 struct intel_connector *intel_connector) 3053 struct intel_connector *intel_connector)
2960{ 3054{
@@ -2963,11 +3057,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2963 struct intel_encoder *intel_encoder = &intel_dig_port->base; 3057 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2964 struct drm_device *dev = intel_encoder->base.dev; 3058 struct drm_device *dev = intel_encoder->base.dev;
2965 struct drm_i915_private *dev_priv = dev->dev_private; 3059 struct drm_i915_private *dev_priv = dev->dev_private;
2966 struct drm_display_mode *fixed_mode = NULL;
2967 struct edp_power_seq power_seq = { 0 };
2968 enum port port = intel_dig_port->port; 3060 enum port port = intel_dig_port->port;
2969 const char *name = NULL; 3061 const char *name = NULL;
2970 int type; 3062 int type, error;
2971 3063
2972 /* Preserve the current hw state. */ 3064 /* Preserve the current hw state. */
2973 intel_dp->DP = I915_READ(intel_dp->output_reg); 3065 intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -3065,74 +3157,21 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3065 BUG(); 3157 BUG();
3066 } 3158 }
3067 3159
3068 if (is_edp(intel_dp)) 3160 error = intel_dp_i2c_init(intel_dp, intel_connector, name);
3069 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 3161 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
3070 3162 error, port_name(port));
3071 intel_dp_i2c_init(intel_dp, intel_connector, name);
3072
3073 /* Cache DPCD and EDID for edp. */
3074 if (is_edp(intel_dp)) {
3075 bool ret;
3076 struct drm_display_mode *scan;
3077 struct edid *edid;
3078
3079 ironlake_edp_panel_vdd_on(intel_dp);
3080 ret = intel_dp_get_dpcd(intel_dp);
3081 ironlake_edp_panel_vdd_off(intel_dp, false);
3082
3083 if (ret) {
3084 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3085 dev_priv->no_aux_handshake =
3086 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3087 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3088 } else {
3089 /* if this fails, presume the device is a ghost */
3090 DRM_INFO("failed to retrieve link info, disabling eDP\n");
3091 intel_dp_encoder_destroy(&intel_encoder->base);
3092 intel_dp_destroy(connector);
3093 return;
3094 }
3095
3096 /* We now know it's not a ghost, init power sequence regs. */
3097 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3098 &power_seq);
3099 3163
3100 ironlake_edp_panel_vdd_on(intel_dp); 3164 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
3101 edid = drm_get_edid(connector, &intel_dp->adapter); 3165 i2c_del_adapter(&intel_dp->adapter);
3102 if (edid) { 3166 if (is_edp(intel_dp)) {
3103 if (drm_add_edid_modes(connector, edid)) { 3167 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3104 drm_mode_connector_update_edid_property(connector, edid); 3168 mutex_lock(&dev->mode_config.mutex);
3105 drm_edid_to_eld(connector, edid); 3169 ironlake_panel_vdd_off_sync(intel_dp);
3106 } else { 3170 mutex_unlock(&dev->mode_config.mutex);
3107 kfree(edid);
3108 edid = ERR_PTR(-EINVAL);
3109 }
3110 } else {
3111 edid = ERR_PTR(-ENOENT);
3112 } 3171 }
3113 intel_connector->edid = edid; 3172 drm_sysfs_connector_remove(connector);
3114 3173 drm_connector_cleanup(connector);
3115 /* prefer fixed mode from EDID if available */ 3174 return false;
3116 list_for_each_entry(scan, &connector->probed_modes, head) {
3117 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
3118 fixed_mode = drm_mode_duplicate(dev, scan);
3119 break;
3120 }
3121 }
3122
3123 /* fallback to VBT if available for eDP */
3124 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
3125 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
3126 if (fixed_mode)
3127 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3128 }
3129
3130 ironlake_edp_panel_vdd_off(intel_dp, false);
3131 }
3132
3133 if (is_edp(intel_dp)) {
3134 intel_panel_init(&intel_connector->panel, fixed_mode);
3135 intel_panel_setup_backlight(connector);
3136 } 3175 }
3137 3176
3138 intel_dp_add_properties(intel_dp, connector); 3177 intel_dp_add_properties(intel_dp, connector);
@@ -3145,6 +3184,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3145 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 3184 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
3146 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 3185 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
3147 } 3186 }
3187
3188 return true;
3148} 3189}
3149 3190
3150void 3191void
@@ -3190,5 +3231,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3190 intel_encoder->cloneable = false; 3231 intel_encoder->cloneable = false;
3191 intel_encoder->hot_plug = intel_dp_hot_plug; 3232 intel_encoder->hot_plug = intel_dp_hot_plug;
3192 3233
3193 intel_dp_init_connector(intel_dig_port, intel_connector); 3234 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
3235 drm_encoder_cleanup(encoder);
3236 kfree(intel_dig_port);
3237 kfree(intel_connector);
3238 }
3194} 3239}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ffe9d35b37b4..c8c9b6f48230 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -141,7 +141,8 @@ struct intel_encoder {
141 bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); 141 bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
142 /* Reconstructs the equivalent mode flags for the current hardware 142 /* Reconstructs the equivalent mode flags for the current hardware
143 * state. This must be called _after_ display->get_pipe_config has 143 * state. This must be called _after_ display->get_pipe_config has
144 * pre-filled the pipe config. */ 144 * pre-filled the pipe config. Note that intel_encoder->base.crtc must
145 * be set correctly before calling this function. */
145 void (*get_config)(struct intel_encoder *, 146 void (*get_config)(struct intel_encoder *,
146 struct intel_crtc_config *pipe_config); 147 struct intel_crtc_config *pipe_config);
147 int crtc_mask; 148 int crtc_mask;
@@ -586,7 +587,7 @@ extern void intel_lvds_init(struct drm_device *dev);
586extern bool intel_is_dual_link_lvds(struct drm_device *dev); 587extern bool intel_is_dual_link_lvds(struct drm_device *dev);
587extern void intel_dp_init(struct drm_device *dev, int output_reg, 588extern void intel_dp_init(struct drm_device *dev, int output_reg,
588 enum port port); 589 enum port port);
589extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 590extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
590 struct intel_connector *intel_connector); 591 struct intel_connector *intel_connector);
591extern void intel_dp_init_link_config(struct intel_dp *intel_dp); 592extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
592extern void intel_dp_start_link_train(struct intel_dp *intel_dp); 593extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index bc12518a21b4..98df2a0c85bd 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -602,7 +602,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
602 u32 hdmi_val; 602 u32 hdmi_val;
603 603
604 hdmi_val = SDVO_ENCODING_HDMI; 604 hdmi_val = SDVO_ENCODING_HDMI;
605 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) 605 if (!HAS_PCH_SPLIT(dev))
606 hdmi_val |= intel_hdmi->color_range; 606 hdmi_val |= intel_hdmi->color_range;
607 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 607 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
608 hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; 608 hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 79be7cfd3152..cfb8fb68f09c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -311,8 +311,8 @@ static void intel_didl_outputs(struct drm_device *dev)
311 311
312 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { 312 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
313 if (i >= 8) { 313 if (i >= 8) {
314 dev_printk(KERN_ERR, &dev->pdev->dev, 314 dev_dbg(&dev->pdev->dev,
315 "More than 8 outputs detected via ACPI\n"); 315 "More than 8 outputs detected via ACPI\n");
316 return; 316 return;
317 } 317 }
318 status = 318 status =
@@ -338,8 +338,8 @@ blind_set:
338 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 338 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
339 int output_type = ACPI_OTHER_OUTPUT; 339 int output_type = ACPI_OTHER_OUTPUT;
340 if (i >= 8) { 340 if (i >= 8) {
341 dev_printk(KERN_ERR, &dev->pdev->dev, 341 dev_dbg(&dev->pdev->dev,
342 "More than 8 outputs in connector list\n"); 342 "More than 8 outputs in connector list\n");
343 return; 343 return;
344 } 344 }
345 switch (connector->connector_type) { 345 switch (connector->connector_type) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b27bda07f4ae..ccbdd83f5220 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3069,26 +3069,17 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3069 trace_intel_gpu_freq_change(val * 50); 3069 trace_intel_gpu_freq_change(val * 50);
3070} 3070}
3071 3071
3072void valleyview_set_rps(struct drm_device *dev, u8 val) 3072/*
3073 * Wait until the previous freq change has completed,
3074 * or the timeout elapsed, and then update our notion
3075 * of the current GPU frequency.
3076 */
3077static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
3073{ 3078{
3074 struct drm_i915_private *dev_priv = dev->dev_private;
3075 unsigned long timeout = jiffies + msecs_to_jiffies(10); 3079 unsigned long timeout = jiffies + msecs_to_jiffies(10);
3076 u32 limits = gen6_rps_limits(dev_priv, &val);
3077 u32 pval; 3080 u32 pval;
3078 3081
3079 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3082 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3080 WARN_ON(val > dev_priv->rps.max_delay);
3081 WARN_ON(val < dev_priv->rps.min_delay);
3082
3083 DRM_DEBUG_DRIVER("gpu freq request from %d to %d\n",
3084 vlv_gpu_freq(dev_priv->mem_freq,
3085 dev_priv->rps.cur_delay),
3086 vlv_gpu_freq(dev_priv->mem_freq, val));
3087
3088 if (val == dev_priv->rps.cur_delay)
3089 return;
3090
3091 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3092 3083
3093 do { 3084 do {
3094 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 3085 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -3099,17 +3090,41 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
3099 udelay(10); 3090 udelay(10);
3100 } while (pval & 1); 3091 } while (pval & 1);
3101 3092
3102 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 3093 pval >>= 8;
3103 if ((pval >> 8) != val)
3104 DRM_DEBUG_DRIVER("punit overrode freq: %d requested, but got %d\n",
3105 val, pval >> 8);
3106 3094
3107 /* Make sure we continue to get interrupts 3095 if (pval != dev_priv->rps.cur_delay)
3108 * until we hit the minimum or maximum frequencies. 3096 DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
3109 */ 3097 vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
3110 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); 3098 dev_priv->rps.cur_delay,
3099 vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
3100
3101 dev_priv->rps.cur_delay = pval;
3102}
3103
3104void valleyview_set_rps(struct drm_device *dev, u8 val)
3105{
3106 struct drm_i915_private *dev_priv = dev->dev_private;
3107
3108 gen6_rps_limits(dev_priv, &val);
3111 3109
3112 dev_priv->rps.cur_delay = pval >> 8; 3110 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3111 WARN_ON(val > dev_priv->rps.max_delay);
3112 WARN_ON(val < dev_priv->rps.min_delay);
3113
3114 vlv_update_rps_cur_delay(dev_priv);
3115
3116 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3117 vlv_gpu_freq(dev_priv->mem_freq,
3118 dev_priv->rps.cur_delay),
3119 dev_priv->rps.cur_delay,
3120 vlv_gpu_freq(dev_priv->mem_freq, val), val);
3121
3122 if (val == dev_priv->rps.cur_delay)
3123 return;
3124
3125 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3126
3127 dev_priv->rps.cur_delay = val;
3113 3128
3114 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val)); 3129 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
3115} 3130}
@@ -3446,7 +3461,8 @@ static void vlv_rps_timer_work(struct work_struct *work)
3446 * min freq available. 3461 * min freq available.
3447 */ 3462 */
3448 mutex_lock(&dev_priv->rps.hw_lock); 3463 mutex_lock(&dev_priv->rps.hw_lock);
3449 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3464 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3465 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3450 mutex_unlock(&dev_priv->rps.hw_lock); 3466 mutex_unlock(&dev_priv->rps.hw_lock);
3451} 3467}
3452 3468
@@ -3496,7 +3512,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
3496{ 3512{
3497 struct drm_i915_private *dev_priv = dev->dev_private; 3513 struct drm_i915_private *dev_priv = dev->dev_private;
3498 struct intel_ring_buffer *ring; 3514 struct intel_ring_buffer *ring;
3499 u32 gtfifodbg, val, rpe; 3515 u32 gtfifodbg, val;
3500 int i; 3516 int i;
3501 3517
3502 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3518 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -3557,31 +3573,39 @@ static void valleyview_enable_rps(struct drm_device *dev)
3557 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); 3573 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3558 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 3574 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3559 3575
3560 DRM_DEBUG_DRIVER("current GPU freq: %d\n",
3561 vlv_gpu_freq(dev_priv->mem_freq, (val >> 8) & 0xff));
3562 dev_priv->rps.cur_delay = (val >> 8) & 0xff; 3576 dev_priv->rps.cur_delay = (val >> 8) & 0xff;
3577 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3578 vlv_gpu_freq(dev_priv->mem_freq,
3579 dev_priv->rps.cur_delay),
3580 dev_priv->rps.cur_delay);
3563 3581
3564 dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); 3582 dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
3565 dev_priv->rps.hw_max = dev_priv->rps.max_delay; 3583 dev_priv->rps.hw_max = dev_priv->rps.max_delay;
3566 DRM_DEBUG_DRIVER("max GPU freq: %d\n", vlv_gpu_freq(dev_priv->mem_freq, 3584 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3567 dev_priv->rps.max_delay)); 3585 vlv_gpu_freq(dev_priv->mem_freq,
3586 dev_priv->rps.max_delay),
3587 dev_priv->rps.max_delay);
3568 3588
3569 rpe = valleyview_rps_rpe_freq(dev_priv); 3589 dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
3570 DRM_DEBUG_DRIVER("RPe GPU freq: %d\n", 3590 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3571 vlv_gpu_freq(dev_priv->mem_freq, rpe)); 3591 vlv_gpu_freq(dev_priv->mem_freq,
3572 dev_priv->rps.rpe_delay = rpe; 3592 dev_priv->rps.rpe_delay),
3593 dev_priv->rps.rpe_delay);
3573 3594
3574 val = valleyview_rps_min_freq(dev_priv); 3595 dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
3575 DRM_DEBUG_DRIVER("min GPU freq: %d\n", vlv_gpu_freq(dev_priv->mem_freq, 3596 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3576 val)); 3597 vlv_gpu_freq(dev_priv->mem_freq,
3577 dev_priv->rps.min_delay = val; 3598 dev_priv->rps.min_delay),
3599 dev_priv->rps.min_delay);
3578 3600
3579 DRM_DEBUG_DRIVER("setting GPU freq to %d\n", 3601 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3580 vlv_gpu_freq(dev_priv->mem_freq, rpe)); 3602 vlv_gpu_freq(dev_priv->mem_freq,
3603 dev_priv->rps.rpe_delay),
3604 dev_priv->rps.rpe_delay);
3581 3605
3582 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work); 3606 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
3583 3607
3584 valleyview_set_rps(dev_priv->dev, rpe); 3608 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3585 3609
3586 /* requires MSI enabled */ 3610 /* requires MSI enabled */
3587 I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS); 3611 I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
@@ -4834,10 +4858,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
4834 I915_WRITE(GEN7_ROW_CHICKEN2, 4858 I915_WRITE(GEN7_ROW_CHICKEN2,
4835 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4859 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4836 4860
4837 /* WaForceL3Serialization:vlv */
4838 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4839 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4840
4841 /* This is required by WaCatErrorRejectionIssue:vlv */ 4861 /* This is required by WaCatErrorRejectionIssue:vlv */
4842 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4862 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4843 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4863 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |