author		Dave Airlie <airlied@redhat.com>	2013-12-22 19:46:07 -0500
committer	Dave Airlie <airlied@redhat.com>	2013-12-22 19:46:07 -0500
commit		859ae233cd0ee76b6143f948ba1cb6b0b4c342f8 (patch)
tree		b2071654cf0ef520e047035720a101d3222e47bc
parent		785e15ecefbfe8ea311ae320fdacd482a84b3cc3 (diff)
parent		ab57fff1302c485d74992d34df24ccb5efda244e (diff)
Merge tag 'drm-intel-next-2013-12-13' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
- fbc1 improvements from Ville (pre-gm45).
- vlv forcewake improvements from Deepak S.
- Some corner-case fixes from Mika for the context hang stat code.
- pc8 improvements and prep work for runtime D3 from Paulo, almost ready for primetime.
- gen2 dpll fixes from Ville.
- DSI improvements from Shobhit Kumar.
- A few smaller fixes and improvements all over.

[airlied: intel_ddi.c conflict fixed up]

* tag 'drm-intel-next-2013-12-13' of git://people.freedesktop.org/~danvet/drm-intel: (61 commits)
  drm/i915/bdw: Implement ff workarounds
  drm/i915/bdw: Force all Data Cache Data Port access to be Non-Coherent
  drm/i915/bdw: Don't use forcewake needlessly
  drm/i915: Clear out old GT FIFO errors in intel_uncore_early_sanitize()
  drm/i915: dont call irq_put when irq test is on
  drm/i915: Rework the FBC interval/stall stuff a bit
  drm/i915: Enable FBC for all mobile gen2 and gen3 platforms
  drm/i915: FBC_CONTROL2 is gen4 only
  drm/i915: Gen2 FBC1 CFB pitch wants 32B units
  drm/i915: split intel_ddi_pll_mode_set in 2 pieces
  drm/i915: Fix timeout with missed interrupts in __wait_seqno
  drm/i915: touch VGA MSR after we enable the power well
  drm/i915: extract hsw_power_well_post_{enable, disable}
  drm/i915: remove i915_disable_vga_mem declaration
  drm/i915: Parametrize the dphy and other spec specific parameters
  drm/i915: Remove redundant DSI PLL enabling
  drm/i915: Reorganize the DSI enable/disable sequence
  drm/i915: Try harder to get best m, n, p values with minimal error
  drm/i915: Compute dsi_clk from pixel clock
  drm/i915: Use FLISDSI interface for band gap reset
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_ddi.c
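The runtime-PM prep work follows one pattern throughout the series: any path that may touch GT registers while the device could be runtime-suspended takes a runtime PM reference before the access and drops it afterwards. A minimal sketch of that pattern, modelled on the debugfs hunks below (the function name here is illustrative, not something added by this pull; the helpers are the intel_runtime_pm_get/put calls shown in the diff):

    /* Illustrative only: bracket register access with a runtime PM reference,
     * mirroring the changes to the i915 debugfs readers in this series. */
    static int example_read_gt_register(struct drm_device *dev, struct seq_file *m)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;
            int ret;

            ret = mutex_lock_interruptible(&dev->struct_mutex);
            if (ret)
                    return ret;
            intel_runtime_pm_get(dev_priv);         /* keep the device awake for the MMIO read */

            seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

            intel_runtime_pm_put(dev_priv);         /* balance the reference */
            mutex_unlock(&dev->struct_mutex);

            return 0;
    }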
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c		|  45
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c			|  16
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c			|  41
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h			|  14
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c			|  57
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	|  44
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c		|  12
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c			|  40
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h			|  13
-rw-r--r--	drivers/gpu/drm/i915/i915_sysfs.c		|  14
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.c		|   8
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.h		|  48
-rw-r--r--	drivers/gpu/drm/i915/intel_ddi.c		| 113
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c		|  73
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c			|  40
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h		|  16
-rw-r--r--	drivers/gpu/drm/i915/intel_dsi.c		| 188
-rw-r--r--	drivers/gpu/drm/i915/intel_dsi.h		|  21
-rw-r--r--	drivers/gpu/drm/i915/intel_dsi_pll.c		| 119
-rw-r--r--	drivers/gpu/drm/i915/intel_hdmi.c		|  20
-rw-r--r--	drivers/gpu/drm/i915/intel_lvds.c		|  85
-rw-r--r--	drivers/gpu/drm/i915/intel_opregion.c		|   8
-rw-r--r--	drivers/gpu/drm/i915/intel_panel.c		|  60
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c			| 259
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c		|   2
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo_regs.h		|  40
-rw-r--r--	drivers/gpu/drm/i915/intel_sideband.c		|  14
-rw-r--r--	drivers/gpu/drm/i915/intel_uncore.c		|  43
28 files changed, 947 insertions(+), 506 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 13accf795548..6badc1596ceb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -564,10 +564,12 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	for_each_ring(ring, dev_priv, i)
 		i915_ring_seqno_info(m, ring);
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -585,6 +587,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	if (INTEL_INFO(dev)->gen >= 8) {
 		int i;
@@ -711,6 +714,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		}
 		i915_ring_seqno_info(m, ring);
 	}
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -904,9 +908,11 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	crstanddelay = I915_READ16(CRSTANDVID);
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
@@ -919,7 +925,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret = 0;
+
+	intel_runtime_pm_get(dev_priv);
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
@@ -945,7 +953,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		/* RPSTAT1 is in the GT power well */
 		ret = mutex_lock_interruptible(&dev->struct_mutex);
 		if (ret)
-			return ret;
+			goto out;
 
 		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -1033,7 +1041,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		seq_puts(m, "no P-state info available\n");
 	}
 
-	return 0;
+out:
+	intel_runtime_pm_put(dev_priv);
+	return ret;
 }
 
 static int i915_delayfreq_table(struct seq_file *m, void *unused)
@@ -1047,6 +1057,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -1054,6 +1065,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
 			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
 
+	intel_runtime_pm_put(dev_priv);
+
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1075,12 +1088,14 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	for (i = 1; i <= 32; i++) {
 		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
 		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
 	}
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1098,11 +1113,13 @@ static int ironlake_drpc_info(struct seq_file *m)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	rgvmodectl = I915_READ(MEMMODECTL);
 	rstdbyctl = I915_READ(RSTDBYCTL);
 	crstandvid = I915_READ16(CRSTANDVID);
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -1166,6 +1183,7 @@ static int gen6_drpc_info(struct seq_file *m)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	spin_lock_irq(&dev_priv->uncore.lock);
 	forcewake_count = dev_priv->uncore.forcewake_count;
@@ -1191,6 +1209,8 @@ static int gen6_drpc_info(struct seq_file *m)
 	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
+	intel_runtime_pm_put(dev_priv);
+
 	seq_printf(m, "Video Turbo Mode: %s\n",
 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
 	seq_printf(m, "HW control enabled: %s\n",
@@ -1405,6 +1425,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
@@ -1421,6 +1442,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -1436,8 +1458,10 @@ static int i915_gfxec(struct seq_file *m, void *unused)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+	intel_runtime_pm_put(dev_priv);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1617,6 +1641,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@@ -1648,6 +1673,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
 			   I915_READ(DISP_ARB_CTL));
 	}
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1708,16 +1734,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	int ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	if (INTEL_INFO(dev)->gen >= 8)
 		gen8_ppgtt_info(m, dev);
 	else if (INTEL_INFO(dev)->gen >= 6)
 		gen6_ppgtt_info(m, dev);
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1791,6 +1820,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	u32 psrperf = 0;
 	bool enabled = false;
 
+	intel_runtime_pm_get(dev_priv);
+
 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
 
@@ -1803,6 +1834,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		  EDP_PSR_PERF_CNT_MASK;
 	seq_printf(m, "Performance_Counter: %u\n", psrperf);
 
+	intel_runtime_pm_put(dev_priv);
 	return 0;
 }
 
@@ -3016,8 +3048,11 @@ i915_cache_sharing_get(void *data, u64 *val)
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -3038,6 +3073,7 @@ i915_cache_sharing_set(void *data, u64 val)
 	if (val > 3)
 		return -EINVAL;
 
+	intel_runtime_pm_get(dev_priv);
 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
 
 	/* Update the cache sharing policy here as well */
@@ -3046,6 +3082,7 @@ i915_cache_sharing_set(void *data, u64 val)
 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
 
+	intel_runtime_pm_put(dev_priv);
 	return 0;
 }
 
@@ -3061,6 +3098,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
+	intel_runtime_pm_get(dev_priv);
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	return 0;
@@ -3075,6 +3113,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
 		return 0;
 
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+	intel_runtime_pm_put(dev_priv);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index bf38e99410a4..750918c779c8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -42,6 +42,8 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <acpi/video.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 
 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
@@ -1667,6 +1669,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (IS_GEN5(dev))
 		intel_gpu_ips_init(dev_priv);
 
+	intel_init_runtime_pm(dev_priv);
+
 	return 0;
 
 out_power_well:
@@ -1706,6 +1710,14 @@ int i915_driver_unload(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	ret = i915_gem_suspend(dev);
+	if (ret) {
+		DRM_ERROR("failed to idle hardware: %d\n", ret);
+		return ret;
+	}
+
+	intel_fini_runtime_pm(dev_priv);
+
 	intel_gpu_ips_teardown();
 
 	/* The i915.ko module is still not prepared to be loaded when
@@ -1719,10 +1731,6 @@ int i915_driver_unload(struct drm_device *dev)
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
-	ret = i915_gem_suspend(dev);
-	if (ret)
-		DRM_ERROR("failed to idle hardware: %d\n", ret);
-
 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e570ad7a9dfe..74516930de7a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -172,6 +172,7 @@ static const struct intel_device_info intel_i85x_info = {
 	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
 };
 
@@ -191,6 +192,7 @@ static const struct intel_device_info intel_i915gm_info = {
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945g_info = {
@@ -203,6 +205,7 @@ static const struct intel_device_info intel_i945gm_info = {
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
 };
 
@@ -502,6 +505,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 
+	intel_runtime_pm_get(dev_priv);
+
 	/* ignore lid events during suspend */
 	mutex_lock(&dev_priv->modeset_restore_lock);
 	dev_priv->modeset_restore = MODESET_SUSPENDED;
@@ -688,6 +693,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 	mutex_lock(&dev_priv->modeset_restore_lock);
 	dev_priv->modeset_restore = MODESET_DONE;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
+
+	intel_runtime_pm_put(dev_priv);
 	return error;
 }
 
@@ -902,6 +909,38 @@ static int i915_pm_poweroff(struct device *dev)
 	return i915_drm_freeze(drm_dev);
 }
 
+static int i915_runtime_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!HAS_RUNTIME_PM(dev));
+
+	DRM_DEBUG_KMS("Suspending device\n");
+
+	dev_priv->pm.suspended = true;
+	intel_opregion_notify_adapter(dev, PCI_D3cold);
+
+	return 0;
+}
+
+static int i915_runtime_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!HAS_RUNTIME_PM(dev));
+
+	DRM_DEBUG_KMS("Resuming device\n");
+
+	intel_opregion_notify_adapter(dev, PCI_D0);
+	dev_priv->pm.suspended = false;
+
+	return 0;
+}
+
 static const struct dev_pm_ops i915_pm_ops = {
 	.suspend = i915_pm_suspend,
 	.resume = i915_pm_resume,
@@ -909,6 +948,8 @@ static const struct dev_pm_ops i915_pm_ops = {
 	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_poweroff,
 	.restore = i915_pm_resume,
+	.runtime_suspend = i915_runtime_suspend,
+	.runtime_resume = i915_runtime_resume,
 };
 
 static const struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 780f815b6c9f..ae2c80c1981b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -323,7 +323,7 @@ struct drm_i915_error_state {
 	u32 instps[I915_NUM_RINGS];
 	u32 extra_instdone[I915_NUM_INSTDONE_REG];
 	u32 seqno[I915_NUM_RINGS];
-	u64 bbaddr;
+	u64 bbaddr[I915_NUM_RINGS];
 	u32 fault_reg[I915_NUM_RINGS];
 	u32 done_reg;
 	u32 faddr[I915_NUM_RINGS];
@@ -372,7 +372,7 @@ struct dpll;
 
 struct drm_i915_display_funcs {
 	bool (*fbc_enabled)(struct drm_device *dev);
-	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+	void (*enable_fbc)(struct drm_crtc *crtc);
 	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
@@ -695,7 +695,6 @@ struct i915_fbc {
 		struct delayed_work work;
 		struct drm_crtc *crtc;
 		struct drm_framebuffer *fb;
-		int interval;
 	} *fbc_work;
 
 	enum no_fbc_reason {
@@ -1289,6 +1288,10 @@ struct i915_package_c8 {
 	} regsave;
 };
 
+struct i915_runtime_pm {
+	bool suspended;
+};
+
 enum intel_pipe_crc_source {
 	INTEL_PIPE_CRC_SOURCE_NONE,
 	INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1519,6 +1522,8 @@ typedef struct drm_i915_private {
 
 	struct i915_package_c8 pc8;
 
+	struct i915_runtime_pm pm;
+
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
@@ -1843,6 +1848,7 @@ struct drm_i915_file_private {
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_PC8(dev)		(IS_HASWELL(dev)) /* XXX HSW:ULX */
+#define HAS_RUNTIME_PM(dev)	false
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -2468,6 +2474,8 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 		   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 		     enum intel_sbi_destination destination);
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 182c521ec392..2be904c704e9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1015,9 +1015,11 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 			struct drm_i915_file_private *file_priv)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	const bool irq_test_in_progress =
+		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
 	struct timespec before, now;
 	DEFINE_WAIT(wait);
-	long timeout_jiffies;
+	unsigned long timeout_expire;
 	int ret;
 
 	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1025,7 +1027,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
-	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
+	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
 	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
@@ -1035,8 +1037,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 					 msecs_to_jiffies(100));
 	}
 
-	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
-	    WARN_ON(!ring->irq_get(ring)))
+	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
 		return -ENODEV;
 
 	/* Record current time in case interrupted by signal, or wedged */
@@ -1044,7 +1045,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	getrawmonotonic(&before);
 	for (;;) {
 		struct timer_list timer;
-		unsigned long expire;
 
 		prepare_to_wait(&ring->irq_queue, &wait,
 				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
@@ -1070,23 +1070,22 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 			break;
 		}
 
-		if (timeout_jiffies <= 0) {
+		if (timeout && time_after_eq(jiffies, timeout_expire)) {
 			ret = -ETIME;
 			break;
 		}
 
 		timer.function = NULL;
 		if (timeout || missed_irq(dev_priv, ring)) {
+			unsigned long expire;
+
 			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-			expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
 			mod_timer(&timer, expire);
 		}
 
 		io_schedule();
 
-		if (timeout)
-			timeout_jiffies = expire - jiffies;
-
 		if (timer.function) {
 			del_singleshot_timer_sync(&timer);
 			destroy_timer_on_stack(&timer);
@@ -1095,7 +1094,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	getrawmonotonic(&now);
 	trace_i915_gem_request_wait_end(ring, seqno);
 
-	ring->irq_put(ring);
+	if (!irq_test_in_progress)
+		ring->irq_put(ring);
 
 	finish_wait(&ring->irq_queue, &wait);
 
@@ -1380,6 +1380,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	int ret = 0;
 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 
+	intel_runtime_pm_get(dev_priv);
+
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
 		PAGE_SHIFT;
@@ -1427,8 +1429,10 @@ out:
 		/* If this -EIO is due to a gpu hang, give the reset code a
 		 * chance to clean up the mess. Otherwise return the proper
 		 * SIGBUS. */
-		if (i915_terminally_wedged(&dev_priv->gpu_error))
-			return VM_FAULT_SIGBUS;
+		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+			ret = VM_FAULT_SIGBUS;
+			break;
+		}
 	case -EAGAIN:
 		/*
 		 * EAGAIN means the gpu is hung and we'll wait for the error
@@ -1443,15 +1447,22 @@ out:
 		 * EBUSY is ok: this just means that another thread
 		 * already did the job.
 		 */
-		return VM_FAULT_NOPAGE;
+		ret = VM_FAULT_NOPAGE;
+		break;
 	case -ENOMEM:
-		return VM_FAULT_OOM;
+		ret = VM_FAULT_OOM;
+		break;
 	case -ENOSPC:
-		return VM_FAULT_SIGBUS;
+		ret = VM_FAULT_SIGBUS;
+		break;
 	default:
 		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
-		return VM_FAULT_SIGBUS;
+		ret = VM_FAULT_SIGBUS;
+		break;
 	}
+
+	intel_runtime_pm_put(dev_priv);
+	return ret;
 }
 
 /**
@@ -2746,7 +2757,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 		obj->has_aliasing_ppgtt_mapping = 0;
 	}
 	i915_gem_gtt_finish_object(obj);
-	i915_gem_object_unpin_pages(obj);
 
 	list_del(&vma->mm_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
@@ -2754,7 +2764,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 		obj->map_and_fenceable = true;
 
 	drm_mm_remove_node(&vma->node);
-
 	i915_gem_vma_destroy(vma);
 
 	/* Since the unbound list is global, only move to that list if
@@ -2762,6 +2771,12 @@ int i915_vma_unbind(struct i915_vma *vma)
 	if (list_empty(&obj->vma_list))
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
+	/* And finally now the object is completely decoupled from this vma,
+	 * we can drop its hold on the backing storage and allow it to be
+	 * reaped by the shrinker.
+	 */
+	i915_gem_object_unpin_pages(obj);
+
 	return 0;
 }
 
@@ -4165,6 +4180,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_vma *vma, *next;
 
+	intel_runtime_pm_get(dev_priv);
+
 	trace_i915_gem_object_destroy(obj);
 
 	if (obj->phys_obj)
@@ -4209,6 +4226,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	kfree(obj->bit_17);
 	i915_gem_object_free(obj);
+
+	intel_runtime_pm_put(dev_priv);
 }
 
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3c90dd1a3bbd..87652fafeb49 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -888,6 +888,24 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 	return 0;
 }
 
+static int
+i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
+			  const u32 ctx_id)
+{
+	struct i915_ctx_hang_stats *hs;
+
+	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+	if (IS_ERR(hs))
+		return PTR_ERR(hs);
+
+	if (hs->banned) {
+		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_ring_buffer *ring)
@@ -967,8 +985,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
-	struct i915_ctx_hang_stats *hs;
-	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
 	u32 mask, flags;
 	int ret, mode, i;
@@ -1095,6 +1112,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		}
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto pre_mutex_err;
@@ -1105,6 +1124,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
+	ret = i915_gem_validate_context(dev, file, ctx_id);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		goto pre_mutex_err;
+	}
+
 	eb = eb_create(args);
 	if (eb == NULL) {
 		mutex_unlock(&dev->struct_mutex);
@@ -1157,17 +1182,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
-	if (IS_ERR(hs)) {
-		ret = PTR_ERR(hs);
-		goto err;
-	}
-
-	if (hs->banned) {
-		ret = -EIO;
-		goto err;
-	}
-
 	ret = i915_switch_context(ring, file, ctx_id);
 	if (ret)
 		goto err;
@@ -1229,6 +1243,10 @@ err:
 
 pre_mutex_err:
 	kfree(cliprects);
+
+	/* intel_gpu_busy should also get a ref, so it will free when the device
+	 * is really idle. */
+	intel_runtime_pm_put(dev_priv);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 79dcb8f896c6..a707cca692e4 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -247,12 +247,11 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 	err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
 	err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
 	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
-	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (INTEL_INFO(dev)->gen >= 4) {
+		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr[ring]);
 		err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
-	if (INTEL_INFO(dev)->gen >= 4)
 		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+	}
 	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
 	err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
 	if (INTEL_INFO(dev)->gen >= 6) {
@@ -725,8 +724,9 @@ static void i915_record_ring_state(struct drm_device *dev,
 		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
 		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-		if (ring->id == RCS)
-			error->bbaddr = I915_READ64(BB_ADDR);
+		error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+		if (INTEL_INFO(dev)->gen >= 8)
+			error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
 		error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
 	} else {
 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 271560080ad5..1d44c793bdf4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3139,10 +3139,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
  * Returns true when a page flip has completed.
  */
 static bool i8xx_handle_vblank(struct drm_device *dev,
-			       int pipe, u16 iir)
+			       int plane, int pipe, u32 iir)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
 	if (!drm_handle_vblank(dev, pipe))
 		return false;
@@ -3150,7 +3150,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
 	if ((iir & flip_pending) == 0)
 		return false;
 
-	intel_prepare_page_flip(dev, pipe);
+	intel_prepare_page_flip(dev, plane);
 
 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
@@ -3219,9 +3219,13 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 			notify_ring(dev, &dev_priv->ring[RCS]);
 
 		for_each_pipe(pipe) {
+			int plane = pipe;
+			if (IS_MOBILE(dev))
+				plane = !plane;
+
 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
-			    i8xx_handle_vblank(dev, pipe, iir))
-				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+			    i8xx_handle_vblank(dev, plane, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 				i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -3896,8 +3900,8 @@ void hsw_pc8_disable_interrupts(struct drm_device *dev)
 	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
 	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
 
-	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
-	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
+	ironlake_disable_display_irq(dev_priv, 0xffffffff);
+	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
 	ilk_disable_gt_irq(dev_priv, 0xffffffff);
 	snb_disable_pm_irq(dev_priv, 0xffffffff);
 
@@ -3911,34 +3915,26 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
-	uint32_t val, expected;
+	uint32_t val;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
 	val = I915_READ(DEIMR);
-	expected = ~DE_PCH_EVENT_IVB;
-	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
+	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
 
-	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
-	expected = ~SDE_HOTPLUG_MASK_CPT;
-	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
-	     val, expected);
+	val = I915_READ(SDEIMR);
+	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
 
 	val = I915_READ(GTIMR);
-	expected = 0xffffffff;
-	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
+	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
 
 	val = I915_READ(GEN6_PMIMR);
-	expected = 0xffffffff;
-	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
-	     expected);
+	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
 
 	dev_priv->pc8.irqs_disabled = false;
 
 	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
-	ibx_enable_display_interrupt(dev_priv,
-				     ~dev_priv->pc8.regsave.sdeimr &
-				     ~SDE_HOTPLUG_MASK_CPT);
+	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
 	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
 	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
 	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3be449d884a7..f1eece4a63d5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -362,6 +362,7 @@
 #define  IOSF_PORT_CCK			0x14
 #define  IOSF_PORT_CCU			0xA9
 #define  IOSF_PORT_GPS_CORE		0x48
+#define  IOSF_PORT_FLISDSI		0x1B
 #define VLV_IOSF_DATA			(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR			(VLV_DISPLAY_BASE + 0x2108)
 
@@ -734,6 +735,8 @@
 #define HWSTAM		0x02098
 #define DMA_FADD_I8XX	0x020d0
 #define RING_BBSTATE(base)	((base)+0x110)
+#define RING_BBADDR(base)	((base)+0x140)
+#define RING_BBADDR_UDW(base)	((base)+0x168) /* gen8+ */
 
 #define ERROR_GEN6	0x040a0
 #define GEN7_ERR_INT	0x44040
@@ -924,7 +927,6 @@
 #define   CM0_COLOR_EVICT_DISABLE (1<<3)
 #define   CM0_DEPTH_WRITE_DISABLE (1<<1)
 #define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
-#define BB_ADDR		0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL	0x02170 /* 915+ only */
 #define GFX_FLSH_CNTL_GEN6	0x101008
 #define   GFX_FLSH_CNTL_EN	(1<<0)
@@ -1001,6 +1003,7 @@
 
 #define GEN7_FF_THREAD_MODE		0x20a0
 #define   GEN7_FF_SCHED_MASK		0x0077070
+#define   GEN8_FF_DS_REF_CNT_FFME	(1 << 19)
 #define   GEN7_FF_TS_SCHED_HS1		(0x5<<16)
 #define   GEN7_FF_TS_SCHED_HS0		(0x3<<16)
 #define   GEN7_FF_TS_SCHED_LOAD_BALANCE	(0x1<<16)
@@ -1028,14 +1031,14 @@
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
 #define   FBC_CTL_C3_IDLE	(1<<13)
 #define   FBC_CTL_STRIDE_SHIFT	(5)
-#define   FBC_CTL_FENCENO	(1<<0)
+#define   FBC_CTL_FENCENO_SHIFT	(0)
 #define FBC_COMMAND		0x0320c
 #define   FBC_CMD_COMPRESS	(1<<0)
 #define FBC_STATUS		0x03210
 #define   FBC_STAT_COMPRESSING	(1<<31)
 #define   FBC_STAT_COMPRESSED	(1<<30)
 #define   FBC_STAT_MODIFIED	(1<<29)
-#define   FBC_STAT_CURRENT_LINE	(1<<0)
+#define   FBC_STAT_CURRENT_LINE_SHIFT	(0)
 #define FBC_CONTROL2		0x03214
 #define   FBC_CTL_FENCE_DBL	(0<<4)
 #define   FBC_CTL_IDLE_IMM	(0<<2)
@@ -4165,6 +4168,10 @@
 #define GEN7_L3SQCREG4				0xb034
 #define   L3SQ_URB_READ_CAM_MATCH_DISABLE	(1<<27)
 
+/* GEN8 chicken */
+#define HDC_CHICKEN0				0x7300
+#define  HDC_FORCE_NON_COHERENT			(1<<4)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		0x9030
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 05d8b1680c22..33bcae314bf8 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -40,10 +40,13 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 raw_time; /* 32b value may overflow during fixed point math */
 	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
+	u32 ret;
 
 	if (!intel_enable_rc6(dev))
 		return 0;
 
+	intel_runtime_pm_get(dev_priv);
+
 	/* On VLV, residency time is in CZ units rather than 1.28us */
 	if (IS_VALLEYVIEW(dev)) {
 		u32 clkctl2;
@@ -52,7 +55,8 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
 			CLK_CTL2_CZCOUNT_30NS_SHIFT;
 		if (!clkctl2) {
 			WARN(!clkctl2, "bogus CZ count value");
-			return 0;
+			ret = 0;
+			goto out;
 		}
 		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
@@ -62,7 +66,11 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
 	}
 
 	raw_time = I915_READ(reg) * units;
-	return DIV_ROUND_UP_ULL(raw_time, div);
+	ret = DIV_ROUND_UP_ULL(raw_time, div);
+
+out:
+	intel_runtime_pm_put(dev_priv);
+	return ret;
 }
 
 static ssize_t
@@ -448,7 +456,9 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz) {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e4fba39631a5..f88e5079a3f5 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -327,12 +327,12 @@ static int intel_bios_ssc_frequency(struct drm_device *dev,
 {
 	switch (INTEL_INFO(dev)->gen) {
 	case 2:
-		return alternate ? 66 : 48;
+		return alternate ? 66667 : 48000;
 	case 3:
 	case 4:
-		return alternate ? 100 : 96;
+		return alternate ? 100000 : 96000;
 	default:
-		return alternate ? 100 : 120;
+		return alternate ? 100000 : 120000;
 	}
 }
 
@@ -796,7 +796,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 	 */
 	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
 			!HAS_PCH_SPLIT(dev));
-	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+	DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
 
 	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
 		struct ddi_vbt_port_info *info =
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index f580a2b0ddd3..81ed58cb7b31 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -39,7 +39,7 @@ struct vbt_header {
39 u8 reserved0; 39 u8 reserved0;
40 u32 bdb_offset; /**< from beginning of VBT */ 40 u32 bdb_offset; /**< from beginning of VBT */
41 u32 aim_offset[4]; /**< from beginning of VBT */ 41 u32 aim_offset[4]; /**< from beginning of VBT */
42} __attribute__((packed)); 42} __packed;
43 43
44struct bdb_header { 44struct bdb_header {
45 u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */ 45 u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
@@ -65,7 +65,7 @@ struct vbios_data {
65 u8 rsvd4; /* popup memory size */ 65 u8 rsvd4; /* popup memory size */
66 u8 resize_pci_bios; 66 u8 resize_pci_bios;
67 u8 rsvd5; /* is crt already on ddc2 */ 67 u8 rsvd5; /* is crt already on ddc2 */
68} __attribute__((packed)); 68} __packed;
69 69
70/* 70/*
71 * There are several types of BIOS data blocks (BDBs), each block has 71 * There are several types of BIOS data blocks (BDBs), each block has
@@ -142,7 +142,7 @@ struct bdb_general_features {
142 u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ 142 u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
143 u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ 143 u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
144 u8 rsvd11:3; /* finish byte */ 144 u8 rsvd11:3; /* finish byte */
145} __attribute__((packed)); 145} __packed;
146 146
147/* pre-915 */ 147/* pre-915 */
148#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */ 148#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -225,7 +225,7 @@ struct old_child_dev_config {
225 u8 dvo2_wiring; 225 u8 dvo2_wiring;
226 u16 extended_type; 226 u16 extended_type;
227 u8 dvo_function; 227 u8 dvo_function;
228} __attribute__((packed)); 228} __packed;
229 229
230/* This one contains field offsets that are known to be common for all BDB 230/* This one contains field offsets that are known to be common for all BDB
231 * versions. Notice that the meaning of the contents may still change, 231 * versions. Notice that the meaning of the contents may still change,
@@ -238,7 +238,7 @@ struct common_child_dev_config {
238 u8 not_common2[2]; 238 u8 not_common2[2];
239 u8 ddc_pin; 239 u8 ddc_pin;
240 u16 edid_ptr; 240 u16 edid_ptr;
241} __attribute__((packed)); 241} __packed;
242 242
243/* This field changes depending on the BDB version, so the most reliable way to 243/* This field changes depending on the BDB version, so the most reliable way to
244 * read it is by checking the BDB version and reading the raw pointer. */ 244 * read it is by checking the BDB version and reading the raw pointer. */
@@ -279,7 +279,7 @@ struct bdb_general_definitions {
279 * sizeof(child_device_config); 279 * sizeof(child_device_config);
280 */ 280 */
281 union child_device_config devices[0]; 281 union child_device_config devices[0];
282} __attribute__((packed)); 282} __packed;
283 283
284struct bdb_lvds_options { 284struct bdb_lvds_options {
285 u8 panel_type; 285 u8 panel_type;
@@ -293,7 +293,7 @@ struct bdb_lvds_options {
293 u8 lvds_edid:1; 293 u8 lvds_edid:1;
294 u8 rsvd2:1; 294 u8 rsvd2:1;
295 u8 rsvd4; 295 u8 rsvd4;
296} __attribute__((packed)); 296} __packed;
297 297
298/* LFP pointer table contains entries to the struct below */ 298/* LFP pointer table contains entries to the struct below */
299struct bdb_lvds_lfp_data_ptr { 299struct bdb_lvds_lfp_data_ptr {
@@ -303,12 +303,12 @@ struct bdb_lvds_lfp_data_ptr {
303 u8 dvo_table_size; 303 u8 dvo_table_size;
304 u16 panel_pnp_id_offset; 304 u16 panel_pnp_id_offset;
305 u8 pnp_table_size; 305 u8 pnp_table_size;
306} __attribute__((packed)); 306} __packed;
307 307
308struct bdb_lvds_lfp_data_ptrs { 308struct bdb_lvds_lfp_data_ptrs {
309 u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ 309 u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
310 struct bdb_lvds_lfp_data_ptr ptr[16]; 310 struct bdb_lvds_lfp_data_ptr ptr[16];
311} __attribute__((packed)); 311} __packed;
312 312
313/* LFP data has 3 blocks per entry */ 313/* LFP data has 3 blocks per entry */
314struct lvds_fp_timing { 314struct lvds_fp_timing {
@@ -325,7 +325,7 @@ struct lvds_fp_timing {
325 u32 pfit_reg; 325 u32 pfit_reg;
326 u32 pfit_reg_val; 326 u32 pfit_reg_val;
327 u16 terminator; 327 u16 terminator;
328} __attribute__((packed)); 328} __packed;
329 329
330struct lvds_dvo_timing { 330struct lvds_dvo_timing {
331 u16 clock; /**< In 10khz */ 331 u16 clock; /**< In 10khz */
@@ -353,7 +353,7 @@ struct lvds_dvo_timing {
353 u8 vsync_positive:1; 353 u8 vsync_positive:1;
354 u8 hsync_positive:1; 354 u8 hsync_positive:1;
355 u8 rsvd2:1; 355 u8 rsvd2:1;
356} __attribute__((packed)); 356} __packed;
357 357
358struct lvds_pnp_id { 358struct lvds_pnp_id {
359 u16 mfg_name; 359 u16 mfg_name;
@@ -361,17 +361,17 @@ struct lvds_pnp_id {
361 u32 serial; 361 u32 serial;
362 u8 mfg_week; 362 u8 mfg_week;
363 u8 mfg_year; 363 u8 mfg_year;
364} __attribute__((packed)); 364} __packed;
365 365
366struct bdb_lvds_lfp_data_entry { 366struct bdb_lvds_lfp_data_entry {
367 struct lvds_fp_timing fp_timing; 367 struct lvds_fp_timing fp_timing;
368 struct lvds_dvo_timing dvo_timing; 368 struct lvds_dvo_timing dvo_timing;
369 struct lvds_pnp_id pnp_id; 369 struct lvds_pnp_id pnp_id;
370} __attribute__((packed)); 370} __packed;
371 371
372struct bdb_lvds_lfp_data { 372struct bdb_lvds_lfp_data {
373 struct bdb_lvds_lfp_data_entry data[16]; 373 struct bdb_lvds_lfp_data_entry data[16];
374} __attribute__((packed)); 374} __packed;
375 375
376struct aimdb_header { 376struct aimdb_header {
377 char signature[16]; 377 char signature[16];
@@ -379,12 +379,12 @@ struct aimdb_header {
379 u16 aimdb_version; 379 u16 aimdb_version;
380 u16 aimdb_header_size; 380 u16 aimdb_header_size;
381 u16 aimdb_size; 381 u16 aimdb_size;
382} __attribute__((packed)); 382} __packed;
383 383
384struct aimdb_block { 384struct aimdb_block {
385 u8 aimdb_id; 385 u8 aimdb_id;
386 u16 aimdb_size; 386 u16 aimdb_size;
387} __attribute__((packed)); 387} __packed;
388 388
389struct vch_panel_data { 389struct vch_panel_data {
390 u16 fp_timing_offset; 390 u16 fp_timing_offset;
@@ -395,12 +395,12 @@ struct vch_panel_data {
395 u8 text_fitting_size; 395 u8 text_fitting_size;
396 u16 graphics_fitting_offset; 396 u16 graphics_fitting_offset;
397 u8 graphics_fitting_size; 397 u8 graphics_fitting_size;
398} __attribute__((packed)); 398} __packed;
399 399
400struct vch_bdb_22 { 400struct vch_bdb_22 {
401 struct aimdb_block aimdb_block; 401 struct aimdb_block aimdb_block;
402 struct vch_panel_data panels[16]; 402 struct vch_panel_data panels[16];
403} __attribute__((packed)); 403} __packed;
404 404
405struct bdb_sdvo_lvds_options { 405struct bdb_sdvo_lvds_options {
406 u8 panel_backlight; 406 u8 panel_backlight;
@@ -416,7 +416,7 @@ struct bdb_sdvo_lvds_options {
416 u8 panel_misc_bits_2; 416 u8 panel_misc_bits_2;
417 u8 panel_misc_bits_3; 417 u8 panel_misc_bits_3;
418 u8 panel_misc_bits_4; 418 u8 panel_misc_bits_4;
419} __attribute__((packed)); 419} __packed;
420 420
421 421
422#define BDB_DRIVER_FEATURE_NO_LVDS 0 422#define BDB_DRIVER_FEATURE_NO_LVDS 0
@@ -462,7 +462,7 @@ struct bdb_driver_features {
462 462
463 u8 hdmi_termination; 463 u8 hdmi_termination;
464 u8 custom_vbt_version; 464 u8 custom_vbt_version;
465} __attribute__((packed)); 465} __packed;
466 466
467#define EDP_18BPP 0 467#define EDP_18BPP 0
468#define EDP_24BPP 1 468#define EDP_24BPP 1
@@ -487,14 +487,14 @@ struct edp_power_seq {
487 u16 t9; 487 u16 t9;
488 u16 t10; 488 u16 t10;
489 u16 t11_t12; 489 u16 t11_t12;
490} __attribute__ ((packed)); 490} __packed;
491 491
492struct edp_link_params { 492struct edp_link_params {
493 u8 rate:4; 493 u8 rate:4;
494 u8 lanes:4; 494 u8 lanes:4;
495 u8 preemphasis:4; 495 u8 preemphasis:4;
496 u8 vswing:4; 496 u8 vswing:4;
497} __attribute__ ((packed)); 497} __packed;
498 498
499struct bdb_edp { 499struct bdb_edp {
500 struct edp_power_seq power_seqs[16]; 500 struct edp_power_seq power_seqs[16];
@@ -505,7 +505,7 @@ struct bdb_edp {
505 /* ith bit indicates enabled/disabled for (i+1)th panel */ 505 /* ith bit indicates enabled/disabled for (i+1)th panel */
506 u16 edp_s3d_feature; 506 u16 edp_s3d_feature;
507 u16 edp_t3_optimization; 507 u16 edp_t3_optimization;
508} __attribute__ ((packed)); 508} __packed;
509 509
510void intel_setup_bios(struct drm_device *dev); 510void intel_setup_bios(struct drm_device *dev);
511int intel_parse_bios(struct drm_device *dev); 511int intel_parse_bios(struct drm_device *dev);
@@ -733,6 +733,6 @@ struct bdb_mipi {
733 u32 hl_switch_cnt; 733 u32 hl_switch_cnt;
734 u32 lp_byte_clk; 734 u32 lp_byte_clk;
735 u32 clk_lane_switch_cnt; 735 u32 clk_lane_switch_cnt;
736} __attribute__((packed)); 736} __packed;
737 737
738#endif /* _I830_BIOS_H_ */ 738#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index c8382f55870c..d7d2683b89df 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -73,7 +73,7 @@ static const u32 hsw_ddi_translations_hdmi[] = {
73}; 73};
74 74
75static const u32 bdw_ddi_translations_edp[] = { 75static const u32 bdw_ddi_translations_edp[] = {
76 0x00FFFFFF, 0x00000012, /* DP parameters */ 76 0x00FFFFFF, 0x00000012, /* eDP parameters */
77 0x00EBAFFF, 0x00020011, 77 0x00EBAFFF, 0x00020011,
78 0x00C71FFF, 0x0006000F, 78 0x00C71FFF, 0x0006000F,
79 0x00FFFFFF, 0x00020011, 79 0x00FFFFFF, 0x00020011,
@@ -696,21 +696,23 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */,
696 *n2_out = best.n2; 696 *n2_out = best.n2;
697 *p_out = best.p; 697 *p_out = best.p;
698 *r2_out = best.r2; 698 *r2_out = best.r2;
699
700 DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
701 clock, *p_out, *n2_out, *r2_out);
702} 699}
703 700
704bool intel_ddi_pll_mode_set(struct drm_crtc *crtc) 701/*
702 * Tries to find a PLL for the CRTC. If it finds, it increases the refcount and
703 * stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to
704 * steal the selected PLL. You need to call intel_ddi_pll_enable to actually
705 * enable the PLL.
706 */
707bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
705{ 708{
706 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 709 struct drm_crtc *crtc = &intel_crtc->base;
707 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 710 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
708 struct drm_encoder *encoder = &intel_encoder->base; 711 struct drm_encoder *encoder = &intel_encoder->base;
709 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 712 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
710 struct intel_ddi_plls *plls = &dev_priv->ddi_plls; 713 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
711 int type = intel_encoder->type; 714 int type = intel_encoder->type;
712 enum pipe pipe = intel_crtc->pipe; 715 enum pipe pipe = intel_crtc->pipe;
713 uint32_t reg, val;
714 int clock = intel_crtc->config.port_clock; 716 int clock = intel_crtc->config.port_clock;
715 717
716 intel_ddi_put_crtc_pll(crtc); 718 intel_ddi_put_crtc_pll(crtc);
@@ -734,10 +736,8 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
734 return false; 736 return false;
735 } 737 }
736 738
737 /* We don't need to turn any PLL on because we'll use LCPLL. */
738 return true;
739
740 } else if (type == INTEL_OUTPUT_HDMI) { 739 } else if (type == INTEL_OUTPUT_HDMI) {
740 uint32_t reg, val;
741 unsigned p, n2, r2; 741 unsigned p, n2, r2;
742 742
743 intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); 743 intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
@@ -767,6 +767,9 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
767 return false; 767 return false;
768 } 768 }
769 769
770 DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
771 clock, p, n2, r2);
772
770 if (reg == WRPLL_CTL1) { 773 if (reg == WRPLL_CTL1) {
771 plls->wrpll1_refcount++; 774 plls->wrpll1_refcount++;
772 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; 775 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
@@ -780,29 +783,98 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
780 DRM_DEBUG_KMS("Using SPLL on pipe %c\n", 783 DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
781 pipe_name(pipe)); 784 pipe_name(pipe));
782 plls->spll_refcount++; 785 plls->spll_refcount++;
783 reg = SPLL_CTL;
784 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; 786 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
785 } else { 787 } else {
786 DRM_ERROR("SPLL already in use\n"); 788 DRM_ERROR("SPLL already in use\n");
787 return false; 789 return false;
788 } 790 }
789 791
790 WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
791 "SPLL already enabled\n");
792
793 val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
794
795 } else { 792 } else {
796 WARN(1, "Invalid DDI encoder type %d\n", type); 793 WARN(1, "Invalid DDI encoder type %d\n", type);
797 return false; 794 return false;
798 } 795 }
799 796
800 I915_WRITE(reg, val);
801 udelay(20);
802
803 return true; 797 return true;
804} 798}
805 799
800/*
801 * To be called after intel_ddi_pll_select(). That one selects the PLL to be
802 * used, this one actually enables the PLL.
803 */
804void intel_ddi_pll_enable(struct intel_crtc *crtc)
805{
806 struct drm_device *dev = crtc->base.dev;
807 struct drm_i915_private *dev_priv = dev->dev_private;
808 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
809 int clock = crtc->config.port_clock;
810 uint32_t reg, cur_val, new_val;
811 int refcount;
812 const char *pll_name;
813 uint32_t enable_bit = (1 << 31);
814 unsigned int p, n2, r2;
815
816 BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
817 BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
818
819 switch (crtc->ddi_pll_sel) {
820 case PORT_CLK_SEL_LCPLL_2700:
821 case PORT_CLK_SEL_LCPLL_1350:
822 case PORT_CLK_SEL_LCPLL_810:
823 /*
824 * LCPLL should always be enabled at this point of the mode set
825 * sequence, so nothing to do.
826 */
827 return;
828
829 case PORT_CLK_SEL_SPLL:
830 pll_name = "SPLL";
831 reg = SPLL_CTL;
832 refcount = plls->spll_refcount;
833 new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
834 SPLL_PLL_SSC;
835 break;
836
837 case PORT_CLK_SEL_WRPLL1:
838 case PORT_CLK_SEL_WRPLL2:
839 if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
840 pll_name = "WRPLL1";
841 reg = WRPLL_CTL1;
842 refcount = plls->wrpll1_refcount;
843 } else {
844 pll_name = "WRPLL2";
845 reg = WRPLL_CTL2;
846 refcount = plls->wrpll2_refcount;
847 }
848
849 intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
850
851 new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
852 WRPLL_DIVIDER_REFERENCE(r2) |
853 WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
854
855 break;
856
857 case PORT_CLK_SEL_NONE:
858 WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
859 return;
860 default:
861 WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
862 return;
863 }
864
865 cur_val = I915_READ(reg);
866
867 WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
868 if (refcount == 1) {
869 WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
870 I915_WRITE(reg, new_val);
871 POSTING_READ(reg);
872 udelay(20);
873 } else {
874 WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
875 }
876}
877
806void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) 878void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
807{ 879{
808 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 880 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -1122,9 +1194,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1122 1194
1123 if (type == INTEL_OUTPUT_EDP) { 1195 if (type == INTEL_OUTPUT_EDP) {
1124 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1196 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1125 ironlake_edp_panel_vdd_on(intel_dp);
1126 ironlake_edp_panel_on(intel_dp); 1197 ironlake_edp_panel_on(intel_dp);
1127 ironlake_edp_panel_vdd_off(intel_dp, true);
1128 } 1198 }
1129 1199
1130 WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); 1200 WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1167,7 +1237,6 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1167 1237
1168 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 1238 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
1169 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1239 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1170 ironlake_edp_panel_vdd_on(intel_dp);
1171 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1240 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1172 ironlake_edp_panel_off(intel_dp); 1241 ironlake_edp_panel_off(intel_dp);
1173 } 1242 }
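
The split of intel_ddi_pll_mode_set into intel_ddi_pll_select and intel_ddi_pll_enable boils down to a reserve-then-enable pattern: select only bumps the PLL's refcount, and enable programs the hardware only when it finds itself to be the first user. A toy model of that idea (illustrative names, not the driver code):

#include <stdio.h>

struct toy_pll {
	const char *name;
	int refcount;
	int enabled;
};

static void pll_select(struct toy_pll *pll)
{
	pll->refcount++;		/* reserve only, no hardware access */
}

static void pll_enable(struct toy_pll *pll)
{
	if (pll->refcount < 1) {
		fprintf(stderr, "%s: enable without select\n", pll->name);
		return;
	}
	if (pll->refcount == 1) {
		pll->enabled = 1;	/* first user programs the PLL */
		printf("%s enabled\n", pll->name);
	} else if (!pll->enabled) {
		fprintf(stderr, "%s: shared but not enabled\n", pll->name);
	}
}

int main(void)
{
	struct toy_pll wrpll1 = { "WRPLL1", 0, 0 };

	pll_select(&wrpll1);	/* first CRTC reserves the PLL */
	pll_enable(&wrpll1);	/* ...and turns it on */
	pll_select(&wrpll1);	/* second CRTC shares it */
	pll_enable(&wrpll1);	/* already on: nothing to do */
	return 0;
}
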
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0bb3d6d596d9..72a83fabb105 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -90,8 +90,8 @@ intel_fdi_link_freq(struct drm_device *dev)
90 90
91static const intel_limit_t intel_limits_i8xx_dac = { 91static const intel_limit_t intel_limits_i8xx_dac = {
92 .dot = { .min = 25000, .max = 350000 }, 92 .dot = { .min = 25000, .max = 350000 },
93 .vco = { .min = 930000, .max = 1400000 }, 93 .vco = { .min = 908000, .max = 1512000 },
94 .n = { .min = 3, .max = 16 }, 94 .n = { .min = 2, .max = 16 },
95 .m = { .min = 96, .max = 140 }, 95 .m = { .min = 96, .max = 140 },
96 .m1 = { .min = 18, .max = 26 }, 96 .m1 = { .min = 18, .max = 26 },
97 .m2 = { .min = 6, .max = 16 }, 97 .m2 = { .min = 6, .max = 16 },
@@ -103,8 +103,8 @@ static const intel_limit_t intel_limits_i8xx_dac = {
103 103
104static const intel_limit_t intel_limits_i8xx_dvo = { 104static const intel_limit_t intel_limits_i8xx_dvo = {
105 .dot = { .min = 25000, .max = 350000 }, 105 .dot = { .min = 25000, .max = 350000 },
106 .vco = { .min = 930000, .max = 1400000 }, 106 .vco = { .min = 908000, .max = 1512000 },
107 .n = { .min = 3, .max = 16 }, 107 .n = { .min = 2, .max = 16 },
108 .m = { .min = 96, .max = 140 }, 108 .m = { .min = 96, .max = 140 },
109 .m1 = { .min = 18, .max = 26 }, 109 .m1 = { .min = 18, .max = 26 },
110 .m2 = { .min = 6, .max = 16 }, 110 .m2 = { .min = 6, .max = 16 },
@@ -116,8 +116,8 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
116 116
117static const intel_limit_t intel_limits_i8xx_lvds = { 117static const intel_limit_t intel_limits_i8xx_lvds = {
118 .dot = { .min = 25000, .max = 350000 }, 118 .dot = { .min = 25000, .max = 350000 },
119 .vco = { .min = 930000, .max = 1400000 }, 119 .vco = { .min = 908000, .max = 1512000 },
120 .n = { .min = 3, .max = 16 }, 120 .n = { .min = 2, .max = 16 },
121 .m = { .min = 96, .max = 140 }, 121 .m = { .min = 96, .max = 140 },
122 .m1 = { .min = 18, .max = 26 }, 122 .m1 = { .min = 18, .max = 26 },
123 .m2 = { .min = 6, .max = 16 }, 123 .m2 = { .min = 6, .max = 16 },
@@ -329,6 +329,8 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
329{ 329{
330 clock->m = clock->m1 * clock->m2; 330 clock->m = clock->m1 * clock->m2;
331 clock->p = clock->p1 * clock->p2; 331 clock->p = clock->p1 * clock->p2;
332 if (WARN_ON(clock->n == 0 || clock->p == 0))
333 return;
332 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 334 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
333 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 335 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
334} 336}
@@ -430,6 +432,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
430{ 432{
431 clock->m = clock->m2 + 2; 433 clock->m = clock->m2 + 2;
432 clock->p = clock->p1 * clock->p2; 434 clock->p = clock->p1 * clock->p2;
435 if (WARN_ON(clock->n == 0 || clock->p == 0))
436 return;
433 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 437 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
434 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 438 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
435} 439}
@@ -443,6 +447,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
443{ 447{
444 clock->m = i9xx_dpll_compute_m(clock); 448 clock->m = i9xx_dpll_compute_m(clock);
445 clock->p = clock->p1 * clock->p2; 449 clock->p = clock->p1 * clock->p2;
450 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
451 return;
446 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 452 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
447 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 453 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
448} 454}
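
Each of the three clock helpers now bails out on a zero divider before computing vco and dot. A minimal, compilable sketch of the guarded computation (DIV_ROUND_CLOSEST redefined here only for the example, dividers simplified):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* m, n, p stand in for the DPLL dividers; a zero n or p would
 * otherwise divide by zero in the vco/dot computation. */
struct toy_clock {
	int m, n, p;
	int vco, dot;
};

static void toy_clock_compute(int refclk_khz, struct toy_clock *clock)
{
	if (clock->n == 0 || clock->p == 0) {
		fprintf(stderr, "bogus divider, skipping\n");
		return;
	}
	clock->vco = DIV_ROUND_CLOSEST(refclk_khz * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

int main(void)
{
	struct toy_clock c = { .m = 100, .n = 4, .p = 10 };

	toy_clock_compute(96000, &c);	/* 96 MHz reference */
	printf("vco=%d kHz dot=%d kHz\n", c.vco, c.dot);
	return 0;
}
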
@@ -1361,6 +1367,10 @@ static void intel_init_dpio(struct drm_device *dev)
1361 if (!IS_VALLEYVIEW(dev)) 1367 if (!IS_VALLEYVIEW(dev))
1362 return; 1368 return;
1363 1369
1370 /* Enable the CRI clock source so we can get at the display */
1371 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
1372 DPLL_INTEGRATED_CRI_CLK_VLV);
1373
1364 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; 1374 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1365 /* 1375 /*
1366 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1376 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
@@ -4751,9 +4761,8 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4751 refclk = 100000; 4761 refclk = 100000;
4752 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4762 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4753 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 4763 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4754 refclk = dev_priv->vbt.lvds_ssc_freq * 1000; 4764 refclk = dev_priv->vbt.lvds_ssc_freq;
4755 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 4765 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
4756 refclk / 1000);
4757 } else if (!IS_GEN2(dev)) { 4766 } else if (!IS_GEN2(dev)) {
4758 refclk = 96000; 4767 refclk = 96000;
4759 } else { 4768 } else {
@@ -5899,9 +5908,9 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
5899 } 5908 }
5900 5909
5901 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 5910 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5902 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 5911 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
5903 dev_priv->vbt.lvds_ssc_freq); 5912 dev_priv->vbt.lvds_ssc_freq);
5904 return dev_priv->vbt.lvds_ssc_freq * 1000; 5913 return dev_priv->vbt.lvds_ssc_freq;
5905 } 5914 }
5906 5915
5907 return 120000; 5916 return 120000;
@@ -6163,7 +6172,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
6163 factor = 21; 6172 factor = 21;
6164 if (is_lvds) { 6173 if (is_lvds) {
6165 if ((intel_panel_use_ssc(dev_priv) && 6174 if ((intel_panel_use_ssc(dev_priv) &&
6166 dev_priv->vbt.lvds_ssc_freq == 100) || 6175 dev_priv->vbt.lvds_ssc_freq == 100000) ||
6167 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) 6176 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
6168 factor = 25; 6177 factor = 25;
6169 } else if (intel_crtc->config.sdvo_tv_clock) 6178 } else if (intel_crtc->config.sdvo_tv_clock)
@@ -6484,7 +6493,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6484 uint32_t val; 6493 uint32_t val;
6485 6494
6486 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) 6495 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6487 WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", 6496 WARN(crtc->active, "CRTC for pipe %c enabled\n",
6488 pipe_name(crtc->pipe)); 6497 pipe_name(crtc->pipe));
6489 6498
6490 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 6499 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -6504,7 +6513,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6504 6513
6505 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 6514 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
6506 val = I915_READ(DEIMR); 6515 val = I915_READ(DEIMR);
6507 WARN((val & ~DE_PCH_EVENT_IVB) != val, 6516 WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
6508 "Unexpected DEIMR bits enabled: 0x%x\n", val); 6517 "Unexpected DEIMR bits enabled: 0x%x\n", val);
6509 val = I915_READ(SDEIMR); 6518 val = I915_READ(SDEIMR);
6510 WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff, 6519 WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
@@ -6628,6 +6637,8 @@ void hsw_enable_pc8_work(struct work_struct *__work)
6628 struct drm_device *dev = dev_priv->dev; 6637 struct drm_device *dev = dev_priv->dev;
6629 uint32_t val; 6638 uint32_t val;
6630 6639
6640 WARN_ON(!HAS_PC8(dev));
6641
6631 if (dev_priv->pc8.enabled) 6642 if (dev_priv->pc8.enabled)
6632 return; 6643 return;
6633 6644
@@ -6644,6 +6655,8 @@ void hsw_enable_pc8_work(struct work_struct *__work)
6644 lpt_disable_clkout_dp(dev); 6655 lpt_disable_clkout_dp(dev);
6645 hsw_pc8_disable_interrupts(dev); 6656 hsw_pc8_disable_interrupts(dev);
6646 hsw_disable_lcpll(dev_priv, true, true); 6657 hsw_disable_lcpll(dev_priv, true, true);
6658
6659 intel_runtime_pm_put(dev_priv);
6647} 6660}
6648 6661
6649static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv) 6662static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
@@ -6673,12 +6686,16 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6673 if (dev_priv->pc8.disable_count != 1) 6686 if (dev_priv->pc8.disable_count != 1)
6674 return; 6687 return;
6675 6688
6689 WARN_ON(!HAS_PC8(dev));
6690
6676 cancel_delayed_work_sync(&dev_priv->pc8.enable_work); 6691 cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
6677 if (!dev_priv->pc8.enabled) 6692 if (!dev_priv->pc8.enabled)
6678 return; 6693 return;
6679 6694
6680 DRM_DEBUG_KMS("Disabling package C8+\n"); 6695 DRM_DEBUG_KMS("Disabling package C8+\n");
6681 6696
6697 intel_runtime_pm_get(dev_priv);
6698
6682 hsw_restore_lcpll(dev_priv); 6699 hsw_restore_lcpll(dev_priv);
6683 hsw_pc8_restore_interrupts(dev); 6700 hsw_pc8_restore_interrupts(dev);
6684 lpt_init_pch_refclk(dev); 6701 lpt_init_pch_refclk(dev);
@@ -6885,8 +6902,9 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6885 int plane = intel_crtc->plane; 6902 int plane = intel_crtc->plane;
6886 int ret; 6903 int ret;
6887 6904
6888 if (!intel_ddi_pll_mode_set(crtc)) 6905 if (!intel_ddi_pll_select(intel_crtc))
6889 return -EINVAL; 6906 return -EINVAL;
6907 intel_ddi_pll_enable(intel_crtc);
6890 6908
6891 if (intel_crtc->config.has_dp_encoder) 6909 if (intel_crtc->config.has_dp_encoder)
6892 intel_dp_set_m_n(intel_crtc); 6910 intel_dp_set_m_n(intel_crtc);
@@ -7870,7 +7888,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
7870 u32 dpll = pipe_config->dpll_hw_state.dpll; 7888 u32 dpll = pipe_config->dpll_hw_state.dpll;
7871 7889
7872 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 7890 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7873 return dev_priv->vbt.lvds_ssc_freq * 1000; 7891 return dev_priv->vbt.lvds_ssc_freq;
7874 else if (HAS_PCH_SPLIT(dev)) 7892 else if (HAS_PCH_SPLIT(dev))
7875 return 120000; 7893 return 120000;
7876 else if (!IS_GEN2(dev)) 7894 else if (!IS_GEN2(dev))
@@ -7933,12 +7951,17 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7933 else 7951 else
7934 i9xx_clock(refclk, &clock); 7952 i9xx_clock(refclk, &clock);
7935 } else { 7953 } else {
7936 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 7954 u32 lvds = I915_READ(LVDS);
7955 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
7937 7956
7938 if (is_lvds) { 7957 if (is_lvds) {
7939 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 7958 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
7940 DPLL_FPA01_P1_POST_DIV_SHIFT); 7959 DPLL_FPA01_P1_POST_DIV_SHIFT);
7941 clock.p2 = 14; 7960
7961 if (lvds & LVDS_CLKB_POWER_UP)
7962 clock.p2 = 7;
7963 else
7964 clock.p2 = 14;
7942 } else { 7965 } else {
7943 if (dpll & PLL_P1_DIVIDE_BY_TWO) 7966 if (dpll & PLL_P1_DIVIDE_BY_TWO)
7944 clock.p1 = 2; 7967 clock.p1 = 2;
@@ -10122,10 +10145,13 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
10122 intel_crtc->lut_b[i] = i; 10145 intel_crtc->lut_b[i] = i;
10123 } 10146 }
10124 10147
10125 /* Swap pipes & planes for FBC on pre-965 */ 10148 /*
10149 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
10150 * are hooked to pipe B. Hence we want plane A feeding pipe B.
10151 */
10126 intel_crtc->pipe = pipe; 10152 intel_crtc->pipe = pipe;
10127 intel_crtc->plane = pipe; 10153 intel_crtc->plane = pipe;
10128 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 10154 if (IS_MOBILE(dev) && INTEL_INFO(dev)->gen < 4) {
10129 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 10155 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
10130 intel_crtc->plane = !pipe; 10156 intel_crtc->plane = !pipe;
10131 } 10157 }
@@ -10779,17 +10805,10 @@ static void i915_disable_vga(struct drm_device *dev)
10779 10805
10780void intel_modeset_init_hw(struct drm_device *dev) 10806void intel_modeset_init_hw(struct drm_device *dev)
10781{ 10807{
10782 struct drm_i915_private *dev_priv = dev->dev_private;
10783
10784 intel_prepare_ddi(dev); 10808 intel_prepare_ddi(dev);
10785 10809
10786 intel_init_clock_gating(dev); 10810 intel_init_clock_gating(dev);
10787 10811
10788 /* Enable the CRI clock source so we can get at the display */
10789 if (IS_VALLEYVIEW(dev))
10790 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
10791 DPLL_INTEGRATED_CRI_CLK_VLV);
10792
10793 intel_init_dpio(dev); 10812 intel_init_dpio(dev);
10794 10813
10795 mutex_lock(&dev->struct_mutex); 10814 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 36f13c574571..7df5085973e9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1038,6 +1038,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
1038 I915_READ(pp_stat_reg), 1038 I915_READ(pp_stat_reg),
1039 I915_READ(pp_ctrl_reg)); 1039 I915_READ(pp_ctrl_reg));
1040 } 1040 }
1041
1042 DRM_DEBUG_KMS("Wait complete\n");
1041} 1043}
1042 1044
1043static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 1045static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
@@ -1093,6 +1095,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1093 if (ironlake_edp_have_panel_vdd(intel_dp)) 1095 if (ironlake_edp_have_panel_vdd(intel_dp))
1094 return; 1096 return;
1095 1097
1098 intel_runtime_pm_get(dev_priv);
1099
1096 DRM_DEBUG_KMS("Turning eDP VDD on\n"); 1100 DRM_DEBUG_KMS("Turning eDP VDD on\n");
1097 1101
1098 if (!ironlake_edp_have_panel_power(intel_dp)) 1102 if (!ironlake_edp_have_panel_power(intel_dp))
@@ -1141,7 +1145,11 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1141 /* Make sure sequencer is idle before allowing subsequent activity */ 1145 /* Make sure sequencer is idle before allowing subsequent activity */
1142 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 1146 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1143 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1147 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1144 msleep(intel_dp->panel_power_down_delay); 1148
1149 if ((pp & POWER_TARGET_ON) == 0)
1150 msleep(intel_dp->panel_power_cycle_delay);
1151
1152 intel_runtime_pm_put(dev_priv);
1145 } 1153 }
1146} 1154}
1147 1155
@@ -1234,20 +1242,16 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1234 1242
1235 DRM_DEBUG_KMS("Turn eDP power off\n"); 1243 DRM_DEBUG_KMS("Turn eDP power off\n");
1236 1244
1237 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1238
1239 pp = ironlake_get_pp_control(intel_dp); 1245 pp = ironlake_get_pp_control(intel_dp);
1240 /* We need to switch off panel power _and_ force vdd, for otherwise some 1246 /* We need to switch off panel power _and_ force vdd, for otherwise some
1241 * panels get very unhappy and cease to work. */ 1247 * panels get very unhappy and cease to work. */
1242 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1248 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1243 1249
1244 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1250 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1245 1251
1246 I915_WRITE(pp_ctrl_reg, pp); 1252 I915_WRITE(pp_ctrl_reg, pp);
1247 POSTING_READ(pp_ctrl_reg); 1253 POSTING_READ(pp_ctrl_reg);
1248 1254
1249 intel_dp->want_panel_vdd = false;
1250
1251 ironlake_wait_panel_off(intel_dp); 1255 ironlake_wait_panel_off(intel_dp);
1252} 1256}
1253 1257
@@ -1773,7 +1777,6 @@ static void intel_disable_dp(struct intel_encoder *encoder)
1773 1777
1774 /* Make sure the panel is off before trying to change the mode. But also 1778 /* Make sure the panel is off before trying to change the mode. But also
1775 * ensure that we have vdd while we switch off the panel. */ 1779 * ensure that we have vdd while we switch off the panel. */
1776 ironlake_edp_panel_vdd_on(intel_dp);
1777 ironlake_edp_backlight_off(intel_dp); 1780 ironlake_edp_backlight_off(intel_dp);
1778 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1781 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1779 ironlake_edp_panel_off(intel_dp); 1782 ironlake_edp_panel_off(intel_dp);
@@ -1942,18 +1945,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
1942 DP_LINK_STATUS_SIZE); 1945 DP_LINK_STATUS_SIZE);
1943} 1946}
1944 1947
1945#if 0
1946static char *voltage_names[] = {
1947 "0.4V", "0.6V", "0.8V", "1.2V"
1948};
1949static char *pre_emph_names[] = {
1950 "0dB", "3.5dB", "6dB", "9.5dB"
1951};
1952static char *link_train_names[] = {
1953 "pattern 1", "pattern 2", "idle", "off"
1954};
1955#endif
1956
1957/* 1948/*
1958 * These are source-specific values; current Intel hardware supports 1949 * These are source-specific values; current Intel hardware supports
1959 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1950 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
@@ -3083,9 +3074,12 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3083 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3074 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3084 struct intel_encoder *intel_encoder = &intel_dig_port->base; 3075 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3085 struct drm_device *dev = connector->dev; 3076 struct drm_device *dev = connector->dev;
3077 struct drm_i915_private *dev_priv = dev->dev_private;
3086 enum drm_connector_status status; 3078 enum drm_connector_status status;
3087 struct edid *edid = NULL; 3079 struct edid *edid = NULL;
3088 3080
3081 intel_runtime_pm_get(dev_priv);
3082
3089 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 3083 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3090 connector->base.id, drm_get_connector_name(connector)); 3084 connector->base.id, drm_get_connector_name(connector));
3091 3085
@@ -3097,7 +3091,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3097 status = g4x_dp_detect(intel_dp); 3091 status = g4x_dp_detect(intel_dp);
3098 3092
3099 if (status != connector_status_connected) 3093 if (status != connector_status_connected)
3100 return status; 3094 goto out;
3101 3095
3102 intel_dp_probe_oui(intel_dp); 3096 intel_dp_probe_oui(intel_dp);
3103 3097
@@ -3113,7 +3107,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3113 3107
3114 if (intel_encoder->type != INTEL_OUTPUT_EDP) 3108 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3115 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 3109 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3116 return connector_status_connected; 3110 status = connector_status_connected;
3111
3112out:
3113 intel_runtime_pm_put(dev_priv);
3114 return status;
3117} 3115}
3118 3116
3119static int intel_dp_get_modes(struct drm_connector *connector) 3117static int intel_dp_get_modes(struct drm_connector *connector)
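
intel_dp_detect now takes a runtime-PM reference for the whole probe and funnels every exit through an out: label so the reference is always dropped. A small stand-alone model of that control flow (status handling simplified, names illustrative):

#include <stdio.h>

enum status { DISCONNECTED, CONNECTED };

static int pm_refcount;
static void runtime_pm_get(void) { pm_refcount++; }
static void runtime_pm_put(void) { pm_refcount--; }

static enum status probe_sink(int present)
{
	return present ? CONNECTED : DISCONNECTED;
}

static enum status detect(int present)
{
	enum status status;

	runtime_pm_get();

	status = probe_sink(present);
	if (status != CONNECTED)
		goto out;	/* no early return: the put below must run */

	/* ...OUI probing and EDID reading would go here... */

	status = CONNECTED;
out:
	runtime_pm_put();
	return status;
}

int main(void)
{
	printf("status=%d, refcount=%d\n", detect(0), pm_refcount);
	return 0;
}
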
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2b5bcb617908..ea00068cced2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -155,6 +155,7 @@ struct intel_encoder {
155 155
156struct intel_panel { 156struct intel_panel {
157 struct drm_display_mode *fixed_mode; 157 struct drm_display_mode *fixed_mode;
158 struct drm_display_mode *downclock_mode;
158 int fitting_mode; 159 int fitting_mode;
159 160
160 /* backlight */ 161 /* backlight */
@@ -454,7 +455,7 @@ struct intel_hdmi {
454 bool rgb_quant_range_selectable; 455 bool rgb_quant_range_selectable;
455 void (*write_infoframe)(struct drm_encoder *encoder, 456 void (*write_infoframe)(struct drm_encoder *encoder,
456 enum hdmi_infoframe_type type, 457 enum hdmi_infoframe_type type,
457 const uint8_t *frame, ssize_t len); 458 const void *frame, ssize_t len);
458 void (*set_infoframes)(struct drm_encoder *encoder, 459 void (*set_infoframes)(struct drm_encoder *encoder,
459 struct drm_display_mode *adjusted_mode); 460 struct drm_display_mode *adjusted_mode);
460}; 461};
@@ -612,7 +613,8 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
612void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); 613void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
613void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); 614void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
614void intel_ddi_setup_hw_pll_state(struct drm_device *dev); 615void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
615bool intel_ddi_pll_mode_set(struct drm_crtc *crtc); 616bool intel_ddi_pll_select(struct intel_crtc *crtc);
617void intel_ddi_pll_enable(struct intel_crtc *crtc);
616void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); 618void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
617void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); 619void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
618void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); 620void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
@@ -702,7 +704,6 @@ void
702ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, 704ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
703 int dotclock); 705 int dotclock);
704bool intel_crtc_active(struct drm_crtc *crtc); 706bool intel_crtc_active(struct drm_crtc *crtc);
705void i915_disable_vga_mem(struct drm_device *dev);
706void hsw_enable_ips(struct intel_crtc *crtc); 707void hsw_enable_ips(struct intel_crtc *crtc);
707void hsw_disable_ips(struct intel_crtc *crtc); 708void hsw_disable_ips(struct intel_crtc *crtc);
708void intel_display_set_init_power(struct drm_device *dev, bool enable); 709void intel_display_set_init_power(struct drm_device *dev, bool enable);
@@ -823,7 +824,10 @@ void intel_panel_disable_backlight(struct intel_connector *connector);
823void intel_panel_destroy_backlight(struct drm_connector *connector); 824void intel_panel_destroy_backlight(struct drm_connector *connector);
824void intel_panel_init_backlight_funcs(struct drm_device *dev); 825void intel_panel_init_backlight_funcs(struct drm_device *dev);
825enum drm_connector_status intel_panel_detect(struct drm_device *dev); 826enum drm_connector_status intel_panel_detect(struct drm_device *dev);
826 827extern struct drm_display_mode *intel_find_panel_downclock(
828 struct drm_device *dev,
829 struct drm_display_mode *fixed_mode,
830 struct drm_connector *connector);
827 831
828/* intel_pm.c */ 832/* intel_pm.c */
829void intel_init_clock_gating(struct drm_device *dev); 833void intel_init_clock_gating(struct drm_device *dev);
@@ -858,6 +862,10 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv);
858void gen6_rps_boost(struct drm_i915_private *dev_priv); 862void gen6_rps_boost(struct drm_i915_private *dev_priv);
859void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); 863void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
860void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); 864void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
865void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
866void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
867void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
868void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
861void ilk_wm_get_hw_state(struct drm_device *dev); 869void ilk_wm_get_hw_state(struct drm_device *dev);
862 870
863 871
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 7b9b350d29ae..fabbf0d895cf 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -37,49 +37,18 @@
37static const struct intel_dsi_device intel_dsi_devices[] = { 37static const struct intel_dsi_device intel_dsi_devices[] = {
38}; 38};
39 39
40 40static void band_gap_reset(struct drm_i915_private *dev_priv)
41static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
42 u32 mask)
43{
44 u32 tmp = vlv_cck_read(dev_priv, reg);
45 tmp &= ~mask;
46 tmp |= val;
47 vlv_cck_write(dev_priv, reg, tmp);
48}
49
50static void band_gap_wa(struct drm_i915_private *dev_priv)
51{ 41{
52 mutex_lock(&dev_priv->dpio_lock); 42 mutex_lock(&dev_priv->dpio_lock);
53 43
54 /* Enable bandgap fix in GOP driver */ 44 vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
55 vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000); 45 vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
56 msleep(20); 46 vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
57 vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000); 47 udelay(150);
58 msleep(20); 48 vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
59 vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000); 49 vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
60 msleep(20);
61 vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
62 msleep(20);
63 vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
64 msleep(20);
65
66 /* Turn Display Trunk on */
67 vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
68 msleep(20);
69
70 vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
71 msleep(20);
72
73 vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
74 msleep(20);
75 vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
76 msleep(20);
77 vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
78 50
79 mutex_unlock(&dev_priv->dpio_lock); 51 mutex_unlock(&dev_priv->dpio_lock);
80
81 /* Need huge delay, otherwise clock is not stable */
82 msleep(100);
83} 52}
84 53
85static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector) 54static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
@@ -132,14 +101,47 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
132 vlv_enable_dsi_pll(encoder); 101 vlv_enable_dsi_pll(encoder);
133} 102}
134 103
104static void intel_dsi_device_ready(struct intel_encoder *encoder)
105{
106 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
107 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
108 int pipe = intel_crtc->pipe;
109 u32 val;
110
111 DRM_DEBUG_KMS("\n");
112
113 val = I915_READ(MIPI_PORT_CTRL(pipe));
114 I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
115 usleep_range(1000, 1500);
116 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
117 usleep_range(2000, 2500);
118 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
119 usleep_range(2000, 2500);
120 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
121 usleep_range(2000, 2500);
122 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
123 usleep_range(2000, 2500);
124}
135static void intel_dsi_pre_enable(struct intel_encoder *encoder) 125static void intel_dsi_pre_enable(struct intel_encoder *encoder)
136{ 126{
127 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
128
137 DRM_DEBUG_KMS("\n"); 129 DRM_DEBUG_KMS("\n");
130
131 if (intel_dsi->dev.dev_ops->panel_reset)
132 intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
133
134 /* put device in ready state */
135 intel_dsi_device_ready(encoder);
136
137 if (intel_dsi->dev.dev_ops->send_otp_cmds)
138 intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
138} 139}
139 140
140static void intel_dsi_enable(struct intel_encoder *encoder) 141static void intel_dsi_enable(struct intel_encoder *encoder)
141{ 142{
142 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 143 struct drm_device *dev = encoder->base.dev;
144 struct drm_i915_private *dev_priv = dev->dev_private;
143 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 145 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
144 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 146 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
145 int pipe = intel_crtc->pipe; 147 int pipe = intel_crtc->pipe;
@@ -147,41 +149,28 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
147 149
148 DRM_DEBUG_KMS("\n"); 150 DRM_DEBUG_KMS("\n");
149 151
150 temp = I915_READ(MIPI_DEVICE_READY(pipe));
151 if ((temp & DEVICE_READY) == 0) {
152 temp &= ~ULPS_STATE_MASK;
153 I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
154 } else if (temp & ULPS_STATE_MASK) {
155 temp &= ~ULPS_STATE_MASK;
156 I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
157 /*
158 * We need to ensure that there is a minimum of 1 ms time
159 * available before clearing the UPLS exit state.
160 */
161 msleep(2);
162 I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
163 }
164
165 if (is_cmd_mode(intel_dsi)) 152 if (is_cmd_mode(intel_dsi))
166 I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4); 153 I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
167 154 else {
168 if (is_vid_mode(intel_dsi)) {
169 msleep(20); /* XXX */ 155 msleep(20); /* XXX */
170 dpi_send_cmd(intel_dsi, TURN_ON); 156 dpi_send_cmd(intel_dsi, TURN_ON);
171 msleep(100); 157 msleep(100);
172 158
173 /* assert ip_tg_enable signal */ 159 /* assert ip_tg_enable signal */
174 temp = I915_READ(MIPI_PORT_CTRL(pipe)); 160 temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
161 temp = temp | intel_dsi->port_bits;
175 I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE); 162 I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
176 POSTING_READ(MIPI_PORT_CTRL(pipe)); 163 POSTING_READ(MIPI_PORT_CTRL(pipe));
177 } 164 }
178 165
179 intel_dsi->dev.dev_ops->enable(&intel_dsi->dev); 166 if (intel_dsi->dev.dev_ops->enable)
167 intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
180} 168}
181 169
182static void intel_dsi_disable(struct intel_encoder *encoder) 170static void intel_dsi_disable(struct intel_encoder *encoder)
183{ 171{
184 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 172 struct drm_device *dev = encoder->base.dev;
173 struct drm_i915_private *dev_priv = dev->dev_private;
185 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 174 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
186 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 175 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
187 int pipe = intel_crtc->pipe; 176 int pipe = intel_crtc->pipe;
@@ -189,8 +178,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
189 178
190 DRM_DEBUG_KMS("\n"); 179 DRM_DEBUG_KMS("\n");
191 180
192 intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
193
194 if (is_vid_mode(intel_dsi)) { 181 if (is_vid_mode(intel_dsi)) {
195 dpi_send_cmd(intel_dsi, SHUTDOWN); 182 dpi_send_cmd(intel_dsi, SHUTDOWN);
196 msleep(10); 183 msleep(10);
@@ -203,20 +190,54 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
203 msleep(2); 190 msleep(2);
204 } 191 }
205 192
206 temp = I915_READ(MIPI_DEVICE_READY(pipe)); 193 /* if disable packets are sent before the shutdown packet, then on
207 if (temp & DEVICE_READY) { 194 * some subsequent enable sequence a "turn on packet" error is observed */
208 temp &= ~DEVICE_READY; 195 if (intel_dsi->dev.dev_ops->disable)
209 temp &= ~ULPS_STATE_MASK; 196 intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
210 I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
211 }
212} 197}
213 198
214static void intel_dsi_post_disable(struct intel_encoder *encoder) 199static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
215{ 200{
201 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
202 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
203 int pipe = intel_crtc->pipe;
204 u32 val;
205
216 DRM_DEBUG_KMS("\n"); 206 DRM_DEBUG_KMS("\n");
217 207
208 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
209 usleep_range(2000, 2500);
210
211 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
212 usleep_range(2000, 2500);
213
214 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
215 usleep_range(2000, 2500);
216
217 val = I915_READ(MIPI_PORT_CTRL(pipe));
218 I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
219 usleep_range(1000, 1500);
220
221 if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
222 == 0x00000), 30))
223 DRM_ERROR("DSI LP not going Low\n");
224
225 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
226 usleep_range(2000, 2500);
227
218 vlv_disable_dsi_pll(encoder); 228 vlv_disable_dsi_pll(encoder);
219} 229}
230static void intel_dsi_post_disable(struct intel_encoder *encoder)
231{
232 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
233
234 DRM_DEBUG_KMS("\n");
235
236 intel_dsi_clear_device_ready(encoder);
237
238 if (intel_dsi->dev.dev_ops->disable_panel_power)
239 intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
240}
220 241
221static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, 242static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
222 enum pipe *pipe) 243 enum pipe *pipe)
@@ -353,11 +374,8 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
353 374
354 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); 375 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
355 376
356 /* Update the DSI PLL */
357 vlv_enable_dsi_pll(intel_encoder);
358
359 /* XXX: Location of the call */ 377 /* XXX: Location of the call */
360 band_gap_wa(dev_priv); 378 band_gap_reset(dev_priv);
361 379
362 /* escape clock divider, 20MHz, shared for A and C. device ready must be 380 /* escape clock divider, 20MHz, shared for A and C. device ready must be
363 * off when doing this! txclkesc? */ 381 * off when doing this! txclkesc? */
@@ -374,11 +392,7 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
374 I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff); 392 I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
375 I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff); 393 I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
376 394
377 I915_WRITE(MIPI_DPHY_PARAM(pipe), 395 I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
378 0x3c << EXIT_ZERO_COUNT_SHIFT |
379 0x1f << TRAIL_COUNT_SHIFT |
380 0xc5 << CLK_ZERO_COUNT_SHIFT |
381 0x1f << PREPARE_COUNT_SHIFT);
382 396
383 I915_WRITE(MIPI_DPI_RESOLUTION(pipe), 397 I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
384 adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT | 398 adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
@@ -426,9 +440,9 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
426 adjusted_mode->htotal, 440 adjusted_mode->htotal,
427 bpp, intel_dsi->lane_count) + 1); 441 bpp, intel_dsi->lane_count) + 1);
428 } 442 }
429 I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */ 443 I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
430 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */ 444 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
431 I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */ 445 I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
432 446
433 /* dphy stuff */ 447 /* dphy stuff */
434 448
@@ -443,29 +457,31 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
443 * 457 *
444 * XXX: write MIPI_STOP_STATE_STALL? 458 * XXX: write MIPI_STOP_STATE_STALL?
445 */ 459 */
446 I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46); 460 I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
461 intel_dsi->hs_to_lp_count);
447 462
448 /* XXX: low power clock equivalence in terms of byte clock. the number 463 /* XXX: low power clock equivalence in terms of byte clock. the number
449 * of byte clocks occupied in one low power clock. based on txbyteclkhs 464 * of byte clocks occupied in one low power clock. based on txbyteclkhs
450 * and txclkesc. txclkesc time / txbyteclk time * (105 + 465 * and txclkesc. txclkesc time / txbyteclk time * (105 +
451 * MIPI_STOP_STATE_STALL) / 105.??? 466 * MIPI_STOP_STATE_STALL) / 105.???
452 */ 467 */
453 I915_WRITE(MIPI_LP_BYTECLK(pipe), 4); 468 I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
454 469
455 /* the bw essential for transmitting 16 long packets containing 252 470 /* the bw essential for transmitting 16 long packets containing 252
456 * bytes meant for dcs write memory command is programmed in this 471 * bytes meant for dcs write memory command is programmed in this
457 * register in terms of byte clocks. based on dsi transfer rate and the 472 * register in terms of byte clocks. based on dsi transfer rate and the
458 * number of lanes configured the time taken to transmit 16 long packets 473 * number of lanes configured the time taken to transmit 16 long packets
459 * in a dsi stream varies. */ 474 * in a dsi stream varies. */
460 I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820); 475 I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
461 476
462 I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe), 477 I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
463 0xa << LP_HS_SSW_CNT_SHIFT | 478 intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
464 0x14 << HS_LP_PWR_SW_CNT_SHIFT); 479 intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
465 480
466 if (is_vid_mode(intel_dsi)) 481 if (is_vid_mode(intel_dsi))
467 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe), 482 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
468 intel_dsi->video_mode_format); 483 intel_dsi->video_frmt_cfg_bits |
484 intel_dsi->video_mode_format);
469} 485}
470 486
471static enum drm_connector_status 487static enum drm_connector_status
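
The new intel_dsi_device_ready helper replaces the ad-hoc ULPS handling with a fixed write/delay sequence on MIPI_DEVICE_READY. Reduced to a table of steps (values shown symbolically, and the LP_OUTPUT_HOLD handling on the port control register left out), the sequence looks roughly like this:

#include <stdio.h>

struct ready_step {
	const char *what;	/* value written to MIPI_DEVICE_READY */
	unsigned int usec;	/* settle delay after the write */
};

static const struct ready_step steps[] = {
	{ "DEVICE_READY | ULPS_STATE_EXIT", 2500 },
	{ "DEVICE_READY",                   2500 },
	{ "0 (not ready)",                  2500 },
	{ "DEVICE_READY",                   2500 },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
		printf("write %-34s then sleep ~%u us\n",
		       steps[i].what, steps[i].usec);
	return 0;
}
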
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index c7765f33d524..b4a27cec882f 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -39,6 +39,13 @@ struct intel_dsi_device {
39struct intel_dsi_dev_ops { 39struct intel_dsi_dev_ops {
40 bool (*init)(struct intel_dsi_device *dsi); 40 bool (*init)(struct intel_dsi_device *dsi);
41 41
42 void (*panel_reset)(struct intel_dsi_device *dsi);
43
44 void (*disable_panel_power)(struct intel_dsi_device *dsi);
45
46 /* one time programmable commands if needed */
47 void (*send_otp_cmds)(struct intel_dsi_device *dsi);
48
42 /* This callback must be able to assume DSI commands can be sent */ 49 /* This callback must be able to assume DSI commands can be sent */
43 void (*enable)(struct intel_dsi_device *dsi); 50 void (*enable)(struct intel_dsi_device *dsi);
44 51
@@ -89,6 +96,20 @@ struct intel_dsi {
89 96
90 /* eot for MIPI_EOT_DISABLE register */ 97 /* eot for MIPI_EOT_DISABLE register */
91 u32 eot_disable; 98 u32 eot_disable;
99
100 u32 port_bits;
101 u32 bw_timer;
102 u32 dphy_reg;
103 u32 video_frmt_cfg_bits;
104 u16 lp_byte_clk;
105
106 /* timeouts in byte clocks */
107 u16 lp_rx_timeout;
108 u16 turn_arnd_val;
109 u16 rst_timer_val;
110 u16 hs_to_lp_count;
111 u16 clk_lp_to_hs_count;
112 u16 clk_hs_to_lp_count;
92}; 113};
93 114
94static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) 115static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 44279b2ade88..ba79ec19da3b 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -50,6 +50,8 @@ static const u32 lfsr_converts[] = {
50 71, 35 /* 91 - 92 */ 50 71, 35 /* 91 - 92 */
51}; 51};
52 52
53#ifdef DSI_CLK_FROM_RR
54
53static u32 dsi_rr_formula(const struct drm_display_mode *mode, 55static u32 dsi_rr_formula(const struct drm_display_mode *mode,
54 int pixel_format, int video_mode_format, 56 int pixel_format, int video_mode_format,
55 int lane_count, bool eotp) 57 int lane_count, bool eotp)
@@ -121,7 +123,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
121 123
122 /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */ 124 /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
123 dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8; 125 dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
124 dsi_clk = dsi_bit_clock_hz / (1000 * 1000); 126 dsi_clk = dsi_bit_clock_hz / 1000;
125 127
126 if (eotp && video_mode_format == VIDEO_MODE_BURST) 128 if (eotp && video_mode_format == VIDEO_MODE_BURST)
127 dsi_clk *= 2; 129 dsi_clk *= 2;
@@ -129,64 +131,37 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
129 return dsi_clk; 131 return dsi_clk;
130} 132}
131 133
132#ifdef MNP_FROM_TABLE 134#else
133
134struct dsi_clock_table {
135 u32 freq;
136 u8 m;
137 u8 p;
138};
139
140static const struct dsi_clock_table dsi_clk_tbl[] = {
141 {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
142 {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
143 {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
144 {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
145 {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
146 {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
147 {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
148 {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
149 {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
150 {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
151 {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
152 {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
153 {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
154 {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
155 {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
156 {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
157 {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
158 {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
159 {1000, 80, 2}, /* dsi clock frequency in Mhz*/
160};
161 135
162static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) 136/* Get DSI clock from pixel clock */
137static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
138 int pixel_format, int lane_count)
163{ 139{
164 unsigned int i; 140 u32 dsi_clk_khz;
165 u8 m; 141 u32 bpp;
166 u8 n;
167 u8 p;
168 u32 m_seed;
169
170 if (dsi_clk < 300 || dsi_clk > 1000)
171 return -ECHRNG;
172 142
173 for (i = 0; i <= ARRAY_SIZE(dsi_clk_tbl); i++) { 143 switch (pixel_format) {
174 if (dsi_clk_tbl[i].freq > dsi_clk) 144 default:
175 break; 145 case VID_MODE_FORMAT_RGB888:
146 case VID_MODE_FORMAT_RGB666_LOOSE:
147 bpp = 24;
148 break;
149 case VID_MODE_FORMAT_RGB666:
150 bpp = 18;
151 break;
152 case VID_MODE_FORMAT_RGB565:
153 bpp = 16;
154 break;
176 } 155 }
177 156
178 m = dsi_clk_tbl[i].m; 157 /* DSI data rate = pixel clock * bits per pixel / lane count
179 p = dsi_clk_tbl[i].p; 158 pixel clock is already in kHz, so the result is in kHz too */
180 m_seed = lfsr_converts[m - 62]; 159 dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
181 n = 1;
182 dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
183 dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
184 m_seed << DSI_PLL_M1_DIV_SHIFT;
185 160
186 return 0; 161 return dsi_clk_khz;
187} 162}
188 163
189#else 164#endif
190 165
191static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) 166static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
192{ 167{
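
The replacement helper keeps everything in kHz: DSI clock = pixel clock * bits per pixel / lane count, with DIV_ROUND_CLOSEST doing round-to-nearest division. A minimal standalone sketch of that arithmetic (the pixel clock, format and lane count below are illustrative, not taken from any real panel):

/*
 * Sketch of the dsi_clk_from_pclk() arithmetic in plain C; the sample
 * numbers are made up for illustration only.
 */
#include <stdio.h>

static unsigned int dsi_clk_khz(unsigned int pclk_khz, unsigned int bpp,
                                unsigned int lanes)
{
        /* round-to-nearest division, like DIV_ROUND_CLOSEST() */
        return (pclk_khz * bpp + lanes / 2) / lanes;
}

int main(void)
{
        /* e.g. a 148500 kHz pixel clock, RGB888 (24 bpp), 4 lanes */
        printf("%u kHz\n", dsi_clk_khz(148500, 24, 4));        /* 891000 */
        return 0;
}
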
@@ -194,36 +169,47 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
194 u32 ref_clk; 169 u32 ref_clk;
195 u32 error; 170 u32 error;
196 u32 tmp_error; 171 u32 tmp_error;
197 u32 target_dsi_clk; 172 int target_dsi_clk;
198 u32 calc_dsi_clk; 173 int calc_dsi_clk;
199 u32 calc_m; 174 u32 calc_m;
200 u32 calc_p; 175 u32 calc_p;
201 u32 m_seed; 176 u32 m_seed;
202 177
203 if (dsi_clk < 300 || dsi_clk > 1150) { 178 /* dsi_clk is expected in kHz */
179 if (dsi_clk < 300000 || dsi_clk > 1150000) {
204 DRM_ERROR("DSI CLK Out of Range\n"); 180 DRM_ERROR("DSI CLK Out of Range\n");
205 return -ECHRNG; 181 return -ECHRNG;
206 } 182 }
207 183
208 ref_clk = 25000; 184 ref_clk = 25000;
209 target_dsi_clk = dsi_clk * 1000; 185 target_dsi_clk = dsi_clk;
210 error = 0xFFFFFFFF; 186 error = 0xFFFFFFFF;
187 tmp_error = 0xFFFFFFFF;
211 calc_m = 0; 188 calc_m = 0;
212 calc_p = 0; 189 calc_p = 0;
213 190
214 for (m = 62; m <= 92; m++) { 191 for (m = 62; m <= 92; m++) {
215 for (p = 2; p <= 6; p++) { 192 for (p = 2; p <= 6; p++) {
216 193 /* Find the optimal m and p divisors
194 with minimal error +/- the required clock */
217 calc_dsi_clk = (m * ref_clk) / p; 195 calc_dsi_clk = (m * ref_clk) / p;
218 if (calc_dsi_clk >= target_dsi_clk) { 196 if (calc_dsi_clk == target_dsi_clk) {
219 tmp_error = calc_dsi_clk - target_dsi_clk; 197 calc_m = m;
220 if (tmp_error < error) { 198 calc_p = p;
221 error = tmp_error; 199 error = 0;
222 calc_m = m; 200 break;
223 calc_p = p; 201 } else
224 } 202 tmp_error = abs(target_dsi_clk - calc_dsi_clk);
203
204 if (tmp_error < error) {
205 error = tmp_error;
206 calc_m = m;
207 calc_p = p;
225 } 208 }
226 } 209 }
210
211 if (error == 0)
212 break;
227 } 213 }
228 214
229 m_seed = lfsr_converts[calc_m - 62]; 215 m_seed = lfsr_converts[calc_m - 62];
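
The rewritten dsi_calc_mnp() expects the target in kHz and brute-forces m in 62..92 and p in 2..6 for the smallest absolute error against m * ref_clk / p, stopping early on an exact hit. A self-contained sketch of that search, leaving out the LFSR m-seed translation the driver applies afterwards (the target value is illustrative):

/*
 * Sketch of the minimal-error m/p search (ref_clk = 25000 kHz), without
 * the LFSR m-seed encoding; the target frequency is made up.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const int ref_clk = 25000;      /* kHz */
        const int target = 513000;      /* kHz, illustrative */
        unsigned int best_err = ~0u;
        int best_m = 0, best_p = 0;

        for (int m = 62; m <= 92; m++) {
                for (int p = 2; p <= 6; p++) {
                        int calc = m * ref_clk / p;
                        unsigned int err = abs(target - calc);

                        if (err < best_err) {
                                best_err = err;
                                best_m = m;
                                best_p = p;
                        }
                }
        }
        printf("m=%d p=%d err=%u kHz\n", best_m, best_p, best_err);
        return 0;
}
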
@@ -235,8 +221,6 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
235 return 0; 221 return 0;
236} 222}
237 223
238#endif
239
240/* 224/*
241 * XXX: The muxing and gating is hard coded for now. Need to add support for 225 * XXX: The muxing and gating is hard coded for now. Need to add support for
242 * sharing PLLs with two DSI outputs. 226 * sharing PLLs with two DSI outputs.
@@ -251,9 +235,8 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
251 struct dsi_mnp dsi_mnp; 235 struct dsi_mnp dsi_mnp;
252 u32 dsi_clk; 236 u32 dsi_clk;
253 237
254 dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format, 238 dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
255 intel_dsi->video_mode_format, 239 intel_dsi->lane_count);
256 intel_dsi->lane_count, !intel_dsi->eot_disable);
257 240
258 ret = dsi_calc_mnp(dsi_clk, &dsi_mnp); 241 ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
259 if (ret) { 242 if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6a6ad0c78dc7..6db0d9d17f47 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -130,9 +130,9 @@ static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
130 130
131static void g4x_write_infoframe(struct drm_encoder *encoder, 131static void g4x_write_infoframe(struct drm_encoder *encoder,
132 enum hdmi_infoframe_type type, 132 enum hdmi_infoframe_type type,
133 const uint8_t *frame, ssize_t len) 133 const void *frame, ssize_t len)
134{ 134{
135 uint32_t *data = (uint32_t *)frame; 135 const uint32_t *data = frame;
136 struct drm_device *dev = encoder->dev; 136 struct drm_device *dev = encoder->dev;
137 struct drm_i915_private *dev_priv = dev->dev_private; 137 struct drm_i915_private *dev_priv = dev->dev_private;
138 u32 val = I915_READ(VIDEO_DIP_CTL); 138 u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -167,9 +167,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
167 167
168static void ibx_write_infoframe(struct drm_encoder *encoder, 168static void ibx_write_infoframe(struct drm_encoder *encoder,
169 enum hdmi_infoframe_type type, 169 enum hdmi_infoframe_type type,
170 const uint8_t *frame, ssize_t len) 170 const void *frame, ssize_t len)
171{ 171{
172 uint32_t *data = (uint32_t *)frame; 172 const uint32_t *data = frame;
173 struct drm_device *dev = encoder->dev; 173 struct drm_device *dev = encoder->dev;
174 struct drm_i915_private *dev_priv = dev->dev_private; 174 struct drm_i915_private *dev_priv = dev->dev_private;
175 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 175 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -205,9 +205,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
205 205
206static void cpt_write_infoframe(struct drm_encoder *encoder, 206static void cpt_write_infoframe(struct drm_encoder *encoder,
207 enum hdmi_infoframe_type type, 207 enum hdmi_infoframe_type type,
208 const uint8_t *frame, ssize_t len) 208 const void *frame, ssize_t len)
209{ 209{
210 uint32_t *data = (uint32_t *)frame; 210 const uint32_t *data = frame;
211 struct drm_device *dev = encoder->dev; 211 struct drm_device *dev = encoder->dev;
212 struct drm_i915_private *dev_priv = dev->dev_private; 212 struct drm_i915_private *dev_priv = dev->dev_private;
213 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 213 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -246,9 +246,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
246 246
247static void vlv_write_infoframe(struct drm_encoder *encoder, 247static void vlv_write_infoframe(struct drm_encoder *encoder,
248 enum hdmi_infoframe_type type, 248 enum hdmi_infoframe_type type,
249 const uint8_t *frame, ssize_t len) 249 const void *frame, ssize_t len)
250{ 250{
251 uint32_t *data = (uint32_t *)frame; 251 const uint32_t *data = frame;
252 struct drm_device *dev = encoder->dev; 252 struct drm_device *dev = encoder->dev;
253 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = dev->dev_private;
254 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 254 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -284,9 +284,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
284 284
285static void hsw_write_infoframe(struct drm_encoder *encoder, 285static void hsw_write_infoframe(struct drm_encoder *encoder,
286 enum hdmi_infoframe_type type, 286 enum hdmi_infoframe_type type,
287 const uint8_t *frame, ssize_t len) 287 const void *frame, ssize_t len)
288{ 288{
289 uint32_t *data = (uint32_t *)frame; 289 const uint32_t *data = frame;
290 struct drm_device *dev = encoder->dev; 290 struct drm_device *dev = encoder->dev;
291 struct drm_i915_private *dev_priv = dev->dev_private; 291 struct drm_i915_private *dev_priv = dev->dev_private;
292 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 292 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
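
Every *_write_infoframe() hook now takes const void *frame, so callers can hand in whatever buffer type they hold without a cast, and const is preserved on the uint32_t view of the payload. A small sketch of the same idiom, assuming a caller-provided word buffer (names and values are placeholders):

/*
 * Sketch: handing a const buffer through a void * parameter and reading
 * it as 32-bit words, as the infoframe writers now do.
 */
#include <stdint.h>
#include <stdio.h>

static void write_words(const void *frame, size_t len)
{
        const uint32_t *data = frame;   /* no cast needed, const preserved */

        for (size_t i = 0; i < len / 4; i++)
                printf("word %zu: 0x%08x\n", i, (unsigned int)data[i]);
}

int main(void)
{
        const uint32_t payload[2] = { 0x000d0282, 0x78563412 };

        write_words(payload, sizeof(payload));
        return 0;
}
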
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3deb58e2f394..8bcb93a2a9f6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -447,9 +447,19 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
447 if (dev_priv->modeset_restore == MODESET_DONE) 447 if (dev_priv->modeset_restore == MODESET_DONE)
448 goto exit; 448 goto exit;
449 449
450 drm_modeset_lock_all(dev); 450 /*
451 intel_modeset_setup_hw_state(dev, true); 451 * Some old platforms' BIOSes love to wreak havoc while the lid is closed.
452 drm_modeset_unlock_all(dev); 452 * We try to detect this here and undo any damage. The split for PCH
453 * platforms is rather conservative and a bit arbitrary, except that on
454 * those platforms VGA disabling requires actual legacy VGA I/O access,
455 * and as part of the cleanup in the hw state restore we also redisable
456 * the vga plane.
457 */
458 if (!HAS_PCH_SPLIT(dev)) {
459 drm_modeset_lock_all(dev);
460 intel_modeset_setup_hw_state(dev, true);
461 drm_modeset_unlock_all(dev);
462 }
453 463
454 dev_priv->modeset_restore = MODESET_DONE; 464 dev_priv->modeset_restore = MODESET_DONE;
455 465
@@ -745,57 +755,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
745 { } /* terminating entry */ 755 { } /* terminating entry */
746}; 756};
747 757
748/**
749 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
750 * @dev: drm device
751 * @connector: LVDS connector
752 *
753 * Find the reduced downclock for LVDS in EDID.
754 */
755static void intel_find_lvds_downclock(struct drm_device *dev,
756 struct drm_display_mode *fixed_mode,
757 struct drm_connector *connector)
758{
759 struct drm_i915_private *dev_priv = dev->dev_private;
760 struct drm_display_mode *scan;
761 int temp_downclock;
762
763 temp_downclock = fixed_mode->clock;
764 list_for_each_entry(scan, &connector->probed_modes, head) {
765 /*
766 * If one mode has the same resolution with the fixed_panel
767 * mode while they have the different refresh rate, it means
768 * that the reduced downclock is found for the LVDS. In such
769 * case we can set the different FPx0/1 to dynamically select
770 * between low and high frequency.
771 */
772 if (scan->hdisplay == fixed_mode->hdisplay &&
773 scan->hsync_start == fixed_mode->hsync_start &&
774 scan->hsync_end == fixed_mode->hsync_end &&
775 scan->htotal == fixed_mode->htotal &&
776 scan->vdisplay == fixed_mode->vdisplay &&
777 scan->vsync_start == fixed_mode->vsync_start &&
778 scan->vsync_end == fixed_mode->vsync_end &&
779 scan->vtotal == fixed_mode->vtotal) {
780 if (scan->clock < temp_downclock) {
781 /*
782 * The downclock is already found. But we
783 * expect to find the lower downclock.
784 */
785 temp_downclock = scan->clock;
786 }
787 }
788 }
789 if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
790 /* We found the downclock for LVDS. */
791 dev_priv->lvds_downclock_avail = 1;
792 dev_priv->lvds_downclock = temp_downclock;
793 DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
794 "Normal clock %dKhz, downclock %dKhz\n",
795 fixed_mode->clock, temp_downclock);
796 }
797}
798
799/* 758/*
800 * Enumerate the child dev array parsed from VBT to check whether 759 * Enumerate the child dev array parsed from VBT to check whether
801 * the LVDS is present. 760 * the LVDS is present.
@@ -1073,8 +1032,22 @@ void intel_lvds_init(struct drm_device *dev)
1073 1032
1074 fixed_mode = drm_mode_duplicate(dev, scan); 1033 fixed_mode = drm_mode_duplicate(dev, scan);
1075 if (fixed_mode) { 1034 if (fixed_mode) {
1076 intel_find_lvds_downclock(dev, fixed_mode, 1035 intel_connector->panel.downclock_mode =
1077 connector); 1036 intel_find_panel_downclock(dev,
1037 fixed_mode, connector);
1038 if (intel_connector->panel.downclock_mode !=
1039 NULL && i915_lvds_downclock) {
1040 /* We found the downclock for LVDS. */
1041 dev_priv->lvds_downclock_avail = true;
1042 dev_priv->lvds_downclock =
1043 intel_connector->panel.
1044 downclock_mode->clock;
1045 DRM_DEBUG_KMS("LVDS downclock is found"
1046 " in EDID. Normal clock %dKhz, "
1047 "downclock %dKhz\n",
1048 fixed_mode->clock,
1049 dev_priv->lvds_downclock);
1050 }
1078 goto out; 1051 goto out;
1079 } 1052 }
1080 } 1053 }
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index a8febbd3017b..3da259e280ba 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -64,7 +64,7 @@ struct opregion_header {
64 u8 driver_ver[16]; 64 u8 driver_ver[16];
65 u32 mboxes; 65 u32 mboxes;
66 u8 reserved[164]; 66 u8 reserved[164];
67} __attribute__((packed)); 67} __packed;
68 68
69/* OpRegion mailbox #1: public ACPI methods */ 69/* OpRegion mailbox #1: public ACPI methods */
70struct opregion_acpi { 70struct opregion_acpi {
@@ -86,7 +86,7 @@ struct opregion_acpi {
86 u32 cnot; /* current OS notification */ 86 u32 cnot; /* current OS notification */
87 u32 nrdy; /* driver status */ 87 u32 nrdy; /* driver status */
88 u8 rsvd2[60]; 88 u8 rsvd2[60];
89} __attribute__((packed)); 89} __packed;
90 90
91/* OpRegion mailbox #2: SWSCI */ 91/* OpRegion mailbox #2: SWSCI */
92struct opregion_swsci { 92struct opregion_swsci {
@@ -94,7 +94,7 @@ struct opregion_swsci {
94 u32 parm; /* command parameters */ 94 u32 parm; /* command parameters */
95 u32 dslp; /* driver sleep time-out */ 95 u32 dslp; /* driver sleep time-out */
96 u8 rsvd[244]; 96 u8 rsvd[244];
97} __attribute__((packed)); 97} __packed;
98 98
99/* OpRegion mailbox #3: ASLE */ 99/* OpRegion mailbox #3: ASLE */
100struct opregion_asle { 100struct opregion_asle {
@@ -115,7 +115,7 @@ struct opregion_asle {
115 u32 srot; /* supported rotation angles */ 115 u32 srot; /* supported rotation angles */
116 u32 iuer; /* IUER events */ 116 u32 iuer; /* IUER events */
117 u8 rsvd[86]; 117 u8 rsvd[86];
118} __attribute__((packed)); 118} __packed;
119 119
120/* Driver readiness indicator */ 120/* Driver readiness indicator */
121#define ASLE_ARDY_READY (1 << 0) 121#define ASLE_ARDY_READY (1 << 0)
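
__packed is simply the kernel's shorthand for __attribute__((packed)), so these conversions do not change the OpRegion layout. A userspace sketch of what packing means for structs that mirror fixed hardware layouts (field names and sizes are illustrative, typical of an LP64 build):

/*
 * Sketch: effect of packing on a struct mirroring a fixed hardware
 * layout; the fields are illustrative, not from the OpRegion spec.
 */
#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))

struct unpacked { uint8_t tag; uint32_t value; };
struct packed_s { uint8_t tag; uint32_t value; } __packed;

int main(void)
{
        /* typically prints "unpacked: 8 bytes, packed: 5 bytes" */
        printf("unpacked: %zu bytes, packed: %zu bytes\n",
               sizeof(struct unpacked), sizeof(struct packed_s));
        return 0;
}
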
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e480cf41c536..20ebc3e83d39 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -845,11 +845,14 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
845{ 845{
846 struct intel_connector *connector = bl_get_data(bd); 846 struct intel_connector *connector = bl_get_data(bd);
847 struct drm_device *dev = connector->base.dev; 847 struct drm_device *dev = connector->base.dev;
848 struct drm_i915_private *dev_priv = dev->dev_private;
848 int ret; 849 int ret;
849 850
851 intel_runtime_pm_get(dev_priv);
850 mutex_lock(&dev->mode_config.mutex); 852 mutex_lock(&dev->mode_config.mutex);
851 ret = intel_panel_get_backlight(connector); 853 ret = intel_panel_get_backlight(connector);
852 mutex_unlock(&dev->mode_config.mutex); 854 mutex_unlock(&dev->mode_config.mutex);
855 intel_runtime_pm_put(dev_priv);
853 856
854 return ret; 857 return ret;
855} 858}
@@ -1104,6 +1107,59 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
1104 intel_backlight_device_unregister(intel_connector); 1107 intel_backlight_device_unregister(intel_connector);
1105} 1108}
1106 1109
1110/**
1111 * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
1112 * @dev: drm device
1113 * @fixed_mode: panel native mode
1114 * @connector: LVDS/eDP connector
1115 *
1116 * Return: the duplicated downclock mode, or NULL if none is found
1117 * Find the reduced downclock for LVDS/eDP in EDID.
1118 */
1119struct drm_display_mode *
1120intel_find_panel_downclock(struct drm_device *dev,
1121 struct drm_display_mode *fixed_mode,
1122 struct drm_connector *connector)
1123{
1124 struct drm_display_mode *scan, *tmp_mode;
1125 int temp_downclock;
1126
1127 temp_downclock = fixed_mode->clock;
1128 tmp_mode = NULL;
1129
1130 list_for_each_entry(scan, &connector->probed_modes, head) {
1131 /*
1132 * If one mode has the same resolution as the fixed_panel
1133 * mode but a different refresh rate, it means
1134 * that the reduced downclock has been found. In such a
1135 * case we can set the different FPx0/1 to dynamically select
1136 * between low and high frequency.
1137 */
1138 if (scan->hdisplay == fixed_mode->hdisplay &&
1139 scan->hsync_start == fixed_mode->hsync_start &&
1140 scan->hsync_end == fixed_mode->hsync_end &&
1141 scan->htotal == fixed_mode->htotal &&
1142 scan->vdisplay == fixed_mode->vdisplay &&
1143 scan->vsync_start == fixed_mode->vsync_start &&
1144 scan->vsync_end == fixed_mode->vsync_end &&
1145 scan->vtotal == fixed_mode->vtotal) {
1146 if (scan->clock < temp_downclock) {
1147 /*
1148 * The downclock is already found. But we
1149 * expect to find the lower downclock.
1150 */
1151 temp_downclock = scan->clock;
1152 tmp_mode = scan;
1153 }
1154 }
1155 }
1156
1157 if (temp_downclock < fixed_mode->clock)
1158 return drm_mode_duplicate(dev, tmp_mode);
1159 else
1160 return NULL;
1161}
1162
1107/* Set up chip specific backlight functions */ 1163/* Set up chip specific backlight functions */
1108void intel_panel_init_backlight_funcs(struct drm_device *dev) 1164void intel_panel_init_backlight_funcs(struct drm_device *dev)
1109{ 1165{
@@ -1157,4 +1213,8 @@ void intel_panel_fini(struct intel_panel *panel)
1157 1213
1158 if (panel->fixed_mode) 1214 if (panel->fixed_mode)
1159 drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); 1215 drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
1216
1217 if (panel->downclock_mode)
1218 drm_mode_destroy(intel_connector->base.dev,
1219 panel->downclock_mode);
1160} 1220}
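
The backlight getter is the first consumer of the new runtime-PM helpers: take a runtime reference before touching the hardware, drop it (and mark the device busy) afterwards. A minimal model of that bracketing, with a fake wake refcount standing in for intel_runtime_pm_get()/intel_runtime_pm_put():

/*
 * Sketch: the get/access/put bracketing used by the backlight getter,
 * modelled in plain C; the refcount is a stand-in, not the driver code.
 */
#include <stdio.h>

static int wake_count;

static void rpm_get(void) { wake_count++; }     /* device guaranteed awake */
static void rpm_put(void) { wake_count--; }     /* may autosuspend later */

static int read_backlight(void)
{
        int val;

        rpm_get();
        /* ...take the modeset lock and read the hardware register... */
        val = 42;       /* placeholder for the register value */
        rpm_put();

        return val;
}

int main(void)
{
        printf("brightness=%d wake_count=%d\n", read_backlight(), wake_count);
        return 0;
}
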
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e6d98fe86b17..04b28f906f9e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,7 +30,9 @@
30#include "intel_drv.h" 30#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h" 31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/vgaarb.h>
33#include <drm/i915_powerwell.h> 34#include <drm/i915_powerwell.h>
35#include <linux/pm_runtime.h>
34 36
35/** 37/**
36 * RC6 is a special power stage which allows the GPU to enter an very 38 * RC6 is a special power stage which allows the GPU to enter an very
@@ -86,7 +88,7 @@ static void i8xx_disable_fbc(struct drm_device *dev)
86 DRM_DEBUG_KMS("disabled FBC\n"); 88 DRM_DEBUG_KMS("disabled FBC\n");
87} 89}
88 90
89static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 91static void i8xx_enable_fbc(struct drm_crtc *crtc)
90{ 92{
91 struct drm_device *dev = crtc->dev; 93 struct drm_device *dev = crtc->dev;
92 struct drm_i915_private *dev_priv = dev->dev_private; 94 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -96,32 +98,40 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
96 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 98 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
97 int cfb_pitch; 99 int cfb_pitch;
98 int plane, i; 100 int plane, i;
99 u32 fbc_ctl, fbc_ctl2; 101 u32 fbc_ctl;
100 102
101 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; 103 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
102 if (fb->pitches[0] < cfb_pitch) 104 if (fb->pitches[0] < cfb_pitch)
103 cfb_pitch = fb->pitches[0]; 105 cfb_pitch = fb->pitches[0];
104 106
105 /* FBC_CTL wants 64B units */ 107 /* FBC_CTL wants 32B or 64B units */
106 cfb_pitch = (cfb_pitch / 64) - 1; 108 if (IS_GEN2(dev))
109 cfb_pitch = (cfb_pitch / 32) - 1;
110 else
111 cfb_pitch = (cfb_pitch / 64) - 1;
107 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; 112 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
108 113
109 /* Clear old tags */ 114 /* Clear old tags */
110 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 115 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
111 I915_WRITE(FBC_TAG + (i * 4), 0); 116 I915_WRITE(FBC_TAG + (i * 4), 0);
112 117
113 /* Set it up... */ 118 if (IS_GEN4(dev)) {
114 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; 119 u32 fbc_ctl2;
115 fbc_ctl2 |= plane; 120
116 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 121 /* Set it up... */
117 I915_WRITE(FBC_FENCE_OFF, crtc->y); 122 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
123 fbc_ctl2 |= plane;
124 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
125 I915_WRITE(FBC_FENCE_OFF, crtc->y);
126 }
118 127
119 /* enable it... */ 128 /* enable it... */
120 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 129 fbc_ctl = I915_READ(FBC_CONTROL);
130 fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
131 fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
121 if (IS_I945GM(dev)) 132 if (IS_I945GM(dev))
122 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 133 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
123 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 134 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
124 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
125 fbc_ctl |= obj->fence_reg; 135 fbc_ctl |= obj->fence_reg;
126 I915_WRITE(FBC_CONTROL, fbc_ctl); 136 I915_WRITE(FBC_CONTROL, fbc_ctl);
127 137
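
Two FBC_CONTROL details change in this hunk: on gen2 the CFB stride field is programmed in 32-byte units rather than 64, and the interval bits are preserved from the existing register value instead of being rewritten on every enable (they are now set once at init, see the intel_init_pm() hunk below). A quick arithmetic sketch of the stride-field encoding (the 2048-byte pitch is just an example):

/*
 * Sketch: FBC_CTL stride-field encoding, 32-byte units on gen2 versus
 * 64-byte units on later platforms; the pitch value is illustrative.
 */
#include <stdio.h>

int main(void)
{
        int cfb_pitch = 2048;   /* bytes, illustrative */

        printf("gen2 stride field:  %d\n", (cfb_pitch / 32) - 1);  /* 63 */
        printf("gen3+ stride field: %d\n", (cfb_pitch / 64) - 1);  /* 31 */
        return 0;
}
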
@@ -136,7 +146,7 @@ static bool i8xx_fbc_enabled(struct drm_device *dev)
136 return I915_READ(FBC_CONTROL) & FBC_CTL_EN; 146 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
137} 147}
138 148
139static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 149static void g4x_enable_fbc(struct drm_crtc *crtc)
140{ 150{
141 struct drm_device *dev = crtc->dev; 151 struct drm_device *dev = crtc->dev;
142 struct drm_i915_private *dev_priv = dev->dev_private; 152 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -145,16 +155,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
145 struct drm_i915_gem_object *obj = intel_fb->obj; 155 struct drm_i915_gem_object *obj = intel_fb->obj;
146 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 156 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
147 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 157 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
148 unsigned long stall_watermark = 200;
149 u32 dpfc_ctl; 158 u32 dpfc_ctl;
150 159
151 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 160 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
152 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; 161 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
153 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); 162 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
154 163
155 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
156 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
157 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
158 I915_WRITE(DPFC_FENCE_YOFF, crtc->y); 164 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
159 165
160 /* enable it... */ 166 /* enable it... */
@@ -210,7 +216,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
210 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA); 216 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
211} 217}
212 218
213static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 219static void ironlake_enable_fbc(struct drm_crtc *crtc)
214{ 220{
215 struct drm_device *dev = crtc->dev; 221 struct drm_device *dev = crtc->dev;
216 struct drm_i915_private *dev_priv = dev->dev_private; 222 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -219,7 +225,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
219 struct drm_i915_gem_object *obj = intel_fb->obj; 225 struct drm_i915_gem_object *obj = intel_fb->obj;
220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 226 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
221 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 227 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
222 unsigned long stall_watermark = 200;
223 u32 dpfc_ctl; 228 u32 dpfc_ctl;
224 229
225 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 230 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
@@ -232,9 +237,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
232 dpfc_ctl |= obj->fence_reg; 237 dpfc_ctl |= obj->fence_reg;
233 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); 238 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
234 239
235 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
236 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
237 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
238 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 240 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
239 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); 241 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
240 /* enable it... */ 242 /* enable it... */
@@ -272,7 +274,7 @@ static bool ironlake_fbc_enabled(struct drm_device *dev)
272 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; 274 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
273} 275}
274 276
275static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 277static void gen7_enable_fbc(struct drm_crtc *crtc)
276{ 278{
277 struct drm_device *dev = crtc->dev; 279 struct drm_device *dev = crtc->dev;
278 struct drm_i915_private *dev_priv = dev->dev_private; 280 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -329,8 +331,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
329 * the prior work. 331 * the prior work.
330 */ 332 */
331 if (work->crtc->fb == work->fb) { 333 if (work->crtc->fb == work->fb) {
332 dev_priv->display.enable_fbc(work->crtc, 334 dev_priv->display.enable_fbc(work->crtc);
333 work->interval);
334 335
335 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; 336 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
336 dev_priv->fbc.fb_id = work->crtc->fb->base.id; 337 dev_priv->fbc.fb_id = work->crtc->fb->base.id;
@@ -367,7 +368,7 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
367 dev_priv->fbc.fbc_work = NULL; 368 dev_priv->fbc.fbc_work = NULL;
368} 369}
369 370
370static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 371static void intel_enable_fbc(struct drm_crtc *crtc)
371{ 372{
372 struct intel_fbc_work *work; 373 struct intel_fbc_work *work;
373 struct drm_device *dev = crtc->dev; 374 struct drm_device *dev = crtc->dev;
@@ -381,13 +382,12 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
381 work = kzalloc(sizeof(*work), GFP_KERNEL); 382 work = kzalloc(sizeof(*work), GFP_KERNEL);
382 if (work == NULL) { 383 if (work == NULL) {
383 DRM_ERROR("Failed to allocate FBC work structure\n"); 384 DRM_ERROR("Failed to allocate FBC work structure\n");
384 dev_priv->display.enable_fbc(crtc, interval); 385 dev_priv->display.enable_fbc(crtc);
385 return; 386 return;
386 } 387 }
387 388
388 work->crtc = crtc; 389 work->crtc = crtc;
389 work->fb = crtc->fb; 390 work->fb = crtc->fb;
390 work->interval = interval;
391 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); 391 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
392 392
393 dev_priv->fbc.fbc_work = work; 393 dev_priv->fbc.fbc_work = work;
@@ -537,10 +537,10 @@ void intel_update_fbc(struct drm_device *dev)
537 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 537 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
538 goto out_disable; 538 goto out_disable;
539 } 539 }
540 if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) && 540 if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
541 intel_crtc->plane != 0) { 541 intel_crtc->plane != PLANE_A) {
542 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) 542 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
543 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 543 DRM_DEBUG_KMS("plane not A, disabling compression\n");
544 goto out_disable; 544 goto out_disable;
545 } 545 }
546 546
@@ -602,7 +602,7 @@ void intel_update_fbc(struct drm_device *dev)
602 intel_disable_fbc(dev); 602 intel_disable_fbc(dev);
603 } 603 }
604 604
605 intel_enable_fbc(crtc, 500); 605 intel_enable_fbc(crtc);
606 dev_priv->fbc.no_fbc_reason = FBC_OK; 606 dev_priv->fbc.no_fbc_reason = FBC_OK;
607 return; 607 return;
608 608
@@ -5257,19 +5257,33 @@ static void gen8_init_clock_gating(struct drm_device *dev)
5257 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 5257 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5258 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE)); 5258 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
5259 5259
5260 /* WaSwitchSolVfFArbitrationPriority */ 5260 /* WaSwitchSolVfFArbitrationPriority:bdw */
5261 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 5261 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5262 5262
5263 /* WaPsrDPAMaskVBlankInSRD */ 5263 /* WaPsrDPAMaskVBlankInSRD:bdw */
5264 I915_WRITE(CHICKEN_PAR1_1, 5264 I915_WRITE(CHICKEN_PAR1_1,
5265 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 5265 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5266 5266
5267 /* WaPsrDPRSUnmaskVBlankInSRD */ 5267 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
5268 for_each_pipe(i) { 5268 for_each_pipe(i) {
5269 I915_WRITE(CHICKEN_PIPESL_1(i), 5269 I915_WRITE(CHICKEN_PIPESL_1(i),
5270 I915_READ(CHICKEN_PIPESL_1(i) | 5270 I915_READ(CHICKEN_PIPESL_1(i) |
5271 DPRS_MASK_VBLANK_SRD)); 5271 DPRS_MASK_VBLANK_SRD));
5272 } 5272 }
5273
5274 /* Use Force Non-Coherent whenever executing a 3D context. This is a
5275 * workaround for a possible hang in the unlikely event a TLB
5276 * invalidation occurs during a PSD flush.
5277 */
5278 I915_WRITE(HDC_CHICKEN0,
5279 I915_READ(HDC_CHICKEN0) |
5280 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
5281
5282 /* WaVSRefCountFullforceMissDisable:bdw */
5283 /* WaDSRefCountFullforceMissDisable:bdw */
5284 I915_WRITE(GEN7_FF_THREAD_MODE,
5285 I915_READ(GEN7_FF_THREAD_MODE) &
5286 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5273} 5287}
5274 5288
5275static void haswell_init_clock_gating(struct drm_device *dev) 5289static void haswell_init_clock_gating(struct drm_device *dev)
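
The HDC_CHICKEN0 write uses the i915 masked-bit convention (_MASKED_BIT_ENABLE), where the upper 16 bits of the written value select which of the lower 16 bits take effect, so the hardware does not need a read-modify-write cycle on such registers. A standalone sketch of that encoding (the bit position is illustrative):

/*
 * Sketch: "masked bit" encoding - the top 16 bits of the value are a
 * write-enable mask for the bottom 16 bits.
 */
#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(b)    (((uint32_t)(b) << 16) | (b))
#define MASKED_BIT_DISABLE(b)   ((uint32_t)(b) << 16)

int main(void)
{
        uint32_t bit = 1u << 4; /* illustrative chicken bit */

        printf("enable:  0x%08x\n", (unsigned int)MASKED_BIT_ENABLE(bit));
        printf("disable: 0x%08x\n", (unsigned int)MASKED_BIT_DISABLE(bit));
        return 0;
}
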
@@ -5681,14 +5695,71 @@ bool intel_display_power_enabled(struct drm_device *dev,
5681 return is_enabled; 5695 return is_enabled;
5682} 5696}
5683 5697
5698static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5699{
5700 struct drm_device *dev = dev_priv->dev;
5701 unsigned long irqflags;
5702
5703 /*
5704 * After we re-enable the power well, if we touch VGA register 0x3d5
5705 * we'll get unclaimed register interrupts. This stops after we write
5706 * anything to the VGA MSR register. The vgacon module uses this
5707 * register all the time, so if we unbind our driver and, as a
5708 * consequence, bind vgacon, we'll get stuck in an infinite loop at
5709 * console_unlock(). So we touch the VGA MSR register here, making
5710 * sure vgacon can keep working normally without triggering interrupts
5711 * and error messages.
5712 */
5713 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
5714 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
5715 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
5716
5717 if (IS_BROADWELL(dev)) {
5718 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5719 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5720 dev_priv->de_irq_mask[PIPE_B]);
5721 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5722 ~dev_priv->de_irq_mask[PIPE_B] |
5723 GEN8_PIPE_VBLANK);
5724 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5725 dev_priv->de_irq_mask[PIPE_C]);
5726 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5727 ~dev_priv->de_irq_mask[PIPE_C] |
5728 GEN8_PIPE_VBLANK);
5729 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5730 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5731 }
5732}
5733
5734static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
5735{
5736 struct drm_device *dev = dev_priv->dev;
5737 enum pipe p;
5738 unsigned long irqflags;
5739
5740 /*
5741 * After this, the registers on the pipes that are part of the power
5742 * well will become zero, so we have to adjust our counters according to
5743 * that.
5744 *
5745 * FIXME: Should we do this in general in drm_vblank_post_modeset?
5746 */
5747 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5748 for_each_pipe(p)
5749 if (p != PIPE_A)
5750 dev->vblank[p].last = 0;
5751 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5752}
5753
5684static void hsw_set_power_well(struct drm_device *dev, 5754static void hsw_set_power_well(struct drm_device *dev,
5685 struct i915_power_well *power_well, bool enable) 5755 struct i915_power_well *power_well, bool enable)
5686{ 5756{
5687 struct drm_i915_private *dev_priv = dev->dev_private; 5757 struct drm_i915_private *dev_priv = dev->dev_private;
5688 bool is_enabled, enable_requested; 5758 bool is_enabled, enable_requested;
5689 unsigned long irqflags;
5690 uint32_t tmp; 5759 uint32_t tmp;
5691 5760
5761 WARN_ON(dev_priv->pc8.enabled);
5762
5692 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 5763 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5693 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; 5764 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5694 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; 5765 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5705,42 +5776,14 @@ static void hsw_set_power_well(struct drm_device *dev,
5705 DRM_ERROR("Timeout enabling power well\n"); 5776 DRM_ERROR("Timeout enabling power well\n");
5706 } 5777 }
5707 5778
5708 if (IS_BROADWELL(dev)) { 5779 hsw_power_well_post_enable(dev_priv);
5709 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5710 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5711 dev_priv->de_irq_mask[PIPE_B]);
5712 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5713 ~dev_priv->de_irq_mask[PIPE_B] |
5714 GEN8_PIPE_VBLANK);
5715 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5716 dev_priv->de_irq_mask[PIPE_C]);
5717 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5718 ~dev_priv->de_irq_mask[PIPE_C] |
5719 GEN8_PIPE_VBLANK);
5720 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5721 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5722 }
5723 } else { 5780 } else {
5724 if (enable_requested) { 5781 if (enable_requested) {
5725 enum pipe p;
5726
5727 I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 5782 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
5728 POSTING_READ(HSW_PWR_WELL_DRIVER); 5783 POSTING_READ(HSW_PWR_WELL_DRIVER);
5729 DRM_DEBUG_KMS("Requesting to disable the power well\n"); 5784 DRM_DEBUG_KMS("Requesting to disable the power well\n");
5730 5785
5731 /* 5786 hsw_power_well_post_disable(dev_priv);
5732 * After this, the registers on the pipes that are part
5733 * of the power well will become zero, so we have to
5734 * adjust our counters according to that.
5735 *
5736 * FIXME: Should we do this in general in
5737 * drm_vblank_post_modeset?
5738 */
5739 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5740 for_each_pipe(p)
5741 if (p != PIPE_A)
5742 dev->vblank[p].last = 0;
5743 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5744 } 5787 }
5745 } 5788 }
5746} 5789}
@@ -5748,17 +5791,26 @@ static void hsw_set_power_well(struct drm_device *dev,
5748static void __intel_power_well_get(struct drm_device *dev, 5791static void __intel_power_well_get(struct drm_device *dev,
5749 struct i915_power_well *power_well) 5792 struct i915_power_well *power_well)
5750{ 5793{
5751 if (!power_well->count++ && power_well->set) 5794 struct drm_i915_private *dev_priv = dev->dev_private;
5795
5796 if (!power_well->count++ && power_well->set) {
5797 hsw_disable_package_c8(dev_priv);
5752 power_well->set(dev, power_well, true); 5798 power_well->set(dev, power_well, true);
5799 }
5753} 5800}
5754 5801
5755static void __intel_power_well_put(struct drm_device *dev, 5802static void __intel_power_well_put(struct drm_device *dev,
5756 struct i915_power_well *power_well) 5803 struct i915_power_well *power_well)
5757{ 5804{
5805 struct drm_i915_private *dev_priv = dev->dev_private;
5806
5758 WARN_ON(!power_well->count); 5807 WARN_ON(!power_well->count);
5759 5808
5760 if (!--power_well->count && power_well->set && i915_disable_power_well) 5809 if (!--power_well->count && power_well->set &&
5810 i915_disable_power_well) {
5761 power_well->set(dev, power_well, false); 5811 power_well->set(dev, power_well, false);
5812 hsw_enable_package_c8(dev_priv);
5813 }
5762} 5814}
5763 5815
5764void intel_display_power_get(struct drm_device *dev, 5816void intel_display_power_get(struct drm_device *dev,
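
The power-well refcounting now also brackets package C8: the first reference disables C8 before the well is powered up, and the last reference re-enables it after power-down. The pattern in isolation (a toy model, not driver code):

/*
 * Sketch: first-get does the expensive enable, last-put the disable,
 * as the power-well refcounting now does around package C8.
 */
#include <stdio.h>

static int count;

static void well_get(void)
{
        if (!count++)
                printf("disable package C8, power the well up\n");
}

static void well_put(void)
{
        if (!--count)
                printf("power the well down, re-enable package C8\n");
}

int main(void)
{
        well_get();     /* does the real work */
        well_get();     /* refcount only */
        well_put();     /* refcount only */
        well_put();     /* does the real work */
        return 0;
}
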
@@ -5951,31 +6003,86 @@ void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5951 hsw_enable_package_c8(dev_priv); 6003 hsw_enable_package_c8(dev_priv);
5952} 6004}
5953 6005
6006void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
6007{
6008 struct drm_device *dev = dev_priv->dev;
6009 struct device *device = &dev->pdev->dev;
6010
6011 if (!HAS_RUNTIME_PM(dev))
6012 return;
6013
6014 pm_runtime_get_sync(device);
6015 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
6016}
6017
6018void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
6019{
6020 struct drm_device *dev = dev_priv->dev;
6021 struct device *device = &dev->pdev->dev;
6022
6023 if (!HAS_RUNTIME_PM(dev))
6024 return;
6025
6026 pm_runtime_mark_last_busy(device);
6027 pm_runtime_put_autosuspend(device);
6028}
6029
6030void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
6031{
6032 struct drm_device *dev = dev_priv->dev;
6033 struct device *device = &dev->pdev->dev;
6034
6035 dev_priv->pm.suspended = false;
6036
6037 if (!HAS_RUNTIME_PM(dev))
6038 return;
6039
6040 pm_runtime_set_active(device);
6041
6042 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
6043 pm_runtime_mark_last_busy(device);
6044 pm_runtime_use_autosuspend(device);
6045}
6046
6047void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
6048{
6049 struct drm_device *dev = dev_priv->dev;
6050 struct device *device = &dev->pdev->dev;
6051
6052 if (!HAS_RUNTIME_PM(dev))
6053 return;
6054
6055 /* Make sure we're not suspended first. */
6056 pm_runtime_get_sync(device);
6057 pm_runtime_disable(device);
6058}
6059
5954/* Set up chip specific power management-related functions */ 6060/* Set up chip specific power management-related functions */
5955void intel_init_pm(struct drm_device *dev) 6061void intel_init_pm(struct drm_device *dev)
5956{ 6062{
5957 struct drm_i915_private *dev_priv = dev->dev_private; 6063 struct drm_i915_private *dev_priv = dev->dev_private;
5958 6064
5959 if (I915_HAS_FBC(dev)) { 6065 if (I915_HAS_FBC(dev)) {
5960 if (HAS_PCH_SPLIT(dev)) { 6066 if (INTEL_INFO(dev)->gen >= 7) {
5961 dev_priv->display.fbc_enabled = ironlake_fbc_enabled; 6067 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
5962 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 6068 dev_priv->display.enable_fbc = gen7_enable_fbc;
5963 dev_priv->display.enable_fbc = 6069 dev_priv->display.disable_fbc = ironlake_disable_fbc;
5964 gen7_enable_fbc; 6070 } else if (INTEL_INFO(dev)->gen >= 5) {
5965 else 6071 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
5966 dev_priv->display.enable_fbc = 6072 dev_priv->display.enable_fbc = ironlake_enable_fbc;
5967 ironlake_enable_fbc;
5968 dev_priv->display.disable_fbc = ironlake_disable_fbc; 6073 dev_priv->display.disable_fbc = ironlake_disable_fbc;
5969 } else if (IS_GM45(dev)) { 6074 } else if (IS_GM45(dev)) {
5970 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 6075 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5971 dev_priv->display.enable_fbc = g4x_enable_fbc; 6076 dev_priv->display.enable_fbc = g4x_enable_fbc;
5972 dev_priv->display.disable_fbc = g4x_disable_fbc; 6077 dev_priv->display.disable_fbc = g4x_disable_fbc;
5973 } else if (IS_CRESTLINE(dev)) { 6078 } else {
5974 dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 6079 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
5975 dev_priv->display.enable_fbc = i8xx_enable_fbc; 6080 dev_priv->display.enable_fbc = i8xx_enable_fbc;
5976 dev_priv->display.disable_fbc = i8xx_disable_fbc; 6081 dev_priv->display.disable_fbc = i8xx_disable_fbc;
6082
6083 /* This value was pulled out of someone's hat */
6084 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
5977 } 6085 }
5978 /* 855GM needs testing */
5979 } 6086 }
5980 6087
5981 /* For cxsr */ 6088 /* For cxsr */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2abeab09e883..95bdfb3c431c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -952,7 +952,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
952 952
953static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, 953static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
954 unsigned if_index, uint8_t tx_rate, 954 unsigned if_index, uint8_t tx_rate,
955 uint8_t *data, unsigned length) 955 const uint8_t *data, unsigned length)
956{ 956{
957 uint8_t set_buf_index[2] = { if_index, 0 }; 957 uint8_t set_buf_index[2] = { if_index, 0 };
958 uint8_t hbuf_size, tmp[8]; 958 uint8_t hbuf_size, tmp[8];
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 770bdd6ecd9f..2e2d4eb4a00d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -59,7 +59,7 @@ struct intel_sdvo_caps {
59 unsigned int stall_support:1; 59 unsigned int stall_support:1;
60 unsigned int pad:1; 60 unsigned int pad:1;
61 u16 output_flags; 61 u16 output_flags;
62} __attribute__((packed)); 62} __packed;
63 63
64/* Note: SDVO detailed timing flags match EDID misc flags. */ 64/* Note: SDVO detailed timing flags match EDID misc flags. */
65#define DTD_FLAG_HSYNC_POSITIVE (1 << 1) 65#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
@@ -94,12 +94,12 @@ struct intel_sdvo_dtd {
94 u8 v_sync_off_high; 94 u8 v_sync_off_high;
95 u8 reserved; 95 u8 reserved;
96 } part2; 96 } part2;
97} __attribute__((packed)); 97} __packed;
98 98
99struct intel_sdvo_pixel_clock_range { 99struct intel_sdvo_pixel_clock_range {
100 u16 min; /**< pixel clock, in 10kHz units */ 100 u16 min; /**< pixel clock, in 10kHz units */
101 u16 max; /**< pixel clock, in 10kHz units */ 101 u16 max; /**< pixel clock, in 10kHz units */
102} __attribute__((packed)); 102} __packed;
103 103
104struct intel_sdvo_preferred_input_timing_args { 104struct intel_sdvo_preferred_input_timing_args {
105 u16 clock; 105 u16 clock;
@@ -108,7 +108,7 @@ struct intel_sdvo_preferred_input_timing_args {
108 u8 interlace:1; 108 u8 interlace:1;
109 u8 scaled:1; 109 u8 scaled:1;
110 u8 pad:6; 110 u8 pad:6;
111} __attribute__((packed)); 111} __packed;
112 112
113/* I2C registers for SDVO */ 113/* I2C registers for SDVO */
114#define SDVO_I2C_ARG_0 0x07 114#define SDVO_I2C_ARG_0 0x07
@@ -162,7 +162,7 @@ struct intel_sdvo_get_trained_inputs_response {
162 unsigned int input0_trained:1; 162 unsigned int input0_trained:1;
163 unsigned int input1_trained:1; 163 unsigned int input1_trained:1;
164 unsigned int pad:6; 164 unsigned int pad:6;
165} __attribute__((packed)); 165} __packed;
166 166
167/** Returns a struct intel_sdvo_output_flags of active outputs. */ 167/** Returns a struct intel_sdvo_output_flags of active outputs. */
168#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 168#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
@@ -219,7 +219,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
219 unsigned int ambient_light_interrupt:1; 219 unsigned int ambient_light_interrupt:1;
220 unsigned int hdmi_audio_encrypt_change:1; 220 unsigned int hdmi_audio_encrypt_change:1;
221 unsigned int pad:6; 221 unsigned int pad:6;
222} __attribute__((packed)); 222} __packed;
223 223
224/** 224/**
225 * Selects which input is affected by future input commands. 225 * Selects which input is affected by future input commands.
@@ -232,7 +232,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
232struct intel_sdvo_set_target_input_args { 232struct intel_sdvo_set_target_input_args {
233 unsigned int target_1:1; 233 unsigned int target_1:1;
234 unsigned int pad:7; 234 unsigned int pad:7;
235} __attribute__((packed)); 235} __packed;
236 236
237/** 237/**
238 * Takes a struct intel_sdvo_output_flags of which outputs are targeted by 238 * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
@@ -370,7 +370,7 @@ struct intel_sdvo_tv_format {
370 unsigned int hdtv_std_eia_7702a_480i_60:1; 370 unsigned int hdtv_std_eia_7702a_480i_60:1;
371 unsigned int hdtv_std_eia_7702a_480p_60:1; 371 unsigned int hdtv_std_eia_7702a_480p_60:1;
372 unsigned int pad:3; 372 unsigned int pad:3;
373} __attribute__((packed)); 373} __packed;
374 374
375#define SDVO_CMD_GET_TV_FORMAT 0x28 375#define SDVO_CMD_GET_TV_FORMAT 0x28
376 376
@@ -401,7 +401,7 @@ struct intel_sdvo_sdtv_resolution_request {
401 unsigned int secam_l:1; 401 unsigned int secam_l:1;
402 unsigned int secam_60:1; 402 unsigned int secam_60:1;
403 unsigned int pad:5; 403 unsigned int pad:5;
404} __attribute__((packed)); 404} __packed;
405 405
406struct intel_sdvo_sdtv_resolution_reply { 406struct intel_sdvo_sdtv_resolution_reply {
407 unsigned int res_320x200:1; 407 unsigned int res_320x200:1;
@@ -426,7 +426,7 @@ struct intel_sdvo_sdtv_resolution_reply {
426 unsigned int res_1024x768:1; 426 unsigned int res_1024x768:1;
427 unsigned int res_1280x1024:1; 427 unsigned int res_1280x1024:1;
428 unsigned int pad:5; 428 unsigned int pad:5;
429} __attribute__((packed)); 429} __packed;
430 430
431/* Get supported resolution with squire pixel aspect ratio that can be 431/* Get supported resolution with squire pixel aspect ratio that can be
432 scaled for the requested HDTV format */ 432 scaled for the requested HDTV format */
@@ -463,7 +463,7 @@ struct intel_sdvo_hdtv_resolution_request {
463 unsigned int hdtv_std_eia_7702a_480i_60:1; 463 unsigned int hdtv_std_eia_7702a_480i_60:1;
464 unsigned int hdtv_std_eia_7702a_480p_60:1; 464 unsigned int hdtv_std_eia_7702a_480p_60:1;
465 unsigned int pad:6; 465 unsigned int pad:6;
466} __attribute__((packed)); 466} __packed;
467 467
468struct intel_sdvo_hdtv_resolution_reply { 468struct intel_sdvo_hdtv_resolution_reply {
469 unsigned int res_640x480:1; 469 unsigned int res_640x480:1;
@@ -517,7 +517,7 @@ struct intel_sdvo_hdtv_resolution_reply {
517 517
518 unsigned int res_1280x768:1; 518 unsigned int res_1280x768:1;
519 unsigned int pad5:7; 519 unsigned int pad5:7;
520} __attribute__((packed)); 520} __packed;
521 521
522/* Get supported power state returns info for encoder and monitor, rely on 522/* Get supported power state returns info for encoder and monitor, rely on
523 last SetTargetInput and SetTargetOutput calls */ 523 last SetTargetInput and SetTargetOutput calls */
@@ -557,13 +557,13 @@ struct sdvo_panel_power_sequencing {
557 557
558 unsigned int t4_high:2; 558 unsigned int t4_high:2;
559 unsigned int pad:6; 559 unsigned int pad:6;
560} __attribute__((packed)); 560} __packed;
561 561
562#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 562#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
563struct sdvo_max_backlight_reply { 563struct sdvo_max_backlight_reply {
564 u8 max_value; 564 u8 max_value;
565 u8 default_value; 565 u8 default_value;
566} __attribute__((packed)); 566} __packed;
567 567
568#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 568#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
569#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32 569#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
@@ -573,14 +573,14 @@ struct sdvo_get_ambient_light_reply {
573 u16 trip_low; 573 u16 trip_low;
574 u16 trip_high; 574 u16 trip_high;
575 u16 value; 575 u16 value;
576} __attribute__((packed)); 576} __packed;
577#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 577#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
578struct sdvo_set_ambient_light_reply { 578struct sdvo_set_ambient_light_reply {
579 u16 trip_low; 579 u16 trip_low;
580 u16 trip_high; 580 u16 trip_high;
581 unsigned int enable:1; 581 unsigned int enable:1;
582 unsigned int pad:7; 582 unsigned int pad:7;
583} __attribute__((packed)); 583} __packed;
584 584
585/* Set display power state */ 585/* Set display power state */
586#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d 586#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
@@ -608,7 +608,7 @@ struct intel_sdvo_enhancements_reply {
608 unsigned int dither:1; 608 unsigned int dither:1;
609 unsigned int tv_chroma_filter:1; 609 unsigned int tv_chroma_filter:1;
610 unsigned int tv_luma_filter:1; 610 unsigned int tv_luma_filter:1;
611} __attribute__((packed)); 611} __packed;
612 612
613/* Picture enhancement limits below are dependent on the current TV format, 613/* Picture enhancement limits below are dependent on the current TV format,
614 * and thus need to be queried and set after it. 614 * and thus need to be queried and set after it.
@@ -630,7 +630,7 @@ struct intel_sdvo_enhancements_reply {
630struct intel_sdvo_enhancement_limits_reply { 630struct intel_sdvo_enhancement_limits_reply {
631 u16 max_value; 631 u16 max_value;
632 u16 default_value; 632 u16 default_value;
633} __attribute__((packed)); 633} __packed;
634 634
635#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f 635#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
636#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80 636#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
@@ -671,7 +671,7 @@ struct intel_sdvo_enhancement_limits_reply {
671#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 671#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
672struct intel_sdvo_enhancements_arg { 672struct intel_sdvo_enhancements_arg {
673 u16 value; 673 u16 value;
674} __attribute__((packed)); 674} __packed;
675 675
676#define SDVO_CMD_GET_DOT_CRAWL 0x70 676#define SDVO_CMD_GET_DOT_CRAWL 0x70
677#define SDVO_CMD_SET_DOT_CRAWL 0x71 677#define SDVO_CMD_SET_DOT_CRAWL 0x71
@@ -727,4 +727,4 @@ struct intel_sdvo_enhancements_arg {
727struct intel_sdvo_encode { 727struct intel_sdvo_encode {
728 u8 dvi_rev; 728 u8 dvi_rev;
729 u8 hdmi_rev; 729 u8 hdmi_rev;
730} __attribute__ ((packed)); 730} __packed;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index cc6fbcde7d3d..0954f132726e 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -249,3 +249,17 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
249 return; 249 return;
250 } 250 }
251} 251}
252
253u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
254{
255 u32 val = 0;
256 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
257 DPIO_OPCODE_REG_READ, reg, &val);
258 return val;
259}
260
261void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
262{
263 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
264 DPIO_OPCODE_REG_WRITE, reg, &val);
265}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0db5472b4dcd..2c8143c37de3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -150,6 +150,13 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
150{ 150{
151 int ret = 0; 151 int ret = 0;
152 152
153 /* On VLV, FIFO will be shared by both SW and HW.
154 * So, we need to read the FREE_ENTRIES every time */
155 if (IS_VALLEYVIEW(dev_priv->dev))
156 dev_priv->uncore.fifo_count =
157 __raw_i915_read32(dev_priv, GTFIFOCTL) &
158 GT_FIFO_FREE_ENTRIES_MASK;
159
153 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { 160 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
154 int loop = 500; 161 int loop = 500;
155 u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; 162 u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
@@ -325,6 +332,11 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
325 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); 332 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
326 } 333 }
327 334
335 /* clear out old GT FIFO errors */
336 if (IS_GEN6(dev) || IS_GEN7(dev))
337 __raw_i915_write32(dev_priv, GTFIFODBG,
338 __raw_i915_read32(dev_priv, GTFIFODBG));
339
328 intel_uncore_forcewake_reset(dev); 340 intel_uncore_forcewake_reset(dev);
329} 341}
330 342
@@ -333,8 +345,6 @@ void intel_uncore_sanitize(struct drm_device *dev)
333 struct drm_i915_private *dev_priv = dev->dev_private; 345 struct drm_i915_private *dev_priv = dev->dev_private;
334 u32 reg_val; 346 u32 reg_val;
335 347
336 intel_uncore_forcewake_reset(dev);
337
338 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 348 /* BIOS often leaves RC6 enabled, but disable it for hw init */
339 intel_disable_gt_powersave(dev); 349 intel_disable_gt_powersave(dev);
340 350
@@ -365,6 +375,8 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
365 if (!dev_priv->uncore.funcs.force_wake_get) 375 if (!dev_priv->uncore.funcs.force_wake_get)
366 return; 376 return;
367 377
378 intel_runtime_pm_get(dev_priv);
379
368 /* Redirect to VLV specific routine */ 380 /* Redirect to VLV specific routine */
369 if (IS_VALLEYVIEW(dev_priv->dev)) 381 if (IS_VALLEYVIEW(dev_priv->dev))
370 return vlv_force_wake_get(dev_priv, fw_engine); 382 return vlv_force_wake_get(dev_priv, fw_engine);
@@ -398,6 +410,8 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
398 1); 410 1);
399 } 411 }
400 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 412 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
413
414 intel_runtime_pm_put(dev_priv);
401} 415}
402 416
403/* We give fast paths for the really cool registers */ 417/* We give fast paths for the really cool registers */
@@ -432,6 +446,13 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
432 } 446 }
433} 447}
434 448
449static void
450assert_device_not_suspended(struct drm_i915_private *dev_priv)
451{
452 WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
453 "Device suspended\n");
454}
455
435#define REG_READ_HEADER(x) \ 456#define REG_READ_HEADER(x) \
436 unsigned long irqflags; \ 457 unsigned long irqflags; \
437 u##x val = 0; \ 458 u##x val = 0; \
@@ -535,12 +556,15 @@ __gen4_read(64)
535 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 556 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
536 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) 557 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
537 558
559#define REG_WRITE_FOOTER \
560 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
561
538#define __gen4_write(x) \ 562#define __gen4_write(x) \
539static void \ 563static void \
540gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 564gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
541 REG_WRITE_HEADER; \ 565 REG_WRITE_HEADER; \
542 __raw_i915_write##x(dev_priv, reg, val); \ 566 __raw_i915_write##x(dev_priv, reg, val); \
543 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 567 REG_WRITE_FOOTER; \
544} 568}
545 569
546#define __gen5_write(x) \ 570#define __gen5_write(x) \
@@ -549,7 +573,7 @@ gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
549 REG_WRITE_HEADER; \ 573 REG_WRITE_HEADER; \
550 ilk_dummy_write(dev_priv); \ 574 ilk_dummy_write(dev_priv); \
551 __raw_i915_write##x(dev_priv, reg, val); \ 575 __raw_i915_write##x(dev_priv, reg, val); \
552 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 576 REG_WRITE_FOOTER; \
553} 577}
554 578
555#define __gen6_write(x) \ 579#define __gen6_write(x) \
@@ -560,11 +584,12 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
560 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 584 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
561 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 585 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
562 } \ 586 } \
587 assert_device_not_suspended(dev_priv); \
563 __raw_i915_write##x(dev_priv, reg, val); \ 588 __raw_i915_write##x(dev_priv, reg, val); \
564 if (unlikely(__fifo_ret)) { \ 589 if (unlikely(__fifo_ret)) { \
565 gen6_gt_check_fifodbg(dev_priv); \ 590 gen6_gt_check_fifodbg(dev_priv); \
566 } \ 591 } \
567 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 592 REG_WRITE_FOOTER; \
568} 593}
569 594
570#define __hsw_write(x) \ 595#define __hsw_write(x) \
@@ -575,13 +600,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
575 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 600 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
576 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 601 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
577 } \ 602 } \
603 assert_device_not_suspended(dev_priv); \
578 hsw_unclaimed_reg_clear(dev_priv, reg); \ 604 hsw_unclaimed_reg_clear(dev_priv, reg); \
579 __raw_i915_write##x(dev_priv, reg, val); \ 605 __raw_i915_write##x(dev_priv, reg, val); \
580 if (unlikely(__fifo_ret)) { \ 606 if (unlikely(__fifo_ret)) { \
581 gen6_gt_check_fifodbg(dev_priv); \ 607 gen6_gt_check_fifodbg(dev_priv); \
582 } \ 608 } \
583 hsw_unclaimed_reg_check(dev_priv, reg); \ 609 hsw_unclaimed_reg_check(dev_priv, reg); \
584 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 610 REG_WRITE_FOOTER; \
585} 611}
586 612
587static const u32 gen8_shadowed_regs[] = { 613static const u32 gen8_shadowed_regs[] = {
@@ -608,7 +634,7 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
608#define __gen8_write(x) \ 634#define __gen8_write(x) \
609static void \ 635static void \
610gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 636gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
611 bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \ 637 bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
612 REG_WRITE_HEADER; \ 638 REG_WRITE_HEADER; \
613 if (__needs_put) { \ 639 if (__needs_put) { \
614 dev_priv->uncore.funcs.force_wake_get(dev_priv, \ 640 dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -619,7 +645,7 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
619 dev_priv->uncore.funcs.force_wake_put(dev_priv, \ 645 dev_priv->uncore.funcs.force_wake_put(dev_priv, \
620 FORCEWAKE_ALL); \ 646 FORCEWAKE_ALL); \
621 } \ 647 } \
622 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 648 REG_WRITE_FOOTER; \
623} 649}
624 650
625__gen8_write(8) 651__gen8_write(8)
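
On gen8 the write path only bothers with forcewake for registers below 0x40000 that are not in the shadowed list. A small sketch of that predicate; the 0x40000 cut-off comes from the hunk above, while the shadowed offsets here are stand-ins rather than the real gen8_shadowed_regs[] contents:

/*
 * Sketch: the gen8 "needs forcewake" predicate - registers at or above
 * 0x40000 and shadowed registers skip the forcewake dance.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t shadowed[] = { 0x2030, 0xa008 }; /* illustrative */

static bool is_shadowed(uint32_t reg)
{
        for (size_t i = 0; i < sizeof(shadowed) / sizeof(shadowed[0]); i++)
                if (shadowed[i] == reg)
                        return true;
        return false;
}

static bool needs_forcewake(uint32_t reg)
{
        return reg < 0x40000 && !is_shadowed(reg);
}

int main(void)
{
        printf("%d %d %d\n", needs_forcewake(0x2030),
               needs_forcewake(0x7000), needs_forcewake(0x140000));
        return 0;
}
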
@@ -648,6 +674,7 @@ __gen4_write(64)
648#undef __gen6_write 674#undef __gen6_write
649#undef __gen5_write 675#undef __gen5_write
650#undef __gen4_write 676#undef __gen4_write
677#undef REG_WRITE_FOOTER
651#undef REG_WRITE_HEADER 678#undef REG_WRITE_HEADER
652 679
653void intel_uncore_init(struct drm_device *dev) 680void intel_uncore_init(struct drm_device *dev)
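
The REG_WRITE_FOOTER addition pairs with the existing REG_WRITE_HEADER so every generated writeN() variant is bracketed by the same lock/unlock sequence, with the per-generation extras (FIFO wait, suspend assert, unclaimed-register checks) slotted in between. A compile-time sketch of generating such bracketed writers from paired macros (everything below is illustrative, not the driver macros):

/*
 * Sketch: generating lock-bracketed register writers from paired
 * header/footer macros, mirroring REG_WRITE_HEADER/REG_WRITE_FOOTER.
 */
#include <stdint.h>
#include <stdio.h>

#define WRITE_HEADER    printf("lock\n")
#define WRITE_FOOTER    printf("unlock\n")

#define DEFINE_WRITE(name, extra)                               \
static void name##_write(uint32_t reg, uint32_t val)            \
{                                                               \
        WRITE_HEADER;                                           \
        extra;                                                  \
        printf("write 0x%x <- 0x%x\n", (unsigned int)reg,       \
               (unsigned int)val);                              \
        WRITE_FOOTER;                                           \
}

DEFINE_WRITE(gen4, (void)0)
DEFINE_WRITE(gen6, printf("wait for a GT FIFO slot\n"))

int main(void)
{
        gen4_write(0x2030, 1);
        gen6_write(0x2030, 1);
        return 0;
}
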