Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          |  4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c      | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c    | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c        |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     |  4
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c          | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h         |  1
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c         | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h         |  1
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.c     | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.h     |  1
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c     | 81
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c          | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  |  2
19 files changed, 310 insertions(+), 102 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6c4b25ce8bb0..ec96f9a9724c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -481,6 +481,10 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (i915.semaphores >= 0)
 		return i915.semaphores;
 
+	/* Until we get further testing... */
+	if (IS_GEN8(dev))
+		return false;
+
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a26eec285da7..4412f6a4383b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20140620"
+#define DRIVER_DATE		"20140725"
 
 enum pipe {
 	INVALID_PIPE = -1,
@@ -314,6 +314,7 @@ struct drm_i915_error_state {
 	u32 eir;
 	u32 pgtbl_er;
 	u32 ier;
+	u32 gtier[4];
 	u32 ccid;
 	u32 derrmr;
 	u32 forcewake;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 215185050ff1..ba7f5c6bb50d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4545,7 +4545,7 @@ i915_gem_suspend(struct drm_device *dev)
 
 	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+	flush_delayed_work(&dev_priv->mm.idle_work);
 
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5188936bca0a..1411613f2174 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1415,7 +1415,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
-	dma_addr_t addr = 0;
+	dma_addr_t addr = 0; /* shut up gcc */
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_dma_address(sg_iter.sg) +
@@ -1461,7 +1461,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
-	dma_addr_t addr;
+	dma_addr_t addr = 0;
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
@@ -1475,9 +1475,10 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	 * of NUMA access patterns. Therefore, even with the way we assume
 	 * hardware should work, we must keep this posting read for paranoia.
 	 */
-	if (i != 0)
-		WARN_ON(readl(&gtt_entries[i-1]) !=
-			vm->pte_encode(addr, level, true, flags));
+	if (i != 0) {
+		unsigned long gtt = readl(&gtt_entries[i-1]);
+		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
+	}
 
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0b3f69439451..eab41f9390f8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -229,6 +229,8 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
 		return "wait";
 	case HANGCHECK_ACTIVE:
 		return "active";
+	case HANGCHECK_ACTIVE_LOOP:
+		return "active (loop)";
 	case HANGCHECK_KICK:
 		return "kick";
 	case HANGCHECK_HUNG:
@@ -359,6 +361,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
 	err_printf(m, "EIR: 0x%08x\n", error->eir);
 	err_printf(m, "IER: 0x%08x\n", error->ier);
+	if (INTEL_INFO(dev)->gen >= 8) {
+		for (i = 0; i < 4; i++)
+			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
+				   error->gtier[i]);
+	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
 	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
 	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
@@ -784,7 +792,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 		if (ring == to)
 			continue;
 
-		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
+		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+			/ 4;
 		tmp = error->semaphore_obj->pages[0];
 		idx = intel_ring_sync_index(ring, to);
 
@@ -1091,6 +1100,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error)
 {
 	struct drm_device *dev = dev_priv->dev;
+	int i;
 
 	/* General organization
 	 * 1. Registers specific to a single generation
@@ -1102,7 +1112,8 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 
 	/* 1: Registers specific to a single generation */
 	if (IS_VALLEYVIEW(dev)) {
-		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+		error->gtier[0] = I915_READ(GTIER);
+		error->ier = I915_READ(VLV_IER);
 		error->forcewake = I915_READ(FORCEWAKE_VLV);
 	}
 
@@ -1135,16 +1146,18 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 	if (HAS_HW_CONTEXTS(dev))
 		error->ccid = I915_READ(CCID);
 
-	if (HAS_PCH_SPLIT(dev))
-		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
-	else {
-		if (IS_GEN2(dev))
-			error->ier = I915_READ16(IER);
-		else
-			error->ier = I915_READ(IER);
+	if (INTEL_INFO(dev)->gen >= 8) {
+		error->ier = I915_READ(GEN8_DE_MISC_IER);
+		for (i = 0; i < 4; i++)
+			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
+	} else if (HAS_PCH_SPLIT(dev)) {
+		error->ier = I915_READ(DEIER);
+		error->gtier[0] = I915_READ(GTIER);
+	} else if (IS_GEN2(dev)) {
+		error->ier = I915_READ16(IER);
+	} else if (!IS_VALLEYVIEW(dev)) {
+		error->ier = I915_READ(IER);
 	}
-
-	/* 4: Everything else */
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6ef9d6fabf80..390ccc2a3096 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3189,8 +3189,14 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
 
-	if (ring->hangcheck.acthd != acthd)
-		return HANGCHECK_ACTIVE;
+	if (acthd != ring->hangcheck.acthd) {
+		if (acthd > ring->hangcheck.max_acthd) {
+			ring->hangcheck.max_acthd = acthd;
+			return HANGCHECK_ACTIVE;
+		}
+
+		return HANGCHECK_ACTIVE_LOOP;
+	}
 
 	if (IS_GEN2(dev))
 		return HANGCHECK_HUNG;
@@ -3301,8 +3307,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
 				switch (ring->hangcheck.action) {
 				case HANGCHECK_IDLE:
 				case HANGCHECK_WAIT:
-					break;
 				case HANGCHECK_ACTIVE:
+					break;
+				case HANGCHECK_ACTIVE_LOOP:
 					ring->hangcheck.score += BUSY;
 					break;
 				case HANGCHECK_KICK:
@@ -3322,6 +3329,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
 			 */
 			if (ring->hangcheck.score > 0)
 				ring->hangcheck.score--;
+
+			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
 		}
 
 		ring->hangcheck.seqno = seqno;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index fe5c27630e95..e4d7607da2c4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3863,47 +3863,47 @@ enum punit_power_well {
 
 /* drain latency register values*/
 #define DRAIN_LATENCY_PRECISION_32	32
-#define DRAIN_LATENCY_PRECISION_16	16
+#define DRAIN_LATENCY_PRECISION_64	64
 #define VLV_DDL1			(VLV_DISPLAY_BASE + 0x70050)
-#define DDL_CURSORA_PRECISION_32	(1<<31)
-#define DDL_CURSORA_PRECISION_16	(0<<31)
+#define DDL_CURSORA_PRECISION_64	(1<<31)
+#define DDL_CURSORA_PRECISION_32	(0<<31)
 #define DDL_CURSORA_SHIFT		24
-#define DDL_SPRITEB_PRECISION_32	(1<<23)
-#define DDL_SPRITEB_PRECISION_16	(0<<23)
+#define DDL_SPRITEB_PRECISION_64	(1<<23)
+#define DDL_SPRITEB_PRECISION_32	(0<<23)
 #define DDL_SPRITEB_SHIFT		16
-#define DDL_SPRITEA_PRECISION_32	(1<<15)
-#define DDL_SPRITEA_PRECISION_16	(0<<15)
+#define DDL_SPRITEA_PRECISION_64	(1<<15)
+#define DDL_SPRITEA_PRECISION_32	(0<<15)
 #define DDL_SPRITEA_SHIFT		8
-#define DDL_PLANEA_PRECISION_32		(1<<7)
-#define DDL_PLANEA_PRECISION_16		(0<<7)
+#define DDL_PLANEA_PRECISION_64		(1<<7)
+#define DDL_PLANEA_PRECISION_32		(0<<7)
 #define DDL_PLANEA_SHIFT		0
 
 #define VLV_DDL2			(VLV_DISPLAY_BASE + 0x70054)
-#define DDL_CURSORB_PRECISION_32	(1<<31)
-#define DDL_CURSORB_PRECISION_16	(0<<31)
+#define DDL_CURSORB_PRECISION_64	(1<<31)
+#define DDL_CURSORB_PRECISION_32	(0<<31)
 #define DDL_CURSORB_SHIFT		24
-#define DDL_SPRITED_PRECISION_32	(1<<23)
-#define DDL_SPRITED_PRECISION_16	(0<<23)
+#define DDL_SPRITED_PRECISION_64	(1<<23)
+#define DDL_SPRITED_PRECISION_32	(0<<23)
 #define DDL_SPRITED_SHIFT		16
-#define DDL_SPRITEC_PRECISION_32	(1<<15)
-#define DDL_SPRITEC_PRECISION_16	(0<<15)
+#define DDL_SPRITEC_PRECISION_64	(1<<15)
+#define DDL_SPRITEC_PRECISION_32	(0<<15)
 #define DDL_SPRITEC_SHIFT		8
-#define DDL_PLANEB_PRECISION_32		(1<<7)
-#define DDL_PLANEB_PRECISION_16		(0<<7)
+#define DDL_PLANEB_PRECISION_64		(1<<7)
+#define DDL_PLANEB_PRECISION_32		(0<<7)
 #define DDL_PLANEB_SHIFT		0
 
 #define VLV_DDL3			(VLV_DISPLAY_BASE + 0x70058)
-#define DDL_CURSORC_PRECISION_32	(1<<31)
-#define DDL_CURSORC_PRECISION_16	(0<<31)
+#define DDL_CURSORC_PRECISION_64	(1<<31)
+#define DDL_CURSORC_PRECISION_32	(0<<31)
 #define DDL_CURSORC_SHIFT		24
-#define DDL_SPRITEF_PRECISION_32	(1<<23)
-#define DDL_SPRITEF_PRECISION_16	(0<<23)
+#define DDL_SPRITEF_PRECISION_64	(1<<23)
+#define DDL_SPRITEF_PRECISION_32	(0<<23)
 #define DDL_SPRITEF_SHIFT		16
-#define DDL_SPRITEE_PRECISION_32	(1<<15)
-#define DDL_SPRITEE_PRECISION_16	(0<<15)
+#define DDL_SPRITEE_PRECISION_64	(1<<15)
+#define DDL_SPRITEE_PRECISION_32	(0<<15)
 #define DDL_SPRITEE_SHIFT		8
-#define DDL_PLANEC_PRECISION_32		(1<<7)
-#define DDL_PLANEC_PRECISION_16		(0<<7)
+#define DDL_PLANEC_PRECISION_64		(1<<7)
+#define DDL_PLANEC_PRECISION_32		(0<<7)
 #define DDL_PLANEC_SHIFT		0
 
 /* FIFO watermark sizes etc */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 608ed302f24d..a66955037e4e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -878,7 +878,7 @@ err:
 
 	/* error during parsing so set all pointers to null
 	 * because of partial parsing */
-	memset(dev_priv->vbt.dsi.sequence, 0, MIPI_SEQ_MAX);
+	memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
 }
 
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 99eb7cad62a8..018fb7222f60 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6161,6 +6161,10 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
 	u32 mdiv;
 	int refclk = 100000;
 
+	/* In case of MIPI DPLL will not even be used */
+	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
+		return;
+
 	mutex_lock(&dev_priv->dpio_lock);
 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
 	mutex_unlock(&dev_priv->dpio_lock);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index eb52ecfe14cf..ee3942f0b068 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1285,6 +1285,19 @@ static void edp_panel_vdd_work(struct work_struct *__work)
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 }
 
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+	unsigned long delay;
+
+	/*
+	 * Queue the timer to fire a long time from now (relative to the power
+	 * down delay) to keep the panel power up across a sequence of
+	 * operations.
+	 */
+	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
+	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
+}
+
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
 	if (!is_edp(intel_dp))
@@ -1294,17 +1307,10 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 
 	intel_dp->want_panel_vdd = false;
 
-	if (sync) {
+	if (sync)
 		edp_panel_vdd_off_sync(intel_dp);
-	} else {
-		/*
-		 * Queue the timer to fire a long
-		 * time from now (relative to the power down delay)
-		 * to keep the panel power up across a sequence of operations
-		 */
-		schedule_delayed_work(&intel_dp->panel_vdd_work,
-				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
-	}
+	else
+		edp_panel_vdd_schedule_off(intel_dp);
 }
 
 void intel_edp_panel_on(struct intel_dp *intel_dp)
@@ -1800,7 +1806,6 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 	lockdep_assert_held(&dev_priv->psr.lock);
-	lockdep_assert_held(&dev->struct_mutex);
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
@@ -3998,6 +4003,11 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 	kfree(intel_dig_port);
 }
 
+static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+{
+	intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
+}
+
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
 	.dpms = intel_connector_dpms,
 	.detect = intel_dp_detect,
@@ -4013,6 +4023,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
 };
 
 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+	.reset = intel_dp_encoder_reset,
 	.destroy = intel_dp_encoder_destroy,
 };
 
@@ -4445,6 +4456,32 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
 	return downclock_mode;
 }
 
+void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dp *intel_dp;
+	enum intel_display_power_domain power_domain;
+
+	if (intel_encoder->type != INTEL_OUTPUT_EDP)
+		return;
+
+	intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	if (!edp_have_panel_vdd(intel_dp))
+		return;
+	/*
+	 * The VDD bit needs a power domain reference, so if the bit is
+	 * already enabled when we boot or resume, grab this reference and
+	 * schedule a vdd off, so we don't hold on to the reference
+	 * indefinitely.
+	 */
+	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
+	edp_panel_vdd_schedule_off(intel_dp);
+}
+
 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 				     struct intel_connector *intel_connector,
 				     struct edp_power_seq *power_seq)
@@ -4465,13 +4502,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	if (!is_edp(intel_dp))
 		return true;
 
-	/* The VDD bit needs a power domain reference, so if the bit is already
-	 * enabled when we boot, grab this reference. */
-	if (edp_have_panel_vdd(intel_dp)) {
-		enum intel_display_power_domain power_domain;
-		power_domain = intel_display_port_power_domain(intel_encoder);
-		intel_display_power_get(dev_priv, power_domain);
-	}
+	intel_edp_panel_vdd_sanitize(intel_encoder);
 
 	/* Cache DPCD and EDID for edp. */
 	intel_edp_panel_vdd_on(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a475a6909c3..4b2664bd5b81 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -912,6 +912,7 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
 void intel_edp_backlight_on(struct intel_dp *intel_dp);
 void intel_edp_backlight_off(struct intel_dp *intel_dp);
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
 void intel_edp_panel_on(struct intel_dp *intel_dp);
 void intel_edp_panel_off(struct intel_dp *intel_dp);
 void intel_edp_psr_enable(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index bfcefbf33709..670c29a7b5dd 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -92,6 +92,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 	if (fixed_mode)
 		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
+	/* DSI uses short packets for sync events, so clear mode flags for DSI */
+	adjusted_mode->flags = 0;
+
 	if (intel_dsi->dev.dev_ops->mode_fixup)
 		return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
 							  mode, adjusted_mode);
@@ -152,6 +155,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
 	if (intel_dsi->dev.dev_ops->enable)
 		intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
 
+	wait_for_dsi_fifo_empty(intel_dsi);
+
 	/* assert ip_tg_enable signal */
 	temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
 	temp = temp | intel_dsi->port_bits;
@@ -177,6 +182,10 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 	tmp |= DPLL_REFA_CLK_ENABLE_VLV;
 	I915_WRITE(DPLL(pipe), tmp);
 
+	/* update the hw state for DPLL */
+	intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
+		DPLL_REFA_CLK_ENABLE_VLV;
+
 	tmp = I915_READ(DSPCLK_GATE_D);
 	tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
 	I915_WRITE(DSPCLK_GATE_D, tmp);
@@ -192,6 +201,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 	if (intel_dsi->dev.dev_ops->send_otp_cmds)
 		intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
 
+	wait_for_dsi_fifo_empty(intel_dsi);
+
 	/* Enable port in pre-enable phase itself because as per hw team
 	 * recommendation, port should be enabled befor plane & pipe */
 	intel_dsi_enable(encoder);
@@ -232,6 +243,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
 	DRM_DEBUG_KMS("\n");
 
 	if (is_vid_mode(intel_dsi)) {
+		wait_for_dsi_fifo_empty(intel_dsi);
+
 		/* de-assert ip_tg_enable signal */
 		temp = I915_READ(MIPI_PORT_CTRL(pipe));
 		I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
@@ -261,6 +274,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
 	 * some next enable sequence send turn on packet error is observed */
 	if (intel_dsi->dev.dev_ops->disable)
 		intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+
+	wait_for_dsi_fifo_empty(intel_dsi);
 }
 
 static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -351,9 +366,21 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 static void intel_dsi_get_config(struct intel_encoder *encoder,
 				 struct intel_crtc_config *pipe_config)
 {
+	u32 pclk;
 	DRM_DEBUG_KMS("\n");
 
-	/* XXX: read flags, set to adjusted_mode */
+	/*
+	 * DPLL_MD is not used in case of DSI, reading will get some default value
+	 * set dpll_md = 0
+	 */
+	pipe_config->dpll_hw_state.dpll_md = 0;
+
+	pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
+	if (!pclk)
+		return;
+
+	pipe_config->adjusted_mode.crtc_clock = pclk;
+	pipe_config->port_clock = pclk;
 }
 
 static enum drm_mode_status
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 31db33d3e5cc..fd51867fd0d3 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -132,6 +132,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
 
 extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
 extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
 
 extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
 
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 933c86305237..7f1430ac8543 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -419,3 +419,19 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
 
 	return 0;
 }
+
+void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 mask;
+
+	mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
+		LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
+
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
+		DRM_ERROR("DPI FIFOs are not empty\n");
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 9a18cbfa5460..46aa1acc00eb 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -51,6 +51,7 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
 			u8 *reqdata, int reqlen, u8 *buf, int buflen);
 
 int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
+void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
 
 /* XXX: questionable write helpers */
 static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index ba79ec19da3b..d8bb1ea2f0da 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -298,3 +298,84 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
 
 	mutex_unlock(&dev_priv->dpio_lock);
 }
+
+static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
+{
+	int bpp;
+
+	switch (pixel_format) {
+	default:
+	case VID_MODE_FORMAT_RGB888:
+	case VID_MODE_FORMAT_RGB666_LOOSE:
+		bpp = 24;
+		break;
+	case VID_MODE_FORMAT_RGB666:
+		bpp = 18;
+		break;
+	case VID_MODE_FORMAT_RGB565:
+		bpp = 16;
+		break;
+	}
+
+	WARN(bpp != pipe_bpp,
+	     "bpp match assertion failure (expected %d, current %d)\n",
+	     bpp, pipe_bpp);
+}
+
+u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 dsi_clock, pclk;
+	u32 pll_ctl, pll_div;
+	u32 m = 0, p = 0;
+	int refclk = 25000;
+	int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	mutex_lock(&dev_priv->dpio_lock);
+	pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	/* mask out other bits and extract the P1 divisor */
+	pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
+	pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
+
+	/* mask out the other bits and extract the M1 divisor */
+	pll_div &= DSI_PLL_M1_DIV_MASK;
+	pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
+
+	while (pll_ctl) {
+		pll_ctl = pll_ctl >> 1;
+		p++;
+	}
+	p--;
+
+	if (!p) {
+		DRM_ERROR("wrong P1 divisor\n");
+		return 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
+		if (lfsr_converts[i] == pll_div)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(lfsr_converts)) {
+		DRM_ERROR("wrong m_seed programmed\n");
+		return 0;
+	}
+
+	m = i + 62;
+
+	dsi_clock = (m * refclk) / p;
+
+	/* pixel_format and pipe_bpp should agree */
+	assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
+
+	pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);
+
+	return pclk;
+}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c3bb925b2e65..40c12295c0bd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1287,15 +1287,14 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
 	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
 
 	entries = (clock / 1000) * pixel_size;
-	*plane_prec_mult = (entries > 256) ?
-			DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
-	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
-						     pixel_size);
+	*plane_prec_mult = (entries > 128) ?
+			DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
+	*plane_dl = (64 * (*plane_prec_mult) * 4) / entries;
 
 	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
-	*cursor_prec_mult = (entries > 256) ?
-			DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
-	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+	*cursor_prec_mult = (entries > 128) ?
+			DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
+	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;
 
 	return true;
 }
@@ -1320,9 +1319,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
 	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
 				      &cursor_prec_mult, &cursora_dl)) {
 		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
-			       DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+			       DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
 		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
-			      DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+			      DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;
 
 		I915_WRITE(VLV_DDL1, cursora_prec |
 			   (cursora_dl << DDL_CURSORA_SHIFT) |
@@ -1333,9 +1332,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
 	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
 				      &cursor_prec_mult, &cursorb_dl)) {
 		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
-			       DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+			       DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
 		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
-			      DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+			      DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;
 
 		I915_WRITE(VLV_DDL2, cursorb_prec |
 			   (cursorb_dl << DDL_CURSORB_SHIFT) |
@@ -3420,10 +3419,10 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
 		else
 			mode = 0;
 	}
-	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-		 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-		 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-		 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+	DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+		      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+		      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+		      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
 }
 
 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
@@ -3447,8 +3446,8 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 		mask = INTEL_RC6_ENABLE;
 
 		if ((enable_rc6 & mask) != enable_rc6)
-			DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
-				 enable_rc6 & mask, enable_rc6, mask);
+			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
+				      enable_rc6 & mask, enable_rc6, mask);
 
 		return enable_rc6 & mask;
 	}
@@ -5228,11 +5227,9 @@ static void gen6_check_mch_setup(struct drm_device *dev)
 	uint32_t tmp;
 
 	tmp = I915_READ(MCH_SSKPD);
-	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
-		DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
-		DRM_INFO("This can cause pipe underruns and display issues.\n");
-		DRM_INFO("Please upgrade your BIOS to fix this.\n");
-	}
+	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
+		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
+			      tmp);
 }
 
 static void gen6_init_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b3d8f766fa7f..16371a444426 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -380,6 +380,27 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
+gen8_emit_pipe_control(struct intel_engine_cs *ring,
+		       u32 flags, u32 scratch_addr)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
 gen8_render_ring_flush(struct intel_engine_cs *ring,
 		       u32 invalidate_domains, u32 flush_domains)
 {
@@ -402,22 +423,17 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
 		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_QW_WRITE;
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-	}
-
-	ret = intel_ring_begin(ring, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
 
-	return 0;
+		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
+		ret = gen8_emit_pipe_control(ring,
+					     PIPE_CONTROL_CS_STALL |
+					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
+					     0);
+		if (ret)
+			return ret;
+	}
 
+	return gen8_emit_pipe_control(ring, flags, scratch_addr);
 }
 
 static void ring_write_tail(struct intel_engine_cs *ring,
@@ -516,6 +532,9 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	else
 		ring_setup_phys_status_page(ring);
 
+	/* Enforce ordering by reading HEAD register back */
+	I915_READ_HEAD(ring);
+
 	/* Initialize the ring. This must happen _after_ we've cleared the ring
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ed5941078f92..70525d0c2c74 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -70,6 +70,7 @@ enum intel_ring_hangcheck_action {
 	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
 	HANGCHECK_ACTIVE,
+	HANGCHECK_ACTIVE_LOOP,
 	HANGCHECK_KICK,
 	HANGCHECK_HUNG,
 };
@@ -78,6 +79,7 @@ enum intel_ring_hangcheck_action {
 
 struct intel_ring_hangcheck {
 	u64 acthd;
+	u64 max_acthd;
 	u32 seqno;
 	int score;
 	enum intel_ring_hangcheck_action action;